# Ported from a Java benchmark whose history is :
# This is adapted from a benchmark written by John Ellis and Pete Kovac
# of Post Communications.
# It was modified by Hans Boehm of Silicon Graphics.
#
# This is no substitute for real applications. No actual application
# is likely to behave in exactly this way. However, this benchmark was
# designed to be more representative of real applications than other
# Java GC benchmarks of which we are aware.
# It attempts to model those properties of allocation requests that
# are important to current GC techniques.
# It is designed to be used either to obtain a single overall performance
# number, or to give a more detailed estimate of how collector
# performance varies with object lifetimes. It prints the time
# required to allocate and collect balanced binary trees of various
# sizes. Smaller trees result in shorter object lifetimes. Each cycle
# allocates roughly the same amount of memory.
# Two data structures are kept around during the entire process, so
# that the measured performance is representative of applications
# that maintain some live in-memory data. One of these is a tree
# containing many pointers. The other is a large array containing
# double precision floating point numbers. Both should be of comparable
# size.
#
# The results are only really meaningful together with a specification
# of how much memory was used. It is possible to trade memory for
# better time performance. This benchmark should be run in a 32 MB
# heap, though we don't currently know how to enforce that uniformly.
#
# Unlike the original Ellis and Kovac benchmark, we do not attempt to
# measure pause times. This facility should eventually be added back
# in. There are several reasons for omitting it for now. The original
# implementation depended on assumptions about the thread scheduler
# that don't hold uniformly. The results really measure both the
# scheduler and GC. Pause time measurements tend to not fit well with
# current benchmark suites. As far as we know, none of the current
# commercial Java implementations seriously attempt to minimize GC pause
# times.
#
# Known deficiencies:
# - No way to check on memory use
# - No cyclic data structures
# - No attempt to measure variation with object size
# - Results are sensitive to locking cost, but we don't
# check for proper locking
import os, time
USAGE = """gcbench [num_repetitions] [--depths=N,N,N..] [--threads=N]"""
ENABLE_THREADS = True
class Node(object):
def __init__(self, l=None, r=None):
self.left = l
self.right = r
kStretchTreeDepth = 18 # about 16Mb (for Java)
kLongLivedTreeDepth = 16 # about 4Mb (for Java)
kArraySize = 500000 # about 4Mb
kMinTreeDepth = 4
kMaxTreeDepth = 16
def tree_size(i):
"Nodes used by a tree of a given size"
return (1 << (i + 1)) - 1
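# For example, a balanced binary tree of depth d holds 2**(d+1) - 1 nodes, so
# the depth-18 stretch tree uses 2**19 - 1 = 524287 nodes.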
def num_iters(i):
"Number of iterations to use for a given tree depth"
    return 2 * tree_size(kStretchTreeDepth) // tree_size(i)
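# num_iters(d) is chosen so that num_iters(d) * tree_size(d) stays roughly
# constant, i.e. every depth allocates about the same total number of nodes
# (see the header comment).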
def populate(depth, node):
"Build tree top down, assigning to older objects."
if depth <= 0:
return
else:
depth -= 1
node.left = Node()
node.right = Node()
populate(depth, node.left)
populate(depth, node.right)
def make_tree(depth):
"Build tree bottom-up"
if depth <= 0:
return Node()
else:
return Node(make_tree(depth-1), make_tree(depth-1))
def print_diagnostics():
"ought to print free/total memory"
pass
def time_construction(depth):
niters = num_iters(depth)
print "Creating %d trees of depth %d" % (niters, depth)
t_start = time.time()
for i in range(niters):
temp_tree = Node()
populate(depth, temp_tree)
temp_tree = None
t_finish = time.time()
print "\tTop down constrution took %f ms" % ((t_finish-t_start)*1000.)
t_start = time.time()
for i in range(niters):
temp_tree = make_tree(depth)
temp_tree = None
t_finish = time.time()
print "\tBottom up constrution took %f ms" % ((t_finish-t_start)*1000.)
DEFAULT_DEPTHS = range(kMinTreeDepth, kMaxTreeDepth+1, 2)
def time_constructions(depths):
for d in depths:
time_construction(d)
def time_parallel_constructions(depths, nthreads):
import threading
threadlist = []
print "Starting %d parallel threads..." % (nthreads,)
for n in range(nthreads):
t = threading.Thread(target=time_constructions, args=(depths,))
t.start()
threadlist.append(t)
for t in threadlist:
t.join()
print "All %d threads finished" % (nthreads,)
def main(depths=DEFAULT_DEPTHS, threads=0):
print "Garbage Collector Test"
print " Stretching memory with a binary tree of depth %d" % kStretchTreeDepth
print_diagnostics()
t_start = time.time()
temp_tree = make_tree(kStretchTreeDepth)
temp_tree = None
# Create a long lived object
print " Creating a long-lived binary tree of depth %d" % kLongLivedTreeDepth
long_lived_tree = Node()
populate(kLongLivedTreeDepth, long_lived_tree)
# Create long-lived array, filling half of it
print " Creating a long-lived array of %d doubles" % kArraySize
array = [0.0] * kArraySize
i = 1
while i < kArraySize/2:
array[i] = 1.0/i
i += 1
print_diagnostics()
if threads:
time_parallel_constructions(depths, threads)
else:
time_constructions(depths)
if long_lived_tree is None or array[1024] != 1.0/1024:
raise Failed
t_finish = time.time()
print_diagnostics()
print "Completed in %f ms." % ((t_finish-t_start)*1000.)
class Failed(Exception):
pass
def argerror():
print "Usage:"
print " ", USAGE
return 2
def entry_point(argv):
depths = DEFAULT_DEPTHS
threads = 0
repeatcount = 1
for arg in argv[1:]:
if arg.startswith('--threads='):
arg = arg[len('--threads='):]
if not ENABLE_THREADS:
print "threads disabled (they cannot be translated)"
return 1
try:
threads = int(arg)
except ValueError:
return argerror()
elif arg.startswith('--depths='):
arg = arg[len('--depths='):].split(',')
try:
depths = [int(s) for s in arg]
except ValueError:
return argerror()
else:
try:
repeatcount = int(arg)
except ValueError:
return argerror()
for i in range(repeatcount):
main(depths, threads)
return 0
if __name__ == '__main__':
import sys
sys.exit(entry_point(sys.argv))
|
|
#!/usr/bin/env python3
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Deploys and runs a test package on a Fuchsia target."""
import argparse
import os
import shutil
import sys
import tempfile
import ffx_session
from common_args import AddCommonArgs, AddTargetSpecificArgs, \
ConfigureLogging, GetDeploymentTargetForArgs
from net_test_server import SetupTestServer
from run_test_package import RunTestPackage, RunTestPackageArgs
from runner_exceptions import HandleExceptionAndReturnExitCode
DEFAULT_TEST_SERVER_CONCURRENCY = 4
TEST_DATA_DIR = '/tmp'
TEST_FILTER_PATH = TEST_DATA_DIR + '/test_filter.txt'
TEST_LLVM_PROFILE_DIR = 'llvm-profile'
TEST_PERF_RESULT_FILE = 'test_perf_summary.json'
TEST_RESULT_FILE = 'test_summary.json'
TEST_REALM_NAME = 'chromium_tests'
FILTER_DIR = 'testing/buildbot/filters'
class TestOutputs(object):
"""An abstract base class for extracting outputs generated by a test."""
def __init__(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def GetFfxSession(self):
raise NotImplementedError()
def GetDevicePath(self, path):
"""Returns an absolute device-local variant of a path."""
raise NotImplementedError()
def GetFile(self, glob, destination):
"""Places all files/directories matched by a glob into a destination."""
raise NotImplementedError()
def GetCoverageProfiles(self, destination):
"""Places all coverage files from the target into a destination."""
raise NotImplementedError()
class TargetTestOutputs(TestOutputs):
"""A TestOutputs implementation for CFv1 tests, where tests emit files into
/tmp that are retrieved from the device via ssh."""
def __init__(self, target, package_name, test_realms):
super(TargetTestOutputs, self).__init__()
self._target = target
self._package_name = package_name
self._test_realms = test_realms
def GetFfxSession(self):
return None # ffx is not used to run CFv1 tests.
def GetDevicePath(self, path):
return TEST_DATA_DIR + '/' + path
def GetFile(self, glob, destination):
"""Places all files/directories matched by a glob into a destination."""
self._target.GetFile(self.GetDevicePath(glob),
destination,
for_package=self._package_name,
for_realms=self._test_realms)
def GetCoverageProfiles(self, destination):
# Copy all the files in the profile directory. /* is used instead of
# recursively copying due to permission issues for the latter.
self._target.GetFile(self.GetDevicePath(TEST_LLVM_PROFILE_DIR + '/*'),
destination, None, None)
class CustomArtifactsTestOutputs(TestOutputs):
"""A TestOutputs implementation for CFv2 tests, where tests emit files into
/custom_artifacts that are retrieved from the device automatically via ffx."""
def __init__(self, target):
super(CustomArtifactsTestOutputs, self).__init__()
self._target = target
self._ffx_session_context = ffx_session.FfxSession(target._log_manager)
self._ffx_session = None
def __enter__(self):
self._ffx_session = self._ffx_session_context.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._ffx_session = None
self._ffx_session_context.__exit__(exc_type, exc_val, exc_tb)
return False
def GetFfxSession(self):
assert self._ffx_session
return self._ffx_session
def GetDevicePath(self, path):
return '/custom_artifacts/' + path
def GetOutputDirectory(self):
return self._ffx_session.get_output_dir()
def GetFile(self, glob, destination):
"""Places all files/directories matched by a glob into a destination."""
shutil.copy(
os.path.join(self.GetOutputDirectory(), 'artifact-0', 'custom-0', glob),
destination)
def GetCoverageProfiles(self, destination):
# Copy all the files in the profile directory.
# TODO(https://fxbug.dev/77634): Switch to ffx-based extraction once it is
# implemented.
self._target.GetFile(
'/tmp/test_manager:0/children/debug_data:0/data/' +
TEST_LLVM_PROFILE_DIR + '/*', destination)
def MakeTestOutputs(component_version, target, package_name, test_realms):
if component_version == '2':
return CustomArtifactsTestOutputs(target)
return TargetTestOutputs(target, package_name, test_realms)
def AddTestExecutionArgs(arg_parser):
test_args = arg_parser.add_argument_group('testing',
'Test execution arguments')
test_args.add_argument('--gtest_filter',
help='GTest filter to use in place of any default.')
test_args.add_argument(
'--gtest_repeat',
help='GTest repeat value to use. This also disables the '
'test launcher timeout.')
test_args.add_argument(
'--test-launcher-retry-limit',
help='Number of times that test suite will retry failing '
'tests. This is multiplicative with --gtest_repeat.')
test_args.add_argument('--test-launcher-print-test-stdio',
choices=['auto', 'always', 'never'],
                         help='Controls when full test output is printed. '
                         '"auto" means to print it when the test fails.')
test_args.add_argument('--test-launcher-shard-index',
type=int,
default=os.environ.get('GTEST_SHARD_INDEX'),
help='Index of this instance amongst swarming shards.')
test_args.add_argument('--test-launcher-total-shards',
type=int,
default=os.environ.get('GTEST_TOTAL_SHARDS'),
help='Total number of swarming shards of this suite.')
test_args.add_argument('--gtest_break_on_failure',
action='store_true',
default=False,
help='Should GTest break on failure; useful with '
'--gtest_repeat.')
test_args.add_argument('--single-process-tests',
action='store_true',
default=False,
help='Runs the tests and the launcher in the same '
'process. Useful for debugging.')
test_args.add_argument('--test-launcher-batch-limit',
type=int,
help='Sets the limit of test batch to run in a single '
'process.')
# --test-launcher-filter-file is specified relative to --out-dir,
# so specifying type=os.path.* will break it.
test_args.add_argument(
'--test-launcher-filter-file',
default=None,
help='Filter file(s) passed to target test process. Use ";" to separate '
      'multiple filter files.')
test_args.add_argument('--test-launcher-jobs',
type=int,
help='Sets the number of parallel test jobs.')
test_args.add_argument('--test-launcher-summary-output',
help='Where the test launcher will output its json.')
test_args.add_argument('--enable-test-server',
action='store_true',
default=False,
help='Enable Chrome test server spawner.')
test_args.add_argument(
'--test-launcher-bot-mode',
action='store_true',
default=False,
      help='Informs the TestLauncher that it should enable '
'special allowances for running on a test bot.')
test_args.add_argument('--isolated-script-test-output',
help='If present, store test results on this path.')
test_args.add_argument(
'--isolated-script-test-perf-output',
help='If present, store chartjson results on this path.')
test_args.add_argument('--use-run',
dest='use_run_test_component',
default=True,
action='store_false',
help='Run the test package using run rather than '
'hermetically using run-test-component.')
test_args.add_argument(
'--code-coverage',
default=False,
action='store_true',
help='Gather code coverage information and place it in '
'the output directory.')
test_args.add_argument('--code-coverage-dir',
default=os.getcwd(),
help='Directory to place code coverage information. '
                         'Only relevant when --code-coverage is set. '
'Defaults to current directory.')
test_args.add_argument('--child-arg',
action='append',
help='Arguments for the test process.')
test_args.add_argument('--gtest_also_run_disabled_tests',
default=False,
action='store_true',
help='Run tests prefixed with DISABLED_')
test_args.add_argument('child_args',
nargs='*',
help='Arguments for the test process.')
def MapFilterFileToPackageFile(filter_file):
# TODO(crbug.com/1279803): Until one can send file to the device when running
# a test, filter files must be read from the test package.
  if FILTER_DIR not in filter_file:
    raise ValueError('CFv2 tests only support registered filter files present '
                     'in the test package')
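  # For example (hypothetical file name), a path such as
  # '../../testing/buildbot/filters/foo.filter' is mapped to
  # '/pkg/testing/buildbot/filters/foo.filter'.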
return '/pkg/' + filter_file[filter_file.index(FILTER_DIR):]
def main():
parser = argparse.ArgumentParser()
AddTestExecutionArgs(parser)
AddCommonArgs(parser)
AddTargetSpecificArgs(parser)
args = parser.parse_args()
# Flag out_dir is required for tests launched with this script.
if not args.out_dir:
raise ValueError("out-dir must be specified.")
if args.component_version == "2":
args.use_run_test_component = False
if (args.code_coverage and args.component_version != "2"
and not args.use_run_test_component):
if args.enable_test_server:
# TODO(1254563): Tests that need access to the test server cannot be run
# as test component under CFv1. Because code coverage requires it, force
      # the test to run as a test component. It is expected that tests that try
      # to use the external test server will fail.
args.use_run_test_component = True
else:
raise ValueError('Collecting code coverage info requires using '
'run-test-component.')
ConfigureLogging(args)
child_args = []
  if args.test_launcher_shard_index is not None:
child_args.append(
'--test-launcher-shard-index=%d' % args.test_launcher_shard_index)
  if args.test_launcher_total_shards is not None:
child_args.append(
'--test-launcher-total-shards=%d' % args.test_launcher_total_shards)
if args.single_process_tests:
child_args.append('--single-process-tests')
if args.test_launcher_bot_mode:
child_args.append('--test-launcher-bot-mode')
if args.test_launcher_batch_limit:
child_args.append('--test-launcher-batch-limit=%d' %
args.test_launcher_batch_limit)
# Only set --test-launcher-jobs if the caller specifies it, in general.
# If the caller enables the test-server then we need to launch the right
# number of instances to match the maximum number of parallel test jobs, so
# in that case we set --test-launcher-jobs based on the number of CPU cores
# specified for the emulator to use.
test_concurrency = None
if args.test_launcher_jobs:
test_concurrency = args.test_launcher_jobs
elif args.enable_test_server:
if args.device == 'device':
test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
else:
test_concurrency = args.cpu_cores
if test_concurrency:
child_args.append('--test-launcher-jobs=%d' % test_concurrency)
if args.test_launcher_print_test_stdio:
child_args.append('--test-launcher-print-test-stdio=%s' %
args.test_launcher_print_test_stdio)
if args.gtest_filter:
child_args.append('--gtest_filter=' + args.gtest_filter)
if args.gtest_repeat:
child_args.append('--gtest_repeat=' + args.gtest_repeat)
child_args.append('--test-launcher-timeout=-1')
if args.test_launcher_retry_limit:
child_args.append(
'--test-launcher-retry-limit=' + args.test_launcher_retry_limit)
if args.gtest_break_on_failure:
child_args.append('--gtest_break_on_failure')
if args.gtest_also_run_disabled_tests:
child_args.append('--gtest_also_run_disabled_tests')
if args.child_arg:
child_args.extend(args.child_arg)
if args.child_args:
child_args.extend(args.child_args)
test_realms = []
if args.use_run_test_component:
test_realms = [TEST_REALM_NAME]
try:
with GetDeploymentTargetForArgs(args) as target, \
MakeTestOutputs(args.component_version,
target,
args.package_name,
test_realms) as test_outputs:
if args.test_launcher_summary_output:
child_args.append('--test-launcher-summary-output=' +
test_outputs.GetDevicePath(TEST_RESULT_FILE))
if args.isolated_script_test_output:
child_args.append('--isolated-script-test-output=' +
test_outputs.GetDevicePath(TEST_RESULT_FILE))
if args.isolated_script_test_perf_output:
child_args.append('--isolated-script-test-perf-output=' +
test_outputs.GetDevicePath(TEST_PERF_RESULT_FILE))
target.Start()
target.StartSystemLog(args.package)
if args.test_launcher_filter_file:
if args.component_version == "2":
# TODO(crbug.com/1279803): Until one can send file to the device when
# running a test, filter files must be read from the test package.
test_launcher_filter_files = map(
MapFilterFileToPackageFile,
args.test_launcher_filter_file.split(';'))
child_args.append('--test-launcher-filter-file=' +
';'.join(test_launcher_filter_files))
else:
test_launcher_filter_files = args.test_launcher_filter_file.split(';')
with tempfile.NamedTemporaryFile('a+b') as combined_filter_file:
for filter_file in test_launcher_filter_files:
with open(filter_file, 'rb') as f:
combined_filter_file.write(f.read())
combined_filter_file.seek(0)
target.PutFile(combined_filter_file.name,
TEST_FILTER_PATH,
for_package=args.package_name,
for_realms=test_realms)
child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)
test_server = None
if args.enable_test_server:
assert test_concurrency
test_server = SetupTestServer(target, test_concurrency,
args.package_name, test_realms)
run_package_args = RunTestPackageArgs.FromCommonArgs(args)
if args.use_run_test_component:
run_package_args.test_realm_label = TEST_REALM_NAME
run_package_args.use_run_test_component = True
if args.component_version == "2":
run_package_args.output_directory = test_outputs.GetOutputDirectory()
returncode = RunTestPackage(target, test_outputs.GetFfxSession(),
args.package, args.package_name,
args.component_version, child_args,
run_package_args)
if test_server:
test_server.Stop()
if args.code_coverage:
test_outputs.GetCoverageProfiles(args.code_coverage_dir)
if args.test_launcher_summary_output:
test_outputs.GetFile(TEST_RESULT_FILE,
args.test_launcher_summary_output)
if args.isolated_script_test_output:
test_outputs.GetFile(TEST_RESULT_FILE, args.isolated_script_test_output)
if args.isolated_script_test_perf_output:
test_outputs.GetFile(TEST_PERF_RESULT_FILE,
args.isolated_script_test_perf_output)
return returncode
except:
return HandleExceptionAndReturnExitCode()
if __name__ == '__main__':
sys.exit(main())
|
|
#! /usr/bin/env python
"""
Module with S/N calculation functions.
We strongly recommend users to read Mawet et al. (2014) before using routines
of this module: https://ui.adsabs.harvard.edu/abs/2014ApJ...792...97M/abstract
"""
__author__ = 'Carlos Alberto Gomez Gonzalez, O. Absil @ ULg, V. Christiaens'
__all__ = ['snr',
'snrmap',
'significance',
'frame_report']
import numpy as np
import photutils
from scipy.stats import norm, t
from hciplot import plot_frames
from skimage import draw
from matplotlib import pyplot as plt
from astropy.convolution import convolve, Tophat2DKernel
from astropy.stats import median_absolute_deviation as mad
from multiprocessing import cpu_count
from ..conf.utils_conf import pool_map, iterable, sep
from ..conf import time_ini, timing, check_array
from ..var import get_annulus_segments, frame_center, dist
def snrmap(array, fwhm, approximated=False, plot=False, known_sources=None,
nproc=None, array2=None, use2alone=False,
exclude_negative_lobes=False, verbose=True, **kwargs):
"""Parallel implementation of the S/N map generation function. Applies the
S/N function (small samples penalty) at each pixel.
The S/N is computed as in Mawet et al. (2014) for each radial separation.
https://ui.adsabs.harvard.edu/abs/2014ApJ...792...97M/abstract
*** DISCLAIMER ***
Signal-to-noise ratio is not significance! For a conversion from snr to
n-sigma (i.e. the equivalent confidence level of a Gaussian n-sigma), use
the significance() function.
Parameters
----------
array : numpy ndarray
Input frame (2d array).
fwhm : float
Size in pixels of the FWHM.
approximated : bool, optional
If True, an approximated S/N map is generated.
plot : bool, optional
If True plots the S/N map. False by default.
known_sources : None, tuple or tuple of tuples, optional
To take into account existing sources. It should be a tuple of float/int
or a tuple of tuples (of float/int) with the coordinate(s) of the known
sources.
nproc : int or None
Number of processes for parallel computing.
array2 : numpy ndarray, optional
Additional image (e.g. processed image with negative derotation angles)
enabling to have more noise samples. Should have the
same dimensions as array.
use2alone: bool, optional
Whether to use array2 alone to estimate the noise (might be useful to
estimate the snr of extended disk features).
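    exclude_negative_lobes : bool, optional
        Whether to exclude the negative ADI lobes directly adjacent to the
        tested location from the noise estimate (see ``snr``).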
verbose: bool, optional
Whether to print timing or not.
**kwargs : dictionary, optional
Arguments to be passed to ``plot_frames`` to customize the plot (and to
save it to disk).
Returns
-------
snrmap : 2d numpy ndarray
        Frame of the same size as the input frame, with the S/N value computed
        for each pixel.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=2, msg='array')
sizey, sizex = array.shape
snrmap_array = np.zeros_like(array)
width = min(sizey, sizex) / 2 - 1.5 * fwhm
mask = get_annulus_segments(array, (fwhm / 2) + 2, width, mode="mask")[0]
mask = np.ma.make_mask(mask)
# by making a bool mask *after* applying the mask to the array, we also mask
# out zero values from the array. This logic cannot be simplified by using
# mode="ind"!
yy, xx = np.where(mask)
coords = zip(xx, yy)
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if known_sources is None:
# proxy to S/N calculation
if approximated:
cy, cx = frame_center(array)
tophat_kernel = Tophat2DKernel(fwhm / 2)
array = convolve(array, tophat_kernel)
width = min(sizey, sizex) / 2 - 1.5 * fwhm
mask = get_annulus_segments(array, (fwhm / 2) + 1, width - 1,
mode="mask")[0]
mask = np.ma.make_mask(mask)
yy, xx = np.where(mask)
coords = [(int(x), int(y)) for (x, y) in zip(xx, yy)]
res = pool_map(nproc, _snr_approx, array, iterable(coords), fwhm,
cy, cx)
res = np.array(res)
yy = res[:, 0]
xx = res[:, 1]
snr_value = res[:, 2]
snrmap_array[yy.astype(int), xx.astype(int)] = snr_value
# computing s/n map with Mawet+14 definition
else:
res = pool_map(nproc, snr, array, iterable(coords), fwhm, True,
array2, use2alone, exclude_negative_lobes)
res = np.array(res)
yy = res[:, 0]
xx = res[:, 1]
snr_value = res[:, -1]
snrmap_array[yy.astype('int'), xx.astype('int')] = snr_value
# masking known sources
else:
if not isinstance(known_sources, tuple):
raise TypeError("`known_sources` must be a tuple or tuple of "
"tuples")
else:
source_mask = np.zeros_like(array)
if isinstance(known_sources[0], tuple):
for coor in known_sources:
source_mask[coor[::-1]] = 1
elif isinstance(known_sources[0], int):
source_mask[known_sources[1], known_sources[0]] = 1
else:
raise TypeError("`known_sources` seems to have wrong type. It "
"must be a tuple of ints or tuple of tuples "
"(of ints)")
# checking the mask with the sources
if source_mask[source_mask == 1].shape[0] > 50:
msg = 'Input source mask is too crowded (check its validity)'
raise RuntimeError(msg)
soury, sourx = np.where(source_mask == 1)
sources = []
coor_ann = []
arr_masked_sources = array.copy()
centery, centerx = frame_center(array)
for y, x in zip(soury, sourx):
radd = dist(centery, centerx, int(y), int(x))
if int(radd) < centery - np.ceil(fwhm):
sources.append((y, x))
for source in sources:
y, x = source
radd = dist(centery, centerx, int(y), int(x))
anny, annx = get_annulus_segments(array, int(radd-fwhm),
int(np.round(3 * fwhm)))[0]
ciry, cirx = draw.circle(y, x, int(np.ceil(fwhm)))
# masking the sources positions (using the MAD of pixels in annulus)
arr_masked_sources[ciry, cirx] = mad(array[anny, annx])
# S/Ns of annulus without the sources
coor_ann = [(x, y) for (x, y) in zip(annx, anny) if (x, y) not in
zip(cirx, ciry)]
res = pool_map(nproc, snr, arr_masked_sources, iterable(coor_ann),
fwhm, True, array2, use2alone,
exclude_negative_lobes)
res = np.array(res)
yy_res = res[:, 0]
xx_res = res[:, 1]
snr_value = res[:, 4]
snrmap_array[yy_res.astype('int'), xx_res.astype('int')] = snr_value
coor_ann += coor_ann
# S/Ns of the rest of the frame without the annulus
coor_rest = [(x, y) for (x, y) in zip(xx, yy) if (x, y) not in coor_ann]
res = pool_map(nproc, snr, array, iterable(coor_rest), fwhm, True,
array2, use2alone, exclude_negative_lobes)
res = np.array(res)
yy_res = res[:, 0]
xx_res = res[:, 1]
snr_value = res[:, 4]
snrmap_array[yy_res.astype('int'), xx_res.astype('int')] = snr_value
if plot:
plot_frames(snrmap_array, colorbar=True, title='S/N map', **kwargs)
if verbose:
print("S/N map created using {} processes".format(nproc))
timing(start_time)
return snrmap_array
def _snr_approx(array, source_xy, fwhm, centery, centerx):
"""
array - frame convolved with top hat kernel
"""
sourcex, sourcey = source_xy
rad = dist(centery, centerx, sourcey, sourcex)
ind_aper = draw.circle(sourcey, sourcex, fwhm/2.)
# noise : STDDEV in convolved array of 1px wide annulus (while
# masking the flux aperture) * correction of # of resolution elements
ind_ann = draw.circle_perimeter(int(centery), int(centerx), int(rad))
array2 = array.copy()
array2[ind_aper] = mad(array[ind_ann]) # mask
n2 = (2 * np.pi * rad) / fwhm - 1
noise = array2[ind_ann].std() * np.sqrt(1+(1/n2))
# signal : central px minus the mean of the pxs (masked) in 1px annulus
signal = array[sourcey, sourcex] - array2[ind_ann].mean()
snr_value = signal / noise
return sourcey, sourcex, snr_value
def snr(array, source_xy, fwhm, full_output=False, array2=None, use2alone=False,
exclude_negative_lobes=False, plot=False, verbose=False):
"""
Calculate the S/N (signal to noise ratio) of a test resolution element
in a residual frame (e.g. post-processed with LOCI, PCA, etc). Implements
the approach described in Mawet et al. 2014 on small sample statistics,
where a student t-test (eq. 9) can be used to determine S/N (and contrast)
in high contrast imaging. 3 extra possibilities compared to Mawet et al.
2014 (https://ui.adsabs.harvard.edu/abs/2014ApJ...792...97M/abstract):
* possibility to provide a second array (e.g. obtained with opposite
derotation angles) to have more apertures for noise estimation
* possibility to exclude negative ADI lobes directly adjacent to the
tested xy location, to not bias the noise estimate
* possibility to use only the second array for the noise estimation
(useful for images containing a lot of disk/extended signals).
*** DISCLAIMER ***
Signal-to-noise ratio is not significance! For a conversion from snr to
n-sigma (i.e. the equivalent confidence level of a Gaussian n-sigma), use
the significance() function.
Parameters
----------
array : numpy ndarray, 2d
Post-processed frame where we want to measure S/N.
source_xy : tuple of floats
X and Y coordinates of the planet or test speckle.
fwhm : float
Size in pixels of the FWHM.
full_output : bool, optional
If True returns back the S/N value, the y, x input coordinates, noise
and flux.
array2 : None or numpy ndarray, 2d, optional
Additional image (e.g. processed image with negative derotation angles)
enabling to have more apertures for noise estimation at each radial
separation. Should have the same dimensions as array.
use2alone : bool, opt
Whether to use array2 alone to estimate the noise (can be useful to
estimate the S/N of extended disk features)
    exclude_negative_lobes : bool, opt
        Whether to exclude the two apertures directly adjacent to the tested
        location from the noise estimate. Can be set to True if the image
        shows significant negative ADI lobes.
plot : bool, optional
Plots the frame and the apertures considered for clarity.
verbose: bool, optional
Chooses whether to print some output or not.
Returns
-------
    snr_value : float
Value of the S/N for the given test resolution element.
sourcey : numpy ndarray
[full_output=True] Input coordinates (``source_xy``) in Y.
sourcex : numpy ndarray
[full_output=True] Input coordinates (``source_xy``) in X.
f_source : float
        [full_output=True] Flux in the test element.
backgr_apertures_std : float
[full_output=True] Standard deviation of the background apertures
fluxes.
"""
check_array(array, dim=2, msg='array')
if not isinstance(source_xy, tuple):
raise TypeError("`source_xy` must be a tuple of floats")
if array2 is not None:
if not array2.shape == array.shape:
raise TypeError('`array2` has not the same shape as input array')
sourcex, sourcey = source_xy
centery, centerx = frame_center(array)
sep = dist(centery, centerx, float(sourcey), float(sourcex))
if not sep > (fwhm/2)+1:
raise RuntimeError('`source_xy` is too close to the frame center')
    sens = 'clock'  # direction for placing the apertures; can be set to 'counterclock'
angle = np.arcsin(fwhm/2./sep)*2
number_apertures = int(np.floor(2*np.pi/angle))
yy = np.zeros((number_apertures))
xx = np.zeros((number_apertures))
cosangle = np.cos(angle)
sinangle = np.sin(angle)
xx[0] = sourcex - centerx
yy[0] = sourcey - centery
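    # Each subsequent aperture center is obtained by rotating the (x, y) offset
    # of the previous one around the frame center by `angle`, in the direction
    # set by `sens`.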
for i in range(number_apertures-1):
if sens == 'clock':
xx[i+1] = cosangle*xx[i] + sinangle*yy[i]
yy[i+1] = cosangle*yy[i] - sinangle*xx[i]
elif sens == 'counterclock':
xx[i+1] = cosangle*xx[i] - sinangle*yy[i]
yy[i+1] = cosangle*yy[i] + sinangle*xx[i]
xx += centerx
yy += centery
rad = fwhm/2.
if exclude_negative_lobes:
xx = np.concatenate(([xx[0]], xx[2:-1]))
yy = np.concatenate(([yy[0]], yy[2:-1]))
apertures = photutils.CircularAperture((xx, yy), r=rad) # Coordinates (X,Y)
fluxes = photutils.aperture_photometry(array, apertures, method='exact')
fluxes = np.array(fluxes['aperture_sum'])
if array2 is not None:
fluxes2 = photutils.aperture_photometry(array2, apertures,
method='exact')
fluxes2 = np.array(fluxes2['aperture_sum'])
if use2alone:
fluxes = np.concatenate(([fluxes[0]], fluxes2[:]))
else:
fluxes = np.concatenate((fluxes, fluxes2))
f_source = fluxes[0].copy()
fluxes = fluxes[1:]
n2 = fluxes.shape[0]
backgr_apertures_std = fluxes.std(ddof=1)
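    # Student-t S/N with small-sample correction (Mawet et al. 2014, eq. 9):
    # the n2 background aperture fluxes provide the noise estimate, and
    # sqrt(1 + 1/n2) penalizes it for the small number of samples.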
    snr_value = (f_source - fluxes.mean())/(backgr_apertures_std *
                                            np.sqrt(1+(1/n2)))
if verbose:
msg1 = 'S/N for the given pixel = {:.3f}'
msg2 = 'Integrated flux in FWHM test aperture = {:.3f}'
msg3 = 'Mean of background apertures integrated fluxes = {:.3f}'
msg4 = 'Std-dev of background apertures integrated fluxes = {:.3f}'
        print(msg1.format(snr_value))
print(msg2.format(f_source))
print(msg3.format(fluxes.mean()))
print(msg4.format(backgr_apertures_std))
if plot:
_, ax = plt.subplots(figsize=(6, 6))
ax.imshow(array, origin='lower', interpolation='nearest', alpha=0.5,
cmap='gray')
for i in range(xx.shape[0]):
# Circle takes coordinates as (X,Y)
aper = plt.Circle((xx[i], yy[i]), radius=fwhm/2., color='r',
fill=False, alpha=0.8)
ax.add_patch(aper)
cent = plt.Circle((xx[i], yy[i]), radius=0.8, color='r', fill=True,
alpha=0.5)
ax.add_patch(cent)
aper_source = plt.Circle((sourcex, sourcey), radius=0.7,
color='b', fill=True, alpha=0.5)
ax.add_patch(aper_source)
ax.grid(False)
plt.show()
if full_output:
        return sourcey, sourcex, f_source, backgr_apertures_std, snr_value
else:
        return snr_value
def significance(snr, rad, fwhm, student_to_gauss=True):
""" Converts a S/N ratio (measured as in Mawet et al. 2014) into the
equivalent gaussian significance, i.e. the n-sigma with the same confidence
level as the S/N at the given separation.
Parameters
----------
snr : float or numpy array
SNR value(s)
rad : float or numpy array
Radial separation(s) from the star in pixels. If an array, it should be
the same shape as snr and provide the radial separation corresponding
to each snr measurement.
fwhm : float
Full Width Half Maximum of the PSF.
student_to_gauss : bool, optional
Whether the conversion is from Student SNR to Gaussian significance. If
False, will assume the opposite: Gaussian significance to Student SNR.
Returns
-------
sigma : float
Gaussian significance in terms of n-sigma
"""
if student_to_gauss:
sigma = norm.ppf(t.cdf(snr,(rad/fwhm)*2*np.pi-2))
else:
sigma = t.ppf(norm.cdf(snr), (rad/fwhm)*2*np.pi-2)
return sigma
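# A minimal usage sketch (illustrative numbers): at a separation of 2 FWHM the
# annulus holds only 2*pi*rad/fwhm ~ 12.6 resolution elements, so a Student-t
# S/N of 5 converts to a lower Gaussian significance:
#
#   significance(5.0, rad=8.0, fwhm=4.0)   # roughly 3.5 sigma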
def frame_report(array, fwhm, source_xy=None, verbose=True):
""" Gets information from given frame: Integrated flux in aperture, S/N of
central pixel (either ``source_xy`` or max value), mean S/N in aperture.
Parameters
----------
array : numpy ndarray
        Input frame (2d array).
fwhm : float
Size of the FWHM in pixels.
source_xy : tuple of floats or list (of tuples of floats)
X and Y coordinates of the center(s) of the source(s).
verbose : bool, optional
If True prints to stdout the frame info.
Returns
-------
source_xy : tuple of floats or list (of tuples of floats)
X and Y coordinates of the center(s) of the source(s).
obj_flux : list of floats
Integrated flux in aperture.
snr_centpx : list of floats
S/N of the ``source_xy`` pixels.
meansnr_pixels : list of floats
Mean S/N of pixels in 1xFWHM apertures centered on ``source_xy``.
"""
if array.ndim != 2:
raise TypeError('Array is not 2d.')
obj_flux = []
meansnr_pixels = []
snr_centpx = []
if source_xy is not None:
if isinstance(source_xy, (list, tuple)):
if not isinstance(source_xy[0], tuple):
source_xy = [source_xy]
else:
raise TypeError("`source_xy` must be a tuple of floats or tuple "
"of tuples")
for xy in source_xy:
x, y = xy
if verbose:
print(sep)
print('Coords of chosen px (X,Y) = {:.1f}, {:.1f}'.format(x, y))
# we get integrated flux on aperture with diameter=1FWHM
aper = photutils.CircularAperture((x, y), r=fwhm / 2.)
obj_flux_i = photutils.aperture_photometry(array, aper,
method='exact')
obj_flux_i = obj_flux_i['aperture_sum'][0]
# we get the mean and stddev of SNRs on aperture
yy, xx = draw.circle(y, x, fwhm / 2)
snr_pixels_i = [snr(array, (x_, y_), fwhm, plot=False,
verbose=False) for y_, x_ in zip(yy, xx)]
meansnr_i = np.mean(snr_pixels_i)
stdsnr_i = np.std(snr_pixels_i)
pxsnr_i = snr(array, (x, y), fwhm, plot=False, verbose=False)
obj_flux.append(obj_flux_i)
meansnr_pixels.append(meansnr_i)
snr_centpx.append(pxsnr_i)
if verbose:
msg0 = 'Flux in a centered 1xFWHM circular aperture = {:.3f}'
print(msg0.format(obj_flux_i))
print('Central pixel S/N = {:.3f}'.format(pxsnr_i))
print(sep)
print('Inside a centered 1xFWHM circular aperture:')
msg1 = 'Mean S/N (shifting the aperture center) = {:.3f}'
print(msg1.format(meansnr_i))
msg2 = 'Max S/N (shifting the aperture center) = {:.3f}'
print(msg2.format(np.max(snr_pixels_i)))
msg3 = 'stddev S/N (shifting the aperture center) = {:.3f}'
print(msg3.format(stdsnr_i))
print('')
else:
y, x = np.where(array == array.max())
y, x = y[0], x[0] # assuming there is only one max, taking 1st if clump
source_xy = (x, y)
if verbose:
print(sep)
print('Coords of Max px (X,Y) = {:.1f}, {:.1f}'.format(x, y))
# we get integrated flux on aperture with diameter=1FWHM
aper = photutils.CircularAperture((x, y), r=fwhm / 2.)
obj_flux_i = photutils.aperture_photometry(array, aper, method='exact')
obj_flux_i = obj_flux_i['aperture_sum'][0]
# we get the mean and stddev of SNRs on aperture
yy, xx = draw.circle(y, x, fwhm / 2.)
snr_pixels_i = [snr(array, (x_, y_), fwhm, plot=False,
verbose=False) for y_, x_ in zip(yy, xx)]
meansnr_pixels = np.mean(snr_pixels_i)
stdsnr_i = np.std(snr_pixels_i)
pxsnr_i = snr(array, (x, y), fwhm, plot=False, verbose=False)
obj_flux.append(obj_flux_i)
snr_centpx.append(pxsnr_i)
if verbose:
msg0 = 'Flux in a centered 1xFWHM circular aperture = {:.3f}'
print(msg0.format(obj_flux_i))
print('Central pixel S/N = {:.3f}'.format(pxsnr_i))
print(sep)
print('Inside a centered 1xFWHM circular aperture:')
msg1 = 'Mean S/N (shifting the aperture center) = {:.3f}'
print(msg1.format(meansnr_pixels))
msg2 = 'Max S/N (shifting the aperture center) = {:.3f}'
print(msg2.format(np.max(snr_pixels_i)))
msg3 = 'stddev S/N (shifting the aperture center) = {:.3f}'
print(msg3.format(stdsnr_i))
print(sep)
return source_xy, obj_flux, snr_centpx, meansnr_pixels
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from subprocess import Popen, PIPE
import sys
from time import sleep, time
from collections import defaultdict
import unittest
from nose import SkipTest
from six.moves.http_client import HTTPConnection
from swiftclient import get_auth, head_account
from swift.obj.diskfile import get_data_dir
from swift.common.ring import Ring
from swift.common.utils import readconf, renamer
from swift.common.manager import Manager
from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
from test.probe import CHECK_SERVER_TIMEOUT, VALIDATE_RSYNC
ENABLED_POLICIES = [p for p in POLICIES if not p.is_deprecated]
POLICIES_BY_TYPE = defaultdict(list)
for p in POLICIES:
POLICIES_BY_TYPE[p.policy_type].append(p)
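# ipport2server maps (ip, port) tuples to names such as 'object1' or 'proxy';
# get_server_number() splits off the trailing digit so callers can address a
# specific numbered SAIO server, e.g. ('object', 1).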
def get_server_number(ipport, ipport2server):
server_number = ipport2server[ipport]
server, number = server_number[:-1], server_number[-1:]
try:
number = int(number)
except ValueError:
# probably the proxy
return server_number, None
return server, number
def start_server(ipport, ipport2server, pids, check=True):
server, number = get_server_number(ipport, ipport2server)
err = Manager([server]).start(number=number, wait=False)
if err:
raise Exception('unable to start %s' % (
server if not number else '%s%s' % (server, number)))
if check:
return check_server(ipport, ipport2server, pids)
return None
def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT):
server = ipport2server[ipport]
if server[:-1] in ('account', 'container', 'object'):
if int(server[-1]) > 4:
return None
path = '/connect/1/2'
if server[:-1] == 'container':
path += '/3'
elif server[:-1] == 'object':
path += '/3/4'
try_until = time() + timeout
while True:
try:
conn = HTTPConnection(*ipport)
conn.request('GET', path)
resp = conn.getresponse()
# 404 because it's a nonsense path (and mount_check is false)
# 507 in case the test target is a VM using mount_check
if resp.status not in (404, 507):
raise Exception(
'Unexpected status %s' % resp.status)
break
except Exception as err:
if time() > try_until:
print err
print 'Giving up on %s:%s after %s seconds.' % (
server, ipport, timeout)
raise err
sleep(0.1)
else:
try_until = time() + timeout
while True:
try:
url, token = get_auth('http://%s:%d/auth/v1.0' % ipport,
'test:tester', 'testing')
account = url.split('/')[-1]
head_account(url, token)
return url, token, account
except Exception as err:
if time() > try_until:
print err
print 'Giving up on proxy:8080 after 30 seconds.'
raise err
sleep(0.1)
return None
def kill_server(ipport, ipport2server, pids):
server, number = get_server_number(ipport, ipport2server)
err = Manager([server]).kill(number=number)
if err:
raise Exception('unable to kill %s' % (server if not number else
'%s%s' % (server, number)))
try_until = time() + 30
while True:
try:
conn = HTTPConnection(*ipport)
conn.request('GET', '/')
conn.getresponse()
except Exception as err:
break
if time() > try_until:
raise Exception(
'Still answering on %s:%s after 30 seconds' % ipport)
sleep(0.1)
def kill_nonprimary_server(primary_nodes, ipport2server, pids):
primary_ipports = [(n['ip'], n['port']) for n in primary_nodes]
for ipport, server in ipport2server.items():
if ipport in primary_ipports:
server_type = server[:-1]
break
else:
raise Exception('Cannot figure out server type for %r' % primary_nodes)
for ipport, server in list(ipport2server.items()):
if server[:-1] == server_type and ipport not in primary_ipports:
kill_server(ipport, ipport2server, pids)
return ipport
def add_ring_devs_to_ipport2server(ring, server_type, ipport2server,
servers_per_port=0):
# We'll number the servers by order of unique occurrence of:
    #   IP, if servers_per_port > 0 OR there is more than one IP in the ring
# ipport, otherwise
unique_ip_count = len(set(dev['ip'] for dev in ring.devs if dev))
things_to_number = {}
number = 0
for dev in filter(None, ring.devs):
ip = dev['ip']
ipport = (ip, dev['port'])
unique_by = ip if servers_per_port or unique_ip_count > 1 else ipport
if unique_by not in things_to_number:
number += 1
things_to_number[unique_by] = number
ipport2server[ipport] = '%s%d' % (server_type,
things_to_number[unique_by])
def store_config_paths(name, configs):
for server_name in (name, '%s-replicator' % name):
for server in Manager([server_name]):
for i, conf in enumerate(server.conf_files(), 1):
configs[server.server][i] = conf
def get_ring(ring_name, required_replicas, required_devices,
server=None, force_validate=None, ipport2server=None,
config_paths=None):
if not server:
server = ring_name
ring = Ring('/etc/swift', ring_name=ring_name)
if ipport2server is None:
ipport2server = {} # used internally, even if not passed in
if config_paths is None:
config_paths = defaultdict(dict)
store_config_paths(server, config_paths)
repl_name = '%s-replicator' % server
repl_configs = {i: readconf(c, section_name=repl_name)
for i, c in config_paths[repl_name].items()}
servers_per_port = any(int(c.get('servers_per_port', '0'))
for c in repl_configs.values())
add_ring_devs_to_ipport2server(ring, server, ipport2server,
servers_per_port=servers_per_port)
if not VALIDATE_RSYNC and not force_validate:
return ring
# easy sanity checks
if ring.replica_count != required_replicas:
raise SkipTest('%s has %s replicas instead of %s' % (
ring.serialized_path, ring.replica_count, required_replicas))
if len(ring.devs) != required_devices:
raise SkipTest('%s has %s devices instead of %s' % (
ring.serialized_path, len(ring.devs), required_devices))
for dev in ring.devs:
# verify server is exposing mounted device
ipport = (dev['ip'], dev['port'])
_, server_number = get_server_number(ipport, ipport2server)
conf = repl_configs[server_number]
for device in os.listdir(conf['devices']):
if device == dev['device']:
dev_path = os.path.join(conf['devices'], device)
full_path = os.path.realpath(dev_path)
if not os.path.exists(full_path):
raise SkipTest(
'device %s in %s was not found (%s)' %
(device, conf['devices'], full_path))
break
else:
raise SkipTest(
"unable to find ring device %s under %s's devices (%s)" % (
dev['device'], server, conf['devices']))
# verify server is exposing rsync device
if conf.get('vm_test_mode', False):
rsync_export = '%s%s' % (server, dev['replication_port'])
else:
rsync_export = server
cmd = "rsync rsync://localhost/%s" % rsync_export
p = Popen(cmd, shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
if p.returncode:
raise SkipTest('unable to connect to rsync '
'export %s (%s)' % (rsync_export, cmd))
for line in stdout.splitlines():
if line.rsplit(None, 1)[-1] == dev['device']:
break
else:
raise SkipTest("unable to find ring device %s under rsync's "
"exported devices for %s (%s)" %
(dev['device'], rsync_export, cmd))
return ring
def get_policy(**kwargs):
kwargs.setdefault('is_deprecated', False)
# go through the policies and make sure they match the
# requirements of kwargs
for policy in POLICIES:
# TODO: for EC, pop policy type here and check it first
matches = True
for key, value in kwargs.items():
try:
if getattr(policy, key) != value:
matches = False
except AttributeError:
matches = False
if matches:
return policy
raise SkipTest('No policy matching %s' % kwargs)
class ProbeTest(unittest.TestCase):
"""
Don't instantiate this directly, use a child class instead.
"""
def setUp(self):
p = Popen("resetswift 2>&1", shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
print stdout
Manager(['all']).stop()
self.pids = {}
try:
self.ipport2server = {}
self.configs = defaultdict(dict)
self.account_ring = get_ring(
'account',
self.acct_cont_required_replicas,
self.acct_cont_required_devices,
ipport2server=self.ipport2server,
config_paths=self.configs)
self.container_ring = get_ring(
'container',
self.acct_cont_required_replicas,
self.acct_cont_required_devices,
ipport2server=self.ipport2server,
config_paths=self.configs)
self.policy = get_policy(**self.policy_requirements)
self.object_ring = get_ring(
self.policy.ring_name,
self.obj_required_replicas,
self.obj_required_devices,
server='object',
ipport2server=self.ipport2server,
config_paths=self.configs)
self.servers_per_port = any(
int(readconf(c, section_name='object-replicator').get(
'servers_per_port', '0'))
for c in self.configs['object-replicator'].values())
Manager(['main']).start(wait=False)
for ipport in self.ipport2server:
check_server(ipport, self.ipport2server, self.pids)
proxy_ipport = ('127.0.0.1', 8080)
self.ipport2server[proxy_ipport] = 'proxy'
self.url, self.token, self.account = check_server(
proxy_ipport, self.ipport2server, self.pids)
self.replicators = Manager(
['account-replicator', 'container-replicator',
'object-replicator'])
self.updaters = Manager(['container-updater', 'object-updater'])
except BaseException:
try:
raise
finally:
try:
Manager(['all']).kill()
except Exception:
pass
def tearDown(self):
Manager(['all']).kill()
def device_dir(self, server, node):
server_type, config_number = get_server_number(
(node['ip'], node['port']), self.ipport2server)
repl_server = '%s-replicator' % server_type
conf = readconf(self.configs[repl_server][config_number],
section_name=repl_server)
return os.path.join(conf['devices'], node['device'])
def storage_dir(self, server, node, part=None, policy=None):
policy = policy or self.policy
device_path = self.device_dir(server, node)
path_parts = [device_path, get_data_dir(policy)]
if part is not None:
path_parts.append(str(part))
return os.path.join(*path_parts)
def config_number(self, node):
_server_type, config_number = get_server_number(
(node['ip'], node['port']), self.ipport2server)
return config_number
def is_local_to(self, node1, node2):
"""
Return True if both ring devices are "local" to each other (on the same
"server".
"""
if self.servers_per_port:
return node1['ip'] == node2['ip']
# Without a disambiguating IP, for SAIOs, we have to assume ports
# uniquely identify "servers". SAIOs should be configured to *either*
# have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique
# ports per server (i.e. sdb1 & sdb5 would have same port numbers in
# the 8-disk EC ring).
return node1['port'] == node2['port']
def get_to_final_state(self):
# these .stop()s are probably not strictly necessary,
# but may prevent race conditions
self.replicators.stop()
self.updaters.stop()
self.replicators.once()
self.updaters.once()
self.replicators.once()
def kill_drive(self, device):
if os.path.ismount(device):
os.system('sudo umount %s' % device)
else:
renamer(device, device + "X")
def revive_drive(self, device):
disabled_name = device + "X"
if os.path.isdir(disabled_name):
renamer(device + "X", device)
else:
os.system('sudo mount %s' % device)
class ReplProbeTest(ProbeTest):
acct_cont_required_replicas = 3
acct_cont_required_devices = 4
obj_required_replicas = 3
obj_required_devices = 4
policy_requirements = {'policy_type': REPL_POLICY}
class ECProbeTest(ProbeTest):
acct_cont_required_replicas = 3
acct_cont_required_devices = 4
obj_required_replicas = 6
obj_required_devices = 8
policy_requirements = {'policy_type': EC_POLICY}
if __name__ == "__main__":
for server in ('account', 'container'):
try:
get_ring(server, 3, 4,
force_validate=True)
except SkipTest as err:
sys.exit('%s ERROR: %s' % (server, err))
print '%s OK' % server
for policy in POLICIES:
try:
get_ring(policy.ring_name, 3, 4,
server='object', force_validate=True)
except SkipTest as err:
sys.exit('object ERROR (%s): %s' % (policy.name, err))
print 'object OK (%s)' % policy.name
|
|
import html
from collections import namedtuple
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from corehq.apps.data_interfaces.models import AutomaticUpdateRule
from corehq.apps.sms.models import (
INCOMING,
OUTGOING,
SMS,
WORKFLOW_FORWARD,
Keyword,
MessagingEvent,
MessagingSubEvent,
)
from corehq.messaging.scheduling.models import (
ImmediateBroadcast,
MigratedReminder,
ScheduledBroadcast,
)
from corehq.messaging.scheduling.views import (
EditConditionalAlertView,
EditScheduleView,
)
from corehq.util.quickcache import quickcache
def get_status_display(event, sms=None):
"""
event can be a MessagingEvent or MessagingSubEvent
"""
# If sms without error, short circuit to the sms status display
if event.status != MessagingEvent.STATUS_ERROR and sms:
return get_sms_status_display(sms)
# If survey without error, short circuit to the survey status display
if (isinstance(event, MessagingSubEvent) and
event.status == MessagingEvent.STATUS_COMPLETED and
event.xforms_session_id):
return _(event.xforms_session.status)
status = event.status
error_code = event.error_code
# If we have a MessagingEvent with no error_code it means there's
# an error in the subevent
if status == MessagingEvent.STATUS_ERROR and not error_code:
error_code = MessagingEvent.ERROR_SUBEVENT_ERROR
# If we have a MessagingEvent that's completed but it's tied to
# unfinished surveys, then mark it as being in progress
if (
isinstance(event, MessagingEvent) and
event.status == MessagingEvent.STATUS_COMPLETED and
MessagingSubEvent.objects.filter(
parent_id=event.pk,
content_type=MessagingEvent.CONTENT_SMS_SURVEY,
# without this line, django does a left join which is not what we want
xforms_session_id__isnull=False,
xforms_session__session_is_open=True
).count() > 0
):
status = MessagingEvent.STATUS_IN_PROGRESS
status = dict(MessagingEvent.STATUS_CHOICES).get(status, '-')
error_message = (MessagingEvent.ERROR_MESSAGES.get(error_code, None)
if error_code else None)
error_message = _(error_message) if error_message else ''
if event.additional_error_text:
error_message += ' %s' % event.additional_error_text
# Sometimes the additional information from touchforms has < or >
# characters, so we need to escape them for display
if error_message:
return '%s - %s' % (_(status), html.escape(error_message))
else:
return _(status)
def get_sms_status_display(sms):
slug, detail = get_sms_status_display_raw(sms)
display = SMS.STATUS_DISPLAY[slug]
detail = f" - {detail}" if detail else ""
return f"{display}{detail}"
def get_sms_status_display_raw(sms):
if sms.error:
error = sms.system_error_message
if error:
error_message = SMS.ERROR_MESSAGES.get(error, error)
return SMS.STATUS_ERROR, _(error_message)
return SMS.STATUS_ERROR, None
if not sms.processed:
return SMS.STATUS_QUEUED, None
if sms.direction == INCOMING:
return SMS.STATUS_RECEIVED, None
detail = ""
if sms.is_status_pending():
detail = _("message ID: {id}").format(id=sms.backend_message_id)
if sms.direction == OUTGOING:
if sms.workflow == WORKFLOW_FORWARD:
return SMS.STATUS_FORWARDED, detail
if sms.custom_metadata and sms.custom_metadata.get('gateway_delivered', False):
return SMS.STATUS_DELIVERED, detail
return SMS.STATUS_SENT, detail
return SMS.STATUS_UNKNOWN, detail
def _get_keyword_display_raw(keyword_id):
from corehq.apps.reminders.views import (
EditStructuredKeywordView,
EditNormalKeywordView,
)
try:
keyword = Keyword.objects.get(couch_id=keyword_id)
except Keyword.DoesNotExist:
return None, None
urlname = (EditStructuredKeywordView.urlname if keyword.is_structured_sms()
else EditNormalKeywordView.urlname)
return keyword.description, reverse(urlname, args=[keyword.domain, keyword_id])
def _get_keyword_display(keyword_id, content_cache):
if keyword_id in content_cache:
return content_cache[keyword_id]
display, url = _get_keyword_display_raw(keyword_id)
if not display:
display = _('(Deleted Keyword)')
else:
display = format_html('<a target="_blank" href="{}">{}</a>', url, display)
content_cache[keyword_id] = display
return display
def _get_reminder_display_raw(domain, handler_id):
try:
info = MigratedReminder.objects.get(handler_id=handler_id)
if info.rule_id:
return _get_case_rule_display_raw(domain, info.rule_id)
except MigratedReminder.DoesNotExist:
pass
return None, None
def _get_reminder_display(domain, handler_id, content_cache):
if handler_id in content_cache:
return content_cache[handler_id]
result, url = _get_reminder_display_raw(domain, handler_id)
if not result:
result = _("(Deleted Conditional Alert)")
elif url:
result = format_html('<a target="_blank" href="{}">{}</a>', url, result)
content_cache[handler_id] = result
return result
def _get_scheduled_broadcast_display_raw(domain, broadcast_id):
try:
broadcast = ScheduledBroadcast.objects.get(domain=domain, pk=broadcast_id)
except ScheduledBroadcast.DoesNotExist:
return "-", None
if not broadcast.deleted:
return broadcast.name, reverse(EditScheduleView.urlname, args=[
domain, EditScheduleView.SCHEDULED_BROADCAST, broadcast_id
])
return None, None
def _get_scheduled_broadcast_display(domain, broadcast_id, content_cache):
cache_key = 'scheduled-broadcast-%s' % broadcast_id
if cache_key in content_cache:
return content_cache[cache_key]
result, url = _get_scheduled_broadcast_display_raw(domain, broadcast_id)
if not result:
result = _("(Deleted Broadcast)")
elif url:
result = format_html('<a target="_blank" href="{}">{}</a>', url, result)
content_cache[cache_key] = result
return result
def _get_immediate_broadcast_display_raw(domain, broadcast_id):
try:
broadcast = ImmediateBroadcast.objects.get(domain=domain, pk=broadcast_id)
except ImmediateBroadcast.DoesNotExist:
return '-', None
if not broadcast.deleted:
return broadcast.name, reverse(EditScheduleView.urlname, args=[
domain, EditScheduleView.IMMEDIATE_BROADCAST, broadcast_id
])
return None, None
def _get_immediate_broadcast_display(domain, broadcast_id, content_cache):
cache_key = 'immediate-broadcast-%s' % broadcast_id
if cache_key in content_cache:
return content_cache[cache_key]
result, url = _get_immediate_broadcast_display_raw(domain, broadcast_id)
if not result:
result = _("(Deleted Broadcast)")
elif url:
result = format_html('<a target="_blank" href="{}">{}</a>', url, result)
content_cache[cache_key] = result
return result
def _get_case_rule_display_raw(domain, rule_id):
try:
rule = AutomaticUpdateRule.objects.get(domain=domain, pk=rule_id)
except AutomaticUpdateRule.DoesNotExist:
return "-", None
if not rule.deleted:
return rule.name, reverse(EditConditionalAlertView.urlname, args=[domain, rule_id])
return None, None
def _get_case_rule_display(domain, rule_id, content_cache):
cache_key = 'case-rule-%s' % rule_id
if cache_key in content_cache:
return content_cache[cache_key]
result, url = _get_case_rule_display_raw(domain, rule_id)
if not result:
result = _("(Deleted Conditional Alert)")
elif url:
result = format_html('<a target="_blank" href="{}">{}</a>', url, result)
content_cache[cache_key] = result
return result
EventStub = namedtuple('EventStub', 'source source_id content_type form_name')
def get_event_display(domain, event, content_cache):
if event.source == MessagingEvent.SOURCE_KEYWORD and event.source_id:
return _get_keyword_display(event.source_id, content_cache)
elif event.source == MessagingEvent.SOURCE_REMINDER and event.source_id:
return _get_reminder_display(domain, event.source_id, content_cache)
elif event.source == MessagingEvent.SOURCE_SCHEDULED_BROADCAST and event.source_id:
return _get_scheduled_broadcast_display(domain, event.source_id, content_cache)
elif event.source == MessagingEvent.SOURCE_IMMEDIATE_BROADCAST and event.source_id:
return _get_immediate_broadcast_display(domain, event.source_id, content_cache)
elif event.source == MessagingEvent.SOURCE_CASE_RULE and event.source_id:
return _get_case_rule_display(domain, event.source_id, content_cache)
elif event.content_type in (
MessagingEvent.CONTENT_SMS_SURVEY,
MessagingEvent.CONTENT_IVR_SURVEY,
):
return ('%s (%s)' % (_(dict(MessagingEvent.CONTENT_CHOICES).get(event.content_type)),
event.form_name or _('Unknown')))
content_choices = dict(MessagingEvent.CONTENT_CHOICES)
return _(content_choices.get(event.content_type, '-'))
@quickcache(["domain", "source", "source_id"], timeout=5 * 60)
def get_source_display(domain, source, source_id):
if not source_id:
return None
if source == MessagingEvent.SOURCE_KEYWORD:
display, _ = _get_keyword_display_raw(source_id)
return display or "deleted-keyword"
elif source == MessagingEvent.SOURCE_REMINDER:
display, _ = _get_reminder_display_raw(domain, source_id)
return display or "deleted-conditional-alert"
elif source == MessagingEvent.SOURCE_SCHEDULED_BROADCAST:
display, _ = _get_scheduled_broadcast_display_raw(domain, source_id)
return display or "deleted-broadcast"
elif source == MessagingEvent.SOURCE_IMMEDIATE_BROADCAST:
display, _ = _get_immediate_broadcast_display_raw(domain, source_id)
return display or "deleted-broadcast"
elif source == MessagingEvent.SOURCE_CASE_RULE:
display, _ = _get_case_rule_display_raw(domain, source_id)
return display or "deleted-conditional-alert"
return None
def get_event_display_api(event):
if event.source_id:
source_display = get_source_display(event.domain, event.source, event.source_id)
if source_display:
return source_display
detail = ""
if event.content_type in (
MessagingEvent.CONTENT_SMS_SURVEY,
MessagingEvent.CONTENT_IVR_SURVEY,
):
form_name = event.form_name or "unknown-form"
detail = f" ({form_name})"
type_ = MessagingEvent.CONTENT_TYPE_SLUGS.get(event.content_type, "unknown")
return f"{type_}{detail}"
|
|
"""
Design Matrix
=============
This tutorial illustrates how to use the Design_Matrix class to flexibly create
design matrices that can then be used with the Brain_Data class to perform
univariate regression.
Design Matrices can be thought of as "enhanced" pandas dataframes; they can do
everything a pandas dataframe is capable of, with some added features. Design
Matrices follow a data organization format common in many machine learning
applications such as the scikit-learn API: 2d tables organized as observations
by features. In the context of neuroimaging this often translates to TRs by
conditions of interest + nuisance covariates (1st level analysis), or
participants by conditions/groups (2nd level analysis).
"""
#########################################################################
# Design Matrix Basics
# --------------------
#
# Let's just create a basic toy design matrix by hand corresponding to a single participant's data from an experiment lasting 22 TRs, collected at a temporal resolution of 1.5s. For this example we'll have 4 unique "stimulus conditions" that each occur for 2 TRs (3s) with 1 TR (1.5s) of rest between events.
from nltools.data import Design_Matrix
import numpy as np
TR = 1.5 # Design Matrices take a sampling_freq argument specified in hertz which can be converted as 1./TR
dm = Design_Matrix(np.array([
[0,0,0,0],
[0,0,0,0],
[1,0,0,0],
[1,0,0,0],
[0,0,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,0,0],
[0,0,1,0],
[0,0,1,0],
[0,0,0,0],
[0,0,0,1],
[0,0,0,1],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0]
]),
sampling_freq = 1./TR,
columns=['face_A','face_B','house_A','house_B']
)
#########################################################################
# Notice how this looks exactly like a pandas dataframe. That's because design matrices are *subclasses* of dataframes with some extra attributes and methods.
print(dm)
#########################################################################
# Let's take a look at some of that meta-data. We can see that no columns have been convolved as of yet and this design matrix has no polynomial terms (e.g. an intercept or linear trend).
print(dm.details())
#########################################################################
# We can also easily visualize the design matrix using an SPM/AFNI/FSL style heatmap
dm.heatmap()
#########################################################################
# Adding nuisance covariates
# ---------------------------
#
# Legendre Polynomials
# ********************
#
# A common operation is adding an intercept and polynomial trend terms (e.g. linear and quadratic) as nuisance regressors. This is easy to do. Consistent with other software packages, these are orthogonal Legendre polynomials on the scale -1 to 1.
# with include_lower = True (default), 2 here means: 0-intercept, 1-linear-trend, 2-quadratic-trend
dm_with_nuissance = dm.add_poly(2,include_lower=True)
dm_with_nuissance.heatmap()
#########################################################################
# We can see that 3 new columns were added to the design matrix. We can also inspect the change to the meta-data. Notice that the Design Matrix is aware of the existence of three polynomial terms now.
print(dm_with_nuissance.details())
#########################################################################
# Discrete Cosine Basis Functions
# *******************************
#
# Polynomial variables are not the only type of nuisance covariates that can be generated for you. Design Matrix also supports the creation of discrete-cosine basis functions à la SPM. This will create a series of filters added as new columns based on a specified duration, defaulting to 180s. Let's create DCT filters for 20s durations in our toy data.
# Short filter duration for our simple example
dm_with_cosine = dm.add_dct_basis(duration=20)
dm_with_cosine.heatmap()
#########################################################################
# Data operations
# ---------------
#
# Performing convolution
# **********************
#
# Design Matrix makes it easy to perform convolution and will auto-ignore all columns that are considered polynomials. The default convolution kernel is the Glover (1999) HRF parameterized by the glover_hrf implementation in nipy (see nltools.externals.hrf for details). However, any arbitrary kernel can be passed as a 1d numpy array, or multiple kernels can be passed as a 2d numpy array for highly flexible convolution across many types of data (e.g. SCR).
dm_with_nuissance_c = dm_with_nuissance.convolve()
print(dm_with_nuissance_c.details())
dm_with_nuissance_c.heatmap()
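#########################################################################
# As noted above, convolve() is not limited to the default HRF: any 1d (or 2d)
# numpy array can be supplied as the kernel. The commented-out sketch below
# illustrates this; the ``conv_func`` argument name is taken from the nltools
# API docs, so verify it against your installed version.
# custom_kernel = np.exp(-np.linspace(0, 5, 10))  # any 1d numpy array
# dm_custom = dm_with_nuissance.convolve(conv_func=custom_kernel)
# dm_custom.heatmap()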
#########################################################################
# Design Matrix can do many different data operations in addition to convolution such as upsampling and downsampling to different frequencies, zscoring, etc. Check out the API documentation for how to use these methods.
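#########################################################################
# For example (commented-out sketch; method names per the nltools API docs,
# so verify against your installed version):
# dm_z = dm.zscore(columns=['face_A', 'face_B'])  # z-score a subset of columns
# dm_down = dm.downsample(target=1./3)            # resample to a lower sampling frequency (hz)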
#########################################################################
# File Reading
# ------------
#
# Creating a Design Matrix from an onsets file
# ********************************************
#
# Nltools provides basic file-reading support for 2 or 3 column formatted onset files. Users can look at the onsets_to_dm function as a template to build more complex file readers if desired or to see additional features. Nltools includes an example onsets file where each event lasted exactly 1 TR and TR = 2s. Let's use that to create a design matrix with an intercept and linear trend.
from nltools.utils import get_resource_path
from nltools.file_reader import onsets_to_dm
from nltools.data import Design_Matrix
import os
TR = 2.0
sampling_freq = 1./TR
onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt')
dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq, run_length=160, sort=True,add_poly=1)
dm.heatmap()
#########################################################################
# Creating a Design Matrix from a generic csv file
# ************************************************
#
# Alternatively you can read a generic csv file and transform it into a Design Matrix using pandas file reading capability. Here we'll read in an example covariates file that contains the output of motion realignment estimated during an fMRI preprocessing pipeline.
import pandas as pd
covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv')
cov = pd.read_csv(covariatesFile)
cov = Design_Matrix(cov, sampling_freq =sampling_freq)
cov.heatmap(vmin=-1,vmax=1) # alter plot to scale of covs; heatmap takes Seaborn heatmap arguments
#########################################################################
# Working with multiple Design Matrices
# -------------------------------------
#
# Vertically "stacking" Design Matrices
# *************************************
# A common task is creating a separate design matrix for each of multiple runs of an experiment (or multiple subjects) and vertically appending them to each other so that regression can be performed across all runs of an experiment. However, in order to account for run differences it's important (and common practice) to include separate run-wise polynomials (e.g. intercepts). Design Matrix's append method is intelligent and flexible enough to keep columns separated during appending automatically.
# Let's use the design matrix with polynomials from above
# Stack "run 1" on top of "run 2"
runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance,axis=0)
runs_1_and_2.heatmap()
#########################################################################
# Separating columns during append operations
# *******************************************
# Notice that all polynomials have been kept separated for you automatically and have been renamed to reflect the fact that they come from different runs. But Design Matrix is even more flexible. Let's say you want to estimate separate run-wise coefficients for all house stimuli too. Simply pass that into the `unique_cols` parameter of append.
runs_1_and_2 = dm_with_nuissance.append(dm_with_nuissance,unique_cols=['house*'],axis=0)
runs_1_and_2.heatmap()
#########################################################################
# Now notice how all stimuli that begin with 'house' have been made into separate columns for each run. In general `unique_cols` can take a list of columns to keep separated or simple wildcards that either begin with a term (e.g. `"house*"`) or end with one (e.g. `"*house"`).
#########################################################################
# Putting it all together
# -----------------------
#
# A realistic workflow
# ********************
# Let's combine all the examples above to build a workflow for a realistic first-level fMRI analysis. This will include loading onsets from multiple experimental runs, and concatenating them into a large multi-run design matrix where we estimate a single set of coefficients for our variables of interest, but make sure we account for run-wise differences in nuisance covariates (e.g. motion), baselines, trends, etc. For simplicity we'll just reuse the same onsets and covariates file multiple times.
num_runs = 4
TR = 2.0
sampling_freq = 1./TR
all_runs = Design_Matrix(sampling_freq = sampling_freq)
for i in range(num_runs):
# 1) Load in onsets for this run
onsetsFile = os.path.join(get_resource_path(),'onsets_example.txt')
dm = onsets_to_dm(onsetsFile, sampling_freq=sampling_freq,run_length=160,sort=True)
# 2) Convolve them with the hrf
dm = dm.convolve()
    # 3) Load in covariates for this run
covariatesFile = os.path.join(get_resource_path(),'covariates_example.csv')
cov = pd.read_csv(covariatesFile)
cov = Design_Matrix(cov, sampling_freq = sampling_freq)
    # 4) In the covariates, fill any NaNs with 0, add intercept and linear trends and dct basis functions
cov = cov.fillna(0)
    # Retain a list of nuisance covariates (e.g. motion and spikes) which we'll also want to keep separate for each run
cov_columns = cov.columns
cov = cov.add_poly(1).add_dct_basis()
    # 5) Join the onsets and covariates together
full = dm.append(cov,axis=1)
    # 6) Append it to the master Design Matrix keeping things separated by run
    all_runs = all_runs.append(full,axis=0,unique_cols=cov_columns)
all_runs.heatmap(vmin=-1,vmax=1)
#########################################################################
# We can see that the leftmost columns of our multi-run design matrix contain our conditions of interest (stacked across all runs), the middle columns include separate run-wise nuisance covariates (motion, spikes), and the rightmost columns contain run-specific polynomials (intercept, trends, etc).
#########################################################################
# Data Diagnostics
# ----------------
#
# Let's actually check if our design is estimable. Design Matrix provides a few tools for cleaning up highly correlated columns (which would otherwise cause regression to fail), replacing data, and computing collinearity. By default the `clean` method will drop any columns correlated at r >= .95
all_runs_cleaned = all_runs.clean(verbose=True)
all_runs_cleaned.heatmap(vmin=-1,vmax=1)
#########################################################################
# Whoops, looks like some of our polynomials and dct basis functions above are highly correlated, but the clean method detected that and dropped them for us. In practice you'll often include polynomials or dct basis functions rather than both, but this was just an illustrative example.
#########################################################################
# Estimating a first-level model
# ------------------------------
#
# You can now set this multi-run Design Matrix as the `X` attribute of a Brain_Data object containing EPI data for these four runs and estimate a regression in just a few lines of code.
# This code is commented because we don't actually have niftis loaded for the purposes of this tutorial
# See the other tutorials for more details on working with nifti files and Brain_Data objects
# Assuming you already loaded up Nifti images like this
# list_of_niftis = ['run_1.nii.gz','run_2.nii.gz','run_3.nii.gz','run_4.nii.gz']
# all_run_data = Brain_Data(list_of_niftis)
# Set our Design Matrix to the X attribute of Brain_Data object
# all_run_data.X = all_runs_cleaned
# Run the regression
# results = all_run_data.regress()
# This will produce N beta, t, and p images
# where N is the number of columns in the design matrix
|
|
import six
import warnings
from datetime import datetime
from .. import errors
from .. import utils
from ..utils.utils import create_networking_config, create_endpoint_config
class ContainerApiMixin(object):
@utils.check_resource
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False):
params = {
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
'stderr': stderr and 1 or 0,
'stream': stream and 1 or 0,
}
u = self._url("/containers/{0}/attach", container)
response = self._post(u, params=params, stream=stream)
return self._get_result(container, stream, response)
@utils.check_resource
def attach_socket(self, container, params=None, ws=False):
if params is None:
params = {
'stdout': 1,
'stderr': 1,
'stream': 1
}
if ws:
return self._attach_websocket(container, params)
u = self._url("/containers/{0}/attach", container)
return self._get_raw_response_socket(self.post(
u, None, params=self._attach_params(params), stream=True))
@utils.check_resource
def commit(self, container, repository=None, tag=None, message=None,
author=None, changes=None, conf=None):
params = {
'container': container,
'repo': repository,
'tag': tag,
'comment': message,
'author': author,
'changes': changes
}
u = self._url("/commit")
return self._result(self._post_json(u, data=conf, params=params),
json=True)
def containers(self, quiet=False, all=False, trunc=False, latest=False,
since=None, before=None, limit=-1, size=False,
filters=None):
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
'size': 1 if size else 0,
'trunc_cmd': 1 if trunc else 0,
'since': since,
'before': before
}
if filters:
params['filters'] = utils.convert_filters(filters)
u = self._url("/containers/json")
res = self._result(self._get(u, params=params), True)
if quiet:
return [{'Id': x['Id']} for x in res]
if trunc:
for x in res:
x['Id'] = x['Id'][:12]
return res
@utils.check_resource
def copy(self, container, resource):
if utils.version_gte(self._version, '1.20'):
warnings.warn(
'Client.copy() is deprecated for API version >= 1.20, '
'please use get_archive() instead',
DeprecationWarning
)
res = self._post_json(
self._url("/containers/{0}/copy".format(container)),
data={"Resource": resource},
stream=True
)
self._raise_for_status(res)
return res.raw
def create_container(self, image, command=None, hostname=None, user=None,
detach=False, stdin_open=False, tty=False,
mem_limit=None, ports=None, environment=None,
dns=None, volumes=None, volumes_from=None,
network_disabled=False, name=None, entrypoint=None,
cpu_shares=None, working_dir=None, domainname=None,
memswap_limit=None, cpuset=None, host_config=None,
mac_address=None, labels=None, volume_driver=None,
stop_signal=None, networking_config=None):
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
if host_config and utils.compare_version('1.15', self._version) < 0:
raise errors.InvalidVersion(
'host_config is not supported in API < 1.15'
)
config = self.create_container_config(
image, command, hostname, user, detach, stdin_open,
tty, mem_limit, ports, environment, dns, volumes, volumes_from,
network_disabled, entrypoint, cpu_shares, working_dir, domainname,
memswap_limit, cpuset, host_config, mac_address, labels,
volume_driver, stop_signal, networking_config,
)
return self.create_container_from_config(config, name)
def create_container_config(self, *args, **kwargs):
return utils.create_container_config(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None):
u = self._url("/containers/create")
params = {
'name': name
}
res = self._post_json(u, data=config, params=params)
return self._result(res, True)
def create_host_config(self, *args, **kwargs):
if not kwargs:
kwargs = {}
if 'version' in kwargs:
raise TypeError(
"create_host_config() got an unexpected "
"keyword argument 'version'"
)
kwargs['version'] = self._version
return utils.create_host_config(*args, **kwargs)
def create_networking_config(self, *args, **kwargs):
return create_networking_config(*args, **kwargs)
def create_endpoint_config(self, *args, **kwargs):
return create_endpoint_config(self._version, *args, **kwargs)
@utils.check_resource
def diff(self, container):
return self._result(
self._get(self._url("/containers/{0}/changes", container)), True
)
@utils.check_resource
def export(self, container):
res = self._get(
self._url("/containers/{0}/export", container), stream=True
)
self._raise_for_status(res)
return res.raw
@utils.check_resource
@utils.minimum_version('1.20')
def get_archive(self, container, path):
params = {
'path': path
}
url = self._url('/containers/{0}/archive', container)
res = self._get(url, params=params, stream=True)
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
res.raw,
utils.decode_json_header(encoded_stat) if encoded_stat else None
)
@utils.check_resource
def inspect_container(self, container):
return self._result(
self._get(self._url("/containers/{0}/json", container)), True
)
@utils.check_resource
def kill(self, container, signal=None):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
params['signal'] = int(signal)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource
def logs(self, container, stdout=True, stderr=True, stream=False,
timestamps=False, tail='all', since=None, follow=None):
if utils.compare_version('1.11', self._version) >= 0:
if follow is None:
follow = stream
params = {'stderr': stderr and 1 or 0,
'stdout': stdout and 1 or 0,
'timestamps': timestamps and 1 or 0,
'follow': follow and 1 or 0,
}
if utils.compare_version('1.13', self._version) >= 0:
if tail != 'all' and (not isinstance(tail, int) or tail < 0):
tail = 'all'
params['tail'] = tail
if since is not None:
if utils.compare_version('1.19', self._version) < 0:
raise errors.InvalidVersion(
'since is not supported in API < 1.19'
)
else:
if isinstance(since, datetime):
params['since'] = utils.datetime_to_timestamp(since)
elif (isinstance(since, int) and since > 0):
params['since'] = since
url = self._url("/containers/{0}/logs", container)
res = self._get(url, params=params, stream=stream)
return self._get_result(container, stream, res)
return self.attach(
container,
stdout=stdout,
stderr=stderr,
stream=stream,
logs=True
)
@utils.check_resource
def pause(self, container):
url = self._url('/containers/{0}/pause', container)
res = self._post(url)
self._raise_for_status(res)
@utils.check_resource
def port(self, container, private_port):
res = self._get(self._url("/containers/{0}/json", container))
self._raise_for_status(res)
json_ = res.json()
private_port = str(private_port)
h_ports = None
# Port settings is None when the container is running with
# network_mode=host.
port_settings = json_.get('NetworkSettings', {}).get('Ports')
if port_settings is None:
return None
if '/' in private_port:
return port_settings.get(private_port)
h_ports = port_settings.get(private_port + '/tcp')
if h_ports is None:
h_ports = port_settings.get(private_port + '/udp')
return h_ports
@utils.check_resource
@utils.minimum_version('1.20')
def put_archive(self, container, path, data):
params = {'path': path}
url = self._url('/containers/{0}/archive', container)
res = self._put(url, params=params, data=data)
self._raise_for_status(res)
return res.status_code == 200
@utils.check_resource
def remove_container(self, container, v=False, link=False, force=False):
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
)
self._raise_for_status(res)
@utils.minimum_version('1.17')
@utils.check_resource
def rename(self, container, name):
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource
def resize(self, container, height, width):
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource
def restart(self, container, timeout=10):
params = {'t': timeout}
url = self._url("/containers/{0}/restart", container)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource
def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
publish_all_ports=None, links=None, privileged=None,
dns=None, dns_search=None, volumes_from=None, network_mode=None,
restart_policy=None, cap_add=None, cap_drop=None, devices=None,
extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None):
if utils.compare_version('1.10', self._version) < 0:
if dns is not None:
raise errors.InvalidVersion(
'dns is only supported for API version >= 1.10'
)
if volumes_from is not None:
raise errors.InvalidVersion(
'volumes_from is only supported for API version >= 1.10'
)
if utils.compare_version('1.15', self._version) < 0:
if security_opt is not None:
raise errors.InvalidVersion(
'security_opt is only supported for API version >= 1.15'
)
if ipc_mode:
raise errors.InvalidVersion(
'ipc_mode is only supported for API version >= 1.15'
)
if utils.compare_version('1.17', self._version) < 0:
if read_only is not None:
raise errors.InvalidVersion(
'read_only is only supported for API version >= 1.17'
)
if pid_mode is not None:
raise errors.InvalidVersion(
'pid_mode is only supported for API version >= 1.17'
)
if utils.compare_version('1.18', self._version) < 0:
if ulimits is not None:
raise errors.InvalidVersion(
'ulimits is only supported for API version >= 1.18'
)
start_config_kwargs = dict(
binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
publish_all_ports=publish_all_ports, links=links, dns=dns,
privileged=privileged, dns_search=dns_search, cap_add=cap_add,
cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
network_mode=network_mode, restart_policy=restart_policy,
extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
)
start_config = None
if any(v is not None for v in start_config_kwargs.values()):
if utils.compare_version('1.15', self._version) > 0:
warnings.warn(
'Passing host config parameters in start() is deprecated. '
'Please use host_config in create_container instead!',
DeprecationWarning
)
start_config = self.create_host_config(**start_config_kwargs)
url = self._url("/containers/{0}/start", container)
res = self._post_json(url, data=start_config)
self._raise_for_status(res)
@utils.minimum_version('1.17')
@utils.check_resource
def stats(self, container, decode=None, stream=True):
url = self._url("/containers/{0}/stats", container)
if stream:
return self._stream_helper(self._get(url, stream=True),
decode=decode)
else:
return self._result(self._get(url, params={'stream': False}),
json=True)
@utils.check_resource
def stop(self, container, timeout=10):
params = {'t': timeout}
url = self._url("/containers/{0}/stop", container)
res = self._post(url, params=params,
timeout=(timeout + (self.timeout or 0)))
self._raise_for_status(res)
@utils.check_resource
def top(self, container, ps_args=None):
u = self._url("/containers/{0}/top", container)
params = {}
if ps_args is not None:
params['ps_args'] = ps_args
return self._result(self._get(u, params=params), True)
@utils.check_resource
def unpause(self, container):
url = self._url('/containers/{0}/unpause', container)
res = self._post(url)
self._raise_for_status(res)
@utils.minimum_version('1.22')
@utils.check_resource
def update_container(
self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
mem_reservation=None, memswap_limit=None, kernel_memory=None
):
url = self._url('/containers/{0}/update', container)
data = {}
if blkio_weight:
data['BlkioWeight'] = blkio_weight
if cpu_period:
data['CpuPeriod'] = cpu_period
if cpu_shares:
data['CpuShares'] = cpu_shares
if cpu_quota:
data['CpuQuota'] = cpu_quota
if cpuset_cpus:
data['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
data['CpusetMems'] = cpuset_mems
if mem_limit:
data['Memory'] = utils.parse_bytes(mem_limit)
if mem_reservation:
data['MemoryReservation'] = utils.parse_bytes(mem_reservation)
if memswap_limit:
data['MemorySwap'] = utils.parse_bytes(memswap_limit)
if kernel_memory:
data['KernelMemory'] = utils.parse_bytes(kernel_memory)
res = self._post_json(url, data=data)
return self._result(res, True)
@utils.check_resource
def wait(self, container, timeout=None):
url = self._url("/containers/{0}/wait", container)
res = self._post(url, timeout=timeout)
self._raise_for_status(res)
json_ = res.json()
if 'StatusCode' in json_:
return json_['StatusCode']
return -1
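# Illustrative usage sketch (not part of this module): ContainerApiMixin is
# mixed into the docker Client class, so the methods above are called on a
# Client instance. The image name and socket URL below are placeholders.
#
#     import docker
#     client = docker.Client(base_url='unix://var/run/docker.sock')
#     container = client.create_container(image='busybox', command='echo hello')
#     client.start(container=container['Id'])
#     exit_code = client.wait(container['Id'])
#     output = client.logs(container['Id'])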
|
|
from __future__ import print_function, absolute_import
from pymatgen.io.adf import AdfKey, AdfTask, AdfOutput, AdfInput
from pymatgen.core.structure import Molecule
import unittest
import os
from os.path import join
__author__ = 'Xin Chen, chenxin13@mails.tsinghua.edu.cn'
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', 'molecules')
geometry_string = """GEOMETRY
smooth conservepoints
optim all cartesian
iterations 250
step rad=0.15 angle=10.0
hessupd BFGS
converge e=0.001 grad=0.0003 rad=0.01 angle=0.5
END
"""
zlmfit_string = """ZLMFIT
AtomDepQuality
10 good
12 normal
subend
END
"""
atoms_string = """ATOMS
O -0.90293455 0.66591421 0.00000000
H 0.05706545 0.66591421 0.00000000
H -1.22338913 1.57085004 0.00000000
END
"""
h2oxyz = """3
0.0
O -0.90293455 0.66591421 0.0
H 0.05706545 0.66591421 0.0
H -1.22338913 1.57085004 0.0
"""
rhb18xyz = """19
0.0
Rh -0.453396 -0.375115 0.000000
B 0.168139 3.232791 0.000000
B -0.270938 1.639058 0.000000
B 0.206283 2.604044 1.459430
B 0.404410 1.880136 2.866764
B -0.103309 0.887485 1.655272
B 0.436856 0.371367 3.299887
B 0.016593 -0.854959 1.930982
B 0.563233 -1.229713 3.453066
B 0.445855 -2.382027 2.415013
B 0.206283 2.604044 -1.459430
B 0.404410 1.880136 -2.866764
B -0.103309 0.887485 -1.655272
B 0.436856 0.371367 -3.299887
B 0.563233 -1.229713 -3.453066
B 0.016593 -0.854959 -1.930982
B 0.200456 -2.309538 -0.836316
B 0.200456 -2.309538 0.836316
B 0.445855 -2.382027 -2.415013
"""
def readfile(file_object):
"""
Return the content of the file as a string.
Parameters
----------
file_object : file or str
The file to read. This can be either a File object or a file path.
Returns
-------
content : str
The content of the file.
"""
if hasattr(file_object, "read"):
return file_object.read()
elif isinstance(file_object, str):
        with open(file_object, "r") as f:
            return f.read()
else:
raise ValueError("``file_object`` must be a string or a file object!")
class AdfKeyTest(unittest.TestCase):
def test_simple(self):
unrestricted = AdfKey("unrestricted")
self.assertEqual(str(unrestricted).strip(), 'UNRESTRICTED')
def test_options(self):
charge = AdfKey("charge", [-1, 0])
charge_string = "CHARGE -1 0\n"
self.assertEqual(str(charge), "CHARGE -1 0\n")
self.assertEqual(str(AdfKey.from_dict(charge.as_dict())), charge_string)
def test_subkeys(self):
smooth = AdfKey("smooth", ["conservepoints"])
optim = AdfKey("optim", ["all", "cartesian"])
iterations = AdfKey("iterations", [250])
step = AdfKey("step", [("rad", 0.15), ("angle", 10.0)])
hessupd = AdfKey("hessupd", ["BFGS"])
converge = AdfKey("converge", [("e", 1.0e-3), ("grad", 3.0e-4),
("rad", 1.0e-2), ("angle", 0.5)])
geo = AdfKey("geometry", subkeys=[smooth, optim, iterations, step,
hessupd, converge])
self.assertEqual(str(geo), geometry_string)
self.assertEqual(str(AdfKey.from_dict(geo.as_dict())), geometry_string)
self.assertTrue(geo.has_subkey("optim"))
def test_end(self):
geo = AdfKey("Geometry")
self.assertEqual(str(geo), "GEOMETRY\nEND\n")
def test_subkeys_subkeys(self):
atom_dep_quality = AdfKey("AtomDepQuality",
subkeys=[AdfKey("10", ["good"]),
AdfKey("12", ["normal"])])
zlmfit = AdfKey("zlmfit", subkeys=[atom_dep_quality])
self.assertEqual(str(zlmfit), zlmfit_string)
self.assertEqual(str(AdfKey.from_dict(zlmfit.as_dict())), zlmfit_string)
def test_from_string(self):
k1 = AdfKey.from_string("CHARGE -1 0")
self.assertEqual(k1.key, "CHARGE")
self.assertListEqual(k1.options, [-1, 0])
k2 = AdfKey.from_string("step rad=0.15 angle=10.0")
self.assertEqual(k2.key, "step")
self.assertListEqual(k2.options[0], ['rad', 0.15])
self.assertListEqual(k2.options[1], ['angle', 10.0])
k3 = AdfKey.from_string("GEOMETRY\noptim all\niterations 100\nEND\n")
self.assertEqual(k3.key, "GEOMETRY")
self.assertEqual(k3.subkeys[0].options[0], "all")
self.assertEqual(k3.subkeys[1].options[0], 100)
k4 = AdfKey.from_string(
"""SCF
iterations 300
converge 1.0e-7 1.0e-7
mixing 0.2
diis n=100 ok=0.0001 cyc=100 cx=5.0 cxx=10.0
END"""
)
self.assertEqual(k4.key, "SCF")
self.assertEqual(k4.subkeys[0].key, "iterations")
self.assertEqual(k4.subkeys[1].key, "converge")
self.assertEqual(k4.subkeys[1].options[0], 1E-7)
self.assertEqual(k4.subkeys[2].options[0], 0.2)
def test_option_operations(self):
k1 = AdfKey("Charge", [-1, 0])
k1.add_option(2)
self.assertListEqual(k1.options, [-1, 0, 2])
k1.remove_option(0)
self.assertListEqual(k1.options, [0, 2])
k2 = AdfKey.from_string("step rad=0.15 angle=10.0")
k2.add_option(["length", 0.1])
self.assertListEqual(k2.options[2], ["length", 0.1])
k2.remove_option("rad")
self.assertListEqual(k2.options[0], ["angle", 10.0])
def test_atom_block_key(self):
block = AdfKey("atoms")
o = Molecule.from_str(h2oxyz, "xyz")
for site in o:
block.add_subkey(AdfKey(str(site.specie), list(site.coords)))
self.assertEqual(str(block), atoms_string)
energy_task = """TITLE ADF_RUN
UNITS
length angstrom
angle degree
END
XC
GGA PBE
END
BASIS
type DZ
core small
END
SCF
iterations 300
END
GEOMETRY SinglePoint
END
"""
class AdfTaskTest(unittest.TestCase):
def test_energy(self):
task = AdfTask()
self.assertEqual(str(task), energy_task)
def test_serialization(self):
task = AdfTask()
o = AdfTask.from_dict(task.as_dict())
self.assertEqual(task.title, o.title)
self.assertEqual(task.basis_set, o.basis_set)
self.assertEqual(task.scf, o.scf)
self.assertEqual(task.geo, o.geo)
self.assertEqual(task.operation, o.operation)
self.assertEqual(task.units, o.units)
self.assertEqual(str(task), str(o))
rhb18 = {"title": "RhB18",
"basis_set": AdfKey.from_string("BASIS\ntype TZP\ncore small\nEND"),
"xc": AdfKey.from_string("XC\nHybrid PBE0\nEND"),
"units": AdfKey.from_string("UNITS\nlength angstrom\nEND"),
"other_directives": [AdfKey.from_string("SYMMETRY"),
AdfKey.from_string("RELATIVISTIC scalar zora"),
AdfKey.from_string("INTEGRATION 6.0 6.0 6.0"),
AdfKey.from_string("SAVE TAPE21"),
AdfKey.from_string("A1FIT 10.0")],
"geo_subkeys": [AdfKey.from_string("optim all"),
AdfKey.from_string("iterations 300"),
AdfKey.from_string("step rad=0.15 angle=10.0"),
AdfKey.from_string("hessupd BFGS")],
"scf": AdfKey.from_string(
"""SCF
iterations 300
converge 1.0e-7 1.0e-7
mixing 0.2
lshift 0.0
diis n=100 ok=0.0001 cyc=100 cx=5.0 cxx=10.0
END"""
)}
class AdfInputTest(unittest.TestCase):
def setUp(self):
self.tempfile = "./adf.temp"
def test_main(self):
o = Molecule.from_str(rhb18xyz, "xyz")
o.set_charge_and_spin(-1, 3)
task = AdfTask("optimize", **rhb18)
inp = AdfInput(task)
inp.write_file(o, self.tempfile)
s = readfile(join(test_dir, "adf", "RhB18_adf.inp"))
self.assertEqual(readfile(self.tempfile), s)
def tearDown(self):
if os.path.isfile(self.tempfile):
os.remove(self.tempfile)
class AdfOutputTest(unittest.TestCase):
def test_analytical_freq(self):
filename = join(test_dir, "adf", "analytical_freq", "adf.out")
o = AdfOutput(filename)
self.assertAlmostEqual(o.final_energy, -0.54340325)
self.assertEqual(len(o.energies), 4)
self.assertEqual(len(o.structures), 4)
self.assertAlmostEqual(o.frequencies[0], 1553.931)
self.assertAlmostEqual(o.frequencies[2], 3793.086)
self.assertAlmostEqual(o.normal_modes[0][2], 0.071)
self.assertAlmostEqual(o.normal_modes[0][6], 0.000)
self.assertAlmostEqual(o.normal_modes[0][7], -0.426)
self.assertAlmostEqual(o.normal_modes[0][8], -0.562)
def test_numerical_freq(self):
filename = join(test_dir, "adf", "numerical_freq", "adf.out")
o = AdfOutput(filename)
self.assertEqual(o.freq_type, 'Numerical')
self.assertEqual(o.final_structure.num_sites, 4)
self.assertEqual(len(o.frequencies), 6)
self.assertEqual(len(o.normal_modes), 6)
self.assertAlmostEqual(o.frequencies[0], 938.21)
self.assertAlmostEqual(o.frequencies[3], 3426.64)
self.assertAlmostEqual(o.frequencies[4], 3559.35)
self.assertAlmostEqual(o.frequencies[5], 3559.35)
self.assertAlmostEqual(o.normal_modes[1][0], 0.067)
self.assertAlmostEqual(o.normal_modes[1][3], -0.536)
self.assertAlmostEqual(o.normal_modes[1][7], 0.000)
self.assertAlmostEqual(o.normal_modes[1][9], -0.536)
def test_single_point(self):
filename = join(test_dir, "adf", "sp", "adf.out")
o = AdfOutput(filename)
self.assertAlmostEqual(o.final_energy, -0.74399276)
self.assertEqual(len(o.final_structure), 4)
if __name__ == "__main__":
unittest.main()
|
|
"""
Storefront and Metadata Model Access
"""
import logging
from django.core.urlresolvers import reverse
from django.db import connection
from django.db.models import Count
from django.db.models.functions import Lower
from django.db.models import F
import msgpack
# import ozpcenter.api.listing.serializers as listing_serializers
from ozpcenter.utils import str_to_bool
from ozpcenter import models
from ozpcenter.pipe import pipes
from ozpcenter.pipe import pipeline
from ozpcenter.recommend import recommend_utils
from ozpcenter.recommend.recommend import RecommenderProfileResultSet
logger = logging.getLogger('ozp-center.' + str(__name__))
# TODO: Finish in future to increase speed
# def dictfetchall(cursor):
# "Returns all rows from a cursor as a dict"
# desc = cursor.description
# return [
# dict(zip([col[0] for col in desc], row))
# for row in cursor.fetchall()
# ]
#
#
# def get_sql_statement():
# schema_class_str = str(connection.SchemaEditorClass)
# is_deleted = None
# is_enabled = None
#
# if 'sqlite' in schema_class_str:
# is_deleted = '0'
# is_enabled = '1'
# elif 'postgres' in schema_class_str:
# is_deleted = 'False'
# is_enabled = 'True'
# else:
#         raise Exception('Get SQL Statement ENGINE Error')
#
# sql_statement = '''
#
# '''.format(is_enabled, is_deleted)
# return sql_statement
#
#
# def get_user_listings(username, request, exclude_orgs=None):
# """
# Get User listings
#
# Returns:
# Python object of listings
# """
# exclude_orgs = exclude_orgs or []
#
# mapping_dict = {}
#
# cursor = connection.cursor()
#
# cursor.execute(get_sql_statement())
# rows = dictfetchall(cursor)
#
# categories_set = set()
# tags_set = set()
# contacts_set = set()
# profile_set = set()
# intents_set = set()
#
# for row in rows:
# if row['id'] not in mapping_dict:
# mapping_dict[row['id']] = {
# "id": row['id'],
# "unique_name": row['unique_name'],
# "is_enabled": row['is_enabled'],
# "is_private": row['is_private'],
#
# "required_listings_id": row['required_listings_id'],
#
# "total_rate1": row['total_rate1'],
# "total_rate2": row['total_rate2'],
# "total_rate3": row['total_rate3'],
# "total_rate4": row['total_rate4'],
# "total_rate5": row['total_rate5'],
# "avg_rate": row['avg_rate'],
# "total_reviews": row['total_reviews'],
# "total_votes": row['total_votes'],
# "feedback_score": row['feedback_score'],
#
# "approved_date": row['approved_date'],
#
# "usage_requirements": row['usage_requirements'],
# "system_requirements": row['system_requirements'],
# "iframe_compatible": row['iframe_compatible'],
#
# "what_is_new": row['what_is_new'],
#
# "is_deleted": row['is_deleted'],
# "security_marking": row['security_marking'],
# "version_name": row['version_name'],
# "approval_status": row['approval_status'],
# "current_rejection_id": row['current_rejection_id'],
# "is_featured": row['is_featured'],
# "title": row['title'],
# "description_short": row['description_short'],
#
#
# "launch_url": row['launch_url'],
# "edited_date": row['edited_date'],
# "description": row['description'],
#
# # One to One
# "listing_type": {"title": row['listing_type_title']},
#
# "agency": {'title': row['agency_title'],
# 'short_name': row['agency_short_name']},
#
# "small_icon": {"id": row['small_icon_id'],
# 'url': request.build_absolute_uri(reverse('image-detail', args=[row['small_icon_id']])),
# "security_marking": row['small_icon_security_marking']},
#
# "large_icon": {"id": row['large_icon_id'],
# 'url': request.build_absolute_uri(reverse('image-detail', args=[row['large_icon_id']])),
# "security_marking": row['large_icon_security_marking']},
#
# "banner_icon": {"id": row['banner_icon_id'],
# 'url': request.build_absolute_uri(reverse('image-detail', args=[row['banner_icon_id']])),
# "security_marking": row['banner_icon_security_marking']},
#
# "large_banner_icon": {"id": row['large_banner_icon_id'],
# 'url': request.build_absolute_uri(reverse('image-detail', args=[row['large_banner_icon_id']])),
# "security_marking": row['large_banner_icon_security_marking']},
#
# "last_activity_id": row['last_activity_id']
#
# }
#
# # Many to Many
#     # Categories
#
# if not mapping_dict[row['id']].get('categories'):
# mapping_dict[row['id']]['categories'] = {}
# if row['category_id']:
# current_data = {'title': row['category_title'], 'description': row['category_description']}
# categories_set.add(row['category_id'])
#
# if row['category_id'] not in mapping_dict[row['id']]['categories']:
# mapping_dict[row['id']]['categories'][row['category_id']] = current_data
#
# # Tags
# if not mapping_dict[row['id']].get('tags'):
# mapping_dict[row['id']]['tags'] = {}
# if row['tag_id']:
# current_data = {'name': row['tag_name']}
# tags_set.add(row['tag_id'])
#
# if row['tag_id'] not in mapping_dict[row['id']]['tags']:
# mapping_dict[row['id']]['tags'][row['tag_id']] = current_data
#
# # Contacts
# if not mapping_dict[row['id']].get('contacts'):
# mapping_dict[row['id']]['contacts'] = {}
# if row['contact_id']:
# current_data = {'id': row['contact_id'],
# 'secure_phone': row['contact_secure_phone'],
# 'unsecure_phone': row['contact_unsecure_phone'],
# 'email': row['contact_email'],
# 'name': row['contact_name'],
# 'organization': row['contact_organization'],
# 'contact_type': {'name': row['contact_type_name']}}
# contacts_set.add(row['contact_id'])
#
# if row['contact_id'] not in mapping_dict[row['id']]['contacts']:
# mapping_dict[row['id']]['contacts'][row['contact_id']] = current_data
#
# # Profile
# if not mapping_dict[row['id']].get('owners'):
# mapping_dict[row['id']]['owners'] = {}
# if row['profile_id']:
# current_data = {'display_name': row['owner_display_name'],
# 'user': {'username': row['owner_username']}}
# profile_set.add(row['profile_id'])
#
# if row['profile_id'] not in mapping_dict[row['id']]['owners']:
# mapping_dict[row['id']]['owners'][row['profile_id']] = current_data
#
# # Intent
# if not mapping_dict[row['id']].get('intents'):
# mapping_dict[row['id']]['intents'] = {}
# if row['intent_id']:
# intents_set.add(row['intent_id'])
# if row['intent_id'] not in mapping_dict[row['id']]['intents']:
# mapping_dict[row['id']]['intents'][row['intent_id']] = None
#
# for profile_key in mapping_dict:
# profile_map = mapping_dict[profile_key]
# profile_map['owners'] = [profile_map['owners'][p_key] for p_key in profile_map['owners']]
# profile_map['tags'] = [profile_map['tags'][p_key] for p_key in profile_map['tags']]
# profile_map['categories'] = [profile_map['categories'][p_key] for p_key in profile_map['categories']]
# profile_map['contacts'] = [profile_map['contacts'][p_key] for p_key in profile_map['contacts']]
# profile_map['intents'] = [profile_map['intents'][p_key] for p_key in profile_map['intents']]
#
# output_list = []
#
# for listing_id in mapping_dict:
# listing_values = mapping_dict[listing_id]
#
# if listing_values['is_private'] is True:
# if listing_values['agency']['title'] not in exclude_orgs:
# output_list.append(listing_values)
# else:
# output_list.append(listing_values)
#
# return output_list
# def get_storefront_new(username, request):
# """
# Returns data for /storefront api invocation including:
# * recommended listings (max=10)
# * featured listings (max=12)
# * recent (new) listings (max=24)
# * most popular listings (max=36)
# Args:
# username
# Returns:
# {
# 'recommended': [Listing],
# 'featured': [Listing],
# 'recent': [Listing],
# 'most_popular': [Listing]
# }
# """
# extra_data = {}
# profile = models.Profile.objects.get(user__username=username)
# if profile.highest_role() == 'APPS_MALL_STEWARD':
# exclude_orgs = []
# elif profile.highest_role() == 'ORG_STEWARD':
# user_orgs = profile.stewarded_organizations.all()
# user_orgs = [i.title for i in user_orgs]
# exclude_orgs = [agency.title for agency in models.Agency.objects.exclude(title__in=user_orgs)]
# else:
# user_orgs = profile.organizations.all()
# user_orgs = [i.title for i in user_orgs]
# exclude_orgs = [agency.title for agency in models.Agency.objects.exclude(title__in=user_orgs)]
# current_listings = get_user_listings(username, request, exclude_orgs)
# # Get Recommended Listings for owner
# if profile.is_beta_user():
# recommendation_listing_ids, recommended_entry_data = get_recommendation_listing_ids(profile)
# listing_ids_list = set(recommendation_listing_ids)
# recommended_listings_raw = []
# for current_listing in current_listings:
# if current_listing['id'] in listing_ids_list:
# recommended_listings_raw.append(current_listing)
# recommended_listings = pipeline.Pipeline(recommend_utils.ListIterator(recommended_listings_raw),
# [pipes.JitterPipe(),
# pipes.ListingDictPostSecurityMarkingCheckPipe(username),
# pipes.LimitPipe(10)]).to_list()
# else:
# recommended_listings = []
# # Get Featured Listings
# featured_listings = pipeline.Pipeline(recommend_utils.ListIterator(current_listings),
# [pipes.ListingDictPostSecurityMarkingCheckPipe(username, featured=True),
# pipes.LimitPipe(12)]).to_list()
# # Get Recent Listings
# recent_listings = pipeline.Pipeline(recommend_utils.ListIterator(current_listings),
# [pipes.ListingDictPostSecurityMarkingCheckPipe(username),
# pipes.LimitPipe(24)]).to_list()
# most_popular_listings = pipeline.Pipeline(recommend_utils.ListIterator(sorted(current_listings, key=lambda k: (k['avg_rate'], ['total_reviews']), reverse=True)),
# [pipes.ListingDictPostSecurityMarkingCheckPipe(username),
# pipes.LimitPipe(36)]).to_list()
# # TODO 2PI filtering
# data = {
# 'recommended': recommended_listings,
# 'featured': featured_listings,
# 'recent': recent_listings,
# 'most_popular': most_popular_listings
# }
# return data, extra_data
def get_storefront_recommended(request_profile, pre_fetch=True, randomize_recommended=True, ordering=None):
"""
Get Recommended Listings for storefront
from ozpcenter.api.storefront.model_access import get_storefront_recommended
get_storefront_recommended(Profile.objects.first())
from ozpcenter import models
listing_ids_list = [1,5,6,7]
request_profile = Profile.objects.first()
"""
recommender_profile_result_set = RecommenderProfileResultSet.from_profile_instance(request_profile, randomize_recommended)
recommender_profile_result_set.process()
recommended_listings = recommender_profile_result_set.recommended_listings
extra_data = {}
extra_data['recommender_profile_result_set'] = recommender_profile_result_set
sorted_recommended_listings = custom_sort_listings(recommended_listings, ordering)
return sorted_recommended_listings, extra_data
def get_storefront_featured(request_profile, pre_fetch=True, ordering=None):
"""
Get Featured Listings for storefront
"""
username = request_profile.user.username
# Get Featured Listings
featured_listings_raw = models.Listing.objects.for_user_organization_minus_security_markings(
username).filter(
is_featured=True,
approval_status=models.Listing.APPROVED,
is_enabled=True,
is_deleted=False).order_by(F('featured_date').desc(nulls_last=True))
featured_listings = pipeline.Pipeline(recommend_utils.ListIterator([listing for listing in featured_listings_raw]),
[pipes.ListingPostSecurityMarkingCheckPipe(username)]).to_list()
sorted_featured_listings = custom_sort_listings(featured_listings, ordering)
return sorted_featured_listings
def get_storefront_recent(request_profile, pre_fetch=True, ordering=None):
"""
Get Recent Listings for storefront
"""
username = request_profile.user.username
# Get Recent Listings
if not ordering:
ordering = []
recent_listings_raw = models.Listing.objects.for_user_organization_minus_security_markings(
username).order_by('-approved_date').filter(
approval_status=models.Listing.APPROVED,
is_enabled=True,
is_deleted=False)
recent_listings = pipeline.Pipeline(recommend_utils.ListIterator([listing for listing in recent_listings_raw]),
[pipes.ListingPostSecurityMarkingCheckPipe(username),
pipes.LimitPipe(24)]).to_list()
sorted_recent_listings = custom_sort_listings(recent_listings, ordering)
return sorted_recent_listings
def get_storefront_most_popular(request_profile, pre_fetch=True, ordering=None):
"""
Get Most Popular Listings for storefront
"""
username = request_profile.user.username
    # Get most popular listings, ordered by average rating and then total reviews
most_popular_listings_raw = models.Listing.objects.for_user_organization_minus_security_markings(
username).filter(
approval_status=models.Listing.APPROVED,
is_enabled=True,
is_deleted=False).order_by('-avg_rate', '-total_reviews')
most_popular_listings = pipeline.Pipeline(recommend_utils.ListIterator([listing for listing in most_popular_listings_raw]),
[pipes.ListingPostSecurityMarkingCheckPipe(username),
pipes.LimitPipe(36)]).to_list()
sorted_most_popular_listings = custom_sort_listings(most_popular_listings, ordering)
return sorted_most_popular_listings
def get_storefront(request, pre_fetch=False, section=None, ordering=None):
"""
Returns data for /storefront api invocation including:
* recommended listings (max=10)
* featured listings (no limit)
* recent (new) listings (max=24)
* most popular listings (max=36)
NOTE: think about adding Bookmark status to this later on
Args:
        request
pre_fetch
section(str): recommended, featured, recent, most_popular, all
Returns:
{
'recommended': [Listing],
'featured': [Listing],
'recent': [Listing],
'most_popular': [Listing]
}
"""
try:
request_profile = models.Profile.objects.get(user__username=request.user)
randomize_recommended = str_to_bool(request.query_params.get('randomize', True))
section = section or 'all'
data = {}
extra_data = {}
if section == 'all' or section == 'recommended':
recommended_listings, extra_data = get_storefront_recommended(request_profile,
pre_fetch,
randomize_recommended,
ordering)
data['recommended'] = recommended_listings
else:
data['recommended'] = []
if section == 'all' or section == 'featured':
data['featured'] = get_storefront_featured(request_profile,
pre_fetch,
ordering)
else:
data['featured'] = []
if section == 'all' or section == 'recent':
data['recent'] = get_storefront_recent(request_profile,
pre_fetch,
ordering)
else:
data['recent'] = []
if section == 'all' or section == 'most_popular':
data['most_popular'] = get_storefront_most_popular(request_profile,
pre_fetch,
ordering)
else:
data['most_popular'] = []
except Exception:
# raise Exception({'error': True, 'msg': 'Error getting storefront: {0!s}'.format(str(e))})
        raise  # Should be caught by the Django framework
return data, extra_data
def custom_sort_listings(listings, ordering_str):
if not ordering_str:
return listings
ordering = [s.strip() for s in ordering_str.split(',')]
listing_ids = [x.id for x in listings]
sorted_listings = models.Listing.objects.filter(id__in=listing_ids)
for field in ordering:
if 'title' in field:
# case insensitive sort for title
if field.startswith('-'):
sorted_listings = sorted_listings.order_by(Lower(field[1:])).reverse()
else:
sorted_listings = sorted_listings.order_by(Lower(field))
else:
sorted_listings = sorted_listings.order_by(field)
return sorted_listings.all()
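# Illustrative sketch (hypothetical values): ``ordering_str`` is a
# comma-separated list of Listing field names, each optionally prefixed with
# '-' for descending order; fields containing 'title' are sorted
# case-insensitively, e.g.:
#     custom_sort_listings(listings, '-avg_rate,title')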
def values_query_set_to_dict(vqs):
return [item for item in vqs]
def get_metadata(username):
"""
Returns metadata including:
* categories
* organizations (agencies)
* listing types
* intents
* contact types
* work roles
Key: metadata
"""
try:
data = {}
data['categories'] = values_query_set_to_dict(models.Category.objects.all().values(
'id', 'title', 'description').order_by(Lower('title')))
data['listing_types'] = values_query_set_to_dict(models.ListingType.objects.all().values(
'title', 'description'))
data['contact_types'] = values_query_set_to_dict(models.ContactType.objects.all().values(
'id', 'name', 'required'))
data['intents'] = values_query_set_to_dict(models.Intent.objects.all().values(
'action', 'media_type', 'label', 'icon', 'id'))
data['work_roles'] = values_query_set_to_dict(models.WorkRole.objects.all().values(
'id', 'name'))
agency_listing_count_queryset = models.Listing.objects.for_user(username).filter(approval_status=models.Listing.APPROVED, is_enabled=True)
agency_listing_count_queryset = agency_listing_count_queryset.values('agency__id',
'agency__title',
'agency__short_name',
'agency__icon').annotate(listing_count=Count('agency__id')).order_by('agency__short_name')
data['agencies'] = [{'id': record['agency__id'],
'title': record['agency__title'],
'short_name': record['agency__short_name'],
'icon': record['agency__icon'],
'listing_count': record['listing_count']} for record in agency_listing_count_queryset]
for i in data['intents']:
# i['icon'] = models.Image.objects.get(id=i['icon']).image_url()
i['icon'] = '/TODO'
return data
except Exception as e:
return {'error': True, 'msg': 'Error getting metadata: {0!s}'.format(str(e))}
|
|
import bcrypt
import calendar
import os
import time
import model
WEB_SESSION_DB_NAME = 'websdb.dat'
DEFAULT_EXPIRY_SECS = 60 * 60
def ensure_string( s ):
if( isinstance( s, unicode ) ):
return s.encode( 'utf-8' )
else:
return str( s )
class WebSessionAccess:
def __init__( self ):
self.__db_session = model.DBSession()
def __update_sessions( self ):
now = calendar.timegm( time.gmtime() )
self.__db_session.query( model.Session ) \
.filter( model.Session.expires_time <= now ) \
.delete()
return now + DEFAULT_EXPIRY_SECS
def begin_session( self ):
return self.renew_session( None )
def renew_session( self, session_id ):
self.__db_session.execute( 'BEGIN EXCLUSIVE' )
try:
expires = self.__update_sessions()
if( session_id is not None ):
session = self.__db_session.query( model.Session ) \
.filter( model.Session.session_id == session_id ) \
.first()
else:
session = None
if( session is None ):
session = model.Session( expires )
self.__db_session.add( session )
session_id = session.session_id
self.__db_session.commit()
return session_id
except:
self.__db_session.rollback()
raise
def get_session_info( self, session_id ):
self.__db_session.execute( 'BEGIN EXCLUSIVE' )
try:
expires = self.__update_sessions()
session = self.__db_session.query( model.Session ) \
.filter( model.Session.session_id == session_id ) \
.first()
if( session is not None ):
session.expires_time = expires
access_level = session.access_level
if( session.user is not None ):
user_name = session.user.user_name
else:
user_name = None
else:
access_level = model.ACCESS_LEVEL_NONE
user_name = None
self.__db_session.commit()
return access_level, user_name
except:
self.__db_session.rollback()
raise
def login( self, session_id, user_name, password ):
self.__db_session.execute( 'BEGIN EXCLUSIVE' )
try:
expires = self.__update_sessions()
session = self.__db_session.query( model.Session ) \
.filter( model.Session.session_id == session_id ) \
.first()
user_info = self.__db_session.query( model.User ) \
.filter( model.User.user_name == user_name ) \
.first()
if( session is None or user_info is None ):
self.__db_session.commit()
return False
password = ensure_string( password )
password_hash = ensure_string( user_info.password_hash )
if( bcrypt.hashpw( password, password_hash ) == password_hash ):
session.user_id = user_info.user_id
session.access_level = user_info.access_level
session.expires_time = expires
self.__db_session.commit()
return True
else:
self.__db_session.commit()
return False
except:
self.__db_session.rollback()
raise
def logout( self, session_id ):
self.__db_session.execute( 'BEGIN EXCLUSIVE' )
try:
expires = self.__update_sessions()
session = self.__db_session.query( model.Session ) \
.filter( model.Session.session_id == session_id ) \
.first()
if( session is None ):
self.__db_session.commit()
return
session.user_id = None
session.access_level = model.ACCESS_LEVEL_NONE
session.expires_time = expires
self.__db_session.commit()
except:
self.__db_session.rollback()
raise
def create_user( self, user_name, password, access_level = model.ACCESS_LEVEL_NONE ):
self.__db_session.execute( 'BEGIN EXCLUSIVE' )
try:
user_info = self.__db_session.query( model.User ) \
.filter( model.User.user_name == user_name ) \
.first()
if( user_info is not None ):
self.__db_session.commit()
return False
password = ensure_string( password )
password_hash = bcrypt.hashpw( password, bcrypt.gensalt( 14 ) )
user_info = model.User( user_name, password_hash )
user_info.access_level = access_level
self.__db_session.add( user_info )
self.__db_session.commit()
return True
except:
self.__db_session.rollback()
raise
def drop_user( self, user_name ):
self.__db_session.execute( 'BEGIN EXCLUSIVE' )
try:
user_info = self.__db_session.query( model.User ) \
.filter( model.User.user_name == user_name ) \
.first()
if( user_info is None ):
self.__db_session.commit()
return
self.__db_session.query( model.Session ) \
.filter( model.Session.user_id == user_info.user_id ) \
.update( { model.Session.access_level : model.ACCESS_LEVEL_NONE,
model.Session.user_id : None } )
self.__db_session.delete( user_info )
self.__db_session.commit()
except:
self.__db_session.rollback()
raise
def set_password( self, user_name, password ):
self.__db_session.execute( 'BEGIN EXCLUSIVE' )
try:
user_info = self.__db_session.query( model.User ) \
.filter( model.User.user_name == user_name ) \
.first()
if( user_info is None ):
self.__db_session.commit()
return False
password = ensure_string( password )
user_info.password_hash = bcrypt.hashpw( password, bcrypt.gensalt( 14 ) )
self.__db_session.commit()
return True
except:
self.__db_session.rollback()
raise
def promote( self, user_name, access_level ):
self.__db_session.execute( 'BEGIN EXCLUSIVE' )
try:
user_info = self.__db_session.query( model.User ) \
.filter( model.User.user_name == user_name ) \
.first()
if( user_info is None ):
self.__db_session.commit()
return False
user_info.access_level = access_level
self.__db_session.query( model.Session ) \
.filter( model.Session.user_id == user_info.user_id ) \
.update( { model.Session.access_level : access_level } )
self.__db_session.commit()
return True
except:
self.__db_session.rollback()
raise
def init( library_path ):
model.init( os.path.join( library_path, WEB_SESSION_DB_NAME ) )
def dispose():
model.dispose()
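# Illustrative usage sketch (not part of the original module; the library path
# and credentials are placeholders):
#
#     init('/path/to/library')
#     access = WebSessionAccess()
#     access.create_user('alice', 's3cret')  # access_level defaults to model.ACCESS_LEVEL_NONE
#     session_id = access.begin_session()
#     if access.login(session_id, 'alice', 's3cret'):
#         access_level, user_name = access.get_session_info(session_id)
#     access.logout(session_id)
#     dispose()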
|
|
# Copyright (c) 2010-2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logging middleware for the Swift proxy.
This serves as both the default logging implementation and an example of how
to plug in your own logging format/method.
The logging format implemented below is as follows:
client_ip remote_addr datetime request_method request_path protocol
status_int referer user_agent auth_token bytes_recvd bytes_sent
client_etag transaction_id headers request_time source log_info
request_start_time request_end_time
These values are space-separated, and each is url-encoded, so that they can
be separated with a simple .split()
* remote_addr is the contents of the REMOTE_ADDR environment variable, while
client_ip is swift's best guess at the end-user IP, extracted variously
from the X-Forwarded-For header, X-Cluster-Ip header, or the REMOTE_ADDR
environment variable.
* source (swift.source in the WSGI environment) indicates the code
that generated the request, such as most middleware. (See below for
more detail.)
* log_info (swift.log_info in the WSGI environment) is for additional
information that could prove quite useful, such as any x-delete-at
value or other "behind the scenes" activity that might not
otherwise be detectable from the plain log information. Code that
wishes to add additional log information should use code like
``env.setdefault('swift.log_info', []).append(your_info)`` so as to
not disturb others' log information.
* Values that are missing (e.g. due to a header not being present) or zero
are generally represented by a single hyphen ('-').
The proxy-logging can be used twice in the proxy server's pipeline when there
is middleware installed that can return custom responses that don't follow the
standard pipeline to the proxy server.
For example, with staticweb, the middleware might intercept a request to
/v1/AUTH_acc/cont/, make a subrequest to the proxy to retrieve
/v1/AUTH_acc/cont/index.html and, in effect, respond to the client's original
request using the 2nd request's body. In this instance the subrequest will be
logged by the rightmost middleware (with a swift.source set) and the outgoing
request (with body overridden) will be logged by leftmost middleware.
Requests that follow the normal pipeline (use the same wsgi environment
throughout) will not be double logged because an environment variable
(swift.proxy_access_log_made) is checked/set when a log is made.
All middleware making subrequests should take care to set swift.source when
needed. With the doubled proxy logs, any consumer/processor of swift's proxy
logs should look at the swift.source field, the rightmost log value, to decide
if this is a middleware subrequest or not. A log processor calculating
bandwidth usage will want to only sum up logs with no swift.source.
"""
import time
from urllib import quote, unquote
from swift.common.swob import Request
from swift.common.utils import (get_logger, get_remote_client,
get_valid_utf8_str, config_true_value,
InputProxy, list_from_csv, get_policy_index)
QUOTE_SAFE = '/:'
class ProxyLoggingMiddleware(object):
"""
Middleware that logs Swift proxy requests in the swift log format.
"""
def __init__(self, app, conf, logger=None):
self.app = app
self.log_hdrs = config_true_value(conf.get(
'access_log_headers',
conf.get('log_headers', 'no')))
log_hdrs_only = list_from_csv(conf.get(
'access_log_headers_only', ''))
self.log_hdrs_only = [x.title() for x in log_hdrs_only]
# The leading access_* check is in case someone assumes that
# log_statsd_valid_http_methods behaves like the other log_statsd_*
# settings.
self.valid_methods = conf.get(
'access_log_statsd_valid_http_methods',
conf.get('log_statsd_valid_http_methods',
'GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS'))
self.valid_methods = [m.strip().upper() for m in
self.valid_methods.split(',') if m.strip()]
access_log_conf = {}
for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host',
'log_udp_port', 'log_statsd_host', 'log_statsd_port',
'log_statsd_default_sample_rate',
'log_statsd_sample_rate_factor',
'log_statsd_metric_prefix'):
value = conf.get('access_' + key, conf.get(key, None))
if value:
access_log_conf[key] = value
self.access_logger = logger or get_logger(access_log_conf,
log_route='proxy-access')
self.access_logger.set_statsd_prefix('proxy-server')
self.reveal_sensitive_prefix = int(
conf.get('reveal_sensitive_prefix', 16))
def method_from_req(self, req):
return req.environ.get('swift.orig_req_method', req.method)
def req_already_logged(self, env):
return env.get('swift.proxy_access_log_made')
def mark_req_logged(self, env):
env['swift.proxy_access_log_made'] = True
def obscure_sensitive(self, value):
if value and len(value) > self.reveal_sensitive_prefix:
return value[:self.reveal_sensitive_prefix] + '...'
return value
def log_request(self, req, status_int, bytes_received, bytes_sent,
start_time, end_time, resp_headers=None):
"""
Log a request.
:param req: swob.Request object for the request
:param status_int: integer code for the response status
:param bytes_received: bytes successfully read from the request body
:param bytes_sent: bytes yielded to the WSGI server
:param start_time: timestamp request started
:param end_time: timestamp request completed
:param resp_headers: dict of the response headers
"""
resp_headers = resp_headers or {}
req_path = get_valid_utf8_str(req.path)
the_request = quote(unquote(req_path), QUOTE_SAFE)
if req.query_string:
the_request = the_request + '?' + req.query_string
logged_headers = None
if self.log_hdrs:
if self.log_hdrs_only:
logged_headers = '\n'.join('%s: %s' % (k, v)
for k, v in req.headers.items()
if k in self.log_hdrs_only)
else:
logged_headers = '\n'.join('%s: %s' % (k, v)
for k, v in req.headers.items())
method = self.method_from_req(req)
end_gmtime_str = time.strftime('%d/%b/%Y/%H/%M/%S',
time.gmtime(end_time))
duration_time_str = "%.4f" % (end_time - start_time)
start_time_str = "%.9f" % start_time
end_time_str = "%.9f" % end_time
policy_index = get_policy_index(req.headers, resp_headers)
self.access_logger.info(' '.join(
quote(str(x) if x else '-', QUOTE_SAFE)
for x in (
get_remote_client(req),
req.remote_addr,
end_gmtime_str,
method,
the_request,
req.environ.get('SERVER_PROTOCOL'),
status_int,
req.referer,
req.user_agent,
self.obscure_sensitive(req.headers.get('x-auth-token')),
bytes_received,
bytes_sent,
req.headers.get('etag', None),
req.environ.get('swift.trans_id'),
logged_headers,
duration_time_str,
req.environ.get('swift.source'),
','.join(req.environ.get('swift.log_info') or ''),
start_time_str,
end_time_str,
policy_index
)))
# Log timing and bytes-transferred data to StatsD
metric_name = self.statsd_metric_name(req, status_int, method)
# Only log data for valid controllers (or SOS) to keep the metric count
# down (egregious errors will get logged by the proxy server itself).
if metric_name:
self.access_logger.timing(metric_name + '.timing',
(end_time - start_time) * 1000)
self.access_logger.update_stats(metric_name + '.xfer',
bytes_received + bytes_sent)
def statsd_metric_name(self, req, status_int, method):
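        # For example, a GET of /v1/AUTH_acc/cont/obj that returned 200 maps
        # to the metric name 'object.GET.200'; non-/v1/ requests fall back to
        # swift.source for the first component, or produce no metric at all.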
if req.path.startswith('/v1/'):
try:
stat_type = [None, 'account', 'container',
'object'][req.path.strip('/').count('/')]
except IndexError:
stat_type = 'object'
else:
stat_type = req.environ.get('swift.source')
if stat_type is None:
return None
stat_method = method if method in self.valid_methods \
else 'BAD_METHOD'
return '.'.join((stat_type, stat_method, str(status_int)))
def __call__(self, env, start_response):
if self.req_already_logged(env):
return self.app(env, start_response)
self.mark_req_logged(env)
start_response_args = [None]
input_proxy = InputProxy(env['wsgi.input'])
env['wsgi.input'] = input_proxy
start_time = time.time()
def my_start_response(status, headers, exc_info=None):
start_response_args[0] = (status, list(headers), exc_info)
def status_int_for_logging(client_disconnect=False, start_status=None):
# log disconnected clients as '499' status code
if client_disconnect or input_proxy.client_disconnect:
ret_status_int = 499
elif start_status is None:
ret_status_int = int(
start_response_args[0][0].split(' ', 1)[0])
else:
ret_status_int = start_status
return ret_status_int
def iter_response(iterable):
iterator = iter(iterable)
try:
chunk = iterator.next()
while not chunk:
chunk = iterator.next()
except StopIteration:
chunk = ''
for h, v in start_response_args[0][1]:
if h.lower() in ('content-length', 'transfer-encoding'):
break
else:
if not chunk:
start_response_args[0][1].append(('Content-Length', '0'))
elif isinstance(iterable, list):
start_response_args[0][1].append(
('Content-Length', str(sum(len(i) for i in iterable))))
resp_headers = dict(start_response_args[0][1])
start_response(*start_response_args[0])
req = Request(env)
# Log timing information for time-to-first-byte (GET requests only)
method = self.method_from_req(req)
if method == 'GET':
status_int = status_int_for_logging()
metric_name = self.statsd_metric_name(req, status_int, method)
if metric_name:
self.access_logger.timing_since(
metric_name + '.first-byte.timing', start_time)
bytes_sent = 0
client_disconnect = False
try:
while chunk:
bytes_sent += len(chunk)
yield chunk
chunk = iterator.next()
except GeneratorExit: # generator was closed before we finished
client_disconnect = True
raise
finally:
status_int = status_int_for_logging(client_disconnect)
self.log_request(
req, status_int, input_proxy.bytes_received, bytes_sent,
start_time, time.time(), resp_headers=resp_headers)
close_method = getattr(iterable, 'close', None)
if callable(close_method):
close_method()
try:
iterable = self.app(env, my_start_response)
except Exception:
req = Request(env)
status_int = status_int_for_logging(start_status=500)
self.log_request(
req, status_int, input_proxy.bytes_received, 0, start_time,
time.time())
raise
else:
return iter_response(iterable)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def proxy_logger(app):
return ProxyLoggingMiddleware(app, conf)
return proxy_logger
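# Illustrative sketch (not part of the middleware above): given the
# space-separated, url-encoded line format described in the module docstring,
# a log consumer could recover the individual fields like this. The field
# names follow the order used in log_request(), including the trailing
# policy_index that the code appends after request_end_time.
def _example_parse_access_log_line(line):
    from urllib import unquote
    field_names = (
        'client_ip', 'remote_addr', 'datetime', 'request_method',
        'request_path', 'protocol', 'status_int', 'referer', 'user_agent',
        'auth_token', 'bytes_recvd', 'bytes_sent', 'client_etag',
        'transaction_id', 'headers', 'request_time', 'source', 'log_info',
        'request_start_time', 'request_end_time', 'policy_index')
    # Each value was url-quoted before logging, so whitespace only separates
    # fields; missing values were logged as a single hyphen.
    values = [unquote(v) for v in line.split()]
    return dict((name, None if value == '-' else value)
                for name, value in zip(field_names, values))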
|
|
# -*- coding: utf-8 -*-
import socket
import struct
import errno
import json
from binascii import hexlify, unhexlify
from django.db import models
try:
from django.utils.timezone import now as dt_now
except ImportError:
import datetime
dt_now = datetime.datetime.now
from django_fields.fields import EncryptedCharField
import OpenSSL
try:
import gevent_openssl
GEVENT_OPEN_SSL=True
except:
GEVENT_OPEN_SSL=False
from .exceptions import NotificationPayloadSizeExceeded, InvalidPassPhrase
from .settings import get_setting
class BaseService(models.Model):
"""
A base service class intended to be subclassed.
"""
name = models.CharField(max_length=255)
hostname = models.CharField(max_length=255)
    PORT = 0  # Should be overridden by subclass
connection = None
def _connect(self, certificate, private_key, passphrase=None):
"""
Establishes an encrypted SSL socket connection to the service.
After connecting the socket can be written to or read from.
"""
# ssl in Python < 3.2 does not support certificates/keys as strings.
# See http://bugs.python.org/issue3823
        # Therefore pyOpenSSL, which lets us do this, is a dependency.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate)
args = [OpenSSL.crypto.FILETYPE_PEM, private_key]
if passphrase is not None:
args.append(str(passphrase))
try:
pkey = OpenSSL.crypto.load_privatekey(*args)
except OpenSSL.crypto.Error:
raise InvalidPassPhrase
context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
context.use_certificate(cert)
context.use_privatekey(pkey)
if GEVENT_OPEN_SSL:
self.connection = gevent_openssl.SSL.Connection(context, sock)
else:
self.connection = OpenSSL.SSL.Connection(context, sock)
self.connection.connect((self.hostname, self.PORT))
self.connection.set_connect_state()
self.connection.do_handshake()
def _disconnect(self):
"""
Closes the SSL socket connection.
"""
if self.connection is not None:
self.connection.shutdown()
self.connection.close()
class Meta:
abstract = True
class APNService(BaseService):
"""
    Represents an Apple Push Notification Service either for live
    or sandbox notifications.
`private_key` is optional if both the certificate and key are provided in
`certificate`.
"""
certificate = models.TextField()
private_key = models.TextField()
passphrase = EncryptedCharField(
null=True, blank=True, help_text='Passphrase for the private key',
block_type='MODE_CBC')
PORT = 2195
fmt = '!cH32sH%ds'
def _connect(self):
"""
Establishes an encrypted SSL socket connection to the service.
After connecting the socket can be written to or read from.
"""
return super(APNService, self)._connect(self.certificate, self.private_key, self.passphrase)
def push_notification_to_devices(self, notification, devices=None, chunk_size=100):
"""
        Sends the specified notification to devices.
        If `devices` is not supplied, all devices in the `APNService`'s
        device list will be sent the notification.
"""
if devices is None:
devices = self.device_set.filter(is_active=True)
self._write_message(notification, devices, chunk_size)
def _write_message(self, notification, devices, chunk_size):
"""
Writes the message for the supplied devices to
the APN Service SSL socket.
"""
if not isinstance(notification, Notification):
raise TypeError('notification should be an instance of ios_notifications.models.Notification')
if not isinstance(chunk_size, int) or chunk_size < 1:
raise ValueError('chunk_size must be an integer greater than zero.')
payload = notification.payload
# Split the devices into manageable chunks.
# Chunk sizes being determined by the `chunk_size` arg.
device_length = devices.count() if isinstance(devices, models.query.QuerySet) else len(devices)
chunks = [devices[i:i + chunk_size] for i in xrange(0, device_length, chunk_size)]
for index in xrange(len(chunks)):
chunk = chunks[index]
self._connect()
for device in chunk:
if not device.is_active:
continue
try:
self.connection.send(self.pack_message(payload, device))
except (OpenSSL.SSL.WantWriteError, socket.error) as e:
if isinstance(e, socket.error) and isinstance(e.args, tuple) and e.args[0] != errno.EPIPE:
raise e # Unexpected exception, raise it.
self._disconnect()
i = chunk.index(device)
self.set_devices_last_notified_at(chunk[:i])
                    # Start again from the next device: if a device no longer
                    # accepts push notifications from your app and you send one
                    # anyway, Apple immediately drops the connection to your
                    # APNs socket.
                    # http://stackoverflow.com/a/13332486/1025116
self._write_message(notification, chunk[i + 1:], chunk_size)
self._disconnect()
self.set_devices_last_notified_at(chunk)
if notification.pk or notification.persist:
notification.last_sent_at = dt_now()
notification.save()
def set_devices_last_notified_at(self, devices):
# Rather than do a save on every object,
# fetch another queryset and use it to update
# the devices in a single query.
# Since the devices argument could be a sliced queryset
# we can't rely on devices.update() even if devices is
# a queryset object.
Device.objects.filter(pk__in=[d.pk for d in devices]).update(last_notified_at=dt_now())
def pack_message(self, payload, device):
"""
Converts a notification payload into binary form.
"""
if len(payload) > 256:
raise NotificationPayloadSizeExceeded
if not isinstance(device, Device):
raise TypeError('device must be an instance of ios_notifications.models.Device')
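        # The simple (command 0) APNs binary frame packed below, per self.fmt
        # ('!cH32sH%ds'): a 1-byte command, a 2-byte token length (32), the
        # 32-byte binary device token, a 2-byte payload length, and then the
        # JSON payload itself.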
msg = struct.pack(self.fmt % len(payload), chr(0), 32, unhexlify(device.token), len(payload), payload)
return msg
def __unicode__(self):
return self.name
class Meta:
unique_together = ('name', 'hostname')
class Notification(models.Model):
"""
Represents a notification which can be pushed to an iOS device.
"""
service = models.ForeignKey(APNService)
message = models.CharField(max_length=200, blank=True, help_text='Alert message to display to the user. Leave empty if no alert should be displayed to the user.')
badge = models.PositiveIntegerField(null=True, blank=True, help_text='New application icon badge number. Set to None if the badge number must not be changed.')
silent = models.NullBooleanField(null=True, blank=True, help_text='set True to send a silent notification')
sound = models.CharField(max_length=30, blank=True, help_text='Name of the sound to play. Leave empty if no sound should be played.')
created_at = models.DateTimeField(auto_now_add=True)
last_sent_at = models.DateTimeField(null=True, blank=True)
custom_payload = models.CharField(max_length=240, blank=True, help_text='JSON representation of an object containing custom payload.')
loc_payload = models.CharField(max_length=240, blank=True, help_text="JSON representation of an object containing the localization payload.")
def __init__(self, *args, **kwargs):
self.persist = get_setting('IOS_NOTIFICATIONS_PERSIST_NOTIFICATIONS')
super(Notification, self).__init__(*args, **kwargs)
def __unicode__(self):
return u'%s%s%s' % (self.message, ' ' if self.message and self.custom_payload else '', self.custom_payload)
@property
def extra(self):
"""
The extra property is used to specify custom payload values
outside the Apple-reserved aps namespace
http://developer.apple.com/library/mac/#documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/ApplePushService/ApplePushService.html#//apple_ref/doc/uid/TP40008194-CH100-SW1
"""
return json.loads(self.custom_payload) if self.custom_payload else None
@extra.setter
def extra(self, value):
if value is None:
self.custom_payload = ''
else:
if not isinstance(value, dict):
raise TypeError('must be a valid Python dictionary')
self.custom_payload = json.dumps(value) # Raises a TypeError if can't be serialized
@property
def loc_data(self):
"""
        The loc_data property is used to specify localization parameters within the 'alert' aps key.
https://developer.apple.com/library/ios/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/Chapters/ApplePushService.html
"""
return json.loads(self.loc_payload) if self.loc_payload else None
def set_loc_data(self, loc_key, loc_args, action_loc_key=None):
if not isinstance(loc_args, (list, tuple)):
raise TypeError("loc_args must be a list or tuple.")
loc_data = {
"loc-key": unicode(loc_key),
"loc-args": [unicode(a) for a in loc_args],
}
if action_loc_key:
loc_data['action-loc-key'] = unicode(action_loc_key)
self.loc_payload = json.dumps(loc_data)
def push_to_all_devices(self):
"""
Pushes this notification to all active devices using the
notification's related APN service.
"""
self.service.push_notification_to_devices(self)
def is_valid_length(self):
"""
Determines if a notification payload is a valid length.
returns bool
"""
return len(self.payload) <= 256
@property
def payload(self):
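        # Builds the APNs JSON payload, e.g.
        # {"aps": {"alert": "...", "badge": 1, "sound": "default",
        #          "content-available": 1}}
        # with any keys from self.extra merged in at the top level.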
aps = {}
loc_data = self.loc_data
if loc_data:
aps['alert'] = loc_data
elif self.message:
aps['alert'] = self.message
if self.badge is not None:
aps['badge'] = self.badge
if self.sound:
aps['sound'] = self.sound
if self.silent:
aps['content-available'] = 1
message = {'aps': aps}
extra = self.extra
if extra is not None:
message.update(extra)
payload = json.dumps(message, separators=(',', ':'), ensure_ascii=False).encode('utf8')
return payload
class Device(models.Model):
"""
    Represents an iOS device with a unique token.
"""
token = models.CharField(max_length=64, blank=False, null=False)
is_active = models.BooleanField(default=True)
deactivated_at = models.DateTimeField(null=True, blank=True)
service = models.ForeignKey(APNService)
users = models.ManyToManyField(get_setting('AUTH_USER_MODEL'), null=True, blank=True, related_name='ios_devices')
added_at = models.DateTimeField(auto_now_add=True)
last_notified_at = models.DateTimeField(null=True, blank=True)
platform = models.CharField(max_length=30, blank=True, null=True)
display = models.CharField(max_length=30, blank=True, null=True)
os_version = models.CharField(max_length=20, blank=True, null=True)
def push_notification(self, notification):
"""
        Pushes an ios_notifications.models.Notification instance to the device.
For more details see http://developer.apple.com/library/mac/#documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/ApplePushService/ApplePushService.html
"""
if not isinstance(notification, Notification):
raise TypeError('notification should be an instance of ios_notifications.models.Notification')
self.service.push_notification_to_devices(notification, [self])
def __unicode__(self):
return self.token
class Meta:
unique_together = ('token', 'service')
class FeedbackService(BaseService):
"""
The service provided by Apple to inform you of devices which no longer have your app installed
and to which notifications have failed a number of times. Use this class to check the feedback
service and deactivate any devices it informs you about.
https://developer.apple.com/library/ios/#documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/CommunicatingWIthAPS/CommunicatingWIthAPS.html#//apple_ref/doc/uid/TP40008194-CH101-SW3
"""
apn_service = models.ForeignKey(APNService)
PORT = 2196
fmt = '!lh32s'
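    # Each feedback tuple is 38 bytes, matching fmt '!lh32s': a 4-byte
    # timestamp, a 2-byte token length and the 32-byte binary device token.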
def _connect(self):
"""
Establishes an encrypted socket connection to the feedback service.
"""
return super(FeedbackService, self)._connect(self.apn_service.certificate, self.apn_service.private_key, self.apn_service.passphrase)
def call(self):
"""
Calls the feedback service and deactivates any devices the feedback service mentions.
"""
self._connect()
device_tokens = []
try:
while True:
data = self.connection.recv(38) # 38 being the length in bytes of the binary format feedback tuple.
timestamp, token_length, token = struct.unpack(self.fmt, data)
device_token = hexlify(token)
device_tokens.append(device_token)
except OpenSSL.SSL.ZeroReturnError:
# Nothing to receive
pass
finally:
self._disconnect()
devices = Device.objects.filter(token__in=device_tokens, service=self.apn_service)
devices.update(is_active=False, deactivated_at=dt_now())
return devices.count()
def __unicode__(self):
return self.name
class Meta:
unique_together = ('name', 'hostname')
|
|
"""Node (User) Class for Paxos Calendar."""
import os
import sys
import time
import thread
import pickle
import socket
import logging
from Bully import bully_algorithm
from Appointment import Appointment
from Calendar import Calendar
from Proposer import Proposer
from Acceptor import Acceptor
class Node(object):
"""
Node class.
node_id: Unique ID used for Node identification as well as for
unique proposal number generation; int.
calendar: Calendar object which contains Appointment objects.
proposer: Proposer object used in Synod Algorithm; passed node_id so
it can create unique proposal numbers.
acceptor: Acceptor object used in Synod Algorithm.
    log: Dictionary of Calendar objects used in Paxos Algorithm;
        initially empty; the Synod Algorithm is used to fill each entry
        of the log, where integer keys represent slots and the values
        are the Calendars agreed upon via consensus.
leader: The current leader elected via the bully algorithm;
initially None and updated every ~6 seconds.
"""
_ip_filename = "./IP_translations.txt"
def __init__(self, node_id):
"""Construct a Node object."""
if type(node_id) != int:
raise TypeError("node_id must be an int")
if node_id < 0:
raise ValueError("node id must be a nonnegative integer")
try:
Node._ip_table = Node._make_ip_table()
except IOError:
            raise IOError("Node-to-IP translation file: " + Node._ip_filename + " not found.")
self._node_id = node_id
self._calendar = Calendar()
self._proposer = Proposer(node_id,self._ip_table)
self._acceptor = Acceptor(self._ip_table)
self._log = {}
self._leader = None
self._terminate = False
self._is_Node = True
def insert(self, appointment):
"""Insert an Appointment into this Node's Calendar."""
#First create new Calendar with new appointment
from copy import deepcopy
new_calendar = deepcopy(self._calendar)
new_calendar += appointment
if self._log.keys():
next_log_slot = max(self._log.keys()) + 1
else:
next_log_slot = 0
#Then ask leader to propose the new Calendar
try:
leader_IP, leader_TCP, leader_UDP = self._ip_table[self._leader]
proposal_message = pickle.dumps(
("propose", Calendar.serialize(new_calendar), next_log_slot))
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(proposal_message, (leader_IP, leader_UDP))
udp_socket.close()
except KeyError as excinfo:
print "Unable to find leader, waiting until one is selected..."
while self._leader == None:
pass
print "Found leader, continuing...\n"
self.insert(appointment)
def delete(self, appointment):
"""Delete an Appointment in this Node's Calendar."""
#First create new Calendar without appointment
from copy import deepcopy
new_calendar = Calendar()
for self_appointment in self._calendar:
if self_appointment != appointment:
new_calendar += deepcopy(self_appointment)
if self._log.keys():
next_log_slot = max(self._log.keys()) + 1
else:
next_log_slot = 0
#Then ask leader to propose the new Calendar
try:
leader_IP, leader_TCP, leader_UDP = self._ip_table[self._leader]
proposal_message = pickle.dumps(
("propose", Calendar.serialize(new_calendar), next_log_slot))
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(proposal_message, (leader_IP, leader_UDP))
udp_socket.close()
except KeyError as excinfo:
print "Unable to find leader, waiting until one is selected..."
while self._leader == None:
pass
print "Found leader, continuing...\n"
self.delete(appointment)
def paxos(self):
"""Engage this Node in Paxos algorithm."""
def _parse_message(message):
"""
Parse UDP pickled tuple message.
Self is available from closure.
"""
valid_message_types = [
"propose", "prepare", "promise", "accept", "ack", "commit"]
message_type, message_args = message[0], message[1:]
#syntactic checking
if message_type not in valid_message_types:
logging.error("Invalid message type")
return
if 3 <= len(message_args) <= 4:
arg_0_is_int = type(message_args[0]) == int
arg_0_is_calendar = hasattr(message_args[0], "_is_Calendar")
arg_1_is_calendar = hasattr(message_args[1], "_is_Calendar")
if not arg_0_is_calendar:
arg_0_is_None = message_args[0] == None
else:
arg_0_is_None = False
if not arg_1_is_calendar:
arg_1_is_None = message_args[1] == None
else:
arg_1_is_None = False
#handle prepare messages
if message_type == "propose":
if arg_0_is_calendar:
#If in this conditional, we are the leader.
#First we have to fill any empty log slots
#'''
log_slots = self._log.keys()
proposed_slot = message[2]
for i in range(proposed_slot):
if i not in self._log.keys():
#dummy_message = ("propose", Calendar(), i, self._node_id)
#self._proposer._command_queue.append(dummy_message)
#time.sleep(.1)
slot_calendar = self._acceptor._accVals[i]
self._log[i] = slot_calendar
#'''
#Then we can add this new proposal
self._proposer._command_queue.append(message)
else:
logging.error(
"Propose message must be of form "
"'propose' Calendar")
#handle prepare messages
elif message_type == "prepare":
if arg_0_is_int:
self._acceptor._command_queue.append(message)
else:
logging.error(
"Prepare message must be of form 'prepare' int")
#handle promise messages
elif message_type == "promise":
if (arg_0_is_int and arg_1_is_calendar) or (arg_0_is_None and arg_1_is_None):
self._proposer._command_queue.append(message)
else:
logging.error(
"Promise message must be of form "
"'promise' int Calendar")
#handle accept messages
elif message_type == "accept":
if arg_0_is_int and arg_1_is_calendar:
self._acceptor._command_queue.append(message)
else:
print ' '.join([str(i) for i in message])
logging.error(
"Accept message must be of form "
"'accept' int Calendar")
#handle ack messages
elif message_type == "ack":
if arg_0_is_int and arg_1_is_calendar:
self._proposer._command_queue.append(message)
else:
logging.error(
"Ack message must be of form "
"'ack' int Calendar")
#handle commit messages
elif message_type == "commit":
if arg_0_is_calendar:
self._acceptor._command_queue.append(message)
else:
logging.error(
"Commit message must be of form 'commit' Calendar")
else:
logging.error("Invalid message parameters")
return
def _learner(self):
"""Poll the Acceptor commits queue to update Node's log."""
while True:
if self._acceptor._commits_queue:
(log_slot, v) = self._acceptor._commits_queue.pop()
self._log[log_slot] = v
self._calendar = self._log[max(self._log.keys())]
if self._terminate:
break
time.sleep(.001)
def _shut_down(self):
"""."""
while True:
if self._terminate:
self._proposer._terminate = True
self._acceptor._terminate = True
break
def _do_paxos(self):
"""Do Paxos algorithm for this Node."""
#Begin running the Acceptor and Proposer in the background
thread.start_new_thread(self._proposer.start, ())
thread.start_new_thread(self._acceptor.start, ())
thread.start_new_thread(_learner, (self,))
thread.start_new_thread(_shut_down, (self,))
IP, UDP_PORT = '0.0.0.0', self._ip_table[self._node_id][2]
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((IP, UDP_PORT))
while True:
                data, addr = sock.recvfrom(4096)  # buffer size is 4096 bytes
if data == "terminate":
sock.close()
break
#Quick lookup of ID of sender from IP received
sender_ID = filter(
lambda row: row[1][0] == addr[0],
self._ip_table.items())[0][0]
message = pickle.loads(data)
#bind sender_ID to message
message = message + (sender_ID,)
                #construct deserialized version of message
new_message = []
for field in message:
if type(field) == str:
try:
deserialized_calendar = Calendar.deserialize(field)
new_message.append(deserialized_calendar)
except:
new_message.append(field)
else:
new_message.append(field)
new_message = tuple(new_message)
_parse_message(new_message)
thread.start_new_thread(_do_paxos, (self,))
def elect_leader(self, poll_time=6, timeout=3):
"""Engage this Node in leader selection."""
def _do_leader_election(self, poll_time, timeout):
"""Do leader election as new thread."""
IP, TCP_PORT = "0.0.0.0", self._ip_table[self._node_id][1]
recv_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
recv_socket.bind((IP, TCP_PORT))
#backlog; 1 for each Node besides self
recv_socket.listen(4)
prev_leader = None
while True:
thread.start_new_thread(bully_algorithm, (self, recv_socket, timeout))
time.sleep(poll_time)
if self._leader != prev_leader:
logging.debug("NEW LEADER IS: " + str(self._leader))
prev_leader = self._leader
if self._terminate:
break
recv_socket.close()
thread.start_new_thread(_do_leader_election, (self, poll_time, timeout))
def terminate(self):
"""Initiate termination protocol; close all threads."""
#Set termination field
self._terminate = True
#Send special termination message to self
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
my_ip_info = self._ip_table[self._node_id]
my_IP, my_UDP_PORT = my_ip_info[0], my_ip_info[2]
s.sendto("terminate", (my_IP, my_UDP_PORT))
s.close()
#Sleep for a second to ensure everything closes before main
time.sleep(1)
@staticmethod
def save(Node, path="./", filename="state.pkl"):
"""Save this Node's log and Acceptor to stable storage."""
if not hasattr(Node, "_is_Node"):
raise TypeError("Node parameter must be a Node object")
if type(filename) != str or type(path) != str:
raise TypeError("path and filename must be strings")
if filename[-4:] != ".pkl":
raise ValueError("filename must have .pkl extension")
if not os.path.exists(path):
raise ValueError("path provided does not exist")
import pickle
with open(path + filename, 'w') as f:
state = (Node._node_id, Node._log, Node._acceptor)
pickle.dump(state, f)
@staticmethod
def load(path="./", filename="state.pkl"):
"""
Load log and Acceptor from stable storage if path and filename exist.
"""
def _rebuild_calendar(node, log):
"""Rebuild the calendar of node by reconstructing it from log."""
#Get the latest entry in the log for most up-to-date Calendar
node._calendar = log[max(log.keys())]
if type(filename) != str or type(path) != str:
raise TypeError("path and filename must be strings")
if filename[-4:] != ".pkl":
raise ValueError("filename must have .pkl extension")
if not os.path.exists(path+filename):
raise ValueError("path provided does not exist")
with open(path + filename, 'r') as f:
state = pickle.load(f)
node_id, log, acceptor = state
node = Node(node_id)
node._log = log
node._acceptor = acceptor
_rebuild_calendar(node, log)
return node
@staticmethod
def _make_ip_table():
"""Create the ID-to-IP translation table used for socket connection."""
table = {}
import re
pattern = r"^\d+,\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3},\d{4},\d{5}$"
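        # A valid line therefore looks like "3,192.168.1.12,9003,10003",
        # i.e. ID,IP,TCP_PORT,UDP_PORT.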
with open(Node._ip_filename, "r") as f:
for translation in f:
match = re.match(pattern, translation.strip())
if not match:
raise ValueError(
"Every line in IP_translations.txt must be of "
"form ID,IP")
                ID, IP, TCP_PORT, UDP_PORT = translation.strip().split(',')
table[int(ID)] = [IP, int(TCP_PORT), int(UDP_PORT)]
return table
@staticmethod
def _parse_command(command, node):
"""Parse command provided, possibly involving provided node."""
def _do_show(argv, node):
"""Perform show command for debugging/user information."""
if len(argv) == 1:
raise ValueError(
"Invalid show argument; show needs argument "
"{calendar,log,acceptor,proposer,all}")
#Handle showing the calendar
if argv[1] == "calendar":
print node._calendar
#Handle showing the log
elif argv[1] == "log":
print "Log:"
#copy the log into a list ordered by slot number
ordered_slots = sorted(node._log.items(), key=lambda x: x[0])
#if -short flag not thrown, print entire log
if len(argv) == 2:
for slot in ordered_slots:
print "Slot " + str(slot[0]) + ' ' + str(slot[1])
#Short flag is thrown, just print names of Appointments in each
#Calendar slot
elif len(argv) == 3:
if argv[2] == "-s":
for slot in ordered_slots:
log_string = "Slot " + str(slot[0]) + " Calendar: \t"
log_string += ', '.join(
slot[1].get_appointment_names())
print log_string
print
else:
raise ValueError(
"Invalid show arguments; Only flags \"-s\" "
"permitted")
#Bad number of arguments to show log
else:
raise ValueError(
"Invalid show arguments; show log supports only a "
"single optional flag argument \"-s\"")
#Handle showing Node's Acceptor object
elif argv[1] == "acceptor":
print str(node._acceptor) + '\n'
#Handle showing Node's Proposer object
elif argv[1] == "proposer":
print str(node._proposer) + '\n'
#Handle printing entire state of Node
elif argv[1] == "all":
print "-" * 100
print "Node ID: " + str(node._node_id)
_do_show(['show', 'calendar'], node)
_do_show(['show', 'log', '-s'], node)
_do_show(['show', 'acceptor'], node)
_do_show(['show', 'proposer'], node)
print "-" * 100
else:
raise ValueError(
"Invalid show argument; show needs argument "
"{calendar,log,acceptor,proposer,all}")
def _parse_appointment(argv):
"""Try to parse an Appointment object from given argv."""
generic_error_msg = "Invalid command; Schedule and cancel " + \
"commands must be of form: \n" + \
"{schedule,cancel} [Appointment name] " + \
"(user1,user2,...usern) (start_time,end_time) [day]"
if len(argv) != 5:
raise ValueError(generic_error_msg)
name, participants, times, day = argv[1:]
participants = participants[1:-1].split(",")
try:
participants = [int(user[4:]) for user in participants]
except ValueError:
raise ValueError(
"Invalid command; participants must be of form "
"(user1,user2,...,usern)")
try:
start, end = times[1:-1].split(',')
except ValueError:
raise ValueError(
"Invalid command; times must be of form "
"(start_time,end_time)")
try:
return Appointment(name, day, start, end, participants)
except ValueError as excinfo:
raise ValueError("Invalid command; " + excinfo.message)
def _do_clear():
"""Perform clear command via ASCI escape code."""
print(chr(27) + "[2J")
argv = command.split()
if not argv:
return
#If command is to clear, clear the screen
if argv[0] == "clear":
_do_clear()
return
#If command was to show something, do show
if argv[0] == "show":
try:
_do_show(argv, node)
except ValueError as excinfo:
print excinfo
print
finally:
return
#If command is to schedule or cancel an Appointment, parse then
#initiate Synod algorithm
if argv[0] == "schedule":
try:
appointment = _parse_appointment(argv)
for user in appointment._participants:
node._ip_table[user]
#determine if the Appointment the user is trying to schedule
#is already in their Calendar or in conflict with some
#Appointment in their Calendar
conflict_cond = node._calendar._is_appointment_conflicting(
appointment)
in_cond = appointment in node._calendar
#if it's not already in the Calendar and not in conflict with
#any Appointment in it, begin Synod
if not conflict_cond and not in_cond:
node.insert(appointment)
else:
print "User scheduled appointment already in their " + \
"own Calendar or in conflict with their own " + \
"Calendar; ignoring.\n"
except KeyError:
print "User id is not in the IP table."
except ValueError as excinfo:
print excinfo
print
            #fail-safe catch in case something unexpected goes wrong and we don't know what
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()[:]
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
finally:
return
if argv[0] == "cancel":
try:
appointment = _parse_appointment(argv)
if appointment in node._calendar:
node.delete(appointment)
else:
print "User cancelled appointment not in their own " + \
"Calendar; ignoring.\n"
except ValueError as excinfo:
print excinfo
print
finally:
return
print "Invalid command; supported commands = {clear,show,schedule,cancel}"
print
def set_verbosity(verbose_level=3):
"""Set the level of verbosity of the Preprocessing."""
if not type(verbose_level) == int:
raise TypeError("verbose_level must be an int")
if verbose_level < 0 or verbose_level > 4:
raise ValueError("verbose_level must be between 0 and 4")
verbosity = [
logging.CRITICAL,
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG]
logging.basicConfig(
format='%(asctime)s:\t %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=verbosity[verbose_level])
def main():
"""Quick tests."""
"schedule yaboi (user0,user1,user2,user3) (4:00pm,6:00pm) Friday"
"schedule xxboi (user1,user3,user4) (1:30am,11:30am) Wednesday"
"schedule beez (user0,user1,user2,user3) (4:00pm,6:00pm) Saturday"
"schedule beez2 (user0,user1,user2,user3) (3:00pm,4:00pm) Saturday"
"schedule zo (user1,user2,user3) (12:30pm,1:30pm) Friday"
"schedule hamma (user1,user2,user3) (1:00am,1:30am) Friday"
"cancel yaboi (user0,user1,user2,user3) (4:00pm,6:00pm) Friday"
"cancel xxboi (user1,user3,user4) (1:30am,11:30am) Wednesday"
a1 = Appointment("zo","Friday","12:30pm","1:30pm", [1, 2, 8])
a2 = Appointment("xxboi","Wednesday","1:30am","11:30am", [1, 4, 5])
a3 = Appointment("lol","saturday","11:30am","12:30pm", [1])
a4 = Appointment("yeee","MondAy","11:30am","12:30pm", [1])
a5 = Appointment("lolololol","Thursday","11:30am","12:30pm", [1])
c = Calendar()
c1 = Calendar(a1)
c2 = Calendar(a1, a2)
c3 = Calendar(a1, a2, a3)
c4 = Calendar(a1, a2, a3, a4)
c5 = Calendar(a1, a2, a3, a4, a5)
set_verbosity(4)
N = Node(int(sys.argv[1]))
'''
N._log[0] = c1
N._log[1] = c2
N._log[2] = c3
N._log[3] = c4
N._log[4] = c5
'''
N._calendar = c
#try to load a previous state of this Node
#'''
try:
N = Node.load()
except ValueError:
pass
except IOError:
pass
#'''
N.elect_leader(poll_time=6, timeout=3)
N.paxos()
print("@> Node Started")
while True:
message = raw_input('')
if message == "quit":
Node.save(N)
N.terminate()
break
else:
Node._parse_command(message, N)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Supply Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3SupplyModel",
"supply_item_rheader",
"supply_item_pack_virtualfields"
]
import re
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
# @ToDo: Put the most common patterns at the top to optimise
um_patterns = ["\sper\s?(.*)$", # CHOCOLATE, per 100g
#"\((.*)\)$", # OUTWARD REGISTER for shipping (50 sheets)
"([0-9]+\s?(gramm?e?s?|L|g|kg))$", # Navarin de mouton 285 grammes
",\s(kit|pair|btl|bottle|tab|vial)\.?$", # STAMP, IFRC, Englishlue, btl.
"\s(bottle)\.?$", # MINERAL WATER, 1.5L bottle
",\s((bag|box|kit) of .*)\.?$", # (bag, diplomatic) LEAD SEAL, bag of 100
]
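# Illustrative sketch (not part of the original model): the um_patterns above
# are meant to be tried in order against an item name, with the first capture
# group taken as the unit of measure, e.g. "CHOCOLATE, per 100g" -> "100g".
# `re` is imported at the top of this module.
def example_um_from_name(name):
    """ Hypothetical helper showing how um_patterns could be applied """
    for um_pattern in um_patterns:
        match = re.search(um_pattern, name)
        if match:
            return match.group(1).strip()
    return None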
# =============================================================================
class S3SupplyModel(S3Model):
"""
Generic Supply functionality such as catalogs and items that is used
across multiple modules.
@ToDo: Break this class up where possible
- is this just supply_item_alt?
"""
names = ["supply_brand",
"supply_catalog",
"supply_item_category",
"supply_item",
"supply_item_entity",
"supply_catalog_item",
"supply_item_pack",
"supply_item_alt",
"supply_item_id",
"supply_item_entity_id",
"supply_item_pack_id",
"supply_item_represent",
"supply_item_add",
"supply_item_duplicate_fields",
"supply_item_pack_virtualfields",
]
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
organisation_id = self.org_organisation_id
organisation_represent = self.org_organisation_represent
# Shortcuts
add_component = self.add_component
comments = s3.comments
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
meta_fields = s3.meta_fields
super_link = self.super_link
# =====================================================================
# Brand
#
tablename = "supply_brand"
table = define_table(tablename,
Field("name", length=128,
notnull=True,
unique=True,
label = T("Name")),
comments(),
*meta_fields())
# CRUD strings
ADD_BRAND = T("Add Brand")
LIST_BRAND = T("List Brands")
crud_strings[tablename] = Storage(
title_create = ADD_BRAND,
title_display = T("Brand Details"),
title_list = LIST_BRAND,
title_update = T("Edit Brand"),
title_search = T("Search Brands"),
subtitle_create = T("Add New Brand"),
subtitle_list = T("Brands"),
label_list_button = LIST_BRAND,
label_create_button = ADD_BRAND,
label_delete_button = T("Delete Brand"),
msg_record_created = T("Brand added"),
msg_record_modified = T("Brand updated"),
msg_record_deleted = T("Brand deleted"),
msg_list_empty = T("No Brands currently registered"))
# Reusable Field
brand_id = S3ReusableField("brand_id", db.supply_brand, sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "supply_brand.id",
"%(name)s",
sort=True)),
represent = self.supply_brand_represent,
label = T("Brand"),
comment = DIV(A(ADD_BRAND,
_class="colorbox",
_href=URL(c="supply", f="brand",
args="create",
vars=dict(format="popup")),
_target="top",
_title=ADD_BRAND),
DIV( _class="tooltip",
_title="%s|%s" % (T("Brand"),
T("The list of Brands are maintained by the Administrators.")))
),
ondelete = "RESTRICT")
# =====================================================================
# Catalog (of Items)
#
tablename = "supply_catalog"
table = define_table(tablename,
Field("name", length=128,
notnull=True,
unique=True,
label = T("Name")),
organisation_id(),
comments(),
*meta_fields())
# CRUD strings
ADD_CATALOG = T("Add Catalog")
LIST_CATALOG = T("List Catalogs")
crud_strings[tablename] = Storage(
title_create = ADD_CATALOG,
title_display = T("Catalog Details"),
title_list = LIST_CATALOG,
title_update = T("Edit Catalog"),
title_search = T("Search Catalogs"),
subtitle_create = T("Add New Catalog"),
subtitle_list = T("Catalogs"),
label_list_button = LIST_CATALOG,
label_create_button = ADD_CATALOG,
label_delete_button = T("Delete Catalog"),
msg_record_created = T("Catalog added"),
msg_record_modified = T("Catalog updated"),
msg_record_deleted = T("Catalog deleted"),
msg_list_empty = T("No Catalogs currently registered"))
# Reusable Field
catalog_id = S3ReusableField("catalog_id", db.supply_catalog,
sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "supply_catalog.id",
"%(name)s",
sort=True)),
represent = lambda id: \
s3_get_db_field_value(tablename = "supply_catalog",
fieldname = "name",
look_up_value = id) or NONE,
default = 1,
label = T("Catalog"),
comment = DIV(A(ADD_CATALOG,
_class="colorbox",
_href=URL(c="supply", f="catalog",
args="create",
vars=dict(format="popup")),
_target="top",
_title=ADD_CATALOG),
DIV( _class="tooltip",
_title="%s|%s" % (T("Catalog"),
T("The list of Catalogs are maintained by the Administrators.")))
),
ondelete = "RESTRICT")
# Categories as component of Catalogs
add_component("supply_item_category", supply_catalog="catalog_id")
# Catalog Items as component of Catalogs
add_component("supply_catalog_item", supply_catalog="catalog_id")
# =====================================================================
# Item Category
#
tablename = "supply_item_category"
table = define_table(tablename,
catalog_id(),
#Field("level", "integer"),
Field("parent_item_category_id",
"reference supply_item_category",
label = T("Parent"),
ondelete = "RESTRICT"),
Field("code", length=16,
label = T("Code"),
required = True),
Field("name", length=128,
label = T("Name")
),
Field("can_be_asset", "boolean",
default=True,
readable=settings.has_module("asset"),
writable=settings.has_module("asset"),
label=T("Items in Category can be Assets")),
Field("is_vehicle", "boolean",
default=False,
readable=settings.has_module("vehicle"),
writable=settings.has_module("vehicle"),
label=T("Items in Category are Vehicles")),
comments(),
*meta_fields())
# CRUD strings
ADD_ITEM_CATEGORY = T("Add Item Category")
LIST_ITEM_CATEGORIES = T("List Item Categories")
crud_strings[tablename] = Storage(
title_create = ADD_ITEM_CATEGORY,
title_display = T("Item Category Details"),
title_list = LIST_ITEM_CATEGORIES,
title_update = T("Edit Item Category"),
title_search = T("Search Item Categories"),
subtitle_create = T("Add New Item Category"),
subtitle_list = T("Item Categories"),
label_list_button = LIST_ITEM_CATEGORIES,
label_create_button = ADD_ITEM_CATEGORY,
label_delete_button = T("Delete Item Category"),
msg_record_created = T("Item Category added"),
msg_record_modified = T("Item Category updated"),
msg_record_deleted = T("Item Category deleted"),
msg_list_empty = T("No Item Categories currently registered"))
# Reusable Field
item_category_requires = IS_NULL_OR(IS_ONE_OF(db,
"supply_item_category.id",
"%(name)s",
sort=True))
item_category_comment = DIV(A(ADD_ITEM_CATEGORY,
_class="colorbox",
_href=URL(c="supply", f="item_category",
args="create",
vars=dict(format="popup")),
_target="top",
_title=ADD_ITEM_CATEGORY),
DIV( _class="tooltip",
_title="%s|%s" % (T("Item Category"),
ADD_ITEM_CATEGORY)),
)
table.parent_item_category_id.requires = item_category_requires
table.parent_item_category_id.represent = self.item_category_represent
item_category_id = S3ReusableField("item_category_id",
db.supply_item_category,
sortby="name",
requires=item_category_requires,
represent=self.item_category_represent,
label = T("Category"),
comment = item_category_comment,
ondelete = "RESTRICT")
# Categories as component of Categories
add_component("supply_item_category",
supply_item_category="parent_item_category_id")
# =====================================================================
# Item
#
# These are Template items
# Instances of these become Inventory Items & Request items
#
tablename = "supply_item"
table = define_table(tablename,
Field("name",
required = True,
label = T("Name"),
length=128, notnull=True),
Field("code",
label = T("Code"),
length=16),
Field("um",
length=128,
label = T("Unit of Measure"),
notnull=True,
default = "piece"),
# Needed to auto-create a catalog_item
item_category_id("item_category_id",
requires = IS_NULL_OR(IS_ONE_OF(db,
"supply_item_category.id",
"%(name)s",
sort=True,
# With the filter no categories are displayed because (I assume) no catalog_id field is in this table
# Not certain if this is the correct action to take so would prefer another opinion, hence this comment
# filterby = "catalog_id",
# filter_opts = [1]
)
)
),
brand_id(),
Field("model",
label = T("Model/Type"),
length=128),
Field("year",
"integer",
label = T("Year of Manufacture")),
Field("weight",
"double",
label = T("Weight (kg)"),
),
Field("length",
"double",
label = T("Length (m)"),
),
Field("width",
"double",
label = T("Width (m)"),
),
Field("height",
"double",
label = T("Height (m)"),
),
Field("volume",
"double",
label = T("Volume (m3)"),
),
# These comments do *not* pull through to an Inventory's Items or a Request's Items
comments(),
*meta_fields()
)
# Categories in Progress
#table.item_category_id_0.label = T("Category")
#table.item_category_id_1.readable = table.item_category_id_1.writable = False
#table.item_category_id_2.readable = table.item_category_id_2.writable = False
# CRUD strings
ADD_ITEM = T("Add New Item")
LIST_ITEMS = T("List Items")
crud_strings[tablename] = Storage(
title_create = ADD_ITEM,
title_display = T("Item Details"),
title_list = LIST_ITEMS,
title_update = T("Edit Item"),
title_search = T("Search Items"),
subtitle_create = T("Add New Item"),
subtitle_list = T("Items"),
label_list_button = LIST_ITEMS,
label_create_button = ADD_ITEM,
label_delete_button = T("Delete Item"),
msg_record_created = T("Item added"),
msg_record_modified = T("Item updated"),
msg_record_deleted = T("Item deleted"),
msg_list_empty = T("No Items currently registered"),
msg_match = T("Matching Items"),
msg_no_match = T("No Matching Items")
)
# ---------------------------------------------------------------------
# Reusable Field
supply_item_id = S3ReusableField("item_id", db.supply_item, sortby="name", # 'item_id' for backwards-compatibility
requires = IS_ONE_OF(db, "supply_item.id",
self.supply_item_represent,
sort=True),
represent = self.supply_item_represent,
label = T("Item"),
widget = S3SearchAutocompleteWidget(
get_fieldname = "item_id",
tablename = "supply_catalog_item",
represent = lambda id: \
self.supply_item_represent(id,
show_link=False,
# @ToDo: this doesn't work
show_um=False,
none_value=None),
),
comment = DIV(A(ADD_ITEM,
_class="colorbox",
_href=URL(c="supply", f="item",
args="create",
vars=dict(format="popup")),
_target="top",
_title=ADD_ITEM),
DIV( _class="tooltip",
_title="%s|%s" % (T("Item"),
T("Type the name of an existing catalog item OR Click 'Add New Item' to add an item which is not in the catalog.")
)
)
),
ondelete = "RESTRICT")
# ---------------------------------------------------------------------
# Item Search Method
#
item_search = S3Search(
advanced=(S3SearchSimpleWidget(
name="item_search_text",
label=T("Search"),
comment=T("Search for an item by its code, name, model and/or comment."),
field=["code",
"name",
"model",
#"item_category_id$name",
"comments" ]
),
S3SearchOptionsWidget(
name="item_search_brand",
label=T("Brand"),
comment=T("Search for an item by brand."),
field=["brand_id"],
represent ="%(name)s",
cols = 3
),
S3SearchOptionsWidget(
name="item_search_year",
label=T("Year"),
comment=T("Search for an item by Year of Manufacture."),
field=["year"],
#represent ="%(name)s",
cols = 1
),
)
)
configure(tablename,
onaccept = self.supply_item_onaccept,
search_method = item_search)
# Catalog Items as component of Items
add_component("supply_catalog_item", supply_item="item_id")
# Packs as component of Items
add_component("supply_item_pack", supply_item="item_id")
if settings.get_supply_use_alt_name():
# Alternative Items as component of Items
add_component("supply_item_alt", supply_item="item_id")
# Inventory Items as component of Items
add_component("inv_inv_item", supply_item="item_id")
# Order Items as component of Items
add_component("inv_recv_item", supply_item="item_id")
# Procurement Plan Items as component of Items
add_component("proc_plan_item", supply_item="item_id")
# Request Items as component of Items
add_component("req_req_item", supply_item="item_id")
# =====================================================================
# Catalog Item
#
# This resource is used to link Items with Catalogs (n-to-n)
# Item Categories will also be catalog specific
#
script = SCRIPT("""
$(document).ready(function() {
S3FilterFieldChange({
'FilterField': 'catalog_id',
'Field': 'item_category_id',
'FieldPrefix': 'supply',
'FieldResource': 'item_category',
});
});""")
tablename = "supply_catalog_item"
table = define_table(tablename,
catalog_id(),
item_category_id("item_category_id",
#label = T("Group"),
# Filters item_category_id based on catalog_id
script = script,
),
supply_item_id(script = None), # No Item Pack Filter
comments(), # These comments do *not* pull through to an Inventory's Items or a Request's Items
*meta_fields())
# CRUD strings
ADD_ITEM = T("Add Catalog Item")
LIST_ITEMS = T("List Catalog Items")
crud_strings[tablename] = Storage(
title_create = ADD_ITEM,
title_display = T("Item Catalog Details"),
title_list = LIST_ITEMS,
title_update = T("Edit Catalog Item"),
title_search = T("Search Catalog Items"),
subtitle_create = T("Add Item to Catalog"),
subtitle_list = T("Catalog Items"),
label_list_button = LIST_ITEMS,
label_create_button = ADD_ITEM,
label_delete_button = T("Delete Catalog Item"),
msg_record_created = T("Catalog Item added"),
msg_record_modified = T("Catalog Item updated"),
msg_record_deleted = T("Catalog Item deleted"),
msg_list_empty = T("No Catalog Items currently registered"),
msg_match = T("Matching Catalog Items"),
msg_no_match = T("No Matching Catalog Items")
)
# ---------------------------------------------------------------------
# Catalog Item Search Method
#
def catalog_item_search_simple_widget(type):
return S3SearchSimpleWidget(
name="catalog_item_search_simple_%s" % type,
label=T("Search"),
comment= T("Search for an item by its code, name, model and/or comment."),
field=[#"comments", # Causes a major Join which kills servers
#"item_category_id$code", #These lines are causing issues...very slow - perhaps broken
#"item_category_id$name",
#"item_id$brand_id$name",
#"item_category_id$parent_item_category_id$code"
#"item_category_id$parent_item_category_id$name"
"item_id$code",
"item_id$name",
"item_id$model",
"item_id$comments"
],
)
catalog_item_search = S3Search(
simple=(catalog_item_search_simple_widget("simple") ),
advanced=(catalog_item_search_simple_widget("advanced"),
S3SearchOptionsWidget(
name="catalog_item_search_catalog",
label=T("Catalog"),
comment=T("Search for an item by catalog."),
field=["catalog_id"],
represent ="%(name)s",
cols = 3
),
S3SearchOptionsWidget(
name="catalog_item_search_category",
label=T("Category"),
comment=T("Search for an item by category."),
field=["item_category_id"],
represent = lambda id: \
self.item_category_represent(id, use_code=False),
cols = 3
),
S3SearchOptionsWidget(
name="catalog_item_search_brand",
label=T("Brand"),
comment=T("Search for an item by brand."),
field=["item_id$brand_id"],
represent ="%(name)s",
cols = 3
),
)
)
configure(tablename,
search_method = catalog_item_search)
# ---------------------------------------------------------------------
# Calculate once, instead of for each record
item_duplicate_fields = {}
for tablename in ["supply_item", "supply_catalog_item"]:
table = self[tablename]
item_duplicate_fields[tablename] = [field.name for field in table
if field.writable and
field.name != "id"]
configure("supply_item", deduplicate=self.item_duplicate)
configure("supply_catalog_item", deduplicate=self.item_duplicate)
configure("supply_item_category", deduplicate=self.item_duplicate)
# =====================================================================
# Item Pack
#
# Items can be distributed in different containers
#
tablename = "supply_item_pack"
table = define_table(tablename,
supply_item_id(empty=False),
Field("name", length=128,
default = T("piece"),
notnull=True, # Ideally this would reference another table for normalising Pack names
label = T("Name"),
),
Field("quantity", "double",
notnull=True,
label = T("Quantity"),
),
comments(),
*meta_fields())
# CRUD strings
ADD_ITEM_PACK = T("Add Item Pack")
LIST_ITEM_PACK = T("List Item Packs")
crud_strings[tablename] = Storage(
title_create = ADD_ITEM_PACK,
title_display = T("Item Pack Details"),
title_list = LIST_ITEM_PACK,
title_update = T("Edit Item Pack"),
title_search = T("Search Item Packs"),
subtitle_create = T("Add New Item Pack"),
subtitle_list = T("Item Packs"),
label_list_button = LIST_ITEM_PACK,
label_create_button = ADD_ITEM_PACK,
label_delete_button = T("Delete Item Pack"),
msg_record_created = T("Item Pack added"),
msg_record_modified = T("Item Pack updated"),
msg_record_deleted = T("Item Pack deleted"),
msg_list_empty = T("No Item Packs currently registered"))
# ---------------------------------------------------------------------
# Reusable Field
item_pack_id = S3ReusableField("item_pack_id", db.supply_item_pack,
sortby="name",
# Do not display any packs initially
# will be populated by S3FilterFieldChange
requires = IS_ONE_OF_EMPTY_SELECT(db,
"supply_item_pack.id",
self.item_pack_represent,
sort=True,
# @ToDo: Enforce "Required" for imports
# @ToDo: Populate based on item_id in controller instead of IS_ONE_OF_EMPTY_SELECT
# filterby = "item_id",
# filter_opts = [....],
),
represent = self.item_pack_represent,
label = T("Pack"),
comment = DIV(DIV( _class="tooltip",
_title="%s|%s" % (T("Item Packs"),
T("The way in which an item is normally distributed"))),
A( ADD_ITEM_PACK,
_class="colorbox",
_href=URL(c="supply", f="item_pack",
args="create",
vars=dict(format="popup")
),
_target="top",
_id = "item_pack_add",
_style = "display: none",
),
),
script = SCRIPT(
"""
S3FilterFieldChange({
'FilterField': 'item_id',
'Field': 'item_pack_id',
'FieldResource':'item_pack',
'FieldPrefix': 'supply',
'msgNoRecords': S3.i18n.no_packs,
'fncPrep': fncPrepItem,
'fncRepresent': fncRepresentItem
});"""),
ondelete = "RESTRICT")
#def record_pack_quantity(r):
# item_pack_id = r.get("item_pack_id", None)
# if item_pack_id:
# return s3_get_db_field_value(tablename = "supply_item_pack",
# fieldname = "quantity",
# look_up_value = item_pack_id)
# else:
# return None
configure(tablename,
deduplicate=self.item_pack_duplicate)
# Inventory items as component of Packs
add_component("inv_inv_item", supply_item_pack="item_pack_id")
# =====================================================================
# Alternative Items
#
# If the desired item isn't found, then these are designated as
# suitable alternatives
#
tablename = "supply_item_alt"
table = define_table(tablename,
supply_item_id(notnull=True),
Field("quantity",
"double",
label = T("Quantity"),
comment = DIV(_class = "tooltip",
_title = "%s|%s" %
(T("Quantity"),
T("The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item")
)
),
default = 1,
notnull=True),
supply_item_id("alt_item_id",
notnull=True),
comments(),
*meta_fields())
# CRUD strings
ADD_ALT_ITEM = T("Add Alternative Item")
LIST_ALT_ITEM = T("List Alternative Items")
crud_strings[tablename] = Storage(
title_create = ADD_ALT_ITEM,
title_display = T("Alternative Item Details"),
title_list = LIST_ALT_ITEM,
title_update = T("Edit Alternative Item"),
title_search = T("Search Alternative Items"),
subtitle_create = T("Add New Alternative Item"),
subtitle_list = T("Alternative Items"),
label_list_button = LIST_ALT_ITEM,
label_create_button = ADD_ALT_ITEM,
label_delete_button = T("Delete Alternative Item"),
msg_record_created = T("Alternative Item added"),
msg_record_modified = T("Alternative Item updated"),
msg_record_deleted = T("Alternative Item deleted"),
msg_list_empty = T("No Alternative Items currently registered"))
#def item_alt_represent(id):
# try:
# return supply_item_represent(db.supply_item_alt[id].item_id)
# except:
# return NONE
# Reusable Field - probably not needed
#item_alt_id = S3ReusableField("item_alt_id", db.supply_item_alt,
# sortby="name",
# requires = IS_NULL_OR(IS_ONE_OF(db,
# "supply_item_alt.id",
# item_alt_represent,
# sort=True)),
# represent = item_alt_represent,
# label = T("Alternative Item"),
# comment = DIV(DIV( _class="tooltip",
# _title="%s|%s" % (T("Alternative Item"),
# T("An item which can be used in place of another item"))),
# A( ADD_ALT_ITEM,
# _class="colorbox",
# _href=URL(# c="supply",
# f="item_alt",
# args="create",
# vars=dict(format="popup")
# ),
# _target="top",
# _id = "item_alt_add",
# _style = "display: none",
# ),
# ),
# ondelete = "RESTRICT")
# =====================================================================
# Item Super-Entity
#
# This super entity provides a common way to provide a foreign key to supply_item
# - it allows searching/reporting across Item types easily.
#
item_types = Storage(
inv_inv_item = T("Warehouse Stock"),
inv_recv_item = T("Order Item"),
proc_plan_item = T("Planned Procurement Item"),
)
tablename = "supply_item_entity"
table = self.super_entity(tablename, "item_entity_id", item_types,
# @ToDo: Make Items Trackable?
#super_link("track_id", "sit_trackable"),
#location_id(),
supply_item_id(represent = lambda id: \
self.supply_item_represent(id,
show_um=False,
show_link=True)),
item_pack_id(),
Field("quantity", "double",
label = T("Quantity"),
default = 1.0,
notnull = True),
*s3.ownerstamp()
)
# ---------------------------------------------------------------------
item_id = super_link("item_entity_id", "supply_item_entity",
#writable = True,
#readable = True,
#label = T("Status"),
#represent = item_represent,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3ItemAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Item"),
# T("Enter some characters to bring up a list of possible matches")))
)
# ---------------------------------------------------------------------
# Item Search Method
#
item_entity_search = S3Search(
# Advanced Search only
advanced=(S3SearchSimpleWidget(
name="item_entity_search_text",
label=T("Search"),
comment=T("Search for an item by text."),
field=[ "item_id$name",
#"item_id$item_category_id$name",
#"site_id$name"
]
),
S3SearchOptionsWidget(
name="item_entity_search_category",
label=T("Code Share"),
field=["item_id$item_category_id"],
represent ="%(name)s",
comment=T("If none are selected, then all are searched."),
cols = 2
),
#S3SearchOptionsWidget(
# name="item_entity_search_country",
# label=T("Country"),
# field=["country"],
# represent ="%(name)s",
# comment=T("If none are selected, then all are searched."),
# cols = 2
#),
))
# ---------------------------------------------------------------------
configure(tablename,
search_method = item_entity_search)
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
#
return Storage(
supply_item_id = supply_item_id,
supply_item_entity_id = item_id,
supply_item_pack_id = item_pack_id,
supply_item_represent = self.supply_item_represent,
supply_item_pack_virtualfields = supply_item_pack_virtualfields,
supply_item_duplicate_fields = item_duplicate_fields,
supply_item_add = self.supply_item_add,
supply_item_pack_represent = self.item_pack_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Return safe defaults for names in case the model is disabled """
supply_item_id = S3ReusableField("item_id", "integer",
writable=False,
readable=False)
item_id = S3ReusableField("item_entity_id", "integer",
writable=False,
readable=False)()
item_pack_id = S3ReusableField("item_pack_id", "integer",
writable=False,
readable=False)
supply_item_pack_virtualfields = None
return Storage(
supply_item_id = supply_item_id,
supply_item_entity_id = item_id,
supply_item_pack_id = item_pack_id,
supply_item_pack_virtualfields = supply_item_pack_virtualfields,
)
# -------------------------------------------------------------------------
@staticmethod
def supply_item_add (quantity_1, pack_quantity_1,
quantity_2, pack_quantity_2):
"""
Adds item quantities together, accounting for different pack
quantities.
Returned quantity is expressed in units of pack_quantity_1
Used by controllers/inv.py
"""
if pack_quantity_1 == pack_quantity_2:
# Faster calculation
return quantity_1 + quantity_2
else:
return ((quantity_1 * pack_quantity_1) +
(quantity_2 * pack_quantity_2)) / pack_quantity_1
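# A quick worked example of the pack-quantity arithmetic above (the numbers
# are purely illustrative): combining 5 boxes of 10 with 20 single pieces,
# expressed in boxes of 10, gives
# supply_item_add(5, 10, 20, 1) == ((5 * 10) + (20 * 1)) / 10 == 7.0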
# -------------------------------------------------------------------------
@staticmethod
def supply_brand_represent(id):
"""
"""
db = current.db
s3db = current.s3db
messages = current.messages
NONE = messages.NONE
UNKNOWN_OPT = messages.UNKNOWN_OPT
if not id:
return NONE
table = s3db.supply_brand
query = (table.id == id)
record = db(query).select(table.name,
limitby=(0, 1)).first()
if record:
return record.name
else:
return UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def item_category_represent(id, use_code=True):
"""
"""
db = current.db
s3db = current.s3db
cache=s3db.cache
if not id:
return current.messages.NONE
table = s3db.supply_item_category
represent = ""
item_category_id = id
while item_category_id:
query = (table.id == item_category_id)
r = db(query).select(table.code,
table.name,
table.parent_item_category_id,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1),
cache=cache).first()
if (r.code and use_code) or (not r.name and r.code):
represent_append = r.code
represent_join = "-"
else:
represent_append = r.name
represent_join = " - "
if represent:
represent = represent_join.join([represent_append,
represent])
else:
represent = represent_append
# Feed the loop
item_category_id = r.parent_item_category_id
return represent
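# Illustrative example (hypothetical data): a category with code "TNT" whose
# parent category has code "SHL" is represented as "SHL-TNT" when
# use_code=True, or as "Shelter - Tents" (joined on names) when use_code=False.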
# -------------------------------------------------------------------------
@staticmethod
def item_represent(id):
"""
Represent an item entity in option fields or list views
- unused, we use VirtualField instead
"""
T = current.T
db = current.db
s3db = current.s3db
messages = current.messages
NONE = messages.NONE
UNKNOWN_OPT = messages.UNKNOWN_OPT
item_str = NONE
item_table = s3db.supply_item_entity
if not id:
return item_str
if isinstance(id, Row) and "instance_type" in id:
# Do not repeat the lookup if already done by IS_ONE_OF
item = id
else:
item = db(item_table._id == id).select(item_table.instance_type,
limitby=(0, 1)).first()
if not item:
return item_str
instance_type = item.instance_type
if instance_type == "inv_inv_item":
item_str = T("In Stock")
elif instance_type == "inv_recv_item":
itable = s3db[instance_type]
rtable = s3db.inv_recv
query = (itable.item_entity_id == id) & \
(rtable.id == itable.recv_id)
eta = db(query).select(rtable.eta,
limitby=(0, 1)).first().eta
item_str = T("Due %(date)s") % dict(date=eta)
return item_str
# -------------------------------------------------------------------------
@staticmethod
def supply_item_represent(id,
# Needed for S3SearchAutocompleteWidget
show_um = False,
show_link = True,
none_value = None):
"""
Representation of a supply_item
"""
db = current.db
s3db = current.s3db
if not none_value:
none_value = current.messages.NONE
table = s3db.supply_item
btable = s3db.supply_brand
query = (table.id == id)
r = db(query).select(table.name,
table.model,
table.um,
btable.name,
left = btable.on(table.brand_id == btable.id),
limitby=(0, 1)).first()
if not r:
return none_value
represent = [r.supply_item.name,
r.supply_brand.name,
r.supply_item.model]
represent = [rep for rep in represent if rep]
represent = " - ".join(represent)
if show_um and r.supply_item.um:
represent = "%s (%s)" % (represent, r.supply_item.um)
local_request = current.request
local_request.extension = "html"
if show_link:
return A(represent,
_href = URL( r = local_request,
c = "supply",
f = "item",
args = [id]
)
)
else:
return represent
# ---------------------------------------------------------------------
@staticmethod
def item_pack_represent(id):
"""
"""
db = current.db
s3db = current.s3db
messages = current.messages
NONE = messages.NONE
table = s3db.supply_item_pack
query = (table.id == id) & \
(table.item_id == db.supply_item.id)
record = db(query).select(table.name,
table.quantity,
db.supply_item.um,
limitby = (0, 1)).first()
if record:
if record.supply_item_pack.quantity == 1:
return record.supply_item_pack.name
else:
return "%s (%s x %s)" % (record.supply_item_pack.name,
record.supply_item_pack.quantity,
record.supply_item.um)
else:
return NONE
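# Illustrative example (hypothetical data): a pack named "Box of 10" with
# quantity 10.0 for an Item whose UM is "piece" is represented as
# "Box of 10 (10.0 x piece)"; a pack with quantity 1 is shown by its name only.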
# -------------------------------------------------------------------------
@staticmethod
def item_duplicate(job):
"""
Callback function used to look for duplicates during
the import process
"""
tablename = job.tablename
s3db = current.s3db
if tablename == "supply_item":
job.data.update(item_um_from_name(job.data.name,
job.data.um)
)
if tablename in ["supply_item", "supply_catalog_item"]:
resource_duplicate(tablename, job,
s3db.supply_item_duplicate_fields[tablename])
elif tablename == "supply_item_category":
resource_duplicate("supply_item_category", job,
fields = ["catalog_id",
"parent_item_category_id",
"code"])
# -------------------------------------------------------------------------
@staticmethod
def item_pack_duplicate(job):
"""
Callback function used to look for duplicates during
the import process
"""
tablename = job.tablename
# An Item Pack is a duplicate if both the Name & Item are identical
resource_duplicate(tablename, job,
fields = ["name",
"item_id",
])
# -------------------------------------------------------------------------
@staticmethod
def supply_item_onaccept(form):
"""
Create a catalog_item for this item
Update the UM (Unit of Measure) in the supply_item_pack table
"""
db = current.db
s3db = current.s3db
request = current.request
settings = current.deployment_settings
item_id = form.vars.id
if isinstance(form, SQLFORM):
# Create a supply_catalog_item for items added via browser
table = s3db.supply_catalog_item
catalog_id = request.vars.catalog_id
ctable = s3db.supply_catalog
if not catalog_id:
# Use the Default Catalog
catalog = db(ctable.name == settings.get_supply_catalog_default()
).select(ctable.id, limitby=(0, 1)).first()
if catalog:
catalog_id = catalog.id
query = (table.item_id == item_id) & \
(table.deleted == False )
if not db(query).count():
table.insert(catalog_id = catalog_id,
item_category_id = form.vars.item_category_id,
item_id = item_id,
)
# Update UM
um = form.vars.um or s3db.supply_item.um.default
table = s3db.supply_item_pack
# Try to update the existing record
query = (table.item_id == item_id) & \
(table.quantity == 1) & \
(table.deleted == False)
if db(query).update(name = um) == 0:
# Create a new item pack
table.insert(item_id = item_id,
name = um,
quantity = 1)
# =============================================================================
def item_um_from_name(name, um):
"""
Retrieve the Unit of Measure from a name
"""
if not um:
for um_pattern in um_patterns:
m = re.search(um_pattern,name)
if m:
um = m.group(1).strip()
# Remove the UM from the name
name = re.sub(um_pattern, "", name)
# Remove trailing comma & whitespace
name = re.sub("(,)$", "", name).strip()
return dict(name = name,
um = um)
return {}
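# Hedged sketch of the expected behaviour (um_patterns is defined elsewhere in
# this file and its exact patterns are not reproduced here): assuming one of
# the patterns captures a trailing ", 500ml"-style suffix, then
# item_um_from_name("Bottled Water, 500ml", None)
# would return something like {"name": "Bottled Water", "um": "500ml"},
# while an empty dict is returned when no pattern matches or um is already set.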
# =============================================================================
def resource_duplicate(tablename, job, fields=None):
"""
This callback is called when importing supply items; it looks to
see whether the record being imported is a duplicate.
@param tablename: The name of the table being imported into
@param job: An S3ImportJob object which includes all the details
of the record being imported
@param fields: The fields to check for duplicates.
If not passed, they can be calculated - but this is inefficient
If the record is a duplicate then the job method is set to update
Rules for finding a duplicate:
- Look for a record with the same name, ignoring case
- the same UM
- and the same comments, if there are any
"""
db = current.db
# ignore this processing if the id is set
if job.id:
return
if job.tablename == tablename:
table = job.table
query = None
if not fields:
fields = [field.name for field in db[tablename]
if field.writable and field.name != "id"]
for field in fields:
value = field in job.data and job.data[field] or None
# Hack to get prepop working for Sahana Camp LA
if value:
try:
field_query = (table[field].lower() == value.lower())
except:
field_query = (table[field] == value)
# if not value:
# # Workaround
# if tablename == "supply_item_category" and field == "name":
# continue
# field_query = (table[field] == None)
# else:
# try:
# field_query = (table[field].lower() == value.lower())
# except:
# field_query = (table[field] == value)
if not query:
query = field_query
else:
query = query & field_query
if query:
_duplicate = db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
job.id = _duplicate.id
job.method = job.METHOD.UPDATE
# =============================================================================
def supply_item_rheader(r):
""" Resource Header for Items """
if r.representation == "html":
item = r.record
if item:
T = current.T
settings = current.deployment_settings
NONE = current.messages.NONE
tabs = [
(T("Edit Details"), None),
(T("Packs"), "item_pack"),
(T("Alternative Items"), "item_alt"),
(T("In Inventories"), "inv_item"),
(T("Requested"), "req_item"),
(T("In Catalogs"), "catalog_item"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR( TH("%s: " % table.name.label),
item.name,
),
TR( TH("%s: " % table.brand_id.label),
table.brand_id.represent(item.brand_id),
),
TR( TH("%s: " % table.model.label),
item.model or NONE,
),
),
rheader_tabs
)
return rheader
return None
# =============================================================================
class supply_item_pack_virtualfields(dict, object):
""" Virtual Field for pack_quantity """
def __init__(self,
tablename):
self.tablename = tablename
def pack_quantity(self):
if self.tablename == "inv_inv_item":
item_pack = self.inv_inv_item.item_pack_id
elif self.tablename == "req_req_item":
item_pack = self.req_req_item.item_pack_id
elif self.tablename == "req_commit_item":
item_pack = self.req_commit_item.item_pack_id
elif self.tablename == "inv_recv_item":
item_pack = self.inv_recv_item.item_pack_id
elif self.tablename == "inv_send_item":
item_pack = self.inv_send_item.item_pack_id
else:
item_pack = None
if item_pack:
return item_pack.quantity
else:
return None
# END =========================================================================
|
|
#!/system/xbin/env python3
# {{{
# Added changes necessary to make a full device manifest run to completion
# on a Samsung SM-P607T lt03ltetmo with python-3.4.2
# }}}
# {{{
# 20151125.224423.4390855 Added changes to compensate for failed
# surrogateescape conversion of filenames in the recycle bin.
# Since it is possible to have bogus filenames that are outside
# the utf-8 codepoint set, I changed the codepoint set to utf-16.
# That was not sufficient to fix the problem, so I changed from
# surrogateescape conversion to xmlcharrefreplace. That did the
# trick. On the way I added some exception handlers (try/except)
# blocks. I also added remote debug stuff.
#
# pycharmRemoteDebugPath = 'C:\\bin\\PyCharmPro\\debug-eggs\\pycharm-debug-py3k'
# if os.path.exists(pycharmRemoteDebugPath):
# if False:
# sys.path.append(pycharmRemoteDebugPath)
# import pydevd
# pydevd.settrace('localhost', port=5656, stdoutToServer=True, stderrToServer=True)
# it seems to work splendidly.
#
# Also fixed skiplist handling and added :
# for url2skip in skiplist:
# if re.match(url2skip, urlPath, re.IGNORECASE):
# osList = skiplist[url2skip]
# if platformType in osList:
# raise SkipThisDirectory
#
# Also added writeObituary function and
# class SkipThisDirectory
# class testException
#
#
# 20151127.233840.9536542
# Clear out code inspection Items
#
#
# }}}
# {{{
# Recursively descend the current directory. Use minimum memory resources.
# Do a merge sort on the resulting output using the full path name as the
# sort key.
#
# 20151201.181122.8576819
# 20151201.192009.2997008
# 20151208.185909.3024239
# }}}
# {{{ imports
import sys
import time
import re
import os
import stat
import urllib.request
import errno
import datetime
import shutil
import socket
import json
import subprocess
import platform
import inspect
import importlib
import importlib.util
# }}}
# {{{ `itemsPerCarton` is the number of lines in each merge file.
# The number of cartons will be the total number of directory
# elements divided by the number `itemsPerCarton`.
# The memory consumed by the script in each partial sort
# increases as `itemsPerCarton` is increased.
# The memory consumed by the script in the final merge
# increases as `itemsPerCarton` is decreased, but since
# the merge is generally less memory intensive, memory
# is not generally the limiting factor for a merge. OTOH
# if `itemsPerCarton` were set to 1, then the merge memory-usage
# would essentially be the same as if `itemsPerCarton` were
# greater than the total number of items to be sorted.
# See `Art of Computer Programming, Volume 3: Sorting
# and Searching` ISBN-13: 978-0201896855
itemsPerCarton = 8191
# }}}
# {{{ `topNode`
# start directory descend here
#
topNode = ''
# topNode = os.getcwd()
# }}}
# {{{ `encodeFix`
# default error handler for encoding exceptions
# surrogateescape not reliable enough
encodeFix = 'xmlcharrefreplace' # PEP 383 [ http://j.mp/1OwrztW ]
# }}}
# {{{ `fsEncoding`
# default file system encoding for this system
#
# fsEncoding = 'utf-8' # sys.getfilesystemencoding()
# fsEncoding = sys.getfilesystemencoding()
fsEncoding = 'utf-16' # sys.getfilesystemencoding()
# }}}
# {{{ `pantry` is a dictionary whose
# keys are the names of all the
# merge files
#
pantry = {}
# }}}
# {{{ `carton` is an array which contains the actual
# directory listing data for each merge file
#
carton = []
# }}}
# {{{ `cartonIdx` contains the fullpath names of all the
# carton files as `keys` and the number of entries
# in each carton as `values`.
#
cartonIdx = {}
# }}}
# {{{ `dfsIndex` is a unique base-56 encoded integer
# associated with each unique directory element
# that makes possible, putting directory entries
# back in (their original pre-sorted) order.
#
dfsIndex = -1
# }}}
# {{{ `nullstr` syntactic sugar for ``
#
nullstr = ''
# }}}
# {{{ `distinctHostName` not `localhost` ( I Hope )
#
distinctHostName = None
# }}}
# {{{ `fLog` global file handle for log output`
#
fLog = None
# }}}
# {{{ `fRaw` global file handle for raw toc output`
#
fRaw = None
# }}}
# {{{ `fSrt` global file handle for sorted toc output`
#
fSrt = None
# }}}
# {{{ `space` syntactic sugar for ` `
#
space = str(chr(32))
# }}}
# {{{ `Tab` syntactic sugar for tab
#
tabChr = str(chr(9))
# }}}
# {{{ `ctrlA` syntactic sugar for control-a
#
ctrlA = str(chr(1))
# }}}
# {{{ Number Base Alphabets
# I use base 56 for Inode Numbers on NTFS
# because the inodes can get pretty huge
# I only track INodes so I can track hard
# links on my disk
#
B56 = "0123456789ABCDEFGHJKMNPQRSTUVWXYZabcdefghjkmnpqrstuvwxyz"
#
# I use 3 hex digits to number my carton
# files ( most people call them buckets
# but in my mind the bucket does not belong
# in a pantry whereas a carton just might)
#
B16 = "0123456789ABCDEF"
#
# finally I use simple base 10 for nix inodes
# but since I select these encodings via
# the iNodeBase dictionary, the inline logic is
# same for android and windows
#
B10 = "0123456789"
# }}}
# {{{ `platformType` in linux,android,win32 ( I Hope )
#
platformType = 'android'
# }}}
# {{{ `elementTagHash`
# an element is either a...
# `F`ile
# `D`irectory
# `L`ink
# `U`nknown
#
elementTagHash = {
0b000: 'U',
0b001: 'F',
0b010: 'D',
0b011: 'D',
0b100: 'LU',
0b101: 'LF',
0b110: 'LD',
0b111: 'LD'
} # }}}
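# The 3-bit key is built later from os.path.isfile / os.path.isdir /
# os.path.islink (see the main inner loop), e.g. a symlink pointing at a
# directory gives isfile=0, isdir=1, islink=1 -> 0b110 -> 'LD', while a plain
# regular file gives 0b001 -> 'F'.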
# {{{ regular expressions
#
# documentation claims this is
# unnecessary. As of 20140325
# python installed on host fultonJSheen
# does not treat <tab> <newline> <cr>
# as whitespace
#
WS = ' \t\n\r'
leadingDrive = re.compile(r"""
\A
( [a-z] )
:
[/\\]+
""".strip(WS), re.VERBOSE | re.IGNORECASE)
leadingSlash = re.compile(r"""
\A
[/\\]+
""".strip(WS), re.VERBOSE | re.IGNORECASE)
trailingSlash = re.compile(r"""
[/\\]+
\Z
""".strip(WS), re.VERBOSE | re.IGNORECASE)
anySlashes = re.compile(r"""
[/\\]+
""".strip(WS), re.VERBOSE | re.IGNORECASE)
anyPeriods = re.compile(r"""
[.]+
""".strip(WS), re.VERBOSE | re.IGNORECASE)
allDigits = re.compile(r"""
\A
\d+
\Z
""".strip(WS), re.VERBOSE | re.IGNORECASE)
platformID = {
'Linux-3.4.0-14.10-cm-qs600-3.2-g68558b0-armv7l-with-Linaro-14.04-trusty': 'linux',
'Linux-3.5.0-54-generic-x86_64-with-Ubuntu-12.04-precise' : 'linux',
'Linux-3.4.0-453951-armv7l-with' : 'android',
'Linux-3.4.0-1433887-armv7l-with' : 'android',
'Windows-7-6.1.7601-SP1' : 'win32',
'Windows-10.0.10586' : 'win32',
}
EXTERNAL_STORAGE = os.getenv('EXTERNAL_STORAGE')
if not EXTERNAL_STORAGE:
EXTERNAL_STORAGE = nullstr
SECONDARY_STORAGE = os.getenv('SECONDARY_STORAGE')
if not SECONDARY_STORAGE:
SECONDARY_STORAGE = nullstr
skiplist = {
'/proc' : ['linux', 'android'],
'///C:/%24recycle.bin': ['win32'],
'/sys/devices' : ['linux', 'android'],
'/dev' : ['linux', 'android'],
'/sys/dev' : ['linux', 'android'],
}
failSafeDirDict = {
'linux' : os.path.expanduser('~'),
'android': EXTERNAL_STORAGE.split(':')[0],
'win32' : os.getenv('TEMP'),
}
scratchDirDict = {
'linux' : os.path.expanduser('~') + '/00/log/tox',
'android': EXTERNAL_STORAGE.split(':')[0] + '/00/log/tox',
'win32' : 'C:/etc/tox',
}
localDirDict = {
'linux' : os.path.expanduser('~') + '/00/log/tox',
'android': SECONDARY_STORAGE.split(':')[0] + '/00/log/tox',
'win32' : 'C:/etc/tox',
}
drpBxDirDict = {
'linux' : os.path.expanduser('~') + '/Dropbox/tox',
'android': '/mnt/sdcard/00/log/tox',
'win32' : 'C:/drpbx/Dropbox/tox',
}
topNodeDict = {
'linux' : '/',
'android': '/',
'win32' : 'C:/',
}
iNodeFldWdth = {
'linux' : 10,
'android': 10,
'win32' : 12,
}
iNodeBase = {
'linux' : B10,
'android': B10,
'win32' : B56,
}
# }}}
class CannotCreateDirectory(Exception): # {{{
pass
# }}}
class SkipThisDirectory(Exception): # {{{
pass
# }}}
class testException(Exception): # {{{
pass
# }}}
class InputMergeObj(object): # {{{
__slots__ = [
'__currentLine',
'__errMsg',
'__exc00',
'__fullPath',
'__H',
'__metaData',
'__outData',
'__N',
'__lineKey',
]
def __init__(self, file_name: str):
self.__N = file_name
self.__lineKey = ctrlA
try:
self.__outData = None
self.__fullPath = None
self.__metaData = None
#
# at object instantiation read the first
# line in the text file and extract the
# sort key ( full path name )
#
self.__H = open(file_name, 'rt', encoding=fsEncoding)
self.__currentLine = self.__H.readline()
if self.__currentLine:
self.__lineKey = self.__currentLine.split(ctrlA)[-1]
except (FileNotFoundError, OSError) as __exc00:
__errMsg = "** <openInFile> == "
__errMsg += file_name
__errMsg += " cannot read this file **\n\n"
__errMsg += str(__exc00)
writeObituary(inspect.currentframe(), __errMsg)
sys.exit("open the pod bay doors hal")
def nxtLine(self):
self.__lineKey = ctrlA # default the key to assume EOF
if self.__currentLine:
#
# the current line is not empty
# so the end of file has not been
# reached
#
self.__currentLine = self.__H.readline()
if self.__currentLine:
self.__lineKey = self.__currentLine.split(ctrlA)[-1]
else:
self.__H.close()
return self.__lineKey
def cleanCurrentLine(self):
#
# clean line contains no ctrlA characters
# all fields are space separated except the
# last field which is separated with a tab
# character
#
self.__outData = self.__currentLine.split(ctrlA)
self.__fullPath = self.__outData.pop()
self.__metaData = space.join(self.__outData)
return tabChr.join([self.__metaData, self.__fullPath])
@property
def N(self):
return self.__N
@property
def lineKey(self):
return self.__lineKey
# }}}
class FsysElement: # {{{
def __init__(self):
self.Size = 0
self.MTime = time.gmtime(0)
self.TagKey = 0
self.Inode = 0
self.Tag = ' U'
self.LinkPtr = nullstr
self.dfsIndex = str(int_encode(dfsIndex, B56)).zfill(4)
# }}}
def microSecTS(): # {{{
return datetime.datetime.now().strftime('T%Y%m%d.%H%M%S.%f' + space)
# }}}
def createStamp(): # {{{
return time.strftime('.%Y%m%d.%H%M%S.', time.localtime())
# }}}
def mkdir_p(path): # {{{
#
# I clipped this from somewhere.
# It seems to work, but I don't
# remember where I got it.
#
try:
os.makedirs(path)
except OSError as exc00: # Python >2.5
if exc00.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise CannotCreateDirectory
# }}}
def int_encode(num, alphabet=B56): # {{{
#
# Encode a number in Base X
#
# `num`: The number to encode
# `alphabet`: The alphabet to use for encoding
#
if num == 0:
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num //= base
arr.append(alphabet[rem])
arr.reverse()
return nullstr.join(arr)
# }}}
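# Quick sanity examples, derivable from the alphabets above:
# int_encode(255, B16) == 'FF'
# int_encode(0, B56) == '0'
# int_encode(56, B56) == '10' (one digit roll-over in base 56)
# int_decode() further below reverses this, e.g. int_decode('FF', B16) == 255.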
def getNetHostName4Android(): # {{{
#
# Getprop returns a name like:
#
# `android-1f6e8ad67260efb1`
# or
# `kindle-91cf73cdf`
# or
# `` ( for python on gs4 android 4.2.2 )
#
# I call this a hostile host name
#
byteRslt = None
errMesg = None
errCode = None
droidNetHostName = 'localhost'
try:
byteRslt = subprocess.check_output(['getprop', 'net.hostname'])
except subprocess.CalledProcessError as exc01:
errMesg = exc01.output # Output generated before error
errCode = exc01.returncode # Return error code
if errCode:
print('(**' + errMesg + '**)')
else:
if not byteRslt:
pass
elif byteRslt.rstrip().lstrip():
droidNetHostName = byteRslt.decode(fsEncoding).rstrip().lstrip()
else:
pass
return droidNetHostName
# }}}
def getFriendlyHostName4Android(): # {{{
#
# getFriendlyHostName4Android
# returns a name like:
#
# `myDogFido`
# or
# `myCatFluffy`
#
# I call this a friendly host name
#
hostileDroidHostName = getNetHostName4Android()
retVal = hostileDroidHostName
hostNameJsonFile = '/sdcard/etc/androidHosts.json'
# pdb.set_trace()
hostNameMap = {}
if os.path.isfile(hostNameJsonFile):
if os.access(hostNameJsonFile, os.R_OK):
try:
fH = open(hostNameJsonFile, 'rt', encoding=fsEncoding)
hostNameMap = json.load(fH)
except FileNotFoundError:
pass
if hostileDroidHostName in hostNameMap:
retVal = hostNameMap[hostileDroidHostName]
return retVal
# }}}
def establishDestinationDir(dirHash): # {{{
# {{{ dst Directory Logic
directoryPath = dirHash[platformType]
if os.path.exists(directoryPath):
alternatePath = directoryPath
altCount = 0
while os.path.isfile(alternatePath):
#
# preferred directoryPath exists as a file
#
alternatePath = directoryPath + "." + str(altCount)
altCount += 1
if altCount:
#
# Create alternate dst directory
#
directoryPath = alternatePath
try:
mkdir_p(directoryPath)
except CannotCreateDirectory:
directoryPath = failSafeDirDict[platformType]
else:
try:
mkdir_p(directoryPath)
except CannotCreateDirectory:
directoryPath = failSafeDirDict[platformType]
if not os.path.isdir(directoryPath):
errMsg000 = "<directoryPath> == "
errMsg000 += directoryPath
errMsg000 += " must be a directory"
sys.exit(errMsg000)
else:
if not os.access(directoryPath, os.W_OK):
errMsg000 = "<directoryPath> == "
errMsg000 += directoryPath
errMsg000 += " must be a writable directory"
sys.exit(errMsg000)
# }}}
return directoryPath
# }}}
def openOutFile(fN): # {{{
try:
handle = open(fN, 'wt', encoding=fsEncoding)
except OSError as exc_openwt_fail00:
errMsg010 = "** <openOutFile> == "
errMsg010 += fN
errMsg010 += " cannot write to this file **\n\n"
errMsg010 += str(exc_openwt_fail00)
writeObituary(inspect.currentframe(), errMsg010)
sys.exit("open the pod bay doors hal")
return handle
# }}}
def openInFile(fN): # {{{
try:
handle = open(fN, 'rt', encoding=fsEncoding)
except FileNotFoundError as exc_openrd_fail:
errMsg020 = "** <openInFile> == "
errMsg020 += fN
errMsg020 += " cannot read this file **\n\n"
errMsg020 += str(exc_openrd_fail)
writeObituary(inspect.currentframe(), errMsg020)
sys.exit("open the pod bay doors hal")
return handle
# }}}
def nextOutFile(nameType, stamp): # {{{
suffix = ".txt"
stringMatches = allDigits.match(nameType)
if stringMatches:
#
# A name type of all digits
# is a temporary carton file.
#
# Cartons fill a pantry.
#
# All the cartons in the pantry
# eventually get placed into a
# single crate (a srt.txt file).
#
nameType = int_encode(int(nameType), B16)
nameType = str(nameType).zfill(3)
outFName = establishDestinationDir(scratchDirDict)
suffix = ".tmp"
else:
outFName = establishDestinationDir(localDirDict)
outFName += "/"
baseName = topNode
baseName = leadingDrive.sub("\\1.slash.", baseName)
baseName = leadingSlash.sub("slash.", baseName)
baseName = trailingSlash.sub(nullstr, baseName)
baseName = anySlashes.sub(".", baseName)
baseName = distinctHostName + '.' + baseName
if "ezn" == nameType:
baseName += ".toc."
else:
baseName += stamp
baseName += nameType
baseName += suffix
baseName = anySlashes.sub(".", baseName)
baseName = anyPeriods.sub(".", baseName)
outFName += baseName
outFName = anyPeriods.sub(".", outFName)
outFName = anySlashes.sub("/", outFName)
if ".tmp" == suffix:
pantry[outFName] = 0 # initialize pantry size
outFHandle = openOutFile(outFName)
return {"outFHandle": outFHandle, "outFName": outFName, "baseName": baseName}
# }}}
def writeObituary(stackFrame, msg=None): # {{{
global fLog
global dfsIndex
if msg:
errMsg030 = msg
else:
errMsg030 = ""
errMsg030 = microSecTS() + \
str(int_encode(dfsIndex, B56)).zfill(4) + \
" <<< [[[Fatal Exception at line " + \
str(stackFrame.f_lineno) + \
" Triggered]]]::" + \
errMsg030 + \
">>>\n"
fLog.write(errMsg030)
fLog.close()
sys.exit(errMsg030)
# }}}
def coerse2str(dataIn): # {{{
#
# As of python version 3.1: On some systems, conversion using the file system encoding may fail.
# To compensate, Python uses the surrogateescape encoding error handler, which means that undecodable
# bytes are replaced by a Unicode character U+DCxx on decoding.
#
# This program only generates a manifest of files. So the filename encoding in the
# manifest need not be the same as the file name encoding on the physical medium
#
if isinstance(dataIn, bytes):
try:
retVal = dataIn.decode(fsEncoding, errors=encodeFix)
except UnicodeError as exc_unicode_00:
writeObituary(inspect.currentframe(), str(exc_unicode_00))
sys.exit("open the pod bay doors hal")
elif isinstance(dataIn, str):
try:
dataInBytes = dataIn.encode(fsEncoding, errors=encodeFix)
retVal = dataInBytes.decode(fsEncoding, errors=encodeFix)
if False:
if not (2 + dfsIndex) % 1111:
raise testException
except UnicodeError as exc_unicode_01:
writeObituary(inspect.currentframe(), str(exc_unicode_01))
sys.exit("open the pod bay doors hal")
else:
errMsg040 = "** <coerse2str> dataIn == "
errMsg040 += dataIn
errMsg040 += " 'dataIn' must be bytes or str **"
writeObituary(inspect.currentframe(), errMsg040)
sys.exit("open the pod bay doors hal")
return retVal # Instance of str
# }}}
def int_decode(string, alphabet=B56): # {{{
#
# Decode a Base X encoded string into the number
#
# Arguments:
# - `string`: The encoded string
# - `alphabet`: The alphabet to use for encoding
#
base = len(alphabet)
strlen = len(string)
num = 0
idx00 = 0
for char in string:
power = (strlen - (idx00 + 1))
num += alphabet.index(char) * (base ** power)
idx00 += 1
return num
# }}}
def WriteFsysElementInfo(path, fH, fN): # {{{
#
# send a line to a text file
#
element = carton[cartonIdx[path]]
iNodeEnc = iNodeBase[platformType]
iNodeStr = int_encode(element.Inode, iNodeEnc)
msg = str(element.Tag).rjust(2)
msg += ctrlA
msg += element.dfsIndex
msg += ctrlA
msg += time.strftime('%Y%m%d.%H%M%S', element.MTime)
msg += ctrlA
msg += str(iNodeStr).zfill(iNodeFldWdth[platformType])
msg += ctrlA
msg += str(element.Size).zfill(12)
msg += ctrlA
msg += path
msg += element.LinkPtr
msg += "\n"
try:
msg2write = coerse2str(msg)
except UnicodeError as exc_unicode_02:
errMsg050 = ": msg2write = coerse2str(msg)"
errMsg050 += " failed for file "
errMsg050 += fN
errMsg050 += "\n\n"
errMsg050 += str(exc_unicode_02)
writeObituary(inspect.currentframe(), errMsg050)
sys.exit("open the pod bay doors hal")
fH.write(msg2write)
# }}}
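# Each raw/carton line therefore looks like (fields joined by ctrl-A, shown
# here as '^A' for readability; the values are illustrative only):
# ' F^A0001^A20151201.120000^A0000012345^A000000001024^A/some/dir/file.txt'
# i.e. tag, dfsIndex, mtime, zero-padded inode, zero-padded size, full path
# (plus ' -> target' appended for symlinks).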
# {{{ Main descent Loop Initialization
def main(sysArgv: list, kwargs=None):
global dfsIndex
global platformType
global distinctHostName
global topNode
global fLog
global fRaw
global fSrt
platformKey = platform.platform()
if platformKey not in platformID:
err_msg = "[BAILINGOUT]::** platformKey == " + platformKey + " is not supported **"
sys.exit(err_msg)
platformType = platformID[platformKey]
if 'win32' == platformType:
#
# Belts & Suspenders here. 'pycharmRemoteDebugPath' is only
# needed if 'pydevd' is not somewhere on the 'PYTHONPATH'
#
pydevd_spec = importlib.util.find_spec('pydevd')
pydevd_is_available = pydevd_spec is not None
pycharmRemoteDebugPath = 'C:\\bin\\PyCharmPro\\debug-eggs\\pycharm-debug-py3k'
if os.path.exists(pycharmRemoteDebugPath):
if not pydevd_is_available:
sys.path.append(pycharmRemoteDebugPath)
pydevd_spec = importlib.util.find_spec('pydevd')
pydevd_is_available = pydevd_spec is not None
if pydevd_is_available:
import pydevd
ok2request_trace = os.path.exists(__file__ + ".debug_this")
if ok2request_trace:
pydevd.settrace('localhost', port=5656, stdoutToServer=True, stderrToServer=True)
# SSyncDiffHere SyncDiffHere SyncDiffHere SyncDiffHere SyncDiffHere SyncDiffHere yncDiffHere
cartonNumber = 0
uniqIdStamp = createStamp()
drpBxPath = establishDestinationDir(drpBxDirDict)
distinctHostName = socket.gethostname()
if 'localhost' == distinctHostName:
if 'android' == platformType:
distinctHostName = getFriendlyHostName4Android()
if kwargs and 'main_caller' in kwargs:
try:
if os.path.basename(__file__) == kwargs['main_caller']:
os.chdir(topNodeDict[platformType])
topNode = os.getcwd()
elif 'ezdfstree.py' == kwargs['main_caller']:
topNode = os.getcwd()
elif 'dbxdfstree.py' == kwargs['main_caller']:
os.chdir(drpBxDirDict[platformType])
os.chdir("..")
topNode = os.getcwd()
except OSError as exc_chdir_fail_00:
errMsg055 = str(exc_chdir_fail_00)
sys.exit(errMsg055)
if '' == topNode:
errMsg060 = "** <topDirectory> == [ "
errMsg060 += sysArgv[0]
errMsg060 += " ] cannot cd to this directory **"
if os.path.isdir(sysArgv[0]):
try:
os.chdir(sysArgv[0])
topNode = os.getcwd()
except OSError as exc_chdir_fail_01:
errMsg060 += "\n\n"
errMsg060 += str(exc_chdir_fail_01)
sys.exit(errMsg060)
else:
errMsg060 = "** os.path.isdir("
errMsg060 += sysArgv[0]
errMsg060 += ") is False. cannot cd to this directory **"
sys.exit(errMsg060)
#
# error log file is a special carton
#
rslt = nextOutFile("log", uniqIdStamp)
dstLogFName = rslt["outFName"]
fLog = rslt["outFHandle"]
#
# error log file is a special carton
#
rslt = nextOutFile("raw", uniqIdStamp)
dstRawFName = rslt["outFName"]
fRaw = rslt["outFHandle"]
dirStack = []
dirStack.insert(0, topNode)
# }}}
while len(dirStack): # {{{ Main Outer Loop
thisDir = dirStack.pop()
thisDirCanonical = coerse2str(thisDir)
urlPath = urllib.request.pathname2url(thisDirCanonical)
try: # {{{
for url2skip in skiplist:
if re.match(url2skip, urlPath, re.IGNORECASE):
osList = skiplist[url2skip]
if platformType in osList:
raise SkipThisDirectory
try: # {{{
dirListing = os.listdir(thisDir)
# }}}
except OSError as exc02: # {{{
fLog.write(microSecTS() +
str(int_encode(dfsIndex, B56)).zfill(4) +
" <<< [[[Exception 0 Triggered]]]::" +
thisDir + str(exc02) +
">>>\n")
dirListing = []
# }}}
while len(dirListing): # {{{ Main inner Loop
eName = dirListing.pop()
dfsIndex += 1
if False:
if not (2 + dfsIndex) % 1111:
print(dfsIndex)
fullPath = os.path.join(thisDir, eName)
e = FsysElement()
try: # {{{
e.TagKey = 0
e.TagKey |= os.path.isfile(fullPath)
e.TagKey |= os.path.isdir(fullPath) << 1
e.TagKey |= os.path.islink(fullPath) << 2
e.Tag = elementTagHash[e.TagKey]
e.Inode = abs(os.stat(fullPath).st_ino)
if 'L' == e.Tag[0]:
e.LinkPtr = ' -> ' + os.readlink(fullPath)
e.MTime = time.localtime(os.path.getmtime(fullPath))
e.Size = os.lstat(fullPath)[stat.ST_SIZE]
# }}}
except OSError as exc03: # {{{ Exception Triggered
fLog.write(microSecTS() +
str(int_encode(dfsIndex, B56)).zfill(4) +
" <<< [[[Exception 1 Triggered]]]::" +
fullPath + str(exc03) +
">>>\n")
# }}}
cartonIdx[fullPath] = len(carton)
carton.append(e)
try: # {{{
WriteFsysElementInfo(fullPath, fRaw, dstRawFName)
# }}}
except Exception as exc04: # {{{
writeObituary(inspect.currentframe(), msg=str(exc04))
sys.exit("open the pod bay doors hal")
# }}}
if 'D' == e.Tag:
dirStack.insert(0, fullPath)
if itemsPerCarton == len(carton): # {{{
#
# The carton is full. Dump it to a file
#
rslt = nextOutFile(str(cartonNumber), uniqIdStamp)
dstFName = rslt["outFName"]
fOut = rslt["outFHandle"]
fLog.write(microSecTS() + '> ' + dstFName + "\n")
#
# pantry dictionary contains the full path names
# of all the carton files as indexes.
# associated with each carton file is a linecount
#
pantry[dstFName] = len(carton)
for fullPath in sorted(cartonIdx.keys()): # {{{
WriteFsysElementInfo(fullPath, fOut, dstFName)
# }}}
fOut.close()
#
# I only keep the `active` carton in memory.
# So, clear out the old. Make room for the new.
# This python slice semantics will take me
# some getting-used-to.
#
carton[:] = []
#
# Carton has been cleared so must also
# be the carton index.
#
cartonIdx.clear()
cartonNumber += 1
# }}}
# }}}
# }}}
except SkipThisDirectory: # {{{
pass
# }}}
# }}}
# {{{ Main descent Loop Cleanup
if len(carton): # {{{
#
# usually a partially filled carton
# will be left over. So, manage that condition.
#
rslt = nextOutFile(str(cartonNumber), uniqIdStamp)
dstFName = rslt["outFName"]
fOut = rslt["outFHandle"]
fLog.write(microSecTS() + '> ' + dstFName + "\n")
pantry[dstFName] = len(carton)
for fullPath in sorted(cartonIdx.keys()): # {{{
WriteFsysElementInfo(fullPath, fOut, dstFName)
# }}}
fOut.close() # }}}
# recursive descent is complete
# now I need to merge all of my
# cartons into a single crate
# which will be sorted by fullpathname
#
fRaw.close()
# }}}
# {{{ Initialize the merge operation
#
# put the names of all
# merge files in the mergeQ
#
mergeQ = []
tmpFileList = list(pantry.keys())
for fName in tmpFileList:
#
# open temp file for reading
#
bucket = InputMergeObj(fName)
#
# put the handle, FileName pair in the queue
#
mergeQ.append(bucket)
rslt = nextOutFile("srt", uniqIdStamp)
dstSrtFName = rslt["outFName"]
fSrt = rslt["outFHandle"]
therezWork2do = True
# }}}
while therezWork2do: # {{{ Main Merge Loop
minIdx = 0
if 1 < len(mergeQ):
for idx in list(range(1, len(mergeQ))):
if mergeQ[idx].lineKey < mergeQ[minIdx].lineKey:
minIdx = idx
bucket = mergeQ[minIdx]
fSrt.write(bucket.cleanCurrentLine())
if ctrlA == bucket.nxtLine():
fLog.write(microSecTS() + '< ' + mergeQ[minIdx].N + "\n")
mergeQ.pop(minIdx)
else:
therezWork2do = False
# }}}
# {{{ Merge Cleanup
bucket = mergeQ[0]
fSrt.write(bucket.cleanCurrentLine())
while ctrlA != bucket.nxtLine(): # {{{
#
# write out all the lines that remain
# in the last bucket
#
fSrt.write(bucket.cleanCurrentLine())
# }}}
fLog.write(microSecTS() + '< ' + mergeQ[0].N + "\n")
mergeQ.pop(0)
fSrt.close()
fLog.close()
#
# cleanup the temp files
#
tmpFileList = list(pantry.keys())
for fName in tmpFileList:
os.remove(fName)
rslt = nextOutFile("ezn", uniqIdStamp)
dstEzFName = rslt["outFName"]
dbxEzFName = drpBxPath + "/" + rslt["baseName"]
shutil.copy2(dstSrtFName, dstEzFName)
if not os.path.samefile(establishDestinationDir(localDirDict), drpBxPath):
shutil.copy2(dstEzFName, dbxEzFName)
print(dstRawFName)
print(dstSrtFName)
print(dstLogFName)
print(dstEzFName)
if dstEzFName != dbxEzFName:
print(dbxEzFName)
if __name__ == '__main__':
if not (re.search(r'\A utf [-] 8 \Z', sys.stdout.encoding, re.IGNORECASE | re.VERBOSE)):
print("please set python env PYTHONIOENCODING=UTF-8.", file=sys.stderr)
sys.exit(1)
main(sys.argv[1:], {'main_caller': os.path.basename(__file__)})
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from guildwars2api.base import BaseResource
class Item(BaseResource):
"""
BaseResource class for an Item. See https://wiki.guildwars2.com/wiki/API:2/items
Parameters for a request
id: a single ID
ids: a comma-delimited string of IDs
lang: "en", "es", "fr", "de"
"""
resource = "items"
class Recipe(BaseResource):
resource = "recipes"
class RecipeSearch(BaseResource):
resource = "recipes/search"
class Skin(BaseResource):
resource = "skins"
class Continent(BaseResource):
resource = "continents"
class Floor(BaseResource):
resource = "floors"
def build_url(self, continent_id=None, floor_id=None, *args, **kwargs):
url_pieces = super(Floor, self).build_url(*args, **kwargs).split("?")
if len(url_pieces) > 1:
base_url, query_string = url_pieces
else:
base_url = url_pieces[0]
query_string = ""
url = base_url
if continent_id:
url = "{base_url}/{continent}".format(base_url=base_url, continent=continent_id)
if floor_id:
url = "{url}/{floor}".format(url=url, floor=floor_id)
if query_string != "":
url = "{url}?{query_string}".format(url=url, query_string=query_string)
return url
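# Hedged usage sketch (the exact base URL comes from BaseResource in
# guildwars2api.base, which is not shown here): assuming a hypothetical base of
# "https://api.guildwars2.com/v2/floors?lang=en",
# Floor(...).build_url(continent_id=1, floor_id=2)
# would yield "https://api.guildwars2.com/v2/floors/1/2?lang=en".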
class Map(BaseResource):
resource = "maps"
class Listing(BaseResource):
resource = "commerce/listings"
class Exchange(BaseResource):
resource = "commerce/exchange"
def build_url(self, currency="coins", **kwargs):
url_pieces = super(Exchange, self).build_url(**kwargs).split("?")
if len(url_pieces) > 1:
base_url, query_string = url_pieces
else:
base_url = url_pieces[0]
query_string = ""
return "{base_url}/{currency}?{query_string}".format(
base_url=base_url,
currency=currency,
query_string=query_string,
)
class Transaction(BaseResource):
resource = "commerce/transactions"
def build_url(self, current_or_history=None, buy_or_sell=None, *args, **kwargs):
base_url = super(Transaction, self).build_url(**kwargs)
if current_or_history is None:
return base_url
if buy_or_sell is None:
return "{base_url}/{current_or_history}".format(
base_url=base_url,
current_or_history=current_or_history,
)
return "{base_url}/{current_or_history}/{buy_or_sell}".format(
base_url=base_url,
current_or_history=current_or_history,
buy_or_sell=buy_or_sell,
)
class Price(BaseResource):
resource = "commerce/prices"
class Build(BaseResource):
resource = "build"
class Color(BaseResource):
resource = "colors"
class File(BaseResource):
resource = "files"
class Quaggan(BaseResource):
resource = "quaggans"
class World(BaseResource):
resource = "worlds"
class Material(BaseResource):
resource = "materials"
class Bank(BaseResource):
resource = "account/bank"
class BankMaterial(BaseResource):
resource = "account/materials"
class Character(BaseResource):
resource = "characters"
class Inventory(BaseResource):
resource = "characters/{0}/inventory"
class Equipment(BaseResource):
resource = "characters/{0}/equipment"
class Account(BaseResource):
resource = "account"
class TokenInfo(BaseResource):
resource = "tokeninfo"
class Currency(BaseResource):
resource = "currencies"
class AccountWallet(BaseResource):
resource = "account/wallet"
class AccountDye(BaseResource):
resource = "account/dyes"
class AccountSkin(BaseResource):
resource = "account/skins"
class PvPStat(BaseResource):
resource = "pvp/stats"
class PvPGame(BaseResource):
resource = "pvp/games"
class Specialization(BaseResource):
resource = "characters/{0}/specializations"
class WvWObjective(BaseResource):
resource = "wvw/objectives"
class Mini(BaseResource):
resource = "minis"
class AccountMini(BaseResource):
resource = "account/minis"
class Achievement(BaseResource):
resource = "achievements"
class AccountAchievement(BaseResource):
resource = "account/achievements"
class GuildUpgrade(BaseResource):
resource = "guild/upgrades"
class GuildPermission(BaseResource):
resource = "guild/permissions"
class GuildMember(BaseResource):
resource = "guild/{0}/members"
class GuildRank(BaseResource):
resource = "guild/{0}/ranks"
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from ssl import CERT_NONE
from airflow.hooks.base_hook import BaseHook
from pymongo import MongoClient, ReplaceOne
class MongoHook(BaseHook):
"""
PyMongo Wrapper to Interact With Mongo Database
Mongo Connection Documentation
https://docs.mongodb.com/manual/reference/connection-string/index.html
You can specify connection string options in extra field of your connection
https://docs.mongodb.com/manual/reference/connection-string/index.html#connection-string-options
If you want to use a DNS seedlist, set `srv` to True.
ex.
{"srv": true, "replicaSet": "test", "ssl": true, "connectTimeoutMS": 30000}
"""
conn_type = 'mongo'
def __init__(self, conn_id='mongo_default', *args, **kwargs):
super(MongoHook, self).__init__(source='mongo')
self.mongo_conn_id = conn_id
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson.copy()
self.client = None
srv = self.extras.pop('srv', False)
scheme = 'mongodb+srv' if srv else 'mongodb'
self.uri = '{scheme}://{creds}{host}{port}/{database}'.format(
scheme=scheme,
creds='{}:{}@'.format(
self.connection.login, self.connection.password
) if self.connection.login else '',
host=self.connection.host,
port='' if self.connection.port is None else ':{}'.format(self.connection.port),
database=self.connection.schema
)
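# A minimal sketch of the resulting URI, assuming a hypothetical Airflow
# connection with login "user", password "pass", host "mongo.example.com",
# port 27017, schema "mydb" and no "srv" extra:
# mongodb://user:pass@mongo.example.com:27017/mydb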
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.client is not None:
self.close_conn()
def get_conn(self):
"""
Fetches PyMongo Client
"""
if self.client is not None:
return self.client
# Mongo Connection Options dict that is unpacked when passed to MongoClient
options = self.extras
# If we are using SSL, disable requiring certs from a specific hostname
if options.get('ssl', False):
options.update({'ssl_cert_reqs': CERT_NONE})
self.client = MongoClient(self.uri, **options)
return self.client
def close_conn(self):
client = self.client
if client is not None:
client.close()
self.client = None
def get_collection(self, mongo_collection, mongo_db=None):
"""
Fetches a mongo collection object for querying.
Uses connection schema as DB unless specified.
"""
mongo_db = mongo_db if mongo_db is not None else self.connection.schema
mongo_conn = self.get_conn()
return mongo_conn.get_database(mongo_db).get_collection(mongo_collection)
def aggregate(self, mongo_collection, aggregate_query, mongo_db=None, **kwargs):
"""
Runs an aggregation pipeline and returns the results
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.aggregate
https://api.mongodb.com/python/current/examples/aggregation.html
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.aggregate(aggregate_query, **kwargs)
def find(self, mongo_collection, query, find_one=False, mongo_db=None, **kwargs):
"""
Runs a mongo find query and returns the results
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if find_one:
return collection.find_one(query, **kwargs)
else:
return collection.find(query, **kwargs)
def insert_one(self, mongo_collection, doc, mongo_db=None, **kwargs):
"""
Inserts a single document into a mongo collection
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_one
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.insert_one(doc, **kwargs)
def insert_many(self, mongo_collection, docs, mongo_db=None, **kwargs):
"""
Inserts many docs into a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_many
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.insert_many(docs, **kwargs)
def update_one(self, mongo_collection, filter_doc, update_doc,
mongo_db=None, **kwargs):
"""
Updates a single document in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to update.
:type filter_doc: dict
:param update_doc: The modifications to apply.
:type update_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.update_one(filter_doc, update_doc, **kwargs)
def update_many(self, mongo_collection, filter_doc, update_doc,
mongo_db=None, **kwargs):
"""
Updates one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_many
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to update.
:type filter_doc: dict
:param update_doc: The modifications to apply.
:type update_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.update_many(filter_doc, update_doc, **kwargs)
def replace_one(self, mongo_collection, doc, filter_doc=None,
mongo_db=None, **kwargs):
"""
Replaces a single document in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.replace_one
.. note::
If no ``filter_doc`` is given, it is assumed that the replacement
document contains the ``_id`` field, which is then used as the filter.
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param doc: The new document.
:type doc: dict
:param filter_doc: A query that matches the documents to replace.
Can be omitted; then the _id field from doc will be used.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if not filter_doc:
filter_doc = {'_id': doc['_id']}
return collection.replace_one(filter_doc, doc, **kwargs)
def replace_many(self, mongo_collection, docs,
filter_docs=None, mongo_db=None, upsert=False, collation=None,
**kwargs):
"""
Replaces many documents in a mongo collection.
Uses bulk_write with multiple ReplaceOne operations
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
.. note::
If no ``filter_docs`` are given, it is assumed that all
replacement documents contain the ``_id`` field which are then
used as filters.
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param docs: The new documents.
:type docs: list[dict]
:param filter_docs: A list of queries that match the documents to replace.
Can be omitted; then the _id fields from docs will be used.
:type filter_docs: list[dict]
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
:param upsert: If ``True``, perform an insert if no documents
match the filters for the replace operation.
:type upsert: bool
:param collation: An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
:type collation: pymongo.collation.Collation
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if not filter_docs:
filter_docs = [{'_id': doc['_id']} for doc in docs]
requests = [
ReplaceOne(
filter_docs[i],
docs[i],
upsert=upsert,
collation=collation)
for i in range(len(docs))
]
return collection.bulk_write(requests, **kwargs)
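# Hedged usage sketch (connection id, collection and documents are
# hypothetical):
# hook = MongoHook(conn_id='mongo_default')
# hook.replace_many('my_collection',
# [{'_id': 1, 'val': 'a'}, {'_id': 2, 'val': 'b'}],
# upsert=True)
# Without explicit filter_docs, each document's _id is used as its filter.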
def delete_one(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
"""
Deletes a single document in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_one
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the document to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_one(filter_doc, **kwargs)
def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
"""
Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_many(filter_doc, **kwargs)
|
|
# CRITs environment chooser
import errno
import glob
import os
import sys
import django
import subprocess
from pymongo import ReadPreference, MongoClient
from mongoengine import connect
sys.path.insert(0, os.path.dirname(__file__))
# calculated paths for django and the site
# used as starting points for various other paths
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# Version
CRITS_VERSION = '4-master'
# The following gets the current git hash to be displayed in the footer and
# hides it if this is not a git repo.
try:
    HIDE_GIT_HASH = False
    # get the short form of the current git hash
    GIT_HASH = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip()
    # get the long form of the current git hash
    GIT_HASH_LONG = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
    # get the current git branch
    GIT_BRANCH = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
except Exception:
    # if this is not a git repo, clear out all values and hide them
GIT_HASH = ''
GIT_HASH_LONG = ''
HIDE_GIT_HASH = True
GIT_BRANCH = ''
APPEND_SLASH = True
TEST_RUN = False
# Set to DENY|SAMEORIGIN|ALLOW-FROM uri
# Default: SAMEORIGIN
# More details: https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options
#X_FRAME_OPTIONS = 'ALLOW-FROM https://www.example.com'
# Setup for runserver or Apache
if 'runserver' in sys.argv:
DEVEL_INSTANCE = True
SERVICE_MODEL = 'thread'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
LOGIN_URL = "/login/"
elif 'test' in sys.argv:
TEST_RUN = True
DEVEL_INSTANCE = True
SERVICE_MODEL = 'thread'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
LOGIN_URL = "/login/"
else:
DEVEL_INSTANCE = False
SERVICE_MODEL = 'process'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
LOGIN_URL = "/login/"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
# MongoDB Default Configuration
# Tip: To change database settings, override by using
# template from config/database_example.py
MONGO_HOST = 'localhost' # server to connect to
MONGO_PORT = 27017 # port MongoD is running on
MONGO_DATABASE = 'crits' # database name to connect to
MONGO_SSL = False # whether MongoD has SSL enabled
MONGO_USER = '' # username used to authenticate to mongo (normally empty)
MONGO_PASSWORD = '' # password for the mongo user
# File storage backends
S3 = "S3"
GRIDFS = "GRIDFS"
# DB to use for files
FILE_DB = GRIDFS
# S3 buckets
BUCKET_PCAPS = "pcaps"
BUCKET_OBJECTS = "objects"
BUCKET_SAMPLES = "samples"
# Import custom Database config
dbfile = os.path.join(SITE_ROOT, 'config/database.py')
if os.path.exists(dbfile):
execfile(dbfile)
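# A hypothetical config/database.py override might look like the following
# (the names mirror the defaults above; the values are placeholders only):
#
#     MONGO_HOST = 'mongo.internal.example'
#     MONGO_PORT = 27017
#     MONGO_DATABASE = 'crits'
#     MONGO_SSL = True
#     MONGO_USER = 'crits_app'
#     MONGO_PASSWORD = 'change-me'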
if TEST_RUN:
MONGO_DATABASE = 'crits-unittest'
# Read preference to configure which nodes you can read from
# Possible values:
# primary: queries are sent to the primary node in a replica set
# secondary: queries are allowed if sent to primary or secondary
# (for single host) or are distributed to secondaries
# if you are connecting through a router
# More info can be found here:
# http://api.mongodb.org/python/current/api/pymongo/index.html
MONGO_READ_PREFERENCE = ReadPreference.PRIMARY
# MongoDB default collections
COL_ACTORS = "actors" # main collection for actors
COL_ACTOR_IDENTIFIERS = "actor_identifiers" # main collection for actor identifiers
COL_ACTOR_THREAT_IDENTIFIERS = "actor_threat_identifiers" # actor threat identifiers
COL_ACTOR_THREAT_TYPES = "actor_threat_types" # actor threat types
COL_ACTOR_MOTIVATIONS = "actor_motivations" # actor motivations
COL_ACTOR_SOPHISTICATIONS = "actor_sophistications" # actor sophistications
COL_ACTOR_INTENDED_EFFECTS = "actor_intended_effects" # actor intended effects
COL_ANALYSIS_RESULTS = "analysis_results" # analysis results
COL_AUDIT_LOG = "audit_log" # audit log entries
COL_BACKDOOR_DETAILS = "backdoor_details" # backdoor information
COL_BUCKET_LISTS = "bucket_lists" # bucketlist information
COL_CAMPAIGNS = "campaigns" # campaigns list
COL_CERTIFICATES = "certificates" # certificates list
COL_COMMENTS = "comments" # comments collection
COL_CONFIG = "config" # config collection
COL_COUNTS = "counts" # general counts for dashboard
COL_DIVISION_DATA = "division_data" # information on divisions within company
COL_DOMAINS = "domains" # root domains with FQDNs and IP information
COL_EFFECTIVE_TLDS = "effective_tlds" # list of effective TLDs from Mozilla to determine root domains
COL_EMAIL = "email" # main email collection
COL_EVENTS = "events" # main events collection
COL_EVENT_TYPES = "event_types" # event types for events
COL_EXPLOIT_DETAILS = "exploit_details" # list of CVE's
COL_EXPLOITS = "exploits" # exploit count generated by MapReduce
COL_FILETYPES = "filetypes" # list of filetypes in system generated by MapReduce
COL_IDB_ACTIONS = "idb_actions" # list of available actions to be taken with indicators
COL_INDICATORS = "indicators" # main indicators collection
COL_INTERNAL_LOCATIONS = "internal_locations" # site locations for company
COL_IPS = "ips" # IPs collection
COL_LOCATIONS = "locations" # Locations collection
COL_NOTIFICATIONS = "notifications" # notifications collection
COL_OBJECTS = "objects" # objects that are files that have been added
COL_OBJECT_TYPES = "object_types" # types of objects that can be added
COL_PCAPS = "pcaps" # main pcaps collection
COL_RAW_DATA = "raw_data" # main raw data collection
COL_RAW_DATA_TYPES = "raw_data_types" # list of available raw data types
COL_RELATIONSHIP_TYPES = "relationship_types" # list of available relationship types
COL_SAMPLES = "sample" # main samples collection
COL_SCREENSHOTS = "screenshots" # main screenshots collection
COL_SECTOR_LISTS = "sector_lists" # sector lists information
COL_SECTORS = "sectors" # available sectors
COL_SERVICES = "services" # list of services for scanning
COL_SOURCE_ACCESS = "source_access" # source access ACL collection
COL_SOURCES = "sources" # source information generated by MapReduce
COL_STATISTICS = "statistics" # list of statistics for different objects (campaigns, for example)
COL_TARGETS = "targets" # target information for use in email
COL_USERS = "users" # main users collection
COL_USER_ROLES = "user_roles" # main user roles collection
COL_YARAHITS = "yarahits" # yara hit counts for samples
# MongoDB connection pool
if MONGO_USER:
connect(MONGO_DATABASE, host=MONGO_HOST, port=MONGO_PORT, read_preference=MONGO_READ_PREFERENCE, ssl=MONGO_SSL,
username=MONGO_USER, password=MONGO_PASSWORD)
else:
connect(MONGO_DATABASE, host=MONGO_HOST, port=MONGO_PORT, read_preference=MONGO_READ_PREFERENCE, ssl=MONGO_SSL)
# Get config from DB
c = MongoClient(MONGO_HOST, MONGO_PORT, ssl=MONGO_SSL)
db = c[MONGO_DATABASE]
if MONGO_USER:
db.authenticate(MONGO_USER, MONGO_PASSWORD)
coll = db[COL_CONFIG]
crits_config = coll.find_one({})
if not crits_config:
crits_config = {}
# Populate settings
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE: we are setting ALLOWED_HOSTS to ['*'] by default which will work
# everywhere but is insecure for production installations (no less secure
# than setting DEBUG to True). This is done because we can't anticipate
# the host header for every CRITs install and this should work "out of
# the box".
ALLOWED_HOSTS = crits_config.get('allowed_hosts', ['*'])
COMPANY_NAME = crits_config.get('company_name', 'My Company')
CLASSIFICATION = crits_config.get('classification', 'unclassified')
CRITS_EMAIL = crits_config.get('crits_email', '')
CRITS_EMAIL_SUBJECT_TAG = crits_config.get('crits_email_subject_tag', '')
CRITS_EMAIL_END_TAG = crits_config.get('crits_email_end_tag', True)
DEBUG = crits_config.get('debug', True)
if crits_config.get('email_host', None):
EMAIL_HOST = crits_config.get('email_host', None)
if crits_config.get('email_port', None):
EMAIL_PORT = int(crits_config.get('email_port', None))
ENABLE_API = crits_config.get('enable_api', False)
ENABLE_TOASTS = crits_config.get('enable_toasts', False)
GIT_REPO_URL = crits_config.get('git_repo_url', '')
HTTP_PROXY = crits_config.get('http_proxy', None)
INSTANCE_NAME = crits_config.get('instance_name', 'My Instance')
INSTANCE_URL = crits_config.get('instance_url', '')
INVALID_LOGIN_ATTEMPTS = crits_config.get('invalid_login_attempts', 3) - 1
LANGUAGE_CODE = crits_config.get('language_code', 'en-us')
LDAP_AUTH = crits_config.get('ldap_auth', False)
LDAP_SERVER = crits_config.get('ldap_server', '')
LDAP_USERDN = crits_config.get('ldap_userdn', '')
LDAP_USERCN = crits_config.get('ldap_usercn', '')
LOG_DIRECTORY = crits_config.get('log_directory', os.path.join(SITE_ROOT, '..', 'logs'))
LOG_LEVEL = crits_config.get('log_level', 'INFO')
QUERY_CACHING = crits_config.get('query_caching', False)
RAR_PATH = crits_config.get('rar_path', '/usr/bin/unrar')
RT_URL = crits_config.get('rt_url', None)
SECURE_COOKIE = crits_config.get('secure_cookie', True)
SERVICE_DIRS = tuple(crits_config.get('service_dirs', []))
SERVICE_MODEL = crits_config.get('service_model', SERVICE_MODEL)
SERVICE_POOL_SIZE = int(crits_config.get('service_pool_size', 12))
SESSION_TIMEOUT = int(crits_config.get('session_timeout', 12)) * 60 * 60
SPLUNK_SEARCH_URL = crits_config.get('splunk_search_url', None)
TEMP_DIR = crits_config.get('temp_dir', '/tmp')
TIME_ZONE = crits_config.get('timezone', 'America/New_York')
ZIP7_PATH = crits_config.get('zip7_path', '/usr/bin/7za')
REMOTE_USER = crits_config.get('remote_user', False)
PASSWORD_COMPLEXITY_REGEX = crits_config.get('password_complexity_regex', '(?=^.{8,}$)((?=.*\d)|(?=.*\W+))(?![.\n])(?=.*[A-Z])(?=.*[a-z]).*$')
PASSWORD_COMPLEXITY_DESC = crits_config.get('password_complexity_desc', '8 characters, at least 1 capital, 1 lowercase and 1 number/special')
DEPTH_MAX = crits_config.get('depth_max', '10')
TOTAL_MAX = crits_config.get('total_max', '250')
REL_MAX = crits_config.get('rel_max', '50')
TOTP = crits_config.get('totp', False)
COLLECTION_TO_BUCKET_MAPPING = {
COL_PCAPS: BUCKET_PCAPS,
COL_OBJECTS: BUCKET_OBJECTS,
COL_SAMPLES: BUCKET_SAMPLES
}
# check Log Directory
if not os.path.exists(LOG_DIRECTORY):
LOG_DIRECTORY = os.path.join(SITE_ROOT, '..', 'logs')
# Custom settings for Django
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# DATE and DATETIME Formats
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i:s.u'
PY_DATE_FORMAT = '%Y-%m-%d'
PY_TIME_FORMAT = '%H:%M:%S.%f'
PY_DATETIME_FORMAT = ' '.join([PY_DATE_FORMAT, PY_TIME_FORMAT])
OLD_PY_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
PY_FORM_DATETIME_FORMATS = [PY_DATETIME_FORMAT, OLD_PY_DATETIME_FORMAT]
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(SITE_ROOT, '../extras/www')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/'
STATIC_ROOT = os.path.join(SITE_ROOT, '../extras/www/static')
STATIC_URL = '/static/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'crits.core.views.base_context',
'crits.core.views.collections',
'crits.core.views.user_context',
)
ROOT_URLCONF = 'crits.urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, '../documentation'),
os.path.join(SITE_ROOT, 'core/templates'),
os.path.join(SITE_ROOT, 'actors/templates'),
os.path.join(SITE_ROOT, 'core/dashboard/templates'),
os.path.join(SITE_ROOT, 'campaigns/templates'),
os.path.join(SITE_ROOT, 'certificates/templates'),
os.path.join(SITE_ROOT, 'comments/templates'),
os.path.join(SITE_ROOT, 'config/templates'),
os.path.join(SITE_ROOT, 'domains/templates'),
os.path.join(SITE_ROOT, 'emails/templates'),
os.path.join(SITE_ROOT, 'events/templates'),
os.path.join(SITE_ROOT, 'indicators/templates'),
os.path.join(SITE_ROOT, 'ips/templates'),
os.path.join(SITE_ROOT, 'locations/templates'),
os.path.join(SITE_ROOT, 'objects/templates'),
os.path.join(SITE_ROOT, 'pcaps/templates'),
os.path.join(SITE_ROOT, 'raw_data/templates'),
os.path.join(SITE_ROOT, 'relationships/templates'),
os.path.join(SITE_ROOT, 'samples/templates'),
os.path.join(SITE_ROOT, 'screenshots/templates'),
os.path.join(SITE_ROOT, 'services/templates'),
os.path.join(SITE_ROOT, 'standards/templates'),
os.path.join(SITE_ROOT, 'stats/templates'),
os.path.join(SITE_ROOT, 'targets/templates'),
)
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'core/static'),
os.path.join(SITE_ROOT, 'actors/static'),
os.path.join(SITE_ROOT, 'dashboards/static'),
os.path.join(SITE_ROOT, 'campaigns/static'),
os.path.join(SITE_ROOT, 'certificates/static'),
os.path.join(SITE_ROOT, 'comments/static'),
os.path.join(SITE_ROOT, 'domains/static'),
os.path.join(SITE_ROOT, 'emails/static'),
os.path.join(SITE_ROOT, 'events/static'),
os.path.join(SITE_ROOT, 'indicators/static'),
os.path.join(SITE_ROOT, 'ips/static'),
os.path.join(SITE_ROOT, 'locations/static'),
os.path.join(SITE_ROOT, 'objects/static'),
os.path.join(SITE_ROOT, 'pcaps/static'),
os.path.join(SITE_ROOT, 'raw_data/static'),
os.path.join(SITE_ROOT, 'relationships/static'),
os.path.join(SITE_ROOT, 'samples/static'),
os.path.join(SITE_ROOT, 'screenshots/static'),
os.path.join(SITE_ROOT, 'services/static'),
os.path.join(SITE_ROOT, 'config/static'),
os.path.join(SITE_ROOT, 'targets/static'),
)
INSTALLED_APPS = (
'crits.core',
'crits.dashboards',
'django.contrib.auth',
'mongoengine.django.mongo_auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'crits.actors',
'crits.campaigns',
'crits.certificates',
'crits.domains',
'crits.emails',
'crits.events',
'crits.indicators',
'crits.ips',
'crits.locations',
'crits.objects',
'crits.pcaps',
'crits.raw_data',
'crits.relationships',
'crits.samples',
'crits.screenshots',
'crits.services',
'crits.stats',
'crits.targets',
'tastypie',
'tastypie_mongoengine',
)
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
MONGOENGINE_USER_DOCUMENT = 'crits.core.user.CRITsUser'
SESSION_ENGINE = 'mongoengine.django.sessions'
AUTHENTICATION_BACKENDS = (
#'mongoengine.django.auth.MongoEngineBackend',
'crits.core.user.CRITsAuthBackend',
)
if REMOTE_USER:
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
'crits.core.user.CRITsRemoteUserBackend',
)
# Handle logging after all custom configuration
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': "%(levelname)s %(asctime)s %(name)s %(message)s"
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'normal': {
'level': LOG_LEVEL,
'class': 'logging.FileHandler',
'formatter': 'verbose',
'filename': os.path.join(LOG_DIRECTORY, 'crits.log'),
},
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'crits': {
'handlers': ['normal'],
'propagate': True,
'level': 'DEBUG',
},
},
}
# Handle creating log directories if they do not exist
for handler in LOGGING['handlers'].values():
log_file = handler.get('filename')
if log_file:
log_dir = os.path.dirname(log_file)
if not os.path.exists(log_dir):
try:
os.makedirs(log_dir)
except OSError as e:
                # if the directory already exists there is nothing to do
                if e.args[0] == errno.EEXIST:
                    pass
                # re-raise any error that is not easy to handle
                # automatically, such as permission errors
else:
raise
# CRITs Types
CRITS_TYPES = {
'Actor': COL_ACTORS,
'AnalysisResult': COL_ANALYSIS_RESULTS,
'Campaign': COL_CAMPAIGNS,
'Certificate': COL_CERTIFICATES,
'Comment': COL_COMMENTS,
'Domain': COL_DOMAINS,
'Email': COL_EMAIL,
'Event': COL_EVENTS,
'Indicator': COL_INDICATORS,
'IP': COL_IPS,
'Notification': COL_NOTIFICATIONS,
'PCAP': COL_PCAPS,
'RawData': COL_RAW_DATA,
'Sample': COL_SAMPLES,
'Screenshot': COL_SCREENSHOTS,
'Target': COL_TARGETS,
}
# Custom template lists for loading in different places in the UI
SERVICE_NAV_TEMPLATES = ()
SERVICE_CP_TEMPLATES = ()
SERVICE_TAB_TEMPLATES = ()
# discover services
for service_directory in SERVICE_DIRS:
if os.path.isdir(service_directory):
sys.path.insert(0, service_directory)
for d in os.listdir(service_directory):
abs_path = os.path.join(service_directory, d, 'templates')
if os.path.isdir(abs_path):
TEMPLATE_DIRS = TEMPLATE_DIRS + (abs_path,)
nav_items = os.path.join(abs_path, '%s_nav_items.html' % d)
cp_items = os.path.join(abs_path, '%s_cp_items.html' % d)
if os.path.isfile(nav_items):
SERVICE_NAV_TEMPLATES = SERVICE_NAV_TEMPLATES + ('%s_nav_items.html' % d,)
if os.path.isfile(cp_items):
SERVICE_CP_TEMPLATES = SERVICE_CP_TEMPLATES + ('%s_cp_items.html' % d,)
for tab_temp in glob.glob('%s/*_tab.html' % abs_path):
head, tail = os.path.split(tab_temp)
ctype = tail.split('_')[-2]
name = "_".join(tail.split('_')[:-2])
SERVICE_TAB_TEMPLATES = SERVICE_TAB_TEMPLATES + ((ctype, name, tail),)
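# Illustrative sketch of the discovery convention above, for a hypothetical
# service named "my_service" living in one of SERVICE_DIRS:
#
#     my_service/templates/my_service_nav_items.html -> appended to SERVICE_NAV_TEMPLATES
#     my_service/templates/my_service_cp_items.html  -> appended to SERVICE_CP_TEMPLATES
#     my_service/templates/results_Sample_tab.html   -> SERVICE_TAB_TEMPLATES gains
#                                                       ('Sample', 'results', 'results_Sample_tab.html')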
# Allow configuration of which META or HEADER variable is used to find the
# remote username when REMOTE_USER is enabled.
REMOTE_USER_META = 'REMOTE_USER'
# The next example could be used for reverse proxy setups
# where your frontend passes the Remote-User: header.
#
# WARNING: If you enable this, be 100% certain your backend is not
# directly accessible and this header could be spoofed by an attacker.
#
# REMOTE_USER_META = 'HTTP_REMOTE_USER'
# Import custom settings if it exists
csfile = os.path.join(SITE_ROOT, 'config/overrides.py')
if os.path.exists(csfile):
execfile(csfile)
|
|
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
import typing
import asyncio
import logging
import warnings
import dataclasses
import pyuavcan.transport
from ._session import RedundantInputSession, RedundantOutputSession, RedundantSession
from ._error import InconsistentInferiorConfigurationError
from ._deduplicator import Deduplicator
from ._tracer import RedundantTracer, RedundantCapture
_logger = logging.getLogger(__name__)
@dataclasses.dataclass
class RedundantTransportStatistics(pyuavcan.transport.TransportStatistics):
"""
Aggregate statistics for all inferior transports in a redundant group.
This is an atomic immutable sample; it is not updated after construction.
"""
inferiors: typing.List[pyuavcan.transport.TransportStatistics] = dataclasses.field(default_factory=list)
"""
The ordering is guaranteed to match that of :attr:`RedundantTransport.inferiors`.
"""
class RedundantTransport(pyuavcan.transport.Transport):
"""
This is a composite over a set of :class:`pyuavcan.transport.Transport`.
Please read the module documentation for details.
"""
def __init__(self, *, loop: typing.Optional[asyncio.AbstractEventLoop] = None) -> None:
"""
:param loop: Deprecated.
"""
if loop:
warnings.warn("The loop argument is deprecated.", DeprecationWarning)
self._cols: typing.List[pyuavcan.transport.Transport] = []
self._rows: typing.Dict[pyuavcan.transport.SessionSpecifier, RedundantSession] = {}
self._unwrapped_capture_handlers: typing.List[typing.Callable[[RedundantCapture], None]] = []
self._check_matrix_consistency()
@property
def protocol_parameters(self) -> pyuavcan.transport.ProtocolParameters:
"""
Aggregate parameters constructed from all inferiors.
If there are no inferiors (i.e., if the instance is closed), the value is all-zeros.
Beware that if the set of inferiors is changed, this value may also be changed.
The values are obtained from the set of inferiors by applying the following reductions:
- min transfer-ID modulo
- min max-nodes
- min MTU
"""
ipp = [t.protocol_parameters for t in self._cols] or [
pyuavcan.transport.ProtocolParameters(
transfer_id_modulo=0,
max_nodes=0,
mtu=0,
)
]
return pyuavcan.transport.ProtocolParameters(
transfer_id_modulo=min(t.transfer_id_modulo for t in ipp),
max_nodes=min(t.max_nodes for t in ipp),
mtu=min(t.mtu for t in ipp),
)
@property
def local_node_id(self) -> typing.Optional[int]:
"""
All inferiors share the same local node-ID.
If there are no inferiors, the value is None (anonymous).
"""
if self._cols:
nid_set = set(x.local_node_id for x in self._cols)
if len(nid_set) == 1:
(out,) = nid_set
return out
# The following exception should not occur during normal operation unless one of the inferiors is
# reconfigured sneakily.
raise InconsistentInferiorConfigurationError(
f"Redundant transports have different node-IDs: {[x.local_node_id for x in self._cols]}"
)
return None
def get_input_session(
self, specifier: pyuavcan.transport.InputSessionSpecifier, payload_metadata: pyuavcan.transport.PayloadMetadata
) -> RedundantInputSession:
out = self._get_session(
specifier,
lambda fin: RedundantInputSession(
specifier, payload_metadata, lambda: self.protocol_parameters.transfer_id_modulo, fin
),
)
assert isinstance(out, RedundantInputSession)
self._check_matrix_consistency()
return out
def get_output_session(
self, specifier: pyuavcan.transport.OutputSessionSpecifier, payload_metadata: pyuavcan.transport.PayloadMetadata
) -> RedundantOutputSession:
out = self._get_session(specifier, lambda fin: RedundantOutputSession(specifier, payload_metadata, fin))
assert isinstance(out, RedundantOutputSession)
self._check_matrix_consistency()
return out
def sample_statistics(self) -> RedundantTransportStatistics:
return RedundantTransportStatistics(inferiors=[t.sample_statistics() for t in self._cols])
@property
def input_sessions(self) -> typing.Sequence[RedundantInputSession]:
return [s for s in self._rows.values() if isinstance(s, RedundantInputSession)]
@property
def output_sessions(self) -> typing.Sequence[RedundantOutputSession]:
return [s for s in self._rows.values() if isinstance(s, RedundantOutputSession)]
@property
def inferiors(self) -> typing.Sequence[pyuavcan.transport.Transport]:
"""
Read-only access to the list of inferior transports.
The inferiors are guaranteed to be ordered according to the temporal order of their attachment.
"""
return self._cols[:] # Return copy to prevent mutation
def attach_inferior(self, transport: pyuavcan.transport.Transport) -> None:
"""
Adds a new transport to the redundant group. The new transport shall not be closed.
If the transport is already added or it is the redundant transport itself (recursive attachment),
a :class:`ValueError` will be raised.
If the configuration of the new transport is not compatible with the other inferiors or with the
redundant transport instance itself, an instance of :class:`InconsistentInferiorConfigurationError`
will be raised.
Specifically, the following preconditions are checked:
- The new inferior shall operate on the same event loop as the redundant transport instance it is added to.
- The local node-ID shall be the same for all inferiors, or all shall be anonymous.
- The transfer-ID modulo shall meet *either* of the following conditions:
- Identical for all inferiors.
- Not less than :attr:`MONOTONIC_TRANSFER_ID_MODULO_THRESHOLD` for all inferiors.
If an exception is raised while the setup of the new inferior is in progress,
the operation will be rolled back to ensure state consistency.
"""
self._validate_inferior(transport)
self._cols.append(transport)
try:
for redundant_session in self._rows.values():
self._construct_inferior_session(transport, redundant_session)
except Exception:
self.detach_inferior(transport) # Roll back to ensure consistent states.
raise
finally:
self._check_matrix_consistency()
# Launch the capture as late as possible to not leave it dangling if the attachment failed.
for ch in self._unwrapped_capture_handlers:
transport.begin_capture(self._wrap_capture_handler(transport, ch))
def detach_inferior(self, transport: pyuavcan.transport.Transport) -> None:
"""
Removes the specified transport from the redundant group.
If there is no such transport, a :class:`ValueError` will be raised.
All sessions of the removed inferior that are managed by the redundant transport instance
will be automatically closed, but the inferior itself will not be
(the caller will have to do that manually if desired).
"""
if transport not in self._cols:
raise ValueError(f"{transport} is not an inferior of {self}")
index = self._cols.index(transport)
self._cols.remove(transport)
for owner in self._rows.values():
try:
owner._close_inferior(index) # pylint: disable=protected-access
except Exception as ex:
_logger.exception("%s could not close inferior session #%d in %s: %s", self, index, owner, ex)
self._check_matrix_consistency()
def close(self) -> None:
"""
Closes all redundant session instances, detaches and closes all inferior transports.
Any exceptions occurring in the process will be suppressed and logged.
Upon completion, the session matrix will be returned into its original empty state.
It can be populated back by adding new transports and/or instantiating new redundant sessions
if needed.
In other words, closing is reversible here, which is uncommon for the library;
consider this feature experimental.
If the session matrix is empty, this method has no effect.
"""
for s in list(self._rows.values()):
try:
s.close()
except Exception as ex: # pragma: no cover
_logger.exception("%s could not close %s: %s", self, s, ex)
for t in self._cols:
try:
t.close()
except Exception as ex: # pragma: no cover
_logger.exception("%s could not close inferior %s: %s", self, t, ex)
self._cols.clear()
assert not self._rows, "All sessions should have been unregistered"
self._check_matrix_consistency()
def begin_capture(self, handler: pyuavcan.transport.CaptureCallback) -> None:
"""
Stores the handler in the local list of handlers.
Invokes :class:`pyuavcan.transport.Transport.begin_capture` on each inferior.
If at least one inferior raises an exception, it is propagated immediately and the remaining inferiors
will remain in an inconsistent state.
When a new inferior is added later, the stored handlers will be automatically used to enable capture on it.
If such auto-restoration behavior is undesirable, configure capture individually per-inferior instead.
Every capture emitted by the inferiors is wrapped into :class:`RedundantCapture`,
which contains additional metadata about the inferior transport instance that emitted the capture.
This is done to let users understand which transport of the redundant group has
provided the capture and also this information is used by :class:`RedundantTracer`
to automatically manage transfer deduplication.
"""
self._unwrapped_capture_handlers.append(handler)
for c in self._cols:
c.begin_capture(self._wrap_capture_handler(c, handler))
@property
def capture_active(self) -> bool:
return len(self._unwrapped_capture_handlers) > 0
@staticmethod
def make_tracer() -> RedundantTracer:
"""
See :class:`RedundantTracer`.
"""
return RedundantTracer()
async def spoof(self, transfer: pyuavcan.transport.AlienTransfer, monotonic_deadline: float) -> bool:
"""
Simply propagates the call to every inferior.
The return value is a logical AND for all inferiors; False if there are no inferiors.
First exception to occur terminates the operation and is raised immediately.
This is different from regular sending; the assumption is that the caller necessarily wants to ensure
that spoofing takes place against every inferior.
If this is not the case, spoof each inferior separately.
"""
if not self._cols:
return False
gather = asyncio.gather(*[inf.spoof(transfer, monotonic_deadline) for inf in self._cols])
try:
results = await gather
except Exception:
gather.cancel()
raise
return all(results)
def _validate_inferior(self, transport: pyuavcan.transport.Transport) -> None:
# Prevent double-add.
if transport in self._cols:
raise ValueError(f"{transport} is already an inferior of {self}")
# Just out of abundance of paranoia.
if transport is self:
raise ValueError(f"A redundant transport cannot be an inferior of itself")
# If there are no other inferiors, no further checks are necessary.
if self._cols:
# Ensure all inferiors have the same node-ID.
if self.local_node_id != transport.local_node_id:
raise InconsistentInferiorConfigurationError(
f"The inferior has a different node-ID {transport.local_node_id}, expected {self.local_node_id}"
)
# Ensure all inferiors use the same transfer-ID overflow policy.
if self.protocol_parameters.transfer_id_modulo >= Deduplicator.MONOTONIC_TRANSFER_ID_MODULO_THRESHOLD:
if (
transport.protocol_parameters.transfer_id_modulo
< Deduplicator.MONOTONIC_TRANSFER_ID_MODULO_THRESHOLD
):
raise InconsistentInferiorConfigurationError(
f"The new inferior shall use monotonic transfer-ID counters in order to match the "
f"other inferiors in the redundant transport group"
)
else:
tid_modulo = self.protocol_parameters.transfer_id_modulo
if transport.protocol_parameters.transfer_id_modulo != tid_modulo:
raise InconsistentInferiorConfigurationError(
f"The transfer-ID modulo {transport.protocol_parameters.transfer_id_modulo} of the new "
f"inferior is not compatible with the other inferiors ({tid_modulo})"
)
def _get_session(
self,
specifier: pyuavcan.transport.SessionSpecifier,
session_factory: typing.Callable[[typing.Callable[[], None]], RedundantSession],
) -> RedundantSession:
if specifier not in self._rows:
def retire() -> None:
try:
del self._rows[specifier]
except LookupError:
pass
ses = session_factory(retire)
try:
for t in self._cols:
self._construct_inferior_session(t, ses)
except Exception:
ses.close()
raise
assert specifier not in self._rows
self._rows[specifier] = ses
return self._rows[specifier]
@staticmethod
def _construct_inferior_session(transport: pyuavcan.transport.Transport, owner: RedundantSession) -> None:
assert isinstance(transport, pyuavcan.transport.Transport)
if isinstance(owner, pyuavcan.transport.InputSession):
inferior: pyuavcan.transport.Session = transport.get_input_session(owner.specifier, owner.payload_metadata)
elif isinstance(owner, pyuavcan.transport.OutputSession):
inferior = transport.get_output_session(owner.specifier, owner.payload_metadata)
else:
assert False
assert isinstance(owner, RedundantSession) # MyPy makes me miss static typing so much.
# If anything whatsoever goes wrong, just roll everything back and re-raise the exception.
new_index = len(owner.inferiors)
try:
owner._add_inferior(inferior) # pylint: disable=protected-access
except Exception:
# The inferior MUST be closed manually because in the case of failure it is not registered
# in the redundant session.
inferior.close()
# If the inferior has not been added, this method will have no effect:
owner._close_inferior(new_index) # pylint: disable=protected-access
raise
def _check_matrix_consistency(self) -> None:
for row in self._rows.values():
assert len(row.inferiors) == len(self._cols)
def _wrap_capture_handler(
self,
inferior: pyuavcan.transport.Transport,
handler: typing.Callable[[RedundantCapture], None],
) -> pyuavcan.transport.CaptureCallback:
# If you are reading this, send me a postcard.
return lambda cap: handler(
RedundantCapture(
cap.timestamp,
inferior=cap,
iface_id=id(inferior),
transfer_id_modulo=self.protocol_parameters.transfer_id_modulo, # THIS IS PROBABLY SLOW?
)
)
def _get_repr_fields(self) -> typing.Tuple[typing.List[typing.Any], typing.Dict[str, typing.Any]]:
return list(self.inferiors), {}
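# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demonstration of the attach/detach lifecycle defined above. It assumes
# that `pyuavcan.transport.loopback.LoopbackTransport` is available and accepts a
# local node-ID in its constructor; substitute real CAN/UDP/serial transports in an
# actual application.
def _redundant_transport_usage_sketch() -> None:
    from pyuavcan.transport.loopback import LoopbackTransport  # assumed import path

    tr = RedundantTransport()
    assert tr.local_node_id is None and not tr.inferiors  # an empty group is anonymous

    # Both inferiors must share the same node-ID and a compatible transfer-ID modulo.
    tr.attach_inferior(LoopbackTransport(42))
    tr.attach_inferior(LoopbackTransport(42))
    assert tr.local_node_id == 42 and len(tr.inferiors) == 2

    print(tr.protocol_parameters)            # min-reduction over both inferiors
    print(tr.sample_statistics().inferiors)  # one entry per inferior, same order

    tr.detach_inferior(tr.inferiors[0])      # its sessions are closed automatically
    tr.close()                               # reversible: the group is now empty again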
|
|
# Copyright 2015 Christian Kramer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from opy.common.o_db_constants import OOperationType, OConst
from opy.database.o_db_profile_parser import OProfileParser, OElement, OGroup
from opy.database.protocol.o_op import OOperation
__author__ = 'daill'
class OOperationRecordUpdate(OOperation):
def __init__(self):
super().__init__(OOperationType.REQUEST_RECORD_UPDATE)
self.__request_profile_str = "(cluster-id:short)(cluster-position:long)(update-content:boolean)(record-content:bytes)(record-version:int)(record-type:byte)(mode:byte)"
self.__response_profile_str = "(record-version:int)(count-of-collection-changes:int)[{changes}(uuid-most-sig-bits:long)(uuid-least-sig-bits:long)(updated-file-id:long)(updated-page-index:long)(updated-page-offset:int)]*"
self.__request_profile = None
self.__response_profile = None
def getresponseprofile(self):
if self.__response_profile is None:
profile_parser = OProfileParser()
self.__response_profile = profile_parser.parse(self.getresponsehead() + self.__response_profile_str)
return self.__response_profile
def getrequestprofile(self):
if self.__request_profile is None:
profile_parser = OProfileParser()
self.__request_profile = profile_parser.parse(self.__request_profile_str)
return self.__request_profile
def decode(self, unpack_data, data):
"""
        Needs to be overridden because of the dependency between term and group elements.
:param unpack_data:
:param data:
:return:
"""
data_dict = {}
error_state = False
rest = data
count_of_collection_changes = 0
def processelement(element: OElement):
nonlocal rest
nonlocal data_dict
nonlocal count_of_collection_changes
if isinstance(element, OGroup):
                # save main state
                main_dict = data_dict
                main_dict[element.name] = list()
                while count_of_collection_changes > 0:
data_dict = {}
for sub_element in element.getelements():
rest = processelement(sub_element)
count_of_collection_changes -= 1
main_dict[element.name].append(data_dict)
data_dict = main_dict
else:
# handling of a term
rest, value = unpack_data(element.type, rest, name=element.name)
if element.name == "count-of-collection-changes":
# save value as indicator how often the following group will be repeated
count_of_collection_changes = value
                # check if it's an error
if element.name == OConst.SUCCESS_STATUS.value and value == 1:
logging.error("received an error from the server. start handling")
nonlocal error_state
error_state = True
return
data_dict[element.name] = value
return rest
def processprofile(elements):
"""
            Iterate over the whole set of profile elements and unpack them
:param elements:
:return:
"""
for element in elements:
# fetch an error if it occurs
nonlocal error_state
if error_state:
return OConst.ERROR
processelement(element)
return OConst.OK
status = processprofile(self.getresponseprofile().getelements())
# return the status (OK|Error) to decide what to do next and the extracted data
return data_dict, status
class OOperationRecordDelete(OOperation):
def __init__(self):
super().__init__(OOperationType.REQUEST_RECORD_DELETE)
self.__request_profile_str = "(cluster-id:short)(cluster-position:long)(record-version:int)(mode:byte)"
self.__response_profile_str = "(payload-status:byte)"
self.__request_profile = None
self.__response_profile = None
def getresponseprofile(self):
if self.__response_profile is None:
profile_parser = OProfileParser()
self.__response_profile = profile_parser.parse(
self.getresponsehead() + self.__response_profile_str)
return self.__response_profile
def getrequestprofile(self):
if self.__request_profile is None:
profile_parser = OProfileParser()
self.__request_profile = profile_parser.parse(self.__request_profile_str)
return self.__request_profile
class OOperationRecordCreate(OOperation):
def __init__(self):
super().__init__(OOperationType.REQUEST_RECORD_CREATE)
# datasegment id removed since 2.0
# self.__request_profile_str = "(datasegment-id:int)(cluster-id:short)(record-content:bytes)(record-type:byte)(mode:byte)"
self.__request_profile_str = "(cluster-id:short)(record-content:bytes)(record-type:byte)(mode:byte)"
self.__response_profile_str = "(cluster-id:short)(cluster-position:long)(record-version:int)(count-of-collection-changes:int)[{update-info}(uuid-most-sig-bits:long)(uuid-least-sig-bits:long)(updated-file-id:long)(updated-page-index:long)(updated-page-offset:int)]*"
self.__request_profile = None
self.__response_profile = None
def getresponseprofile(self):
if self.__response_profile is None:
profile_parser = OProfileParser()
self.__response_profile = profile_parser.parse(
self.getresponsehead() + self.__response_profile_str)
return self.__response_profile
def getrequestprofile(self):
if self.__request_profile is None:
profile_parser = OProfileParser()
self.__request_profile = profile_parser.parse(self.__request_profile_str)
return self.__request_profile
def decode(self, unpack_data, data):
"""
        Needs to be overridden because of the dependency between term and group elements.
:param unpack_data:
:param data:
:return:
"""
data_dict = {}
error_state = False
rest = data
count_of_collection_changes = 0
def processelement(element: OElement):
nonlocal rest
nonlocal data_dict
nonlocal count_of_collection_changes
if isinstance(element, OGroup):
                # save main state
                main_dict = data_dict
                main_dict[element.name] = list()
                while count_of_collection_changes > 0:
data_dict = {}
for sub_element in element.getelements():
rest = processelement(sub_element)
count_of_collection_changes -= 1
main_dict[element.name].append(data_dict)
data_dict = main_dict
else:
# handling of a term
rest, value = unpack_data(element.type, rest, name=element.name)
if element.name == "count-of-collection-changes":
# save value as indicator how often the following group will be repeated
count_of_collection_changes = value
                # check if it's an error
if element.name == OConst.SUCCESS_STATUS.value and value == 1:
logging.error("received an error from the server. start handling")
nonlocal error_state
error_state = True
return
data_dict[element.name] = value
return rest
def processprofile(elements):
"""
            Iterate over the whole set of profile elements and unpack them
:param elements:
:return:
"""
for element in elements:
# fetch an error if it occurs
nonlocal error_state
if error_state:
return OConst.ERROR
processelement(element)
return OConst.OK
status = processprofile(self.getresponseprofile().getelements())
# return the status (OK|Error) to decide what to do next and the extracted data
return data_dict, status
class OOperationRecordLoad(OOperation):
def __init__(self):
super().__init__(OOperationType.REQUEST_RECORD_LOAD)
self.__request_profile_str = "(cluster-id:short)(cluster-position:long)(fetch-plan:string)(ignore-cache:byte)(load-tombstones:byte)"
self.__response_profile_str = "[{payload}(payload-status:byte)[{records}(record-type:byte)(record-version:int)(record-content:bytes)]*]+"
self.__request_profile = None
self.__response_profile = None
def getresponseprofile(self):
if self.__response_profile is None:
profile_parser = OProfileParser()
self.__response_profile = profile_parser.parse(
self.getresponsehead() + self.__response_profile_str)
return self.__response_profile
def getrequestprofile(self):
if self.__request_profile is None:
profile_parser = OProfileParser()
self.__request_profile = profile_parser.parse(self.__request_profile_str)
return self.__request_profile
def decode(self, unpack_data, data):
"""
        Needs to be overridden because of the dependency between term and group elements.
:param unpack_data:
:param data:
:return:
"""
data_dict = {}
error_state = False
rest = data
def processelement(element: OElement):
nonlocal rest
nonlocal data_dict
if isinstance(element, OGroup):
# save main state
main_dict = data_dict
main_dict[element.name] = list()
while element.is_repeating:
data_dict = {}
for sub_element in element.getelements():
rest = processelement(sub_element)
main_dict[element.name].append(data_dict)
break
data_dict = main_dict
else:
# handling of a term
# check if there are bytes left
if not rest or len(rest) == 0:
return
rest, value = unpack_data(element.type, rest, name=element.name)
                # check if it's an error
if element.name == OConst.SUCCESS_STATUS.value and value == 1:
logging.error("received an error from the server. start handling")
nonlocal error_state
error_state = True
return
data_dict[element.name] = value
return rest
def processprofile(elements):
"""
            Iterate over the whole set of profile elements and unpack them
:param elements:
:return:
"""
for element in elements:
# fetch an error if it occurs
nonlocal error_state
if error_state:
return OConst.ERROR.value
processelement(element)
return OConst.OK
status = processprofile(self.getresponseprofile().getelements())
# return the status (OK|Error) to decide what to do next and the extracted data
return data_dict, status
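# --- Illustrative driver sketch (comments only, not part of the original module) ---
# The decode() overrides above expect a callable that unpacks one typed term and
# returns the remaining bytes plus the decoded value. Both `unpack_data` and
# `raw_response` below are hypothetical stand-ins for whatever the opy connection
# layer actually provides; only the call shape is taken from this module:
#
#     op = OOperationRecordLoad()
#     request_profile = op.getrequestprofile()   # drives request serialization
#     # ... send the request, then receive `raw_response` from the server ...
#     data_dict, status = op.decode(unpack_data, raw_response)
#     if status == OConst.OK:
#         records = data_dict.get('payload', [])  # group name from the response profile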
|
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions for state value and action-value learning.
Value functions estimate the expected return (discounted sum of rewards) that
can be collected by an agent under a given policy of behaviour. This subpackage
implements a number of functions for value learning in discrete scalar action
spaces. Actions are assumed to be represented as indices in the range `[0, A)`
where `A` is the number of distinct actions.
"""
from typing import Union
import chex
import jax
import jax.numpy as jnp
from rlax._src import base
from rlax._src import clipping
from rlax._src import distributions
from rlax._src import multistep
Array = chex.Array
Numeric = chex.Numeric
def td_learning(
v_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
v_t: Numeric,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the TD-learning temporal difference error.
See "Learning to Predict by the Methods of Temporal Differences" by Sutton.
(https://link.springer.com/article/10.1023/A:1022633531479).
Args:
v_tm1: state values at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
v_t: state values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
TD-learning temporal difference error.
"""
chex.assert_rank([v_tm1, r_t, discount_t, v_t], 0)
chex.assert_type([v_tm1, r_t, discount_t, v_t], float)
target_tm1 = r_t + discount_t * v_t
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - v_tm1
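# Illustrative sketch (not part of the public rlax API): a single TD(0) error and a
# batched variant via `jax.vmap`; all transition values below are made up.
def _td_learning_example():
  # Single transition: delta = r_t + discount_t * v_t - v_tm1 = 0.5 + 0.99 * 2.0 - 1.0.
  single = td_learning(
      v_tm1=jnp.asarray(1.0), r_t=jnp.asarray(0.5),
      discount_t=jnp.asarray(0.99), v_t=jnp.asarray(2.0))
  # A batch of transitions is handled by mapping over the leading axis.
  batched = jax.vmap(td_learning)(
      jnp.ones(4), jnp.zeros(4), jnp.full(4, 0.9), jnp.ones(4))
  return single, batched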
def td_lambda(
v_tm1: Array,
r_t: Array,
discount_t: Array,
v_t: Array,
lambda_: Numeric,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates the TD(lambda) temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node74.html).
Args:
v_tm1: sequence of state values at time t-1.
r_t: sequence of rewards at time t.
discount_t: sequence of discounts at time t.
v_t: sequence of state values at time t.
lambda_: mixing parameter lambda, either a scalar or a sequence.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
TD(lambda) temporal difference error.
"""
chex.assert_rank([v_tm1, r_t, discount_t, v_t, lambda_], [1, 1, 1, 1, {0, 1}])
chex.assert_type([v_tm1, r_t, discount_t, v_t, lambda_], float)
target_tm1 = multistep.lambda_returns(r_t, discount_t, v_t, lambda_)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - v_tm1
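# Illustrative sketch (not part of the public rlax API): TD(lambda) errors over a
# short made-up trajectory; a scalar lambda_ is shared across all time steps.
def _td_lambda_example():
  t = 5  # trajectory length
  return td_lambda(
      v_tm1=jnp.zeros(t), r_t=jnp.ones(t), discount_t=jnp.full(t, 0.9),
      v_t=jnp.zeros(t), lambda_=0.95)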
def sarsa(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
a_t: Numeric,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the SARSA temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node64.html.)
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
a_t: action index at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
SARSA temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, a_t],
[1, 0, 0, 0, 1, 0])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, a_t],
[float, int, float, float, float, int])
target_tm1 = r_t + discount_t * q_t[a_t]
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
def expected_sarsa(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
probs_a_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the expected SARSA (SARSE) temporal difference error.
See "A Theoretical and Empirical Analysis of Expected Sarsa" by Seijen,
van Hasselt, Whiteson et al.
(http://www.cs.ox.ac.uk/people/shimon.whiteson/pubs/vanseijenadprl09.pdf).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
probs_a_t: action probabilities at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Expected SARSA temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, probs_a_t],
[1, 0, 0, 0, 1, 1])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, probs_a_t],
[float, int, float, float, float, float])
target_tm1 = r_t + discount_t * jnp.dot(q_t, probs_a_t)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
def sarsa_lambda(
q_tm1: Array,
a_tm1: Array,
r_t: Array,
discount_t: Array,
q_t: Array,
a_t: Array,
lambda_: Numeric,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates the SARSA(lambda) temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node77.html).
Args:
q_tm1: sequence of Q-values at time t-1.
a_tm1: sequence of action indices at time t-1.
r_t: sequence of rewards at time t.
discount_t: sequence of discounts at time t.
q_t: sequence of Q-values at time t.
a_t: sequence of action indices at time t.
lambda_: mixing parameter lambda, either a scalar or a sequence.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
SARSA(lambda) temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, a_t, lambda_],
[2, 1, 1, 1, 2, 1, {0, 1}])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, a_t, lambda_],
[float, int, float, float, float, int, float])
qa_tm1 = base.batched_index(q_tm1, a_tm1)
qa_t = base.batched_index(q_t, a_t)
target_tm1 = multistep.lambda_returns(r_t, discount_t, qa_t, lambda_)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - qa_tm1
def q_learning(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the Q-learning temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node65.html).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Q-learning temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t], [1, 0, 0, 0, 1])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t],
[float, int, float, float, float])
target_tm1 = r_t + discount_t * jnp.max(q_t)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
def double_q_learning(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t_value: Array,
q_t_selector: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the double Q-learning temporal difference error.
See "Double Q-learning" by van Hasselt.
(https://papers.nips.cc/paper/3964-double-q-learning.pdf).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t_value: Q-values at time t.
q_t_selector: selector Q-values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Double Q-learning temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t_value, q_t_selector],
[1, 0, 0, 0, 1, 1])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t_value, q_t_selector],
[float, int, float, float, float, float])
target_tm1 = r_t + discount_t * q_t_value[q_t_selector.argmax()]
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
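# Illustrative sketch (not part of the public rlax API): the selector Q-values pick
# the argmax action while the value Q-values evaluate it; all numbers are made up.
def _double_q_learning_example():
  q_t_selector = jnp.asarray([0.1, 3.0, 0.2])   # argmax -> action 1
  q_t_value = jnp.asarray([1.5, 0.5, 2.5])      # bootstrap uses q_t_value[1] = 0.5
  return double_q_learning(
      q_tm1=jnp.asarray([1.0, 2.0, 3.0]), a_tm1=jnp.asarray(0),
      r_t=jnp.asarray(1.0), discount_t=jnp.asarray(0.9),
      q_t_value=q_t_value, q_t_selector=q_t_selector)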
def persistent_q_learning(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
action_gap_scale: float,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the persistent Q-learning temporal difference error.
See "Increasing the Action Gap: New Operators for Reinforcement Learning"
by Bellemare, Ostrovski, Guez et al. (https://arxiv.org/abs/1512.04860).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
action_gap_scale: coefficient in [0, 1] for scaling the action gap term.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Persistent Q-learning temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t], [1, 0, 0, 0, 1])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t],
[float, int, float, float, float])
corrected_q_t = (
(1. - action_gap_scale) * jnp.max(q_t)
+ action_gap_scale * q_t[a_tm1]
)
target_tm1 = r_t + discount_t * corrected_q_t
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
def qv_learning(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
v_t: Numeric,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the QV-learning temporal difference error.
See "Two Novel On-policy Reinforcement Learning Algorithms based on
TD(lambda)-methods" by Wiering and van Hasselt
(https://ieeexplore.ieee.org/abstract/document/4220845.)
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
v_t: state values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
QV-learning temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, v_t], [1, 0, 0, 0, 0])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, v_t],
[float, int, float, float, float])
target_tm1 = r_t + discount_t * v_t
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
def qv_max(
v_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the QVMAX temporal difference error.
See "The QV Family Compared to Other Reinforcement Learning Algorithms" by
Wiering and van Hasselt (2009).
(http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.713.1931)
Args:
v_tm1: state values at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
QVMAX temporal difference error.
"""
chex.assert_rank([v_tm1, r_t, discount_t, q_t], [0, 0, 0, 1])
chex.assert_type([v_tm1, r_t, discount_t, q_t], float)
target_tm1 = r_t + discount_t * jnp.max(q_t)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - v_tm1
def q_lambda(
q_tm1: Array,
a_tm1: Array,
r_t: Array,
discount_t: Array,
q_t: Array,
lambda_: Numeric,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates Peng's or Watkins' Q(lambda) temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node78.html).
Args:
q_tm1: sequence of Q-values at time t-1.
a_tm1: sequence of action indices at time t-1.
r_t: sequence of rewards at time t.
discount_t: sequence of discounts at time t.
q_t: sequence of Q-values at time t.
lambda_: mixing parameter lambda, either a scalar (e.g. Peng's Q(lambda)) or
      a sequence (e.g. Watkins' Q(lambda)).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Q(lambda) temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, lambda_],
[2, 1, 1, 1, 2, {0, 1}])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, lambda_],
[float, int, float, float, float, float])
qa_tm1 = base.batched_index(q_tm1, a_tm1)
v_t = jnp.max(q_t, axis=-1)
target_tm1 = multistep.lambda_returns(r_t, discount_t, v_t, lambda_)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - qa_tm1
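# Illustrative sketch (not part of the public rlax API): Peng's Q(lambda) over a
# tiny made-up trajectory with a scalar mixing parameter.
def _q_lambda_example():
  t, num_actions = 4, 3
  return q_lambda(
      q_tm1=jnp.zeros((t, num_actions)),
      a_tm1=jnp.zeros(t, dtype=jnp.int32),
      r_t=jnp.ones(t),
      discount_t=jnp.full(t, 0.9),
      q_t=jnp.ones((t, num_actions)),
      lambda_=0.8)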
def retrace(
q_tm1: Array,
q_t: Array,
a_tm1: Array,
a_t: Array,
r_t: Array,
discount_t: Array,
pi_t: Array,
mu_t: Array,
lambda_: float,
eps: float = 1e-8,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates Retrace errors.
See "Safe and Efficient Off-Policy Reinforcement Learning" by Munos et al.
(https://arxiv.org/abs/1606.02647).
Args:
q_tm1: Q-values at time t-1.
q_t: Q-values at time t.
a_tm1: action index at time t-1.
a_t: action index at time t.
r_t: reward at time t.
discount_t: discount at time t.
pi_t: target policy probs at time t.
mu_t: behavior policy probs at time t.
lambda_: scalar mixing parameter lambda.
eps: small value to add to mu_t for numerical stability.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Retrace error.
"""
chex.assert_rank([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],
[2, 2, 1, 1, 1, 1, 2, 1])
chex.assert_type([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],
[float, float, int, int, float, float, float, float])
pi_a_t = base.batched_index(pi_t, a_t)
c_t = jnp.minimum(1.0, pi_a_t / (mu_t + eps)) * lambda_
target_tm1 = multistep.general_off_policy_returns_from_action_values(
q_t, a_t, r_t, discount_t, c_t, pi_t)
q_a_tm1 = base.batched_index(q_tm1, a_tm1)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_a_tm1
def retrace_continuous(q_tm1: Array,
q_t: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
log_rhos: Array,
lambda_: Union[Array, float],
stop_target_gradients: bool = True) -> Array:
"""Retrace continuous.
See "Safe and Efficient Off-Policy Reinforcement Learning" by Munos et al.
(https://arxiv.org/abs/1606.02647).
Args:
q_tm1: Q-values at times [0, ..., K - 1].
q_t: Q-values evaluated at actions collected using behavior
policy at times [1, ..., K - 1].
v_t: Value estimates of the target policy at times [1, ..., K].
r_t: reward at times [1, ..., K].
discount_t: discount at times [1, ..., K].
log_rhos: Log importance weight pi_target/pi_behavior evaluated at actions
collected using behavior policy [1, ..., K - 1].
lambda_: scalar or a vector of mixing parameter lambda.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Retrace error.
"""
chex.assert_rank([q_tm1, q_t, r_t, discount_t, log_rhos, lambda_],
[1, 1, 1, 1, 1, {0, 1}])
chex.assert_type([q_tm1, q_t, r_t, discount_t, log_rhos],
[float, float, float, float, float])
c_t = jnp.minimum(1.0, jnp.exp(log_rhos)) * lambda_
# The generalized returns are independent of Q-values and cs at the final
# state.
target_tm1 = multistep.general_off_policy_returns_from_q_and_v(
q_t, v_t, r_t, discount_t, c_t)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1
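# --- Usage sketch (not part of the library) ----------------------------------
# A hedged example of retrace_continuous above, following the length
# conventions stated in its docstring with K = 3: q_tm1 covers times
# [0, ..., K-1], q_t and log_rhos cover [1, ..., K-1], and v_t, r_t and
# discount_t cover [1, ..., K]. The helper name is illustrative only.
def _example_retrace_continuous():
  q_tm1 = jnp.array([1.0, 0.5, 0.2])     # Q at times [0, 1, 2]
  q_t = jnp.array([0.5, 0.2])            # Q at times [1, 2]
  v_t = jnp.array([0.6, 0.3, 0.1])       # V at times [1, 2, 3]
  r_t = jnp.array([1.0, 0.0, -1.0])      # rewards at times [1, 2, 3]
  discount_t = jnp.array([0.9, 0.9, 0.0])
  log_rhos = jnp.array([0.0, -0.1])      # log importance weights at [1, 2]
  return retrace_continuous(q_tm1, q_t, v_t, r_t, discount_t, log_rhos,
                            lambda_=0.95)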
def categorical_l2_project(
z_p: Array,
probs: Array,
z_q: Array
) -> Array:
"""Projects a categorical distribution (z_p, p) onto a different support z_q.
The projection step minimizes an L2-metric over the cumulative distribution
functions (CDFs) of the source and target distributions.
Let kq be len(z_q) and kp be len(z_p). This projection works for any
support z_q, in particular kq need not be equal to kp.
See "A Distributional Perspective on RL" by Bellemare et al.
(https://arxiv.org/abs/1707.06887).
Args:
z_p: support of distribution p.
probs: probability values.
z_q: support to project distribution (z_p, probs) onto.
Returns:
Projection of (z_p, p) onto support z_q under Cramer distance.
"""
chex.assert_rank([z_p, probs, z_q], 1)
chex.assert_type([z_p, probs, z_q], float)
kp = z_p.shape[0]
kq = z_q.shape[0]
# Construct helper arrays from z_q.
d_pos = jnp.roll(z_q, shift=-1)
d_neg = jnp.roll(z_q, shift=1)
# Clip z_p to be in new support range (vmin, vmax).
z_p = jnp.clip(z_p, z_q[0], z_q[-1])[None, :]
assert z_p.shape == (1, kp)
# Get the distance between atom values in support.
d_pos = (d_pos - z_q)[:, None] # z_q[i+1] - z_q[i]
d_neg = (z_q - d_neg)[:, None] # z_q[i] - z_q[i-1]
z_q = z_q[:, None]
assert z_q.shape == (kq, 1)
# Ensure that we do not divide by zero, in case of atoms of identical value.
d_neg = jnp.where(d_neg > 0, 1. / d_neg, jnp.zeros_like(d_neg))
d_pos = jnp.where(d_pos > 0, 1. / d_pos, jnp.zeros_like(d_pos))
delta_qp = z_p - z_q # clip(z_p)[j] - z_q[i]
d_sign = (delta_qp >= 0.).astype(probs.dtype)
assert delta_qp.shape == (kq, kp)
assert d_sign.shape == (kq, kp)
# Matrix of entries sgn(a_ij) * |a_ij|, with a_ij = clip(z_p)[j] - z_q[i].
delta_hat = (d_sign * delta_qp * d_pos) - ((1. - d_sign) * delta_qp * d_neg)
probs = probs[None, :]
assert delta_hat.shape == (kq, kp)
assert probs.shape == (1, kp)
return jnp.sum(jnp.clip(1. - delta_hat, 0., 1.) * probs, axis=-1)
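# --- Usage sketch (not part of the library) ----------------------------------
# A small, hedged example of categorical_l2_project above: a 3-atom source
# distribution is projected onto a coarser 2-atom support. Mass on atoms
# outside [z_q[0], z_q[-1]] is first clipped onto the edge atoms, and the
# result always sums to one over z_q. The helper name is illustrative only.
def _example_categorical_l2_project():
  z_p = jnp.array([0.0, 1.0, 2.0])      # source support
  probs = jnp.array([0.2, 0.5, 0.3])    # source probabilities
  z_q = jnp.array([0.0, 2.0])           # target support
  # The middle atom (value 1.0) sits halfway between the two target atoms, so
  # its mass is split evenly; the result is [0.45, 0.55].
  return categorical_l2_project(z_p, probs, z_q)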
def categorical_td_learning(
v_atoms_tm1: Array,
v_logits_tm1: Array,
r_t: Numeric,
discount_t: Numeric,
v_atoms_t: Array,
v_logits_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements TD-learning for categorical value distributions.
See "A Distributional Perspective on Reinforcement Learning", by
Bellemare, Dabney and Munos (https://arxiv.org/pdf/1707.06887.pdf).
Args:
v_atoms_tm1: atoms of V distribution at time t-1.
v_logits_tm1: logits of V distribution at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
v_atoms_t: atoms of V distribution at time t.
v_logits_t: logits of V distribution at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Categorical TD-learning loss (i.e. temporal difference error).
"""
chex.assert_rank(
[v_atoms_tm1, v_logits_tm1, r_t, discount_t, v_atoms_t, v_logits_t],
[1, 1, 0, 0, 1, 1])
chex.assert_type(
[v_atoms_tm1, v_logits_tm1, r_t, discount_t, v_atoms_t, v_logits_t],
[float, float, float, float, float, float])
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t + discount_t * v_atoms_t
# Convert logits to distribution.
v_t_probs = jax.nn.softmax(v_logits_t)
# Project using the Cramer distance and maybe stop gradient flow to targets.
target = categorical_l2_project(target_z, v_t_probs, v_atoms_tm1)
target = jax.lax.select(stop_target_gradients, jax.lax.stop_gradient(target),
target)
# Compute loss (i.e. temporal difference error).
return distributions.categorical_cross_entropy(
labels=target, logits=v_logits_tm1)
def categorical_q_learning(
q_atoms_tm1: Array,
q_logits_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_atoms_t: Array,
q_logits_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements Q-learning for categorical Q distributions.
See "A Distributional Perspective on Reinforcement Learning", by
Bellemare, Dabney and Munos (https://arxiv.org/pdf/1707.06887.pdf).
Args:
q_atoms_tm1: atoms of Q distribution at time t-1.
q_logits_tm1: logits of Q distribution at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_atoms_t: atoms of Q distribution at time t.
q_logits_t: logits of Q distribution at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Categorical Q-learning loss (i.e. temporal difference error).
"""
chex.assert_rank([
q_atoms_tm1, q_logits_tm1, a_tm1, r_t, discount_t, q_atoms_t, q_logits_t
], [1, 2, 0, 0, 0, 1, 2])
chex.assert_type([
q_atoms_tm1, q_logits_tm1, a_tm1, r_t, discount_t, q_atoms_t, q_logits_t
], [float, float, int, float, float, float, float])
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t + discount_t * q_atoms_t
# Convert logits to distribution, then find greedy action in state s_t.
q_t_probs = jax.nn.softmax(q_logits_t)
q_t_mean = jnp.sum(q_t_probs * q_atoms_t[jnp.newaxis, :], axis=1)
pi_t = jnp.argmax(q_t_mean)
# Compute distribution for greedy action.
p_target_z = q_t_probs[pi_t]
# Project using the Cramer distance and maybe stop gradient flow to targets.
target = categorical_l2_project(target_z, p_target_z, q_atoms_tm1)
target = jax.lax.select(stop_target_gradients, jax.lax.stop_gradient(target),
target)
# Compute loss (i.e. temporal difference error).
logit_qa_tm1 = q_logits_tm1[a_tm1]
return distributions.categorical_cross_entropy(
labels=target, logits=logit_qa_tm1)
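# --- Usage sketch (not part of the library) ----------------------------------
# A hedged example of categorical_q_learning above with a 5-atom support shared
# between the time t-1 and time t distributions, two actions, and uniform
# (all-zero) logits. The helper name is illustrative only.
def _example_categorical_q_learning():
  atoms = jnp.linspace(-1.0, 1.0, 5)    # support of the value distribution
  logits_tm1 = jnp.zeros((2, 5))        # [num_actions, num_atoms]
  logits_t = jnp.zeros((2, 5))
  return categorical_q_learning(
      q_atoms_tm1=atoms, q_logits_tm1=logits_tm1, a_tm1=0,
      r_t=1.0, discount_t=0.9, q_atoms_t=atoms, q_logits_t=logits_t)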
def categorical_double_q_learning(
q_atoms_tm1: Array,
q_logits_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_atoms_t: Array,
q_logits_t: Array,
q_t_selector: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements double Q-learning for categorical Q distributions.
See "A Distributional Perspective on Reinforcement Learning", by
Bellemare, Dabney and Munos (https://arxiv.org/pdf/1707.06887.pdf)
and "Double Q-learning" by van Hasselt.
(https://papers.nips.cc/paper/3964-double-q-learning.pdf).
Args:
q_atoms_tm1: atoms of Q distribution at time t-1.
q_logits_tm1: logits of Q distribution at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_atoms_t: atoms of Q distribution at time t.
q_logits_t: logits of Q distribution at time t.
q_t_selector: selector Q-values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Categorical double Q-learning loss (i.e. temporal difference error).
"""
chex.assert_rank([
q_atoms_tm1, q_logits_tm1, a_tm1, r_t, discount_t, q_atoms_t, q_logits_t,
q_t_selector
], [1, 2, 0, 0, 0, 1, 2, 1])
chex.assert_type([
q_atoms_tm1, q_logits_tm1, a_tm1, r_t, discount_t, q_atoms_t, q_logits_t,
q_t_selector
], [float, float, int, float, float, float, float, float])
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t + discount_t * q_atoms_t
# Select logits for greedy action in state s_t and convert to distribution.
p_target_z = jax.nn.softmax(q_logits_t[q_t_selector.argmax()])
# Project using the Cramer distance and maybe stop gradient flow to targets.
target = categorical_l2_project(target_z, p_target_z, q_atoms_tm1)
target = jax.lax.select(stop_target_gradients, jax.lax.stop_gradient(target),
target)
# Compute loss (i.e. temporal difference error).
logit_qa_tm1 = q_logits_tm1[a_tm1]
return distributions.categorical_cross_entropy(
labels=target, logits=logit_qa_tm1)
def _quantile_regression_loss(
dist_src: Array,
tau_src: Array,
dist_target: Array,
huber_param: float = 0.,
stop_target_gradients: bool = True,
) -> Numeric:
"""Compute (Huber) QR loss between two discrete quantile-valued distributions.
See "Distributional Reinforcement Learning with Quantile Regression" by
Dabney et al. (https://arxiv.org/abs/1710.10044).
Args:
dist_src: source probability distribution.
tau_src: source distribution probability thresholds.
dist_target: target probability distribution.
huber_param: Huber loss parameter, defaults to 0 (no Huber loss).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Quantile regression loss.
"""
chex.assert_rank([dist_src, tau_src, dist_target], 1)
chex.assert_type([dist_src, tau_src, dist_target], float)
# Calculate quantile error.
delta = dist_target[None, :] - dist_src[:, None]
delta_neg = (delta < 0.).astype(jnp.float32)
delta_neg = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(delta_neg), delta_neg)
weight = jnp.abs(tau_src[:, None] - delta_neg)
# Calculate Huber loss.
if huber_param > 0.:
loss = clipping.huber_loss(delta, huber_param)
else:
loss = jnp.abs(delta)
loss *= weight
# Average over target-samples dimension, sum over src-samples dimension.
return jnp.sum(jnp.mean(loss, axis=-1))
def quantile_q_learning(
dist_q_tm1: Array,
tau_q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
dist_q_t_selector: Array,
dist_q_t: Array,
huber_param: float = 0.,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements Q-learning for quantile-valued Q distributions.
See "Distributional Reinforcement Learning with Quantile Regression" by
Dabney et al. (https://arxiv.org/abs/1710.10044).
Args:
dist_q_tm1: Q distribution at time t-1.
tau_q_tm1: Q distribution probability thresholds.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
dist_q_t_selector: Q distribution at time t for selecting greedy action in
target policy. This is separate from dist_q_t as in Double Q-Learning, but
can be computed with the target network and a separate set of samples.
dist_q_t: target Q distribution at time t.
huber_param: Huber loss parameter, defaults to 0 (no Huber loss).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Quantile regression Q learning loss.
"""
chex.assert_rank([
dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t_selector, dist_q_t
], [2, 1, 0, 0, 0, 2, 2])
chex.assert_type([
dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t_selector, dist_q_t
], [float, float, int, float, float, float, float])
# Only update the taken actions.
dist_qa_tm1 = dist_q_tm1[:, a_tm1]
# Select target action according to greedy policy w.r.t. dist_q_t_selector.
q_t_selector = jnp.mean(dist_q_t_selector, axis=0)
a_t = jnp.argmax(q_t_selector)
dist_qa_t = dist_q_t[:, a_t]
# Compute target, do not backpropagate into it.
dist_target = r_t + discount_t * dist_qa_t
dist_target = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(dist_target), dist_target)
return _quantile_regression_loss(
dist_qa_tm1, tau_q_tm1, dist_target, huber_param)
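# --- Usage sketch (not part of the library) ----------------------------------
# A hedged example of quantile_q_learning above, using three quantiles and two
# actions. dist_q_t_selector and dist_q_t may come from different networks
# (online vs. target) as in double Q-learning; here the same array is reused
# purely for brevity. The helper name is illustrative only.
def _example_quantile_q_learning():
  tau = jnp.array([1 / 6, 3 / 6, 5 / 6])  # quantile midpoints
  dist_q_tm1 = jnp.zeros((3, 2))          # [num_quantiles, num_actions]
  dist_q_t = jnp.ones((3, 2))
  return quantile_q_learning(
      dist_q_tm1, tau, a_tm1=0, r_t=1.0, discount_t=0.9,
      dist_q_t_selector=dist_q_t, dist_q_t=dist_q_t, huber_param=1.0)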
def quantile_expected_sarsa(
dist_q_tm1: Array,
tau_q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
dist_q_t: Array,
probs_a_t: Array,
huber_param: float = 0.,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements Expected SARSA for quantile-valued Q distributions.
Args:
dist_q_tm1: Q distribution at time t-1.
tau_q_tm1: Q distribution probability thresholds.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
dist_q_t: target Q distribution at time t.
probs_a_t: action probabilities at time t.
huber_param: Huber loss parameter, defaults to 0 (no Huber loss).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Quantile regression Expected SARSA learning loss.
"""
chex.assert_rank([
dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t, probs_a_t
], [2, 1, 0, 0, 0, 2, 1])
chex.assert_type([
dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t, probs_a_t
], [float, float, int, float, float, float, float])
# Only update the taken actions.
dist_qa_tm1 = dist_q_tm1[:, a_tm1]
dist_qa_t = jnp.einsum('qa,a->q', dist_q_t, probs_a_t)
# Compute target, do not backpropagate into it.
dist_target = r_t + discount_t * dist_qa_t
dist_target = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(dist_target), dist_target)
return _quantile_regression_loss(
dist_qa_tm1, tau_q_tm1, dist_target, huber_param)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An evaluator of a specific application of a transform."""
# pytype: skip-file
from __future__ import absolute_import
import atexit
import collections
import logging
import random
import time
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Type
from future.utils import iteritems
import apache_beam.io as io
from apache_beam import coders
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.runners import common
from apache_beam.runners.common import DoFnRunner
from apache_beam.runners.common import DoFnState
from apache_beam.runners.dataflow.native_io.iobase import _NativeWrite # pylint: disable=protected-access
from apache_beam.runners.direct.direct_runner import _DirectReadFromPubSub
from apache_beam.runners.direct.direct_runner import _GroupByKeyOnly
from apache_beam.runners.direct.direct_runner import _StreamingGroupAlsoByWindow
from apache_beam.runners.direct.direct_runner import _StreamingGroupByKeyOnly
from apache_beam.runners.direct.direct_userstate import DirectUserStateContext
from apache_beam.runners.direct.sdf_direct_runner import ProcessElements
from apache_beam.runners.direct.sdf_direct_runner import ProcessFn
from apache_beam.runners.direct.sdf_direct_runner import SDFProcessElementInvoker
from apache_beam.runners.direct.test_stream_impl import _TestStream
from apache_beam.runners.direct.test_stream_impl import _WatermarkController
from apache_beam.runners.direct.util import KeyedWorkItem
from apache_beam.runners.direct.util import TransformResult
from apache_beam.runners.direct.watermark_manager import WatermarkManager
from apache_beam.testing.test_stream import ElementEvent
from apache_beam.testing.test_stream import PairWithTiming
from apache_beam.testing.test_stream import ProcessingTimeEvent
from apache_beam.testing.test_stream import TimingInfo
from apache_beam.testing.test_stream import WatermarkEvent
from apache_beam.testing.test_stream import WindowedValueHolder
from apache_beam.transforms import core
from apache_beam.transforms.trigger import InMemoryUnmergedState
from apache_beam.transforms.trigger import TimeDomain
from apache_beam.transforms.trigger import _CombiningValueStateTag
from apache_beam.transforms.trigger import _ListStateTag
from apache_beam.transforms.trigger import _ValueStateTag
from apache_beam.transforms.trigger import create_trigger_driver
from apache_beam.transforms.userstate import get_dofn_specs
from apache_beam.transforms.userstate import is_stateful_dofn
from apache_beam.transforms.window import GlobalWindows
from apache_beam.transforms.window import WindowedValue
from apache_beam.typehints.typecheck import TypeCheckError
from apache_beam.utils import counters
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Timestamp
if TYPE_CHECKING:
from apache_beam.io.gcp.pubsub import _PubSubSource
from apache_beam.io.gcp.pubsub import PubsubMessage
from apache_beam.pipeline import AppliedPTransform
from apache_beam.runners.direct.evaluation_context import EvaluationContext
_LOGGER = logging.getLogger(__name__)
class TransformEvaluatorRegistry(object):
"""For internal use only; no backwards-compatibility guarantees.
Creates instances of TransformEvaluator for the application of a transform.
"""
_test_evaluators_overrides = {
} # type: Dict[Type[core.PTransform], Type[_TransformEvaluator]]
def __init__(self, evaluation_context):
# type: (EvaluationContext) -> None
assert evaluation_context
self._evaluation_context = evaluation_context
self._evaluators = {
io.Read: _BoundedReadEvaluator,
_DirectReadFromPubSub: _PubSubReadEvaluator,
core.Flatten: _FlattenEvaluator,
core.Impulse: _ImpulseEvaluator,
core.ParDo: _ParDoEvaluator,
_GroupByKeyOnly: _GroupByKeyOnlyEvaluator,
_StreamingGroupByKeyOnly: _StreamingGroupByKeyOnlyEvaluator,
_StreamingGroupAlsoByWindow: _StreamingGroupAlsoByWindowEvaluator,
_NativeWrite: _NativeWriteEvaluator,
_TestStream: _TestStreamEvaluator,
ProcessElements: _ProcessElementsEvaluator,
_WatermarkController: _WatermarkControllerEvaluator,
PairWithTiming: _PairWithTimingEvaluator,
} # type: Dict[Type[core.PTransform], Type[_TransformEvaluator]]
self._evaluators.update(self._test_evaluators_overrides)
self._root_bundle_providers = {
core.PTransform: DefaultRootBundleProvider,
_TestStream: _TestStreamRootBundleProvider,
}
def get_evaluator(
self, applied_ptransform, input_committed_bundle, side_inputs):
"""Returns a TransformEvaluator suitable for processing given inputs."""
assert applied_ptransform
assert bool(applied_ptransform.side_inputs) == bool(side_inputs)
# Walk up the class hierarchy to find an evaluable type. This is necessary
# for supporting sub-classes of core transforms.
for cls in applied_ptransform.transform.__class__.mro():
evaluator = self._evaluators.get(cls)
if evaluator:
break
if not evaluator:
raise NotImplementedError(
'Execution of [%s] not implemented in runner %s.' %
(type(applied_ptransform.transform), self))
return evaluator(
self._evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
def get_root_bundle_provider(self, applied_ptransform):
provider_cls = None
for cls in applied_ptransform.transform.__class__.mro():
provider_cls = self._root_bundle_providers.get(cls)
if provider_cls:
break
if not provider_cls:
raise NotImplementedError(
'Root provider for [%s] not implemented in runner %s' %
(type(applied_ptransform.transform), self))
return provider_cls(self._evaluation_context, applied_ptransform)
def should_execute_serially(self, applied_ptransform):
"""Returns True if this applied_ptransform should run one bundle at a time.
Some TransformEvaluators use a global state object to keep track of their
global execution state. For example, the evaluator for _GroupByKeyOnly uses
this state as an in-memory dictionary to buffer keys.
Serially executed evaluators act as a synchronization point in the graph, and
execution will not move forward until they receive all of their inputs. Once
they have received all of their input, they release the combined output. Their
output may consist of multiple bundles, as they may divide their output into
pieces before releasing it.
Args:
applied_ptransform: Transform to be used for execution.
Returns:
True if executor should execute applied_ptransform serially.
"""
if isinstance(applied_ptransform.transform,
(_GroupByKeyOnly,
_StreamingGroupByKeyOnly,
_StreamingGroupAlsoByWindow,
_NativeWrite)):
return True
elif (isinstance(applied_ptransform.transform, core.ParDo) and
is_stateful_dofn(applied_ptransform.transform.dofn)):
return True
return False
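# --- Illustration (not part of Beam) ------------------------------------------
# A hedged, self-contained sketch of the MRO walk performed by
# TransformEvaluatorRegistry.get_evaluator above: registering an evaluator for
# a base transform class automatically covers its subclasses, because the
# lookup tries each class in type(transform).mro() in order. The classes and
# helper name below are hypothetical and exist only for illustration.
def _example_mro_dispatch():
  class _BaseTransform(object):
    pass
  class _CustomTransform(_BaseTransform):
    pass
  registry = {_BaseTransform: 'base-evaluator'}
  transform = _CustomTransform()
  for cls in type(transform).mro():
    if cls in registry:
      return registry[cls]  # found via the base class, covers the subclass too
  raise NotImplementedError('No evaluator registered for %r' % type(transform))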
class RootBundleProvider(object):
"""Provides bundles for the initial execution of a root transform."""
def __init__(self, evaluation_context, applied_ptransform):
self._evaluation_context = evaluation_context
self._applied_ptransform = applied_ptransform
def get_root_bundles(self):
raise NotImplementedError
class DefaultRootBundleProvider(RootBundleProvider):
"""Provides an empty bundle by default for root transforms."""
def get_root_bundles(self):
input_node = pvalue.PBegin(self._applied_ptransform.transform.pipeline)
empty_bundle = (
self._evaluation_context.create_empty_committed_bundle(input_node))
return [empty_bundle]
class _TestStreamRootBundleProvider(RootBundleProvider):
"""Provides an initial bundle for the TestStream evaluator.
This bundle is used as the initial state for the TestStream. Each unprocessed
bundle emitted by the TestStream afterwards carries its state: the index into
the stream and the watermark.
"""
def get_root_bundles(self):
test_stream = self._applied_ptransform.transform
# If there was an endpoint defined then get the events from the
# TestStreamService.
if test_stream.endpoint:
_TestStreamEvaluator.event_stream = _TestStream.events_from_rpc(
test_stream.endpoint, test_stream.output_tags, test_stream.coder)
else:
_TestStreamEvaluator.event_stream = (
_TestStream.events_from_script(test_stream._events))
bundle = self._evaluation_context.create_bundle(
pvalue.PBegin(self._applied_ptransform.transform.pipeline))
bundle.add(GlobalWindows.windowed_value(b'', timestamp=MIN_TIMESTAMP))
bundle.commit(None)
return [bundle]
class _TransformEvaluator(object):
"""An evaluator of a specific application of a transform."""
def __init__(self,
evaluation_context, # type: EvaluationContext
applied_ptransform, # type: AppliedPTransform
input_committed_bundle,
side_inputs
):
self._evaluation_context = evaluation_context
self._applied_ptransform = applied_ptransform
self._input_committed_bundle = input_committed_bundle
self._side_inputs = side_inputs
self._expand_outputs()
self._execution_context = evaluation_context.get_execution_context(
applied_ptransform)
self._step_context = self._execution_context.get_step_context()
def _expand_outputs(self):
outputs = set()
for pval in self._applied_ptransform.outputs.values():
if isinstance(pval, pvalue.DoOutputsTuple):
pvals = (v for v in pval)
else:
pvals = (pval, )
for v in pvals:
outputs.add(v)
self._outputs = frozenset(outputs)
def _split_list_into_bundles(
self,
output_pcollection,
elements,
max_element_per_bundle,
element_size_fn):
"""Splits elements, an iterable, into multiple output bundles.
Args:
output_pcollection: PCollection that the elements belong to.
elements: elements to be chunked into bundles.
max_element_per_bundle: (approximately) the maximum element per bundle.
If it is None, only a single bundle will be produced.
element_size_fn: Function to return the size of a given element.
Returns:
List of output uncommitted bundles with at least one bundle.
"""
bundle = self._evaluation_context.create_bundle(output_pcollection)
bundle_size = 0
bundles = [bundle]
for element in elements:
if max_element_per_bundle and bundle_size >= max_element_per_bundle:
bundle = self._evaluation_context.create_bundle(output_pcollection)
bundle_size = 0
bundles.append(bundle)
bundle.output(element)
bundle_size += element_size_fn(element)
return bundles
def start_bundle(self):
"""Starts a new bundle."""
pass
def process_timer_wrapper(self, timer_firing):
"""Process timer by clearing and then calling process_timer().
This method is called with any timer firing and clears the delivered
timer from the keyed state and then calls process_timer(). The default
process_timer() implementation emits a KeyedWorkItem for the particular
timer and passes it to process_element(). Evaluator subclasses which
desire different timer delivery semantics can override process_timer().
"""
state = self._step_context.get_keyed_state(timer_firing.encoded_key)
state.clear_timer(
timer_firing.window, timer_firing.name, timer_firing.time_domain)
self.process_timer(timer_firing)
def process_timer(self, timer_firing):
"""Default process_timer() impl. generating KeyedWorkItem element."""
self.process_element(
GlobalWindows.windowed_value(
KeyedWorkItem(
timer_firing.encoded_key, timer_firings=[timer_firing])))
def process_element(self, element):
"""Processes a new element as part of the current bundle."""
raise NotImplementedError('%s does not process elements.' % type(self))
def finish_bundle(self):
# type: () -> TransformResult
"""Finishes the bundle and produces output."""
pass
class _BoundedReadEvaluator(_TransformEvaluator):
"""TransformEvaluator for bounded Read transform."""
# After some benchmarks, 1000 was optimal among {100,1000,10000}
MAX_ELEMENT_PER_BUNDLE = 1000
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
self._source = applied_ptransform.transform.source
self._source.pipeline_options = evaluation_context.pipeline_options
super(_BoundedReadEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
def finish_bundle(self):
assert len(self._outputs) == 1
output_pcollection = list(self._outputs)[0]
def _read_values_to_bundles(reader):
read_result = [GlobalWindows.windowed_value(e) for e in reader]
return self._split_list_into_bundles(
output_pcollection,
read_result,
_BoundedReadEvaluator.MAX_ELEMENT_PER_BUNDLE,
lambda _: 1)
if isinstance(self._source, io.iobase.BoundedSource):
# Getting a RangeTracker for the default range of the source and reading
# the full source using that.
range_tracker = self._source.get_range_tracker(None, None)
reader = self._source.read(range_tracker)
bundles = _read_values_to_bundles(reader)
else:
with self._source.reader() as reader:
bundles = _read_values_to_bundles(reader)
return TransformResult(self, bundles, [], None, None)
class _WatermarkControllerEvaluator(_TransformEvaluator):
"""TransformEvaluator for the _WatermarkController transform.
This is used to enable multiple output watermarks for the TestStream.
"""
# The state tag used to store the watermark.
WATERMARK_TAG = _ValueStateTag('_WatermarkControllerEvaluator_Watermark_Tag')
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
self.transform = applied_ptransform.transform
super(_WatermarkControllerEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
self._init_state()
def _init_state(self):
"""Gets and sets the initial state.
This is used to keep track of the watermark hold between calls.
"""
transform_states = self._evaluation_context._transform_keyed_states
state = transform_states[self._applied_ptransform]
if self.WATERMARK_TAG not in state:
watermark_state = InMemoryUnmergedState()
watermark_state.set_global_state(self.WATERMARK_TAG, MIN_TIMESTAMP)
state[self.WATERMARK_TAG] = watermark_state
self._state = state[self.WATERMARK_TAG]
@property
def _watermark(self):
return self._state.get_global_state(self.WATERMARK_TAG)
@_watermark.setter
def _watermark(self, watermark):
self._state.set_global_state(self.WATERMARK_TAG, watermark)
def start_bundle(self):
self.bundles = []
def process_element(self, element):
# Emit the elements here so that the order of the elements in the script
# matches the order of the elements flowing through the pipeline.
event = element.value
if isinstance(event, WatermarkEvent):
self._watermark = event.new_watermark
elif isinstance(event, ElementEvent):
main_output = list(self._outputs)[0]
bundle = self._evaluation_context.create_bundle(main_output)
for tv in event.timestamped_values:
# Unreify the value into the correct window.
if isinstance(tv.value, WindowedValueHolder):
bundle.output(tv.value.windowed_value)
else:
bundle.output(
GlobalWindows.windowed_value(tv.value, timestamp=tv.timestamp))
self.bundles.append(bundle)
def finish_bundle(self):
# The watermark hold we set here is the way we allow the TestStream events
# to control the output watermark.
return TransformResult(
self, self.bundles, [], None, {None: self._watermark})
class _PairWithTimingEvaluator(_TransformEvaluator):
"""TransformEvaluator for the PairWithTiming transform.
This transform takes an element as an input and outputs
KV(element, `TimingInfo`). Where the `TimingInfo` contains both the
processing time timestamp and watermark.
"""
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
super(_PairWithTimingEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
def start_bundle(self):
main_output = list(self._outputs)[0]
self.bundle = self._evaluation_context.create_bundle(main_output)
watermark_manager = self._evaluation_context._watermark_manager
watermarks = watermark_manager.get_watermarks(self._applied_ptransform)
output_watermark = watermarks.output_watermark
now = Timestamp(seconds=watermark_manager._clock.time())
self.timing_info = TimingInfo(now, output_watermark)
def process_element(self, element):
result = WindowedValue((element.value, self.timing_info),
element.timestamp,
element.windows,
element.pane_info)
self.bundle.output(result)
def finish_bundle(self):
return TransformResult(self, [self.bundle], [], None, {})
class _TestStreamEvaluator(_TransformEvaluator):
"""TransformEvaluator for the TestStream transform.
This evaluator's responsibility is to retrieve the next event from the
_TestStream and either: advance the clock, advance the _TestStream watermark,
or pass the event to the _WatermarkController.
The _WatermarkController is in charge of emitting the elements to the
downstream consumers and setting its own output watermark.
"""
event_stream = None
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
super(_TestStreamEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
self.test_stream = applied_ptransform.transform
self.is_done = False
def start_bundle(self):
self.bundles = []
self.watermark = MIN_TIMESTAMP
def process_element(self, element):
# The watermark of the _TestStream transform itself.
self.watermark = element.timestamp
# Set up the correct watermark holds in the Watermark controllers and the
# TestStream so that the watermarks will not automatically advance to +inf
# when elements start streaming. This can happen multiple times in the first
# bundle, but the operations are idempotent and adding state to keep track
# of this would add unnecessary code complexity.
events = []
if self.watermark == MIN_TIMESTAMP:
for event in self.test_stream._set_up(self.test_stream.output_tags):
events.append(event)
# Retrieve the TestStream's event stream and read from it.
try:
events.append(next(self.event_stream))
except StopIteration:
# Advance the watermarks to +inf to cleanly stop the pipeline.
self.is_done = True
events += ([
e for e in self.test_stream._tear_down(self.test_stream.output_tags)
])
for event in events:
# We can either have the _TestStream or the _WatermarkController to emit
# the elements. We chose to emit in the _WatermarkController so that the
# element is emitted at the correct watermark value.
if isinstance(event, (ElementEvent, WatermarkEvent)):
# The WATERMARK_CONTROL_TAG is used to hold the _TestStream's
# watermark to -inf, then +inf-1, then +inf. This watermark progression
# is ultimately used to set up the proper holds to allow the
# _WatermarkControllers to control their own output watermarks.
if event.tag == _TestStream.WATERMARK_CONTROL_TAG:
self.watermark = event.new_watermark
else:
main_output = list(self._outputs)[0]
bundle = self._evaluation_context.create_bundle(main_output)
bundle.output(GlobalWindows.windowed_value(event))
self.bundles.append(bundle)
elif isinstance(event, ProcessingTimeEvent):
self._evaluation_context._watermark_manager._clock.advance_time(
event.advance_by)
else:
raise ValueError('Invalid TestStream event: %s.' % event)
def finish_bundle(self):
unprocessed_bundles = []
# Continue to send its own state to itself via an unprocessed bundle. This
# acts as a heartbeat, where each element will read the next event from the
# event stream.
if not self.is_done:
unprocessed_bundle = self._evaluation_context.create_bundle(
pvalue.PBegin(self._applied_ptransform.transform.pipeline))
unprocessed_bundle.add(
GlobalWindows.windowed_value(b'', timestamp=self.watermark))
unprocessed_bundles.append(unprocessed_bundle)
# Returning the watermark in the dict here is used as a watermark hold.
return TransformResult(
self, self.bundles, unprocessed_bundles, None, {None: self.watermark})
class _PubSubReadEvaluator(_TransformEvaluator):
"""TransformEvaluator for PubSub read."""
# A mapping of transform to _PubSubSubscriptionWrapper.
# TODO(BEAM-7750): Prevents garbage collection of pipeline instances.
_subscription_cache = {} # type: Dict[AppliedPTransform, str]
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
super(_PubSubReadEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
self.source = self._applied_ptransform.transform._source # type: _PubSubSource
if self.source.id_label:
raise NotImplementedError(
'DirectRunner: id_label is not supported for PubSub reads')
sub_project = None
if hasattr(self._evaluation_context, 'pipeline_options'):
from apache_beam.options.pipeline_options import GoogleCloudOptions
sub_project = (
self._evaluation_context.pipeline_options.view_as(
GoogleCloudOptions).project)
if not sub_project:
sub_project = self.source.project
self._sub_name = self.get_subscription(
self._applied_ptransform,
self.source.project,
self.source.topic_name,
sub_project,
self.source.subscription_name)
@classmethod
def get_subscription(
cls, transform, project, short_topic_name, sub_project, short_sub_name):
from google.cloud import pubsub
if short_sub_name:
return pubsub.SubscriberClient.subscription_path(project, short_sub_name)
if transform in cls._subscription_cache:
return cls._subscription_cache[transform]
sub_client = pubsub.SubscriberClient()
sub_name = sub_client.subscription_path(
sub_project,
'beam_%d_%x' % (int(time.time()), random.randrange(1 << 32)))
topic_name = sub_client.topic_path(project, short_topic_name)
sub_client.create_subscription(sub_name, topic_name)
atexit.register(sub_client.delete_subscription, sub_name)
cls._subscription_cache[transform] = sub_name
return cls._subscription_cache[transform]
def start_bundle(self):
pass
def process_element(self, element):
pass
def _read_from_pubsub(self, timestamp_attribute):
# type: (...) -> List[Tuple[Timestamp, PubsubMessage]]
from apache_beam.io.gcp.pubsub import PubsubMessage
from google.cloud import pubsub
def _get_element(message):
parsed_message = PubsubMessage._from_message(message)
if (timestamp_attribute and
timestamp_attribute in parsed_message.attributes):
rfc3339_or_milli = parsed_message.attributes[timestamp_attribute]
try:
timestamp = Timestamp(micros=int(rfc3339_or_milli) * 1000)
except ValueError:
try:
timestamp = Timestamp.from_rfc3339(rfc3339_or_milli)
except ValueError as e:
raise ValueError('Bad timestamp value: %s' % e)
else:
timestamp = Timestamp(
message.publish_time.seconds, message.publish_time.nanos // 1000)
return timestamp, parsed_message
# Because of the AutoAck, we are not able to reread messages if this
# evaluator fails with an exception before emitting a bundle. However,
# the DirectRunner currently doesn't retry work items anyway, so the
# pipeline would enter an inconsistent state on any error.
sub_client = pubsub.SubscriberClient()
try:
response = sub_client.pull(
self._sub_name, max_messages=10, return_immediately=True)
results = [_get_element(rm.message) for rm in response.received_messages]
ack_ids = [rm.ack_id for rm in response.received_messages]
if ack_ids:
sub_client.acknowledge(self._sub_name, ack_ids)
finally:
sub_client.api.transport.channel.close()
return results
def finish_bundle(self):
# type: () -> TransformResult
data = self._read_from_pubsub(self.source.timestamp_attribute)
if data:
output_pcollection = list(self._outputs)[0]
bundle = self._evaluation_context.create_bundle(output_pcollection)
# TODO(ccy): Respect the PubSub source's id_label field.
for timestamp, message in data:
if self.source.with_attributes:
element = message
else:
element = message.data
bundle.output(
GlobalWindows.windowed_value(element, timestamp=timestamp))
bundles = [bundle]
else:
bundles = []
assert self._applied_ptransform.transform is not None
if self._applied_ptransform.inputs:
input_pvalue = self._applied_ptransform.inputs[0]
else:
input_pvalue = pvalue.PBegin(self._applied_ptransform.transform.pipeline)
unprocessed_bundle = self._evaluation_context.create_bundle(input_pvalue)
# TODO(udim): Correct value for watermark hold.
return TransformResult(
self,
bundles, [unprocessed_bundle],
None, {None: Timestamp.of(time.time())})
class _FlattenEvaluator(_TransformEvaluator):
"""TransformEvaluator for Flatten transform."""
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
super(_FlattenEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
def start_bundle(self):
assert len(self._outputs) == 1
output_pcollection = list(self._outputs)[0]
self.bundle = self._evaluation_context.create_bundle(output_pcollection)
def process_element(self, element):
self.bundle.output(element)
def finish_bundle(self):
bundles = [self.bundle]
return TransformResult(self, bundles, [], None, None)
class _ImpulseEvaluator(_TransformEvaluator):
"""TransformEvaluator for Impulse transform."""
def finish_bundle(self):
assert len(self._outputs) == 1
output_pcollection = list(self._outputs)[0]
bundle = self._evaluation_context.create_bundle(output_pcollection)
bundle.output(GlobalWindows.windowed_value(b''))
return TransformResult(self, [bundle], [], None, None)
class _TaggedReceivers(dict):
"""Received ParDo output and redirect to the associated output bundle."""
def __init__(self, evaluation_context):
self._evaluation_context = evaluation_context
self._null_receiver = None
super(_TaggedReceivers, self).__init__()
class NullReceiver(common.Receiver):
"""Ignores undeclared outputs, default execution mode."""
def receive(self, element):
# type: (WindowedValue) -> None
pass
class _InMemoryReceiver(common.Receiver):
"""Buffers undeclared outputs to the given dictionary."""
def __init__(self, target, tag):
self._target = target
self._tag = tag
def receive(self, element):
# type: (WindowedValue) -> None
self._target[self._tag].append(element)
def __missing__(self, key):
if not self._null_receiver:
self._null_receiver = _TaggedReceivers.NullReceiver()
return self._null_receiver
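# --- Illustration (not part of Beam) ------------------------------------------
# A hedged, standalone sketch of the dict.__missing__ pattern used by
# _TaggedReceivers above: looking up an undeclared output tag quietly returns a
# shared "null" receiver that drops elements, instead of raising KeyError. The
# classes and helper name below are hypothetical and exist only for
# illustration.
def _example_null_receiver_dict():
  class _NullReceiver(object):
    def receive(self, element):
      pass  # undeclared outputs are silently discarded
  class _Receivers(dict):
    def __init__(self):
      super(_Receivers, self).__init__()
      self._null = _NullReceiver()
    def __missing__(self, key):
      return self._null
  receivers = _Receivers()
  receivers['undeclared_tag'].receive('element')  # no KeyError raised
  return receivers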
class _ParDoEvaluator(_TransformEvaluator):
"""TransformEvaluator for ParDo transform."""
def __init__(self,
evaluation_context, # type: EvaluationContext
applied_ptransform, # type: AppliedPTransform
input_committed_bundle,
side_inputs,
perform_dofn_pickle_test=True
):
super(_ParDoEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
# This is a workaround for the SDF implementation, which adds state to the
# SDF that is not picklable.
self._perform_dofn_pickle_test = perform_dofn_pickle_test
def start_bundle(self):
transform = self._applied_ptransform.transform
self._tagged_receivers = _TaggedReceivers(self._evaluation_context)
for output_tag in self._applied_ptransform.outputs:
output_pcollection = pvalue.PCollection(None, tag=output_tag)
output_pcollection.producer = self._applied_ptransform
self._tagged_receivers[output_tag] = (
self._evaluation_context.create_bundle(output_pcollection))
self._tagged_receivers[output_tag].tag = output_tag
self._counter_factory = counters.CounterFactory()
# TODO(aaltay): Consider storing the serialized form as an optimization.
dofn = (
pickler.loads(pickler.dumps(transform.dofn))
if self._perform_dofn_pickle_test else transform.dofn)
args = transform.args if hasattr(transform, 'args') else []
kwargs = transform.kwargs if hasattr(transform, 'kwargs') else {}
self.user_state_context = None
self.user_timer_map = {}
if is_stateful_dofn(dofn):
kv_type_hint = self._applied_ptransform.inputs[0].element_type
if kv_type_hint and kv_type_hint != Any:
coder = coders.registry.get_coder(kv_type_hint)
self.key_coder = coder.key_coder()
else:
self.key_coder = coders.registry.get_coder(Any)
self.user_state_context = DirectUserStateContext(
self._step_context, dofn, self.key_coder)
_, all_timer_specs = get_dofn_specs(dofn)
for timer_spec in all_timer_specs:
self.user_timer_map['user/%s' % timer_spec.name] = timer_spec
self.runner = DoFnRunner(
dofn,
args,
kwargs,
self._side_inputs,
self._applied_ptransform.inputs[0].windowing,
tagged_receivers=self._tagged_receivers,
step_name=self._applied_ptransform.full_label,
state=DoFnState(self._counter_factory),
user_state_context=self.user_state_context)
self.runner.setup()
self.runner.start()
def process_timer(self, timer_firing):
if timer_firing.name not in self.user_timer_map:
  _LOGGER.warning('Unknown timer fired: %s', timer_firing)
  return
timer_spec = self.user_timer_map[timer_firing.name]
self.runner.process_user_timer(
timer_spec,
self.key_coder.decode(timer_firing.encoded_key),
timer_firing.window,
timer_firing.timestamp,
# TODO Add paneinfo to timer_firing in DirectRunner
None)
def process_element(self, element):
self.runner.process(element)
def finish_bundle(self):
self.runner.finish()
self.runner.teardown()
bundles = list(self._tagged_receivers.values())
result_counters = self._counter_factory.get_counters()
if self.user_state_context:
self.user_state_context.commit()
return TransformResult(self, bundles, [], result_counters, None)
class _GroupByKeyOnlyEvaluator(_TransformEvaluator):
"""TransformEvaluator for _GroupByKeyOnly transform."""
MAX_ELEMENT_PER_BUNDLE = None
ELEMENTS_TAG = _ListStateTag('elements')
COMPLETION_TAG = _CombiningValueStateTag('completed', any)
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
super(_GroupByKeyOnlyEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
def _is_final_bundle(self):
return (
self._execution_context.watermarks.input_watermark ==
WatermarkManager.WATERMARK_POS_INF)
def start_bundle(self):
self.global_state = self._step_context.get_keyed_state(None)
assert len(self._outputs) == 1
self.output_pcollection = list(self._outputs)[0]
# The output type of a GroupByKey will be Tuple[Any, Any] or more specific.
# TODO(BEAM-2717): Infer coders earlier.
kv_type_hint = (
self._applied_ptransform.outputs[None].element_type or
self._applied_ptransform.transform.get_type_hints().input_types[0][0])
self.key_coder = coders.registry.get_coder(kv_type_hint.tuple_types[0])
def process_timer(self, timer_firing):
# We do not need to emit a KeyedWorkItem to process_element().
pass
def process_element(self, element):
assert not self.global_state.get_state(
None, _GroupByKeyOnlyEvaluator.COMPLETION_TAG)
if (isinstance(element, WindowedValue) and
isinstance(element.value, collections.Iterable) and
len(element.value) == 2):
k, v = element.value
encoded_k = self.key_coder.encode(k)
state = self._step_context.get_keyed_state(encoded_k)
state.add_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG, v)
else:
raise TypeCheckError(
'Input to _GroupByKeyOnly must be a PCollection of '
'windowed key-value pairs. Instead received: %r.' % element)
def finish_bundle(self):
if self._is_final_bundle():
if self.global_state.get_state(None,
_GroupByKeyOnlyEvaluator.COMPLETION_TAG):
# Ignore empty bundles after emitting output. (This may happen because
# empty bundles do not affect input watermarks.)
bundles = []
else:
gbk_result = []
# TODO(ccy): perhaps we can clean this up to not use this
# internal attribute of the DirectStepContext.
for encoded_k in self._step_context.existing_keyed_state:
# Ignore global state.
if encoded_k is None:
continue
k = self.key_coder.decode(encoded_k)
state = self._step_context.get_keyed_state(encoded_k)
vs = state.get_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG)
gbk_result.append(GlobalWindows.windowed_value((k, vs)))
def len_element_fn(element):
_, v = element.value
return len(v)
bundles = self._split_list_into_bundles(
self.output_pcollection,
gbk_result,
_GroupByKeyOnlyEvaluator.MAX_ELEMENT_PER_BUNDLE,
len_element_fn)
self.global_state.add_state(
None, _GroupByKeyOnlyEvaluator.COMPLETION_TAG, True)
hold = WatermarkManager.WATERMARK_POS_INF
else:
bundles = []
hold = WatermarkManager.WATERMARK_NEG_INF
self.global_state.set_timer(
None, '', TimeDomain.WATERMARK, WatermarkManager.WATERMARK_POS_INF)
return TransformResult(self, bundles, [], None, {None: hold})
class _StreamingGroupByKeyOnlyEvaluator(_TransformEvaluator):
"""TransformEvaluator for _StreamingGroupByKeyOnly transform.
The _GroupByKeyOnlyEvaluator buffers elements until its input watermark goes
to infinity, which is suitable for batch mode execution. During streaming
mode execution, we emit each bundle to the next transform as soon as it
arrives.
"""
MAX_ELEMENT_PER_BUNDLE = None
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
super(_StreamingGroupByKeyOnlyEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
def start_bundle(self):
self.gbk_items = collections.defaultdict(list)
assert len(self._outputs) == 1
self.output_pcollection = list(self._outputs)[0]
# The input type of a GroupByKey will be Tuple[Any, Any] or more specific.
kv_type_hint = self._applied_ptransform.inputs[0].element_type
key_type_hint = (kv_type_hint.tuple_types[0] if kv_type_hint else Any)
self.key_coder = coders.registry.get_coder(key_type_hint)
def process_element(self, element):
if (isinstance(element, WindowedValue) and
isinstance(element.value, collections.Iterable) and
len(element.value) == 2):
k, v = element.value
self.gbk_items[self.key_coder.encode(k)].append(v)
else:
raise TypeCheckError(
'Input to _GroupByKeyOnly must be a PCollection of '
'windowed key-value pairs. Instead received: %r.' % element)
def finish_bundle(self):
bundles = []
bundle = None
for encoded_k, vs in iteritems(self.gbk_items):
if not bundle:
bundle = self._evaluation_context.create_bundle(self.output_pcollection)
bundles.append(bundle)
kwi = KeyedWorkItem(encoded_k, elements=vs)
bundle.add(GlobalWindows.windowed_value(kwi))
return TransformResult(self, bundles, [], None, None)
class _StreamingGroupAlsoByWindowEvaluator(_TransformEvaluator):
"""TransformEvaluator for the _StreamingGroupAlsoByWindow transform.
This evaluator is only used in streaming mode. In batch mode, the
GroupAlsoByWindow operation is evaluated as a normal DoFn, as defined
in transforms/core.py.
"""
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
super(_StreamingGroupAlsoByWindowEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
def start_bundle(self):
assert len(self._outputs) == 1
self.output_pcollection = list(self._outputs)[0]
self.driver = create_trigger_driver(
self._applied_ptransform.transform.windowing,
clock=self._evaluation_context._watermark_manager._clock)
self.gabw_items = []
self.keyed_holds = {}
# The input type (which is the same as the output type) of a
# GroupAlsoByWindow will be Tuple[Any, Iter[Any]] or more specific.
kv_type_hint = self._applied_ptransform.outputs[None].element_type
key_type_hint = (kv_type_hint.tuple_types[0] if kv_type_hint else Any)
self.key_coder = coders.registry.get_coder(key_type_hint)
def process_element(self, element):
kwi = element.value
assert isinstance(kwi, KeyedWorkItem), kwi
encoded_k, timer_firings, vs = (
kwi.encoded_key, kwi.timer_firings, kwi.elements)
k = self.key_coder.decode(encoded_k)
state = self._step_context.get_keyed_state(encoded_k)
watermarks = self._evaluation_context._watermark_manager.get_watermarks(
self._applied_ptransform)
for timer_firing in timer_firings:
for wvalue in self.driver.process_timer(timer_firing.window,
timer_firing.name,
timer_firing.time_domain,
timer_firing.timestamp,
state,
watermarks.input_watermark):
self.gabw_items.append(wvalue.with_value((k, wvalue.value)))
if vs:
for wvalue in self.driver.process_elements(state,
vs,
watermarks.output_watermark,
watermarks.input_watermark):
self.gabw_items.append(wvalue.with_value((k, wvalue.value)))
self.keyed_holds[encoded_k] = state.get_earliest_hold()
def finish_bundle(self):
bundles = []
if self.gabw_items:
bundle = self._evaluation_context.create_bundle(self.output_pcollection)
for item in self.gabw_items:
bundle.add(item)
bundles.append(bundle)
return TransformResult(self, bundles, [], None, self.keyed_holds)
class _NativeWriteEvaluator(_TransformEvaluator):
"""TransformEvaluator for _NativeWrite transform."""
ELEMENTS_TAG = _ListStateTag('elements')
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
assert not side_inputs
super(_NativeWriteEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
assert applied_ptransform.transform.sink
self._sink = applied_ptransform.transform.sink
@property
def _is_final_bundle(self):
return (
self._execution_context.watermarks.input_watermark ==
WatermarkManager.WATERMARK_POS_INF)
@property
def _has_already_produced_output(self):
return (
self._execution_context.watermarks.output_watermark ==
WatermarkManager.WATERMARK_POS_INF)
def start_bundle(self):
self.global_state = self._step_context.get_keyed_state(None)
def process_timer(self, timer_firing):
# We do not need to emit a KeyedWorkItem to process_element().
pass
def process_element(self, element):
self.global_state.add_state(
None, _NativeWriteEvaluator.ELEMENTS_TAG, element)
def finish_bundle(self):
# finish_bundle will append incoming bundles in memory until all the bundles
# carrying data are processed. This is done to produce only a single output
# shard (some tests depend on this behavior). It is possible to receive
# incoming empty bundles after the output is produced; these bundles will be
# ignored and will not generate additional output files.
# TODO(altay): Do not wait until the last bundle to write in a single shard.
if self._is_final_bundle:
elements = self.global_state.get_state(
None, _NativeWriteEvaluator.ELEMENTS_TAG)
if self._has_already_produced_output:
# Ignore empty bundles that arrive after the output is produced.
assert elements == []
else:
self._sink.pipeline_options = self._evaluation_context.pipeline_options
with self._sink.writer() as writer:
for v in elements:
writer.Write(v.value)
hold = WatermarkManager.WATERMARK_POS_INF
else:
hold = WatermarkManager.WATERMARK_NEG_INF
self.global_state.set_timer(
None, '', TimeDomain.WATERMARK, WatermarkManager.WATERMARK_POS_INF)
return TransformResult(self, [], [], None, {None: hold})
class _ProcessElementsEvaluator(_TransformEvaluator):
"""An evaluator for sdf_direct_runner.ProcessElements transform."""
# Maximum number of elements that will be produced by a Splittable DoFn before
# a checkpoint is requested by the runner.
DEFAULT_MAX_NUM_OUTPUTS = None
# Maximum duration a Splittable DoFn will process an element before a
# checkpoint is requested by the runner.
DEFAULT_MAX_DURATION = 1
def __init__(
self,
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs):
super(_ProcessElementsEvaluator, self).__init__(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs)
process_elements_transform = applied_ptransform.transform
assert isinstance(process_elements_transform, ProcessElements)
# Replacing the do_fn of the transform with a wrapper do_fn that performs
# SDF magic.
transform = applied_ptransform.transform
sdf = transform.sdf
self._process_fn = transform.new_process_fn(sdf)
transform.dofn = self._process_fn
assert isinstance(self._process_fn, ProcessFn)
self._process_fn.step_context = self._step_context
process_element_invoker = (
SDFProcessElementInvoker(
max_num_outputs=self.DEFAULT_MAX_NUM_OUTPUTS,
max_duration=self.DEFAULT_MAX_DURATION))
self._process_fn.set_process_element_invoker(process_element_invoker)
self._par_do_evaluator = _ParDoEvaluator(
evaluation_context,
applied_ptransform,
input_committed_bundle,
side_inputs,
perform_dofn_pickle_test=False)
self.keyed_holds = {}
def start_bundle(self):
self._par_do_evaluator.start_bundle()
def process_element(self, element):
assert isinstance(element, WindowedValue)
assert len(element.windows) == 1
window = element.windows[0]
if isinstance(element.value, KeyedWorkItem):
key = element.value.encoded_key
else:
# If not a `KeyedWorkItem`, this must be a tuple where key is a randomly
# generated key and the value is a `WindowedValue` that contains an
# `ElementAndRestriction` object.
assert isinstance(element.value, tuple)
key = element.value[0]
self._par_do_evaluator.process_element(element)
state = self._step_context.get_keyed_state(key)
self.keyed_holds[key] = state.get_state(
window, self._process_fn.watermark_hold_tag)
def finish_bundle(self):
par_do_result = self._par_do_evaluator.finish_bundle()
transform_result = TransformResult(
self,
par_do_result.uncommitted_output_bundles,
par_do_result.unprocessed_bundles,
par_do_result.counters,
par_do_result.keyed_watermark_holds,
par_do_result.undeclared_tag_values)
for key in self.keyed_holds:
transform_result.keyed_watermark_holds[key] = self.keyed_holds[key]
return transform_result
|
|
"""sympify -- convert objects SymPy internal format"""
from __future__ import print_function, division
from inspect import getmro
from .core import all_classes as sympy_classes
from .compatibility import iterable, string_types, range
from .evaluate import global_evaluate
class SympifyError(ValueError):
def __init__(self, expr, base_exc=None):
self.expr = expr
self.base_exc = base_exc
def __str__(self):
if self.base_exc is None:
return "SympifyError: %r" % (self.expr,)
return ("Sympify of expression '%s' failed, because of exception being "
"raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__,
str(self.base_exc)))
converter = {} # See sympify docstring.
class CantSympify(object):
"""
Mix in this trait to a class to disallow sympification of its instances.
Examples
========
>>> from sympy.core.sympify import sympify, CantSympify
>>> class Something(dict):
... pass
...
>>> sympify(Something())
{}
>>> class Something(dict, CantSympify):
... pass
...
>>> sympify(Something())
Traceback (most recent call last):
...
SympifyError: SympifyError: {}
"""
pass
def sympify(a, locals=None, convert_xor=True, strict=False, rational=False,
evaluate=None):
"""Converts an arbitrary expression to a type that can be used inside SymPy.
For example, it will convert Python ints into instances of sympy.Rational,
floats into instances of sympy.Float, etc. It is also able to coerce symbolic
expressions which inherit from Basic. This can be useful in cooperation
with SAGE.
It currently accepts as arguments:
- any object defined in sympy
- standard numeric python types: int, long, float, Decimal
- strings (like "0.09" or "2e-19")
- booleans, including ``None`` (will leave ``None`` unchanged)
- lists, sets or tuples containing any of the above
If the argument is already a type that SymPy understands, it will do
nothing but return that value. This can be used at the beginning of a
function to ensure you are working with the correct type.
>>> from sympy import sympify
>>> sympify(2).is_integer
True
>>> sympify(2).is_real
True
>>> sympify(2.0).is_real
True
>>> sympify("2.0").is_real
True
>>> sympify("2e-45").is_real
True
If the expression could not be converted, a SympifyError is raised.
>>> sympify("x***2")
Traceback (most recent call last):
...
SympifyError: SympifyError: "could not parse u'x***2'"
Locals
------
The sympification happens with access to everything that is loaded
by ``from sympy import *``; anything used in a string that is not
defined by that import will be converted to a symbol. In the following,
the ``bitcount`` function is treated as a symbol and the ``O`` is
interpreted as the Order object (used with series) and it raises
an error when used improperly:
>>> s = 'bitcount(42)'
>>> sympify(s)
bitcount(42)
>>> sympify("O(x)")
O(x)
>>> sympify("O + 1")
Traceback (most recent call last):
...
TypeError: unbound method...
In order to have ``bitcount`` be recognized it can be imported into a
namespace dictionary and passed as locals:
>>> from sympy.core.compatibility import exec_
>>> ns = {}
>>> exec_('from sympy.core.evalf import bitcount', ns)
>>> sympify(s, locals=ns)
6
In order to have the ``O`` interpreted as a Symbol, identify it as such
in the namespace dictionary. This can be done in a variety of ways; all
three of the following are possibilities:
>>> from sympy import Symbol
>>> ns["O"] = Symbol("O") # method 1
>>> exec_('from sympy.abc import O', ns) # method 2
>>> ns.update(dict(O=Symbol("O"))) # method 3
>>> sympify("O + 1", locals=ns)
O + 1
If you want *all* single-letter and Greek-letter variables to be symbols
then you can use the clashing-symbols dictionaries that have been defined
there as private variables: _clash1 (single-letter variables), _clash2
(the multi-letter Greek names) or _clash (both single and multi-letter
names that are defined in abc).
>>> from sympy.abc import _clash1
>>> _clash1
{'C': C, 'E': E, 'I': I, 'N': N, 'O': O, 'Q': Q, 'S': S}
>>> sympify('I & Q', _clash1)
And(I, Q)
Strict
------
If the option ``strict`` is set to ``True``, only the types for which an
explicit conversion has been defined are converted. In the other
cases, a SympifyError is raised.
>>> print(sympify(None))
None
>>> sympify(None, strict=True)
Traceback (most recent call last):
...
SympifyError: SympifyError: None
Evaluation
----------
If the option ``evaluate`` is set to ``False``, then arithmetic and
operators will be converted into their SymPy equivalents and the
``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
be denested first. This is done via an AST transformation that replaces
operators with their SymPy equivalents, so if an operand redefines any
of those operations, the redefined operators will not be used.
>>> sympify('2**2 / 3 + 5')
19/3
>>> sympify('2**2 / 3 + 5', evaluate=False)
2**2/3 + 5
Extending
---------
To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
just define a ``_sympy_`` method to your class. You can do that even to
classes that you do not own by subclassing or adding the method at runtime.
>>> from sympy import Matrix
>>> class MyList1(object):
... def __iter__(self):
... yield 1
... yield 2
... raise StopIteration
... def __getitem__(self, i): return list(self)[i]
... def _sympy_(self): return Matrix(self)
>>> sympify(MyList1())
Matrix([
[1],
[2]])
If you do not have control over the class definition you could also use the
``converter`` global dictionary. The key is the class and the value is a
function that takes a single argument and returns the desired SymPy
object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.
>>> class MyList2(object): # XXX Do not do this if you control the class!
... def __iter__(self): # Use _sympy_!
... yield 1
... yield 2
... raise StopIteration
... def __getitem__(self, i): return list(self)[i]
>>> from sympy.core.sympify import converter
>>> converter[MyList2] = lambda x: Matrix(x)
>>> sympify(MyList2())
Matrix([
[1],
[2]])
Notes
=====
Sometimes autosimplification during sympification results in expressions
that are very different in structure than what was entered. Until such
autosimplification is no longer done, the ``kernS`` function might be of
some use. In the example below you can see how an expression reduces to
-1 by autosimplification, but does not do so when ``kernS`` is used.
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x
>>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
-1
>>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
>>> sympify(s)
-1
>>> kernS(s)
-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
"""
if evaluate is None:
evaluate = global_evaluate[0]
try:
cls = a.__class__
except AttributeError: # a is probably an old-style class object
cls = type(a)
if cls in sympy_classes:
return a
if cls is type(None):
if strict:
raise SympifyError(a)
else:
return a
try:
return converter[cls](a)
except KeyError:
for superclass in getmro(cls):
try:
return converter[superclass](a)
except KeyError:
continue
if isinstance(a, CantSympify):
raise SympifyError(a)
try:
return a._sympy_()
except AttributeError:
pass
if not isinstance(a, string_types):
for coerce in (float, int):
try:
return sympify(coerce(a))
except (TypeError, ValueError, AttributeError, SympifyError):
continue
if strict:
raise SympifyError(a)
if iterable(a):
try:
return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
rational=rational) for x in a])
except TypeError:
# Not all iterables are rebuildable with their type.
pass
if isinstance(a, dict):
try:
return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
rational=rational) for x in a.items()])
except TypeError:
# Not all iterables are rebuildable with their type.
pass
# At this point we were given an arbitrary expression
# which does not inherit from Basic and doesn't implement
# _sympy_ (which is a canonical and robust way to convert
# anything to SymPy expression).
#
# As a last chance, we try to take "a"'s normal form via unicode()
# and try to parse it. If it fails, then we have no luck and
# return an exception
try:
from .compatibility import unicode
a = unicode(a)
except Exception as exc:
raise SympifyError(a, exc)
from sympy.parsing.sympy_parser import (parse_expr, TokenError,
standard_transformations)
from sympy.parsing.sympy_parser import convert_xor as t_convert_xor
from sympy.parsing.sympy_parser import rationalize as t_rationalize
transformations = standard_transformations
if rational:
transformations += (t_rationalize,)
if convert_xor:
transformations += (t_convert_xor,)
try:
a = a.replace('\n', '')
expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
except (TokenError, SyntaxError) as exc:
raise SympifyError('could not parse %r' % a, exc)
return expr
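# Hedged usage sketch (illustrative; not part of the original module). It
# exercises the two extension hooks documented in the sympify docstring above:
# the module-level ``converter`` registry and the ``_sympy_`` method. The
# class name ``_PointSketch`` is made up for this example.
def _sympify_extension_sketch():
    from sympy import Symbol
    class _PointSketch(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def _sympy_(self):
            # sympify() tries _sympy_() before falling back to string parsing.
            return Symbol('x')*self.x + Symbol('y')*self.y
    # The converter registry achieves the same without touching the class;
    # sympify() consults it before _sympy_().
    converter[_PointSketch] = lambda p: Symbol('x')*p.x + Symbol('y')*p.y
    return sympify(_PointSketch(2, 3))  # -> 2*x + 3*y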
def _sympify(a):
"""
Short version of sympify for internal usage for __add__ and __eq__ methods
where it is ok to allow some things (like Python integers and floats) in
the expression. This excludes things (like strings) that are unwise to
allow into such an expression.
>>> from sympy import Integer
>>> Integer(1) == 1
True
>>> Integer(1) == '1'
False
>>> from sympy.abc import x
>>> x + 1
x + 1
>>> x + '1'
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'
see: sympify
"""
return sympify(a, strict=True)
def kernS(s):
"""Use a hack to try keep autosimplification from joining Integer or
minus sign into an Add of a Mul; this modification doesn't
prevent the 2-arg Mul from becoming an Add, however.
Examples
========
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x, y, z
The 2-arg Mul allows a leading Integer to be distributed but kernS will
prevent that:
>>> 2*(x + y)
2*x + 2*y
>>> kernS('2*(x + y)')
2*(x + y)
If use of the hack fails, the un-hacked string will be passed to sympify...
and you get what you get.
XXX This hack should not be necessary once issue 4596 has been resolved.
"""
import re
from sympy.core.symbol import Symbol
hit = False
if '(' in s:
if s.count('(') != s.count(")"):
raise SympifyError('unmatched left parenthesis')
kern = '_kern'
while kern in s:
kern += "_"
olds = s
# digits*( -> digits*kern*(
s = re.sub(r'(\d+)( *\* *)\(', r'\1*%s\2(' % kern, s)
# negated parenthetical
kern2 = kern + "2"
while kern2 in s:
kern2 += "_"
# step 1: -(...) --> kern-kern*(...)
target = r'%s-%s*(' % (kern, kern)
s = re.sub(r'- *\(', target, s)
# step 2: double the matching closing parenthesis
# kern-kern*(...) --> kern-kern*(...)kern2
i = nest = 0
while True:
j = s.find(target, i)
if j == -1:
break
            j = s.find('(', j)  # search from the current match, not from the start of s
for j in range(j, len(s)):
if s[j] == "(":
nest += 1
elif s[j] == ")":
nest -= 1
if nest == 0:
break
s = s[:j] + kern2 + s[j:]
i = j
# step 3: put in the parentheses
# kern-kern*(...)kern2 --> (-kern*(...))
s = s.replace(target, target.replace(kern, "(", 1))
s = s.replace(kern2, ')')
hit = kern in s
for i in range(2):
try:
expr = sympify(s)
break
except: # the kern might cause unknown errors, so use bare except
if hit:
s = olds # maybe it didn't like the kern; use un-kerned s
hit = False
continue
expr = sympify(s) # let original error raise
if not hit:
return expr
rep = {Symbol(kern): 1}
def _clear(expr):
if isinstance(expr, (list, tuple, set)):
return type(expr)([_clear(e) for e in expr])
if hasattr(expr, 'subs'):
return expr.subs(rep, hack2=True)
return expr
expr = _clear(expr)
# hope that kern is not there anymore
return expr
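# Hedged sketch (illustrative; not part of the original module): the effect of
# the kern hack described in the kernS docstring above.  '2*(x + y)' is first
# rewritten to '2*_kern*(x + y)' so the 2 cannot be auto-distributed, and the
# _kern symbol is substituted away afterwards.
def _kernS_sketch():
    return kernS('2*(x + y)'), sympify('2*(x + y)')  # -> (2*(x + y), 2*x + 2*y)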
|
|
"""
Jennifer's scientific computing cheatsheet - arrays and numerical analysis.
Contents:
- numpy & ndarray basics
- Basic operations and functions with ndarrays
- Type conversions
- Basic indexing and slicing
- Boolean indexing
- Fancy indexing
- Conditional logic and array operations
- Transposing arrays and swapping axes
- Linear algebra
- Mathematical and statistical methods
- Sorting
- Uniqueness and set logic
- File input and output with arrays
Each section of this cheatsheet can be copy/pasted into ipython (using
the %paste magic command for indented code) and run separately in an
interactive session.
Many of these code snippets are pilfered / adapted from:
- Python for Data Analysis by Wes McKinney
https://github.com/pydata/pydata-book
This cheatsheet is part of a set: science_numpy.py, science_plots.py,
science_prettyplots, and science_data.py, covering the following
scientific computing modules, with a focus on atmospheric science
applications:
- numpy: Numerical python for N-D arrays, linear algebra, etc
- matplotlib: 2-D plots and visualizations
- basemap: Plotting geographic data
- pandas: Statistics for tabular (spreadsheet-like) data
- xray: N-D labeled datasets and netCDF I/O
"""
# Make float division the default for / operator, even when the
# operands are both integers
from __future__ import division
# Naming convention for numpy import:
import numpy as np
# ----------------------------------------------------------------------
print("\nWelcome to Jennifer's cheatsheet for scientific computing in Python!")
def heading(s):
"""Print a nice heading to the console."""
line = '-' *60
print('\n' + line + '\n' + s + '\n' + line)
heading('numpy: N-D arrays, linear algebra, FFT, random numbers\n'
'and other numerical operations.')
# ----------------------------------------------------------------------
# ndarray basics
# ----------------------------------------------------------------------
print("""
An N-D array in numpy is an object of type ndarray, a multi-dimensional
container for homogeneous data (i.e. all elements must be the same type).
""")
# Generate a 2x3 ndarray of random numbers
data = np.random.randn(2, 3)
print(data)
# The ndarray object attributes include the data type, dimension and shape
print(data.dtype)
print(data.ndim)
print(data.shape)
# Create ndarrays from Python lists
# -- If the data type isn't specified, numpy makes a smart guess based
# on the contents of the array
list1 = [6, 7.5, 8, 0, 1] # Python list
data1 = np.array(list1) # Numpy 1-D array
print(data1)
print(data1.dtype)
list2 = [[1, 2, 3, 4], [5, 6, 7, 8]] # Python nested list
data2 = np.array(list2) # Numpy 2-D array
print(data2)
print(data2.dtype)
print(data2.ndim)
print(data2.shape)
# Specifying data types for ndarrays
data1 = np.array([1, 2, 3], dtype=np.float64)
data2 = np.array([1, 2, 3], dtype=np.int32)
print(data1.dtype)
print(data2.dtype)
# Arrays of zeros, ones, and empty
zeros1 = np.zeros(10)
zeros2 = np.zeros((3, 6))
empty1 = np.empty((2, 3, 2)) # Empty arrays are initialized with misc garbage
ones1 = np.ones((4, 5))
# Ranges of numbers and evenly spaced points
x1 = np.arange(3, 15, 2) # ndarray version of Python built-in range() function
x2 = np.linspace(0, 4.5, 10) # 10 evenly spaced points from 0 to 4.5, endpoints included
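# Quick check of the difference between the two (outputs for the calls above):
print(x1)  # [ 3  5  7  9 11 13] -- stops before the end value 15
print(x2)  # 10 evenly spaced points from 0.0 to 4.5, endpoints included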
# ----------------------------------------------------------------------
# Basic operations and functions with ndarrays
# ----------------------------------------------------------------------
# Basic math operations between arrays (or between an array and a scalar)
# are applied element-wise on the arrays
arr = np.array([[1., 2., 3.], [4., 5., 6.]])
print(arr)
print(arr + 10)
print(arr * arr)
print(arr - arr)
print(1 / arr)
print(arr ** 0.5)
# Universal functions: fast element-wise array functions
x1 = np.random.randn(5)
x2 = x1.round(2) # Round each element to 2 decimal places
print(x1)
print(x2)
arr = np.array([1., 4., 6., 9.])
print(np.sqrt(arr))
print(np.exp(arr))
x, y = np.random.randn(8).round(1), np.random.randn(8).round(1)
print(x)
print(y)
print(np.maximum(x, y)) # Element-wise maximum
arr = (np.random.randn(7) * 5).round(2)
print(np.modf(arr)) # Decimal and integer components of each element
# Boolean logic with arrays
x1 = np.linspace(0., 9., 10)
x2 = np.arange(0., 10.)
x3 = 10 * np.random.randn(10)
# -- Comparisons
comp = x1 == x3 # Array of True, False values for each element comparison
# -- Check if two arrays are equal
comp = np.array_equal(x1, x2) # True
comp = np.array_equal(x1, x3) # False
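# -- For floating point arrays, exact equality is fragile; np.allclose
#    compares element-wise within a tolerance instead
comp = np.allclose(x1, x2 + 1e-12)    # True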
# ----------------------------------------------------------------------
# Type conversions
# ----------------------------------------------------------------------
# The astype() method of ndarray casts from one type to another
# -- Note: standard Python float corresponds to np.float64
arr = np.array([1, 2, 3, 4, 5])
float_arr = arr.astype(np.float64)
print(arr.dtype, float_arr.dtype)
arr = np.array([3.7, -1.2, -2.6, 0.5, 12.9, 10.1])
int_arr = arr.astype(np.int32)
print(arr)
print(int_arr)
# Converting numeric strings to numbers
numeric_strings = np.array(['1.25', '-9.6', '42'], dtype=np.string_)
arr = numeric_strings.astype(np.float64)
# Converting to the same dtype as another variable
int_array = np.arange(10)
float_array = np.array([1.0, 2.5, -3.1], dtype=np.float64)
arr = int_array.astype(float_array.dtype)
# ----------------------------------------------------------------------
# Basic indexing and slicing
# ----------------------------------------------------------------------
# 1-D array
arr = np.arange(10)
print(arr)
print(arr[5])
print(arr[5:8])
arr[5:8] = 12
print(arr)
# 2-D array
arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(arr2d[2])
print(arr2d[0][2])
print(arr2d[0, 2])
# 3-D array
arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
print(arr3d)
print(arr3d[0])
print(arr3d[1, 0])
old_values = arr3d[0].copy()
arr3d[0] = 42
print(arr3d)
arr3d[0] = old_values
print(arr3d)
# Slices - views vs. copies
print("""\
Array slices are *views* on the original array, not copies, so if you modify
a view, the source array is modified too! To copy a slice, use .copy()
""")
arr = np.arange(10)
print(arr)
slice1 = arr[2:7] # slice1 is a view on arr
print(slice1)
slice1[:2] = -42 # This modifies the source array 'arr' also!
print(slice1)
print(arr) # Has changed!
# To make a copy of a slice, rather than a view, use the .copy() method
arr = np.arange(10)
print(arr)
slice1 = arr[2:7].copy() # Now we have a copy of the contents
print(slice1)
slice1[:2] = -42 # arr does not change
print(slice1)
print(arr) # The same as before!
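# A direct way to check view vs copy (np.shares_memory is available in
# numpy >= 1.11): views share memory with the source array, copies do not
view1 = arr[2:7]
copy1 = arr[2:7].copy()
print(np.shares_memory(arr, view1))  # True
print(np.shares_memory(arr, copy1))  # False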
# ----------------------------------------------------------------------
# Boolean indexing
# ----------------------------------------------------------------------
cats = np.array(['tabby', 'calico', 'siamese', 'tabby', 'siamese',
'calico', 'calico'])
def numbers_array(nrow, ncol):
return np.arange(nrow*ncol).reshape((nrow, ncol))
data = numbers_array(7, 4)
data[:,2] -= 20
print(cats)
print(data)
print(cats == 'tabby')
print(data[cats == 'tabby'])
print(data[cats == 'tabby', 2:])
print(data[cats == 'tabby', 3])
# numpy uses &, | and ~ instead of and, or and not as in built-in Python
print(data[cats != 'tabby'])
print(data[~(cats == 'tabby')]) # Same as data[cats != 'tabby']
mask = (cats == 'tabby') | (cats == 'siamese')
print(mask)
print(data[mask])
# Change parts of the ndarray selected by Boolean indexing
data[data < 0] = 0
print(data)
data[cats != 'calico'] = -5
print(data)
# Note: Unlike slicing with numeric indices, Boolean indexing always creates
# a copy of the data.
subset = data[cats == 'calico'] # Makes a copy
print(subset)
subset[0] = 10 # Changes subset but not data
print(subset)
print(data) # Same as before
# ----------------------------------------------------------------------
# Fancy indexing
# ----------------------------------------------------------------------
a = numbers_array(8, 4)
print(a)
print(a[[4, 0, 2]]) # Rows 4, 0, 2
print(a[[-1, -3]]) # Rows -1 (last) and -3 (3rd last)
print(a[[1, 5, 7, 2], [0, 3, 1, 2]]) # Elements [1,0], [5,3], [7,1], [2,2]
print(a[[1, 5, 7, 2]][:, [0, 2]]) # Columns 0 and 2 of rows 1, 5, 7, 2
# The np.ix_ function returns an open mesh from multiple sequences
print(a[np.ix_([1,3], [2,0])]) # [[a[1,2] a[1,0]], [a[3,2] a[3,0]]]
# ----------------------------------------------------------------------
# Conditional logic and array operations
# ----------------------------------------------------------------------
xarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])
yarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])
cond = np.array([True, False, True, True, False])
result = [(x if c else y) for x, y, c in zip(xarr, yarr, cond)]
print(result)
result = np.where(cond, xarr, yarr) # Vectorized version of above
print(result)
arr = np.random.randn(4, 4).round(1)
print(arr)
print(np.where(arr > 0, 2, -2))
print(np.where(arr > 0, 2, arr)) # Set only positive values to 2
# ----------------------------------------------------------------------
# Transposing arrays and swapping axes
# ----------------------------------------------------------------------
arr = numbers_array(3, 5)
print(arr)
print(arr.T) # Transpose
arr = np.arange(16).reshape((2, 2, 4))
print(arr)
print(arr.transpose((1, 0, 2)))
print(arr.swapaxes(1, 2))
# ----------------------------------------------------------------------
# Linear algebra
# ----------------------------------------------------------------------
x = np.array([[1., 2., 3.], [4., 5., 6.]])
y = np.array([[6., 23.], [-1, 7], [8, 9]])
print(x.dot(y)) # Dot product using .dot() method
print(np.dot(x,y)) # Dot product using np.dot()
X = np.random.randn(5, 5).round(1)
mat = X.T.dot(X)
inv = np.linalg.inv(mat)
print(mat.round(2))
print(inv.round(2))
print(mat.dot(inv).round(2))
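# The product of a matrix and its inverse should be the identity matrix (up to
# floating point error); np.allclose against np.eye makes the check explicit
print(np.allclose(mat.dot(inv), np.eye(5)))  # True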
# ----------------------------------------------------------------------
# Mathematical and statistical methods
# ----------------------------------------------------------------------
arr = np.random.randn(5, 4).round(2)
print(arr.mean())
print(np.mean(arr)) # Equivalent to .mean()
print(arr.mean(axis=0)) # Specify which axis to operate along
print(arr.sum())
print(arr.sum(axis=1)) # Sum along axis 1
print(arr.sum(1)) # Equivalent to .sum(1)
arr = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
print(arr.cumsum(0)) # Cumulative sum along axis 0
print(arr.cumprod(1)) # Cumulative product along axis 1
# Methods for boolean arrays
arr = np.random.randn(100)
npos = (arr > 0).sum() # Number of positive values in arr
bools = np.array([False, False, True, False])
bools.any() # True if any element in bools is True
bools.all() # True if all elements in bools are True
# ----------------------------------------------------------------------
# Sorting
# ----------------------------------------------------------------------
arr = np.random.randn(8).round(1)
print(arr)
arr.sort() # Sorts the array in place
print(arr)
arr = np.random.randn(5, 3).round(1)
print(arr)
arr.sort(1) # Sort along axis 1
print(arr)
large_arr = np.random.randn(1000)
large_arr.sort()
quant5 = large_arr[int(0.05 * len(large_arr))] # 5% quantile
print(quant5)
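# np.percentile computes the same quantile directly (its interpolation can
# differ slightly from the simple index-based estimate above)
print(np.percentile(large_arr, 5))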
# ----------------------------------------------------------------------
# Uniqueness and set logic
# ----------------------------------------------------------------------
# Uniqueness
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
print(np.unique(names)) # ndarray of unique names
print(set(names)) # Python set object of unique names
print(sorted(np.unique(names))) # Sorted ndarray
ints = np.array([3, 3, 3, 2, 2, 1, 1, 4, 4])
print(np.unique(ints))
# Some set logic
values = np.array([6, 0, 0, 3, 2, 5, 6])
in_val = np.in1d(values, [2, 3, 6]) # in_val[i]=True if values[i] is 2, 3 or 6
# ----------------------------------------------------------------------
# File input and output with arrays
# ----------------------------------------------------------------------
# Storing arrays on disk in binary format
arr = np.arange(10)
np.save('some_array', arr)
np.load('some_array.npy')
# Loading text files
# arr = np.loadtxt('array_ex.txt', delimiter=',')
|
|
"""The tests for generic camera component."""
import asyncio
from http import HTTPStatus
from unittest.mock import patch
import aiohttp
import httpx
import pytest
import respx
from homeassistant import config as hass_config
from homeassistant.components.camera import async_get_mjpeg_stream
from homeassistant.components.generic import DOMAIN
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
from tests.common import AsyncMock, Mock, get_fixture_path
@respx.mock
async def test_fetching_url(hass, hass_client, fakeimgbytes_png):
"""Test that it fetches the given url."""
respx.get("http://example.com").respond(stream=fakeimgbytes_png)
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "http://example.com",
"username": "user",
"password": "pass",
}
},
)
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == HTTPStatus.OK
assert respx.calls.call_count == 1
body = await resp.read()
assert body == fakeimgbytes_png
resp = await client.get("/api/camera_proxy/camera.config_test")
assert respx.calls.call_count == 2
@respx.mock
async def test_fetching_without_verify_ssl(hass, hass_client, fakeimgbytes_png):
"""Test that it fetches the given url when ssl verify is off."""
respx.get("https://example.com").respond(stream=fakeimgbytes_png)
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "https://example.com",
"username": "user",
"password": "pass",
"verify_ssl": "false",
}
},
)
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == HTTPStatus.OK
@respx.mock
async def test_fetching_url_with_verify_ssl(hass, hass_client, fakeimgbytes_png):
"""Test that it fetches the given url when ssl verify is explicitly on."""
respx.get("https://example.com").respond(stream=fakeimgbytes_png)
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "https://example.com",
"username": "user",
"password": "pass",
"verify_ssl": "true",
}
},
)
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == HTTPStatus.OK
@respx.mock
async def test_limit_refetch(hass, hass_client, fakeimgbytes_png, fakeimgbytes_jpg):
"""Test that it fetches the given url."""
respx.get("http://example.com/5a").respond(stream=fakeimgbytes_png)
respx.get("http://example.com/10a").respond(stream=fakeimgbytes_png)
respx.get("http://example.com/15a").respond(stream=fakeimgbytes_jpg)
respx.get("http://example.com/20a").respond(status_code=HTTPStatus.NOT_FOUND)
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": 'http://example.com/{{ states.sensor.temp.state + "a" }}',
"limit_refetch_to_url_change": True,
}
},
)
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/camera_proxy/camera.config_test")
hass.states.async_set("sensor.temp", "5")
with pytest.raises(aiohttp.ServerTimeoutError), patch(
"async_timeout.timeout", side_effect=asyncio.TimeoutError()
):
resp = await client.get("/api/camera_proxy/camera.config_test")
assert respx.calls.call_count == 0
assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR
hass.states.async_set("sensor.temp", "10")
resp = await client.get("/api/camera_proxy/camera.config_test")
assert respx.calls.call_count == 1
assert resp.status == HTTPStatus.OK
body = await resp.read()
assert body == fakeimgbytes_png
resp = await client.get("/api/camera_proxy/camera.config_test")
assert respx.calls.call_count == 1
assert resp.status == HTTPStatus.OK
body = await resp.read()
assert body == fakeimgbytes_png
hass.states.async_set("sensor.temp", "15")
# Url change = fetch new image
resp = await client.get("/api/camera_proxy/camera.config_test")
assert respx.calls.call_count == 2
assert resp.status == HTTPStatus.OK
body = await resp.read()
assert body == fakeimgbytes_jpg
# Cause a template render error
hass.states.async_remove("sensor.temp")
resp = await client.get("/api/camera_proxy/camera.config_test")
assert respx.calls.call_count == 2
assert resp.status == HTTPStatus.OK
body = await resp.read()
assert body == fakeimgbytes_jpg
@respx.mock
async def test_stream_source(hass, hass_client, hass_ws_client, fakeimgbytes_png):
"""Test that the stream source is rendered."""
respx.get("http://example.com").respond(stream=fakeimgbytes_png)
assert await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "https://example.com",
"stream_source": 'http://example.com/{{ states.sensor.temp.state + "a" }}',
"limit_refetch_to_url_change": True,
},
},
)
assert await async_setup_component(hass, "stream", {})
await hass.async_block_till_done()
hass.states.async_set("sensor.temp", "5")
with patch(
"homeassistant.components.camera.Stream.endpoint_url",
return_value="http://home.assistant/playlist.m3u8",
) as mock_stream_url:
# Request playlist through WebSocket
client = await hass_ws_client(hass)
await client.send_json(
{"id": 1, "type": "camera/stream", "entity_id": "camera.config_test"}
)
msg = await client.receive_json()
# Assert WebSocket response
assert mock_stream_url.call_count == 1
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]["url"][-13:] == "playlist.m3u8"
@respx.mock
async def test_stream_source_error(hass, hass_client, hass_ws_client, fakeimgbytes_png):
"""Test that the stream source has an error."""
respx.get("http://example.com").respond(stream=fakeimgbytes_png)
assert await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "https://example.com",
# Does not exist
"stream_source": 'http://example.com/{{ states.sensor.temp.state + "a" }}',
"limit_refetch_to_url_change": True,
},
},
)
assert await async_setup_component(hass, "stream", {})
await hass.async_block_till_done()
with patch(
"homeassistant.components.camera.Stream.endpoint_url",
return_value="http://home.assistant/playlist.m3u8",
) as mock_stream_url:
# Request playlist through WebSocket
client = await hass_ws_client(hass)
await client.send_json(
{"id": 1, "type": "camera/stream", "entity_id": "camera.config_test"}
)
msg = await client.receive_json()
# Assert WebSocket response
assert mock_stream_url.call_count == 0
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"] is False
assert msg["error"] == {
"code": "start_stream_failed",
"message": "camera.config_test does not support play stream service",
}
async def test_setup_alternative_options(hass, hass_ws_client):
"""Test that the stream source is setup with different config options."""
assert await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "https://example.com",
"authentication": "digest",
"username": "user",
"password": "pass",
"stream_source": "rtsp://example.com:554/rtsp/",
"rtsp_transport": "udp",
},
},
)
await hass.async_block_till_done()
assert hass.data["camera"].get_entity("camera.config_test")
@respx.mock
async def test_no_stream_source(hass, hass_client, hass_ws_client, fakeimgbytes_png):
"""Test a stream request without stream source option set."""
respx.get("http://example.com").respond(stream=fakeimgbytes_png)
assert await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "https://example.com",
"limit_refetch_to_url_change": True,
}
},
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.camera.Stream.endpoint_url",
return_value="http://home.assistant/playlist.m3u8",
) as mock_request_stream:
# Request playlist through WebSocket
client = await hass_ws_client(hass)
await client.send_json(
{"id": 3, "type": "camera/stream", "entity_id": "camera.config_test"}
)
msg = await client.receive_json()
# Assert the websocket error message
assert mock_request_stream.call_count == 0
assert msg["id"] == 3
assert msg["type"] == TYPE_RESULT
assert msg["success"] is False
assert msg["error"] == {
"code": "start_stream_failed",
"message": "camera.config_test does not support play stream service",
}
@respx.mock
async def test_camera_content_type(
hass, hass_client, fakeimgbytes_svg, fakeimgbytes_jpg
):
"""Test generic camera with custom content_type."""
urlsvg = "https://upload.wikimedia.org/wikipedia/commons/0/02/SVG_logo.svg"
respx.get(urlsvg).respond(stream=fakeimgbytes_svg)
urljpg = "https://upload.wikimedia.org/wikipedia/commons/0/0e/Felis_silvestris_silvestris.jpg"
respx.get(urljpg).respond(stream=fakeimgbytes_jpg)
cam_config_svg = {
"name": "config_test_svg",
"platform": "generic",
"still_image_url": urlsvg,
"content_type": "image/svg+xml",
}
cam_config_jpg = {
"name": "config_test_jpg",
"platform": "generic",
"still_image_url": urljpg,
"content_type": "image/jpeg",
}
await async_setup_component(
hass, "camera", {"camera": [cam_config_svg, cam_config_jpg]}
)
await hass.async_block_till_done()
client = await hass_client()
resp_1 = await client.get("/api/camera_proxy/camera.config_test_svg")
assert respx.calls.call_count == 1
assert resp_1.status == HTTPStatus.OK
assert resp_1.content_type == "image/svg+xml"
body = await resp_1.read()
assert body == fakeimgbytes_svg
resp_2 = await client.get("/api/camera_proxy/camera.config_test_jpg")
assert respx.calls.call_count == 2
assert resp_2.status == HTTPStatus.OK
assert resp_2.content_type == "image/jpeg"
body = await resp_2.read()
assert body == fakeimgbytes_jpg
@respx.mock
async def test_reloading(hass, hass_client):
"""Test we can cleanly reload."""
respx.get("http://example.com").respond(text="hello world")
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "http://example.com",
"username": "user",
"password": "pass",
}
},
)
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == HTTPStatus.OK
assert respx.calls.call_count == 1
body = await resp.text()
assert body == "hello world"
yaml_path = get_fixture_path("configuration.yaml", "generic")
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == HTTPStatus.NOT_FOUND
resp = await client.get("/api/camera_proxy/camera.reload")
assert resp.status == HTTPStatus.OK
assert respx.calls.call_count == 2
body = await resp.text()
assert body == "hello world"
@respx.mock
async def test_timeout_cancelled(hass, hass_client, fakeimgbytes_png, fakeimgbytes_jpg):
"""Test that timeouts and cancellations return last image."""
respx.get("http://example.com").respond(stream=fakeimgbytes_png)
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "http://example.com",
"username": "user",
"password": "pass",
}
},
)
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == HTTPStatus.OK
assert respx.calls.call_count == 1
assert await resp.read() == fakeimgbytes_png
respx.get("http://example.com").respond(stream=fakeimgbytes_jpg)
with patch(
"homeassistant.components.generic.camera.GenericCamera.async_camera_image",
side_effect=asyncio.CancelledError(),
):
resp = await client.get("/api/camera_proxy/camera.config_test")
assert respx.calls.call_count == 1
assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR
respx.get("http://example.com").side_effect = [
httpx.RequestError,
httpx.TimeoutException,
]
    for total_calls in range(2, 4):  # one request per queued side effect above
resp = await client.get("/api/camera_proxy/camera.config_test")
assert respx.calls.call_count == total_calls
assert resp.status == HTTPStatus.OK
assert await resp.read() == fakeimgbytes_png
async def test_no_still_image_url(hass, hass_client):
"""Test that the component can grab images from stream with no still_image_url."""
assert await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"stream_source": "rtsp://example.com:554/rtsp/",
},
},
)
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.generic.camera.GenericCamera.stream_source",
return_value=None,
) as mock_stream_source:
# First test when there is no stream_source should fail
resp = await client.get("/api/camera_proxy/camera.config_test")
await hass.async_block_till_done()
mock_stream_source.assert_called_once()
assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR
with patch("homeassistant.components.camera.create_stream") as mock_create_stream:
# Now test when creating the stream succeeds
mock_stream = Mock()
mock_stream.async_get_image = AsyncMock()
mock_stream.async_get_image.return_value = b"stream_keyframe_image"
mock_create_stream.return_value = mock_stream
# should start the stream and get the image
resp = await client.get("/api/camera_proxy/camera.config_test")
await hass.async_block_till_done()
mock_create_stream.assert_called_once()
mock_stream.async_get_image.assert_called_once()
assert resp.status == HTTPStatus.OK
assert await resp.read() == b"stream_keyframe_image"
async def test_frame_interval_property(hass):
"""Test that the frame interval is calculated and returned correctly."""
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"stream_source": "rtsp://example.com:554/rtsp/",
"framerate": 5,
},
},
)
await hass.async_block_till_done()
request = Mock()
with patch(
"homeassistant.components.camera.async_get_still_stream"
) as mock_get_stream:
await async_get_mjpeg_stream(hass, request, "camera.config_test")
assert mock_get_stream.call_args_list[0][0][3] == pytest.approx(0.2)
|
|
from folder import Folder
from article import Article
import permissions
from pyramid import security
from bson.objectid import ObjectId
import pyes
from cms import dbutil
from users import UserCollection, GroupCollection, User, generate_random_password
from trash import Trash
class Root(Folder):
_object_type = 'root'
RESERVED_NAMES = Folder.RESERVED_NAMES + ('users', 'groups', 'trash', 'login', 'logout', 'my_password')
# Map Content _object_type strings to their classes.
# This should contain all types that a CMS user could possibly create.
_content_type_factories = {
'folder': Folder,
'article': Article,
}
def __init__(self, request, **kwargs):
Folder.__init__(self, request, **kwargs)
self.__name__ = ''
self.__parent__ = None
self.__acl__ = permissions.root_acl
def index(self):
# Don't index the root.
pass
def get_content_factory(self, object_type):
return self._content_type_factories.get(object_type)
def get_content_by_id(self, _id):
if _id == 'trash':
return self['trash']
if _id == self._id:
return self
doc = self._get_collection().find_one({'_id': _id})
if doc is None:
return None
obj = self._construct_child_from_mongo_document(doc)
pid = doc['__parent__']
if pid == self._id:
obj.__parent__ = self
else:
parent = self.get_content_by_id(pid)
if parent:
obj.__parent__ = parent
if pid == 'trash':
obj.__name__ = str(obj._id)
else:
# Parent must have been deleted between call to this method and now.
return None
return obj
# FIXME: add more options to allow searching a specific doctype with extra type-specific filters?
def search_raw(self, fulltext=None, title=None, description=None, __name__=None, _object_type=None, _pub_state=None, path_id=None, start=0, size=10, fields=None, highlight_fields=None, viewable_only=False, default_operator='AND', sort=None):
"""
fulltext, title and description should be query strings and may contain
boolean operators and wildcards
__name__, _object_type and _pub_state should be either a string or sequence of strings (with OR logic implied) and must be exact matches (no wildcards)
path_id should be either an ObjectId or a sequence of ObjectIds
identifying one or more portions of the site to restrict the search to
sort should be a pyes-style sort string, in other words a comma-delimited list of field names each with the options suffix ":asc" or ":desc"
(example: "_object_type,_created:desc")
Returns a pyes result dictionary.
Keys are [u'hits', u'_shards', u'took', u'timed_out'].
result['hits'] has the keys: [u'hits', u'total', u'max_score']
result['took'] -> search time in ms
result['hits']['total'] -> total number of hits
result['hits']['hits'] -> list of hit dictionaries, each with the keys: [u'_score', u'_type', u'_id', u'_source', u'_index', u'highlight']
Although if the fields argument is a list of field names (instead
of the default value None), instead of a '_source' key, each hit will
have a '_fields' key whose value is a dictionary of the requested fields.
The "highlight" key will only be present if highlight_fields were used
and there was a match in at least one of those fields.
In that case, the value of "highlight" will be dictionary of strings.
Each dictionary key is a field name and each string is an HTML fragment
where the matched term is in an <em> tag.
"""
# Convert singleton values to lists
if __name__ and (type(__name__) in (str, unicode)):
__name__ = [__name__]
if _object_type and (type(_object_type) in (str, unicode)):
_object_type = [_object_type]
if _pub_state and (type(_pub_state) in (str, unicode)):
_pub_state = [_pub_state]
if type(path_id) == ObjectId:
path_id = [path_id]
query = pyes.MatchAllQuery()
if fulltext or title or description:
query = pyes.BoolQuery()
if fulltext: query.add_must(pyes.StringQuery(fulltext, default_operator=default_operator))
if title: query.add_must(pyes.StringQuery(title, search_fields=['title'], default_operator=default_operator))
if description: query.add_must(pyes.StringQuery(description, search_fields=['description'], default_operator=default_operator))
filters = []
if __name__:
filters.append(pyes.TermsFilter('__name__', __name__))
if _object_type:
filters.append(pyes.TermsFilter('_object_type', _object_type))
if _pub_state:
filters.append(pyes.TermsFilter('_pub_state', _pub_state))
if path_id:
# Convert ObjectIds to strings
filters.append(pyes.TermsFilter('_id_path', [str(x) for x in path_id]))
if viewable_only:
filters.append(pyes.TermsFilter('_view', security.effective_principals(self.request)))
if filters:
query = pyes.FilteredQuery(query, pyes.ANDFilter(filters))
search = pyes.Search(query=query, start=start, size=size, fields=fields)
if highlight_fields:
for field in highlight_fields:
search.add_highlight(field)
# FIXME: use new search() method???
return dbutil.get_es_conn(self.request).search_raw(search, dbutil.get_es_index_name(self.request), sort=sort or '_score')
def search(self, fulltext=None, title=None, description=None, __name__=None, _object_type=None, _pub_state=None, path_id=None, start=0, size=10, highlight_fields=None, viewable_only=False, default_operator='AND', sort=None):
# Return a dictionary with the keys:
# "total": total number of matching hits
# "took": search time in ms
# "items": a list of child objects and highlights for the specified batch of hits
# We just need the _id values (not _source, etc), so set fields=[]
        result = self.search_raw(fulltext=fulltext, title=title, description=description, __name__=__name__, _object_type=_object_type, _pub_state=_pub_state, path_id=path_id, start=start, size=size, fields=[], highlight_fields=highlight_fields, viewable_only=viewable_only, default_operator=default_operator, sort=sort)
items = []
for hit in result['hits']['hits']:
_id = ObjectId(hit['_id'])
obj = self.get_content_by_id(_id)
if obj:
items.append(dict(object=obj, highlight=hit.get('highlight')))
return dict(
items = items,
total = result['hits']['total'],
took = result['took'],
)
def __getitem__(self, name):
if name == 'users':
users = UserCollection(self.request)
users.__name__ = 'users'
users.__parent__ = self
return users
elif name == 'groups':
groups = GroupCollection(self.request)
groups.__name__ = 'groups'
groups.__parent__ = self
return groups
elif name == 'trash':
trash = Trash(self.request)
trash.__name__ = 'trash'
trash.__parent__ = self
return trash
return Folder.__getitem__(self, name)
def get_user(self, username):
return self['users'].get_child(username)
def get_current_user(self):
return self['users'].get_child(security.authenticated_userid(self.request))
def get_user_by_email(self, email):
return self['users'].get_user_by_email(email)
def add_super_user(self, name='admin', password=None):
""" Add a new user in the superuser group.
This is particularly handy to bootstrap a new system in pshell.
"""
        user = User(self.request, firstname=name.capitalize(), lastname='User', groups=['superuser'], active=True, email='')
if not password:
password = generate_random_password()
user.set_password(password)
self['users'].add_child(name, user)
print "Created superuser with username %s and password %s" % (name, password)
# Not for use by "civilians"...
def _find_local_roles_for_principal(self, principal):
return self._get_collection().find({'_local_roles.%s' % principal: {"$exists":1}}, fields=['_local_roles'])
def _get_content_with_local_roles_for_principal(self, principal):
result = []
for item in self._find_local_roles_for_principal(principal):
obj = self.get_content_by_id(item['_id'])
if obj: result.append(obj)
return result
def _remove_local_roles_for_principal(self, principal):
self._get_collection().update({'_local_roles.%s' % principal: {"$exists": 1}}, {'$unset': {'_local_roles.%s' % principal: 1}}, multi=True)
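# Hedged usage sketch (illustrative only; not part of the original module).
# It shows how the dictionary returned by Root.search(), whose layout is
# documented in search_raw() above, is typically consumed.  The query values
# are placeholders.
def _example_search_usage(root):
    result = root.search(fulltext='annual report', _object_type=['article'],
                         size=20, viewable_only=True)
    summaries = []
    for entry in result['items']:
        obj = entry['object']           # content object resolved via get_content_by_id()
        highlight = entry['highlight']  # dict of field -> HTML fragments, or None
        summaries.append((obj.__name__, highlight))
    return result['total'], result['took'], summaries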
|
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
import datetime
from openerp import tools
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
from dateutil.relativedelta import relativedelta
def str_to_datetime(strdate):
return datetime.datetime.strptime(strdate, tools.DEFAULT_SERVER_DATE_FORMAT)
class fleet_vehicle_cost(osv.Model):
_name = 'fleet.vehicle.cost'
_description = 'Cost related to a vehicle'
_order = 'date desc, vehicle_id asc'
def _get_odometer(self, cr, uid, ids, odometer_id, arg, context):
res = dict.fromkeys(ids, False)
for record in self.browse(cr,uid,ids,context=context):
if record.odometer_id:
res[record.id] = record.odometer_id.value
return res
def _set_odometer(self, cr, uid, id, name, value, args=None, context=None):
if not value:
raise except_orm(_('Operation not allowed!'), _('Emptying the odometer value of a vehicle is not allowed.'))
date = self.browse(cr, uid, id, context=context).date
if not(date):
date = fields.date.context_today(self, cr, uid, context=context)
vehicle_id = self.browse(cr, uid, id, context=context).vehicle_id
data = {'value': value, 'date': date, 'vehicle_id': vehicle_id.id}
odometer_id = self.pool.get('fleet.vehicle.odometer').create(cr, uid, data, context=context)
return self.write(cr, uid, id, {'odometer_id': odometer_id}, context=context)
_columns = {
'name': fields.related('vehicle_id', 'name', type="char", string='Name', store=True),
'vehicle_id': fields.many2one('fleet.vehicle', 'Vehicle', required=True, help='Vehicle concerned by this log'),
'cost_subtype_id': fields.many2one('fleet.service.type', 'Type', help='Cost type purchased with this cost'),
'amount': fields.float('Total Price'),
'cost_type': fields.selection([('contract', 'Contract'), ('services','Services'), ('fuel','Fuel'), ('other','Other')], 'Category of the cost', help='For internal purpose only', required=True),
'parent_id': fields.many2one('fleet.vehicle.cost', 'Parent', help='Parent cost to this current cost'),
'cost_ids': fields.one2many('fleet.vehicle.cost', 'parent_id', 'Included Services'),
'odometer_id': fields.many2one('fleet.vehicle.odometer', 'Odometer', help='Odometer measure of the vehicle at the moment of this log'),
'odometer': fields.function(_get_odometer, fnct_inv=_set_odometer, type='float', string='Odometer Value', help='Odometer measure of the vehicle at the moment of this log'),
'odometer_unit': fields.related('vehicle_id', 'odometer_unit', type="char", string="Unit", readonly=True),
'date' :fields.date('Date',help='Date when the cost has been executed'),
'contract_id': fields.many2one('fleet.vehicle.log.contract', 'Contract', help='Contract attached to this cost'),
'auto_generated': fields.boolean('Automatically Generated', readonly=True, required=True),
}
_defaults ={
'cost_type': 'other',
}
def create(self, cr, uid, data, context=None):
#make sure that the data are consistent with values of parent and contract records given
if 'parent_id' in data and data['parent_id']:
parent = self.browse(cr, uid, data['parent_id'], context=context)
data['vehicle_id'] = parent.vehicle_id.id
data['date'] = parent.date
data['cost_type'] = parent.cost_type
if 'contract_id' in data and data['contract_id']:
contract = self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, data['contract_id'], context=context)
data['vehicle_id'] = contract.vehicle_id.id
data['cost_subtype_id'] = contract.cost_subtype_id.id
data['cost_type'] = contract.cost_type
if 'odometer' in data and not data['odometer']:
#if received value for odometer is 0, then remove it from the data as it would result to the creation of a
#odometer log with 0, which is to be avoided
del(data['odometer'])
return super(fleet_vehicle_cost, self).create(cr, uid, data, context=context)
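# Hedged sketch (illustrative only; not part of the original module): how the
# consistency logic in create() above behaves.  When a parent cost is given,
# vehicle_id, date and cost_type are copied from the parent, and a falsy
# odometer value is dropped so no zero-valued odometer log is created.
# The ids and amount used here are placeholders.
def _example_cost_create(pool, cr, uid, parent_cost_id, context=None):
    cost_obj = pool.get('fleet.vehicle.cost')
    return cost_obj.create(cr, uid, {
        'parent_id': parent_cost_id,  # vehicle_id/date/cost_type inherited from this parent
        'amount': 120.0,
        'odometer': 0,                # falsy -> removed by create(), no odometer log written
    }, context=context)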
class fleet_vehicle_tag(osv.Model):
_name = 'fleet.vehicle.tag'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class fleet_vehicle_state(osv.Model):
_name = 'fleet.vehicle.state'
_order = 'sequence asc'
_columns = {
'name': fields.char('Name', required=True),
        'sequence': fields.integer('Sequence', help="Used to order the vehicle states")
}
_sql_constraints = [('fleet_state_name_unique','unique(name)', 'State name already exists')]
class fleet_vehicle_model(osv.Model):
def _model_name_get_fnc(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.modelname
if record.brand_id.name:
name = record.brand_id.name + ' / ' + name
res[record.id] = name
return res
def on_change_brand(self, cr, uid, ids, model_id, context=None):
if not model_id:
return {'value': {'image_medium': False}}
brand = self.pool.get('fleet.vehicle.model.brand').browse(cr, uid, model_id, context=context)
return {
'value': {
'image_medium': brand.image,
}
}
_name = 'fleet.vehicle.model'
_description = 'Model of a vehicle'
_order = 'name asc'
_columns = {
'name': fields.function(_model_name_get_fnc, type="char", string='Name', store=True),
'modelname': fields.char('Model name', required=True),
'brand_id': fields.many2one('fleet.vehicle.model.brand', 'Model Brand', required=True, help='Brand of the vehicle'),
'vendors': fields.many2many('res.partner', 'fleet_vehicle_model_vendors', 'model_id', 'partner_id', string='Vendors'),
'image': fields.related('brand_id', 'image', type="binary", string="Logo"),
'image_medium': fields.related('brand_id', 'image_medium', type="binary", string="Logo (medium)"),
'image_small': fields.related('brand_id', 'image_small', type="binary", string="Logo (small)"),
}
class fleet_vehicle_model_brand(osv.Model):
_name = 'fleet.vehicle.model.brand'
_description = 'Brand model of the vehicle'
_order = 'name asc'
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Brand Name', required=True),
'image': fields.binary("Logo",
help="This field holds the image used as logo for the brand, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store = {
'fleet.vehicle.model.brand': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized logo of the brand. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Smal-sized photo", type="binary", multi="_get_image",
store = {
'fleet.vehicle.model.brand': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the brand. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
}
class fleet_vehicle(osv.Model):
_inherit = 'mail.thread'
def _vehicle_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = record.model_id.brand_id.name + '/' + record.model_id.modelname + ' / ' + record.license_plate
return res
def return_action_to_open(self, cr, uid, ids, context=None):
""" This opens the xml view specified in xml_id for the current vehicle """
if context is None:
context = {}
if context.get('xml_id'):
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid ,'fleet', context['xml_id'], context=context)
res['context'] = context
res['context'].update({'default_vehicle_id': ids[0]})
res['domain'] = [('vehicle_id','=', ids[0])]
return res
return False
def act_show_log_cost(self, cr, uid, ids, context=None):
""" This opens log view to view and add new log for this vehicle, groupby default to only show effective costs
@return: the costs log view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid ,'fleet','fleet_vehicle_costs_act', context=context)
res['context'] = context
res['context'].update({
'default_vehicle_id': ids[0],
'search_default_parent_false': True
})
res['domain'] = [('vehicle_id','=', ids[0])]
return res
def _get_odometer(self, cr, uid, ids, odometer_id, arg, context):
res = dict.fromkeys(ids, 0)
for record in self.browse(cr,uid,ids,context=context):
ids = self.pool.get('fleet.vehicle.odometer').search(cr, uid, [('vehicle_id', '=', record.id)], limit=1, order='value desc')
if len(ids) > 0:
res[record.id] = self.pool.get('fleet.vehicle.odometer').browse(cr, uid, ids[0], context=context).value
return res
def _set_odometer(self, cr, uid, id, name, value, args=None, context=None):
if value:
date = fields.date.context_today(self, cr, uid, context=context)
data = {'value': value, 'date': date, 'vehicle_id': id}
return self.pool.get('fleet.vehicle.odometer').create(cr, uid, data, context=context)
def _search_get_overdue_contract_reminder(self, cr, uid, obj, name, args, context):
res = []
for field, operator, value in args:
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value == True) or (operator in ('<>', '!=') and value == False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.date.context_today(self, cr, uid, context=context)
cr.execute('select cost.vehicle_id, count(contract.id) as contract_number FROM fleet_vehicle_cost cost left join fleet_vehicle_log_contract contract on contract.cost_id = cost.id WHERE contract.expiration_date is not null AND contract.expiration_date < %s AND contract.state IN (\'open\', \'toclose\') GROUP BY cost.vehicle_id', (today,))
res_ids = [x[0] for x in cr.fetchall()]
res.append(('id', search_operator, res_ids))
return res
def _search_contract_renewal_due_soon(self, cr, uid, obj, name, args, context):
res = []
for field, operator, value in args:
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value == True) or (operator in ('<>', '!=') and value == False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.date.context_today(self, cr, uid, context=context)
datetime_today = datetime.datetime.strptime(today, tools.DEFAULT_SERVER_DATE_FORMAT)
limit_date = str((datetime_today + relativedelta(days=+15)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
cr.execute('select cost.vehicle_id, count(contract.id) as contract_number FROM fleet_vehicle_cost cost left join fleet_vehicle_log_contract contract on contract.cost_id = cost.id WHERE contract.expiration_date is not null AND contract.expiration_date > %s AND contract.expiration_date < %s AND contract.state IN (\'open\', \'toclose\') GROUP BY cost.vehicle_id', (today, limit_date))
res_ids = [x[0] for x in cr.fetchall()]
res.append(('id', search_operator, res_ids))
return res
def _get_contract_reminder_fnc(self, cr, uid, ids, field_names, unknow_none, context=None):
res= {}
for record in self.browse(cr, uid, ids, context=context):
overdue = False
due_soon = False
total = 0
name = ''
for element in record.log_contracts:
if element.state in ('open', 'toclose') and element.expiration_date:
current_date_str = fields.date.context_today(self, cr, uid, context=context)
due_time_str = element.expiration_date
current_date = str_to_datetime(current_date_str)
due_time = str_to_datetime(due_time_str)
diff_time = (due_time-current_date).days
if diff_time < 0:
overdue = True
total += 1
if diff_time < 15 and diff_time >= 0:
                        due_soon = True
total += 1
if overdue or due_soon:
ids = self.pool.get('fleet.vehicle.log.contract').search(cr,uid,[('vehicle_id', '=', record.id), ('state', 'in', ('open', 'toclose'))], limit=1, order='expiration_date asc')
if len(ids) > 0:
#we display only the name of the oldest overdue/due soon contract
name=(self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, ids[0], context=context).cost_subtype_id.name)
res[record.id] = {
'contract_renewal_overdue': overdue,
'contract_renewal_due_soon': due_soon,
'contract_renewal_total': (total - 1), #we remove 1 from the real total for display purposes
'contract_renewal_name': name,
}
return res
def _get_default_state(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'vehicle_state_active')
except ValueError:
model_id = False
return model_id
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
Odometer = self.pool['fleet.vehicle.odometer']
LogFuel = self.pool['fleet.vehicle.log.fuel']
LogService = self.pool['fleet.vehicle.log.services']
LogContract = self.pool['fleet.vehicle.log.contract']
Cost = self.pool['fleet.vehicle.cost']
return {
vehicle_id: {
'odometer_count': Odometer.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'fuel_logs_count': LogFuel.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'service_count': LogService.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'contract_count': LogContract.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'cost_count': Cost.search_count(cr, uid, [('vehicle_id', '=', vehicle_id), ('parent_id', '=', False)], context=context)
}
for vehicle_id in ids
}
_name = 'fleet.vehicle'
_description = 'Information on a vehicle'
_order= 'license_plate asc'
_columns = {
'name': fields.function(_vehicle_name_get_fnc, type="char", string='Name', store=True),
'company_id': fields.many2one('res.company', 'Company'),
'license_plate': fields.char('License Plate', required=True, help='License plate number of the vehicle (ie: plate number for a car)'),
'vin_sn': fields.char('Chassis Number', help='Unique number written on the vehicle motor (VIN/SN number)', copy=False),
'driver_id': fields.many2one('res.partner', 'Driver', help='Driver of the vehicle'),
'model_id': fields.many2one('fleet.vehicle.model', 'Model', required=True, help='Model of the vehicle'),
'log_fuel': fields.one2many('fleet.vehicle.log.fuel', 'vehicle_id', 'Fuel Logs'),
'log_services': fields.one2many('fleet.vehicle.log.services', 'vehicle_id', 'Services Logs'),
'log_contracts': fields.one2many('fleet.vehicle.log.contract', 'vehicle_id', 'Contracts'),
'cost_count': fields.function(_count_all, type='integer', string="Costs" , multi=True),
'contract_count': fields.function(_count_all, type='integer', string='Contracts', multi=True),
'service_count': fields.function(_count_all, type='integer', string='Services', multi=True),
'fuel_logs_count': fields.function(_count_all, type='integer', string='Fuel Logs', multi=True),
'odometer_count': fields.function(_count_all, type='integer', string='Odometer', multi=True),
'acquisition_date': fields.date('Acquisition Date', required=False, help='Date when the vehicle has been bought'),
'color': fields.char('Color', help='Color of the vehicle'),
'state_id': fields.many2one('fleet.vehicle.state', 'State', help='Current state of the vehicle', ondelete="set null"),
'location': fields.char('Location', help='Location of the vehicle (garage, ...)'),
'seats': fields.integer('Seats Number', help='Number of seats of the vehicle'),
'doors': fields.integer('Doors Number', help='Number of doors of the vehicle'),
'tag_ids' :fields.many2many('fleet.vehicle.tag', 'fleet_vehicle_vehicle_tag_rel', 'vehicle_tag_id','tag_id', 'Tags', copy=False),
'odometer': fields.function(_get_odometer, fnct_inv=_set_odometer, type='float', string='Last Odometer', help='Odometer measure of the vehicle at the moment of this log'),
'odometer_unit': fields.selection([('kilometers', 'Kilometers'),('miles','Miles')], 'Odometer Unit', help='Unit of the odometer ',required=True),
'transmission': fields.selection([('manual', 'Manual'), ('automatic', 'Automatic')], 'Transmission', help='Transmission Used by the vehicle'),
'fuel_type': fields.selection([('gasoline', 'Gasoline'), ('diesel', 'Diesel'), ('electric', 'Electric'), ('hybrid', 'Hybrid')], 'Fuel Type', help='Fuel Used by the vehicle'),
'horsepower': fields.integer('Horsepower'),
'horsepower_tax': fields.float('Horsepower Taxation'),
'power': fields.integer('Power', help='Power in kW of the vehicle'),
'co2': fields.float('CO2 Emissions', help='CO2 emissions of the vehicle'),
'image': fields.related('model_id', 'image', type="binary", string="Logo"),
'image_medium': fields.related('model_id', 'image_medium', type="binary", string="Logo (medium)"),
'image_small': fields.related('model_id', 'image_small', type="binary", string="Logo (small)"),
'contract_renewal_due_soon': fields.function(_get_contract_reminder_fnc, fnct_search=_search_contract_renewal_due_soon, type="boolean", string='Has Contracts to renew', multi='contract_info'),
'contract_renewal_overdue': fields.function(_get_contract_reminder_fnc, fnct_search=_search_get_overdue_contract_reminder, type="boolean", string='Has Contracts Overdue', multi='contract_info'),
'contract_renewal_name': fields.function(_get_contract_reminder_fnc, type="text", string='Name of contract to renew soon', multi='contract_info'),
'contract_renewal_total': fields.function(_get_contract_reminder_fnc, type="integer", string='Total of contracts due or overdue minus one', multi='contract_info'),
'car_value': fields.float('Car Value', help='Value of the bought vehicle'),
}
_defaults = {
'doors': 5,
'odometer_unit': 'kilometers',
'state_id': _get_default_state,
}
def on_change_model(self, cr, uid, ids, model_id, context=None):
if not model_id:
return {}
model = self.pool.get('fleet.vehicle.model').browse(cr, uid, model_id, context=context)
return {
'value': {
'image_medium': model.image,
}
}
def create(self, cr, uid, data, context=None):
context = dict(context or {}, mail_create_nolog=True)
vehicle_id = super(fleet_vehicle, self).create(cr, uid, data, context=context)
vehicle = self.browse(cr, uid, vehicle_id, context=context)
self.message_post(cr, uid, [vehicle_id], body=_('%s %s has been added to the fleet!') % (vehicle.model_id.name,vehicle.license_plate), context=context)
return vehicle_id
def write(self, cr, uid, ids, vals, context=None):
"""
This function writes an entry in the OpenChatter whenever we change important information
on the vehicle, like the model, the driver, the state of the vehicle or its license plate
"""
for vehicle in self.browse(cr, uid, ids, context):
changes = []
if 'model_id' in vals and vehicle.model_id.id != vals['model_id']:
value = self.pool.get('fleet.vehicle.model').browse(cr,uid,vals['model_id'],context=context).name
oldmodel = vehicle.model_id.name or _('None')
changes.append(_("Model: from '%s' to '%s'") %(oldmodel, value))
if 'driver_id' in vals and vehicle.driver_id.id != vals['driver_id']:
value = self.pool.get('res.partner').browse(cr,uid,vals['driver_id'],context=context).name
olddriver = (vehicle.driver_id.name) or _('None')
changes.append(_("Driver: from '%s' to '%s'") %(olddriver, value))
if 'state_id' in vals and vehicle.state_id.id != vals['state_id']:
value = self.pool.get('fleet.vehicle.state').browse(cr,uid,vals['state_id'],context=context).name
oldstate = vehicle.state_id.name or _('None')
changes.append(_("State: from '%s' to '%s'") %(oldstate, value))
if 'license_plate' in vals and vehicle.license_plate != vals['license_plate']:
old_license_plate = vehicle.license_plate or _('None')
changes.append(_("License Plate: from '%s' to '%s'") %(old_license_plate, vals['license_plate']))
if len(changes) > 0:
self.message_post(cr, uid, [vehicle.id], body=", ".join(changes), context=context)
vehicle_id = super(fleet_vehicle,self).write(cr, uid, ids, vals, context)
return True
class fleet_vehicle_odometer(osv.Model):
_name='fleet.vehicle.odometer'
_description='Odometer log for a vehicle'
_order='date desc'
def _vehicle_log_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.vehicle_id.name
if record.date:
name = name+ ' / '+ str(record.date)
res[record.id] = name
return res
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
odometer_unit = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context).odometer_unit
return {
'value': {
'unit': odometer_unit,
}
}
_columns = {
'name': fields.function(_vehicle_log_name_get_fnc, type="char", string='Name', store=True),
'date': fields.date('Date'),
'value': fields.float('Odometer Value', group_operator="max"),
'vehicle_id': fields.many2one('fleet.vehicle', 'Vehicle', required=True),
'unit': fields.related('vehicle_id', 'odometer_unit', type="char", string="Unit", readonly=True),
}
_defaults = {
'date': fields.date.context_today,
}
class fleet_vehicle_log_fuel(osv.Model):
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
vehicle = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context)
odometer_unit = vehicle.odometer_unit
driver = vehicle.driver_id.id
return {
'value': {
'odometer_unit': odometer_unit,
'purchaser_id': driver,
}
}
def on_change_liter(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
#need to cast to float because the value received from the web client may be an integer (JavaScript and JSON do not
#make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
#liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1 (instead
#of 3.0/2=1.5)
#If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
#onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
#computation to 2 decimals
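#Illustrative example (not part of the original code, values are hypothetical): with liter=2 and
#price_per_liter=1.5, round(2 * 1.5, 2) == 3.0, so this onchange returns {'value': {'amount': 3.0}};
#the follow-up on_change_amount call then finds round(3.0 / 2, 2) == 1.5 already equal to
#price_per_liter and returns {}, which stops the onchange loop.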
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value' : {'amount' : round(liter * price_per_liter,2),}}
elif amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value' : {'price_per_liter' : round(amount / liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value' : {'liter' : round(amount / price_per_liter,2),}}
else :
return {}
def on_change_price_per_liter(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
#need to cast to float because the value received from the web client may be an integer (JavaScript and JSON do not
#make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
#liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1 (instead
#of 3.0/2=1.5)
#If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
#onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
#computation to 2 decimals
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value' : {'amount' : round(liter * price_per_liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value' : {'liter' : round(amount / price_per_liter,2),}}
elif amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value' : {'price_per_liter' : round(amount / liter,2),}}
else :
return {}
def on_change_amount(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
#need to cast to float because the value received from the web client may be an integer (JavaScript and JSON do not
#make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
#liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1 (instead
#of 3.0/2=1.5)
#If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
#onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
#computation to 2 decimals
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value': {'price_per_liter': round(amount / liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value': {'liter': round(amount / price_per_liter,2),}}
elif liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value': {'amount': round(liter * price_per_liter,2),}}
else :
return {}
def _get_default_service_type(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_service_refueling')
except ValueError:
model_id = False
return model_id
_name = 'fleet.vehicle.log.fuel'
_description = 'Fuel log for vehicles'
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_columns = {
'liter': fields.float('Liter'),
'price_per_liter': fields.float('Price Per Liter'),
'purchaser_id': fields.many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]"),
'inv_ref': fields.char('Invoice Reference', size=64),
'vendor_id': fields.many2one('res.partner', 'Supplier', domain="[('supplier','=',True)]"),
'notes': fields.text('Notes'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related with store=True because the graph view doesn't support (1) to address fields from inherited table and (2) fields that aren't stored in database
}
_defaults = {
'date': fields.date.context_today,
'cost_subtype_id': _get_default_service_type,
'cost_type': 'fuel',
}
class fleet_vehicle_log_services(osv.Model):
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
vehicle = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context)
odometer_unit = vehicle.odometer_unit
driver = vehicle.driver_id.id
return {
'value': {
'odometer_unit': odometer_unit,
'purchaser_id': driver,
}
}
def _get_default_service_type(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_service_service_8')
except ValueError:
model_id = False
return model_id
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_name = 'fleet.vehicle.log.services'
_description = 'Services for vehicles'
_columns = {
'purchaser_id': fields.many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]"),
'inv_ref': fields.char('Invoice Reference'),
'vendor_id': fields.many2one('res.partner', 'Supplier', domain="[('supplier','=',True)]"),
'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related with store=True because the graph view doesn't support (1) to address fields from inherited table and (2) fields that aren't stored in database
'notes': fields.text('Notes'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
}
_defaults = {
'date': fields.date.context_today,
'cost_subtype_id': _get_default_service_type,
'cost_type': 'services'
}
class fleet_service_type(osv.Model):
_name = 'fleet.service.type'
_description = 'Type of services available on a vehicle'
_columns = {
'name': fields.char('Name', required=True, translate=True),
'category': fields.selection([('contract', 'Contract'), ('service', 'Service'), ('both', 'Both')], 'Category', required=True, help='Choose whether the service refers to contracts, vehicle services or both'),
}
class fleet_vehicle_log_contract(osv.Model):
def scheduler_manage_auto_costs(self, cr, uid, context=None):
#This method is called by a cron task
#It creates costs for contracts having the "recurring cost" field set, depending on their frequency
#For example, if a contract has a recurring cost of 200 with a weekly frequency, this method creates a cost of 200 on the first day of each week, from the date of the last recurring cost in the database to today
#If the contract has not yet any recurring costs in the database, the method generates the recurring costs from the start_date to today
#The created costs are associated to a contract thanks to the many2one field contract_id
#If the contract has no start_date, no cost will be created, even if the contract has recurring costs
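#Illustrative example (hypothetical values, not from the original code): a contract with
#start_date 2014-01-01, cost_frequency 'weekly' and cost_generated 200 that has no auto-generated
#cost yet gets one 200 cost dated 2014-01-01, 2014-01-08, 2014-01-15, ... until today or the
#contract expiration date, whichever comes first.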
vehicle_cost_obj = self.pool.get('fleet.vehicle.cost')
d = datetime.datetime.strptime(fields.date.context_today(self, cr, uid, context=context), tools.DEFAULT_SERVER_DATE_FORMAT).date()
contract_ids = self.pool.get('fleet.vehicle.log.contract').search(cr, uid, [('state','!=','closed')], offset=0, limit=None, order=None,context=None, count=False)
deltas = {'yearly': relativedelta(years=+1), 'monthly': relativedelta(months=+1), 'weekly': relativedelta(weeks=+1), 'daily': relativedelta(days=+1)}
for contract in self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, contract_ids, context=context):
if not contract.start_date or contract.cost_frequency == 'no':
continue
found = False
last_cost_date = contract.start_date
if contract.generated_cost_ids:
last_autogenerated_cost_id = vehicle_cost_obj.search(cr, uid, ['&', ('contract_id','=',contract.id), ('auto_generated','=',True)], offset=0, limit=1, order='date desc',context=context, count=False)
if last_autogenerated_cost_id:
found = True
last_cost_date = vehicle_cost_obj.browse(cr, uid, last_autogenerated_cost_id[0], context=context).date
startdate = datetime.datetime.strptime(last_cost_date, tools.DEFAULT_SERVER_DATE_FORMAT).date()
if found:
startdate += deltas.get(contract.cost_frequency)
while (startdate <= d) & (startdate <= datetime.datetime.strptime(contract.expiration_date, tools.DEFAULT_SERVER_DATE_FORMAT).date()):
data = {
'amount': contract.cost_generated,
'date': startdate.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
'vehicle_id': contract.vehicle_id.id,
'cost_subtype_id': contract.cost_subtype_id.id,
'contract_id': contract.id,
'auto_generated': True
}
cost_id = self.pool.get('fleet.vehicle.cost').create(cr, uid, data, context=context)
startdate += deltas.get(contract.cost_frequency)
return True
def scheduler_manage_contract_expiration(self, cr, uid, context=None):
#This method is called by a cron task
#It manages the state of a contract, possibly by posting a message on the vehicle concerned and updating its status
datetime_today = datetime.datetime.strptime(fields.date.context_today(self, cr, uid, context=context), tools.DEFAULT_SERVER_DATE_FORMAT)
limit_date = (datetime_today + relativedelta(days=+15)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
ids = self.search(cr, uid, ['&', ('state', '=', 'open'), ('expiration_date', '<', limit_date)], offset=0, limit=None, order=None, context=context, count=False)
res = {}
for contract in self.browse(cr, uid, ids, context=context):
if contract.vehicle_id.id in res:
res[contract.vehicle_id.id] += 1
else:
res[contract.vehicle_id.id] = 1
for vehicle, value in res.items():
self.pool.get('fleet.vehicle').message_post(cr, uid, vehicle, body=_('%s contract(s) need(s) to be renewed and/or closed!') % (str(value)), context=context)
return self.write(cr, uid, ids, {'state': 'toclose'}, context=context)
def run_scheduler(self, cr, uid, context=None):
self.scheduler_manage_auto_costs(cr, uid, context=context)
self.scheduler_manage_contract_expiration(cr, uid, context=context)
return True
def _vehicle_contract_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.vehicle_id.name
if record.cost_subtype_id.name:
name += ' / '+ record.cost_subtype_id.name
if record.date:
name += ' / '+ record.date
res[record.id] = name
return res
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
odometer_unit = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context).odometer_unit
return {
'value': {
'odometer_unit': odometer_unit,
}
}
def compute_next_year_date(self, strdate):
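#Note: a fixed 365-day delta is used here, so the computed date can drift by one day across leap years.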
oneyear = datetime.timedelta(days=365)
curdate = str_to_datetime(strdate)
return datetime.datetime.strftime(curdate + oneyear, tools.DEFAULT_SERVER_DATE_FORMAT)
def on_change_start_date(self, cr, uid, ids, strdate, enddate, context=None):
if (strdate):
return {'value': {'expiration_date': self.compute_next_year_date(strdate),}}
return {}
def get_days_left(self, cr, uid, ids, prop, unknow_none, context=None):
"""return a dict with as value for each contract an integer
if contract is in an open state and is overdue, return 0
if contract is in a closed state, return -1
otherwise return the number of days before the contract expires
"""
res = {}
for record in self.browse(cr, uid, ids, context=context):
if (record.expiration_date and (record.state == 'open' or record.state == 'toclose')):
today = str_to_datetime(time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
renew_date = str_to_datetime(record.expiration_date)
diff_time = (renew_date-today).days
res[record.id] = diff_time > 0 and diff_time or 0
else:
res[record.id] = -1
return res
def act_renew_contract(self, cr, uid, ids, context=None):
assert len(ids) == 1, "This operation should only be done for 1 single contract at a time, as it is supposed to open a window as a result"
for element in self.browse(cr, uid, ids, context=context):
#compute end date
startdate = str_to_datetime(element.start_date)
enddate = str_to_datetime(element.expiration_date)
diffdate = (enddate - startdate)
default = {
'date': fields.date.context_today(self, cr, uid, context=context),
'start_date': datetime.datetime.strftime(str_to_datetime(element.expiration_date) + datetime.timedelta(days=1), tools.DEFAULT_SERVER_DATE_FORMAT),
'expiration_date': datetime.datetime.strftime(enddate + diffdate, tools.DEFAULT_SERVER_DATE_FORMAT),
}
newid = super(fleet_vehicle_log_contract, self).copy(cr, uid, element.id, default, context=context)
mod, modid = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'fleet_vehicle_log_contract_form')
return {
'name':_("Renew Contract"),
'view_mode': 'form',
'view_id': modid,
'view_type': 'tree,form',
'res_model': 'fleet.vehicle.log.contract',
'type': 'ir.actions.act_window',
'nodestroy': True,
'domain': '[]',
'res_id': newid,
'context': {'active_id':newid},
}
def _get_default_contract_type(self, cr, uid, context=None):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_contract_leasing')
except ValueError:
model_id = False
return model_id
def on_change_indic_cost(self, cr, uid, ids, cost_ids, context=None):
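#cost_ids arrives as a list of one2many commands; only triplets carrying a values dict contribute,
#e.g. (hypothetical) [(0, 0, {'amount': 100.0}), (4, 42)] gives a sum_cost of 100.0.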
totalsum = 0.0
for element in cost_ids:
if element and len(element) == 3 and isinstance(element[2], dict):
totalsum += element[2].get('amount', 0.0)
return {
'value': {
'sum_cost': totalsum,
}
}
def _get_sum_cost(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for contract in self.browse(cr, uid, ids, context=context):
totalsum = 0
for cost in contract.cost_ids:
totalsum += cost.amount
res[contract.id] = totalsum
return res
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_name = 'fleet.vehicle.log.contract'
_description = 'Contract information on a vehicle'
_order='state desc,expiration_date'
_columns = {
'name': fields.function(_vehicle_contract_name_get_fnc, type="text", string='Name', store=True),
'start_date': fields.date('Contract Start Date', help='Date when the coverage of the contract begins'),
'expiration_date': fields.date('Contract Expiration Date', help='Date when the coverage of the contract expires (by default, one year after the start date)'),
'days_left': fields.function(get_days_left, type='integer', string='Warning Date'),
'insurer_id' :fields.many2one('res.partner', 'Supplier'),
'purchaser_id': fields.many2one('res.partner', 'Contractor', help='Person to which the contract is signed for'),
'ins_ref': fields.char('Contract Reference', size=64, copy=False),
'state': fields.selection([('open', 'In Progress'), ('toclose','To Close'), ('closed', 'Terminated')],
'Status', readonly=True, help='Choose whether the contract is still valid or not',
copy=False),
'notes': fields.text('Terms and Conditions', help='Write here all supplementary information relative to this contract', copy=False),
'cost_generated': fields.float('Recurring Cost Amount', help="Costs paid at regular intervals, depending on the cost frequency. If the cost frequency is set to unique, the cost will be logged at the start date"),
'cost_frequency': fields.selection([('no','No'), ('daily', 'Daily'), ('weekly','Weekly'), ('monthly','Monthly'), ('yearly','Yearly')], 'Recurring Cost Frequency', help='Frequency of the recurring cost', required=True),
'generated_cost_ids': fields.one2many('fleet.vehicle.cost', 'contract_id', 'Generated Costs'),
'sum_cost': fields.function(_get_sum_cost, type='float', string='Indicative Costs Total'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related with store=True because the graph view doesn't support (1) to address fields from inherited table and (2) fields that aren't stored in database
}
_defaults = {
'purchaser_id': lambda self, cr, uid, ctx: self.pool.get('res.users').browse(cr, uid, uid, context=ctx).partner_id.id or False,
'date': fields.date.context_today,
'start_date': fields.date.context_today,
'state':'open',
'expiration_date': lambda self, cr, uid, ctx: self.compute_next_year_date(fields.date.context_today(self, cr, uid, context=ctx)),
'cost_frequency': 'no',
'cost_subtype_id': _get_default_contract_type,
'cost_type': 'contract',
}
def contract_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'closed'}, context=context)
def contract_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
class fleet_contract_state(osv.Model):
_name = 'fleet.contract.state'
_description = 'Contains the different possible statuses of a leasing contract'
_columns = {
'name':fields.char('Contract Status', required=True),
}
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import base64
from cgi import FieldStorage
import datetime
import json
import logging
import cloudstorage
from google.appengine.ext import db, webapp
from mcfw.properties import azzert
from rogerthat.bizz.gcs import get_blobstore_cloudstorage_path, upload_to_gcs
from rogerthat.bizz.user import calculate_secure_url_digest
from rogerthat.consts import ROGERTHAT_ATTACHMENTS_BUCKET
from rogerthat.dal import parent_key_unsafe
from rogerthat.models import UserProfile
from rogerthat.rpc import users
from rogerthat.settings import get_server_settings
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils.channel import broadcast_via_iframe_result
from rogerthat.utils.crypto import decrypt
from rogerthat.utils.service import get_service_identity_tuple, create_service_identity_user
from solutions import translate as common_translate
from solutions.common import SOLUTION_COMMON
from solutions.common.bizz import SolutionModule
from solutions.common.bizz.loyalty import put_loyalty_slide, get_loyalty_slide_footer, redeem_lottery_winners
from solutions.common.dal import get_solution_settings
from solutions.common.handlers import JINJA_ENVIRONMENT
from solutions.common.models import SolutionInboxMessage
from solutions.common.models.loyalty import SolutionLoyaltySlide, SolutionLoyaltyExport, SolutionUserLoyaltySettings
from solutions.common.utils import is_default_service_identity, create_service_identity_user_wo_default, \
get_extension_for_content_type
import webapp2
class UploadLoyaltySlideHandler(webapp2.RequestHandler):
def post(self):
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
slide_id = self.request.get("slide_id", "")
if slide_id == "":
slide_id = None
else:
slide_id = long(slide_id)
slide_name = self.request.get("slide_name", "")
try:
slide_time = long(self.request.get("slide_time", 10))
except:
self.response.out.write(broadcast_via_iframe_result(u"solutions.common.loyalty.slide.post_result",
error=u"Please fill in valid time!"))
return
uploaded_file = self.request.POST.get('slide_file') # type: FieldStorage
if not slide_id and not isinstance(uploaded_file, FieldStorage):
self.response.out.write(broadcast_via_iframe_result(u"solutions.common.loyalty.slide.post_result",
error=u"Please select a picture!"))
return
if not slide_id:
sln_settings = get_solution_settings(service_user)
if SolutionModule.HIDDEN_CITY_WIDE_LOTTERY in sln_settings.modules:
service_identity_user = create_service_identity_user_wo_default(service_user, service_identity)
p = parent_key_unsafe(service_identity_user, SOLUTION_COMMON)
sli = SolutionLoyaltySlide.all(keys_only=True).ancestor(p).get()
if sli:
self.response.out.write(broadcast_via_iframe_result(u"solutions.common.loyalty.slide.post_result",
error=u"A city can only have 1 active slide at a time!"))
return
gcs_filename = None
content_type = None
if isinstance(uploaded_file, FieldStorage):
content_type = uploaded_file.type
if not content_type.startswith("image/"):
self.response.out.write(broadcast_via_iframe_result(u"solutions.common.loyalty.slide.post_result",
error=u"The uploaded file is not an image!"))
return
date = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
gcs_filename = '%s/oca/loyalty_slides/%s/%s_%s.%s' % (ROGERTHAT_ATTACHMENTS_BUCKET,
service_user.email(),
date,
uploaded_file.filename,
get_extension_for_content_type(content_type))
upload_to_gcs(uploaded_file.value, content_type, gcs_filename)
put_loyalty_slide(service_user, service_identity, slide_id, slide_name, slide_time, gcs_filename, content_type)
self.response.out.write(broadcast_via_iframe_result(u"solutions.common.loyalty.slide.post_result"))
class LoyaltySlideDownloadHandler(webapp2.RequestHandler):
def get(self):
key = self.request.get("slide_key", None)
filename = get_blobstore_cloudstorage_path(key)
try:
gcs_stats = cloudstorage.stat(filename)
self.response.headers['Content-Type'] = gcs_stats.content_type
self.response.headers['Cache-Control'] = 'public, max-age=31536000' # Cache forever (1 year)
self.response.headers['Content-Disposition'] = 'inline; filename=%s' % str(key)
with cloudstorage.open(filename, 'r') as gcs_file:
self.response.write(gcs_file.read())
except cloudstorage.errors.NotFoundError:
logging.warn('%s NOT found in gcs', filename)
self.error(404)
class LoyaltySlidePreviewHandler(webapp2.RequestHandler):
def get(self):
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
slide_id = self.request.get('i')
if not slide_id:
self.redirect("/ourcityapp")
return
slide_id = long(slide_id)
if is_default_service_identity(service_identity):
service_identity_user = service_user
else:
service_identity_user = create_service_identity_user(service_user, service_identity)
def trans():
slide = SolutionLoyaltySlide.get_by_id(slide_id, parent=parent_key_unsafe(service_identity_user, SOLUTION_COMMON))
return slide
slide = db.run_in_transaction(trans)
if not slide:
self.redirect("/ourcityapp")
return
server_settings = get_server_settings()
jinja_template = JINJA_ENVIRONMENT.get_template('loyalty_preview.html')
self.response.out.write(jinja_template.render({'slide_id': slide_id,
'full_url': slide.slide_url(),
'overlay_url': '%s/common/loyalty/slide/overlay' % (server_settings.baseUrl)}))
class LoyaltySlideOverlayHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Cache-Control'] = "public, max-age=31536000" # Cache forever (1 year)
self.response.headers['Content-Type'] = "image/png"
self.response.out.write(get_loyalty_slide_footer())
class ExportLoyaltyHandler(webapp2.RequestHandler):
def get(self):
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
year = self.request.get('y')
month = self.request.get('m')
export = SolutionLoyaltyExport.get(SolutionLoyaltyExport.create_key(service_user, service_identity, year, month))
if export:
self.response.headers['Content-Type'] = 'application/pdf'
self.response.headers['Content-Disposition'] = str(
'attachment; filename=loyalty_export %s-%s.pdf' % (month, year))
self.response.write(export.pdf)
self.response.set_status(200)
else:
self.response.set_status(404)
class LoyaltyNoMobilesUnsubscribeEmailHandler(webapp.RequestHandler):
def parse_data(self, email, data):
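# data is expected to be a base64-encoded, encrypted JSON payload whose "d" field must match the
# secure digest computed by calculate_secure_url_digest; a mismatch trips the azzert below.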
user = users.User(email)
data = base64.decodestring(data)
data = decrypt(user, data)
data = json.loads(data)
azzert(data["d"] == calculate_secure_url_digest(data))
return data, user
def get_user_info(self):
email = self.request.get("email", None)
data = self.request.get("data", None)
if not email or not data:
return None, None
try:
data_dict, _ = self.parse_data(email, data)
except:
logging.warn("Could not decipher url!", exc_info=True)
return None, None
app_user = users.User(email)
return data_dict, app_user
def get(self):
data_dict, app_user = self.get_user_info()
if not data_dict or not app_user:
language = self.request.get("language", DEFAULT_LANGUAGE)
title = common_translate(language, u'Error')
text = common_translate(language, u"error-occured-unknown-try-again")
else:
azzert(data_dict['a'] == "loyalty_no_mobiles_unsubscribe")
service_name = data_dict['n']
service_identity_user_email = data_dict['e']
suls_key = SolutionUserLoyaltySettings.createKey(app_user)
suls = SolutionUserLoyaltySettings.get(suls_key)
if not suls:
suls = SolutionUserLoyaltySettings(key=suls_key)
suls.reminders_disabled = False
suls.reminders_disabled_for = []
if service_identity_user_email not in suls.reminders_disabled_for:
suls.reminders_disabled_for.append(service_identity_user_email)
suls.put()
user_profile = db.get(UserProfile.createKey(app_user))
if user_profile:
language = self.request.get("language", user_profile.language)
else:
language = self.request.get("language", DEFAULT_LANGUAGE)
title = common_translate(language, u'You have been unsubscribed')
text = common_translate(language, u'You will not receive any loyalty updates from "%(name)s" anymore', name=service_name)
params = {
'title': title,
'text': text
}
jinja_template = JINJA_ENVIRONMENT.get_template('pages/loyalty_title_text.html')
self.response.out.write(jinja_template.render(params))
class LoyaltyLotteryConfirmWinnerHandler(LoyaltyNoMobilesUnsubscribeEmailHandler):
def get(self):
data_dict, app_user = self.get_user_info()
if not data_dict or not app_user:
language = self.request.get("language", DEFAULT_LANGUAGE)
title = common_translate(language, u'Error')
text = common_translate(language, u"error-occured-unknown-try-again")
else:
azzert(data_dict['a'] == "loyalty_no_mobiles_lottery_winner")
service_email = data_dict['e']
service_identity_user = users.User(service_email)
service_user, service_identity = get_service_identity_tuple(service_identity_user)
user_profile = db.get(UserProfile.createKey(app_user))
if user_profile:
language = self.request.get("language", user_profile.language)
inbox_message = SolutionInboxMessage.get(data_dict['mk'])
if redeem_lottery_winners(service_user, service_identity, app_user, user_profile.name, inbox_message):
title = common_translate(language, u'Success')
text = common_translate(language, u'loyalty-lottery-loot-receive')
else:
title = common_translate(language, u'Error')
text = common_translate(language, u'Unfortunately you have not confirmed on time and lost your chance')
else:
language = self.request.get("language", DEFAULT_LANGUAGE)
title = common_translate(language, u'Error')
text = common_translate(language, u"error-occured-unknown-try-again")
params = {
'title': title,
'text': text
}
jinja_template = JINJA_ENVIRONMENT.get_template('pages/loyalty_title_text.html')
self.response.out.write(jinja_template.render(params))
|
|
import math
import os
import click
import time
import datetime
from time import mktime
from itertools import chain, islice
import json
import copy
import pandas as pd
from cachetools import cached, TTLCache
from pymongo.errors import DuplicateKeyError
from tqdm import tqdm
from mwclient import Site
from pymongo import MongoClient
from scheduled_bots.pbb_tracker.connect_mysql import query_wikidata_mysql
from wikidataintegrator.wdi_helpers import id_mapper
from wikidataintegrator.wdi_core import WDItemEngine
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
CACHE_SIZE = 99999
CACHE_TIMEOUT_SEC = 300 # 5 min
site = Site(('https', 'www.wikidata.org'))
site.login(WDUSER, WDPASS)
def chunks(iterable, size):
it = iter(iterable)
item = list(islice(it, size))
while item:
yield item
item = list(islice(it, size))
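# Example (not part of the original module): list(chunks(range(5), 2)) -> [[0, 1], [2, 3], [4]]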
@cached(TTLCache(CACHE_SIZE, CACHE_TIMEOUT_SEC))
def getConceptLabels(qids):
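# Takes an iterable of QIDs (a tuple in practice, so the TTL cache can hash it) and returns a
# {qid: English label or ''} dict, e.g. getConceptLabels(('Q42',)) would give {'Q42': 'Douglas Adams'}.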
qids = "|".join({qid.replace("wd:", "") if qid.startswith("wd:") else qid for qid in qids})
try:
wd = site.api('wbgetentities', **{'ids': qids, 'languages': 'en', 'format': 'json', 'props': 'labels'})[
'entities']
return {k: v['labels']['en']['value'] if 'labels' in v and 'en' in v['labels'] else '' for k, v in wd.items()}
except Exception as e:
print(e)
return {k: "" for k in qids}
def get_property_types():
# {'CommonsMedia', 'Time', 'Quantity', 'WikibaseProperty', 'WikibaseItem', 'GlobeCoordinate',
# 'String', 'ExternalId', 'Math', 'Monolingualtext', 'TabularData', 'Url', 'GeoShape'}
query = "SELECT ?p ?pt WHERE {?p wikibase:propertyType ?pt}"
results = WDItemEngine.execute_sparql_query(query)['results']['bindings']
results = [{k: v['value'] for k, v in item.items()} for item in results]
prop_wdtype = {x['p'].split("/")[-1]: x['pt'].split("#")[-1] for x in results}
return prop_wdtype
PROP_TYPE = get_property_types()
class Change:
def __init__(self, change_type, qid='', pid='', value='', value_label='', user='',
timestamp='', reference=list(), revid=None, comment=''):
self.change_type = change_type
self.qid = qid
self.qid_label = ''
self.pid = pid
self.pid_label = ''
self.value = value
self.value_label = value_label
self.user = user
self.timestamp = timestamp
self.count = 0
self.metadata = dict()
self.reference = reference
self.ref_list = []
self.revid = revid
self.comment = comment
@property
def url(self):
return "https://www.wikidata.org/w/index.php?diff=prev&oldid={}".format(self.revid)
def __repr__(self):
return " | ".join(map(str, [self.change_type, self.qid, self.qid_label, self.pid, self.pid_label, self.value,
self.value_label, self.user]))
def to_dict(self):
d = copy.deepcopy(self.__dict__)
d['has_ref'] = bool(d['reference'])
del d['reference']
d['ref_str'] = ",".join(
"{} ({}):{} ({})".format(x['prop_label'], x['prop'], x['value_label'], x['value']) for x in d['ref_list'])
del d['ref_list']
d['merge'] = True if 'merge' in d['comment'] else False
d['url'] = self.url
return d
@staticmethod
def lookupLabels(changes):
pids = set(s.pid for s in changes)
qids = set(s.qid for s in changes)
values = set(s.value for s in changes if s.value and PROP_TYPE.get(s.pid) == "WikibaseItem")
ref_qids = set(chain(*[
[s['value'] for s in change.ref_list if s['value'] and PROP_TYPE.get(s['prop']) == "WikibaseItem"]
for change in changes]))
ref_pids = set(chain(*[[s['prop'] for s in change.ref_list] for change in changes]))
labels = dict()
x = pids | qids | values | ref_qids | ref_pids
x = set(y for y in x if y)
for chunk in tqdm(chunks(x, 500), total=len(x) / 500):
l = getConceptLabels(tuple(chunk))
labels.update(l)
for c in changes:
if c.pid and c.pid in labels:
c.pid_label = labels[c.pid]
if c.qid and c.qid in labels:
c.qid_label = labels[c.qid]
if c.value and c.value in labels:
c.value_label = labels[c.value]
for ref in c.ref_list:
ref['value_label'] = labels.get(ref['value'], '')
ref['prop_label'] = labels.get(ref['prop'], '')
def pretty_refs(self):
"""
refs = [{'hash': '6a25eeddbaf5d49fc4cbb053c46c837c2ae40581',
'snaks': {'P248': [{'datavalue': {'type': 'wikibase-entityid',
'value': {'entity-type': 'item',
'id': 'Q9049250',
'numeric-id': 9049250}},
'hash': 'c452e8fc259131192625f0201037bd6577681ccb',
'property': 'P248',
'snaktype': 'value'}]},
'snaks-order': ['P248']}]
"""
# "stated in (P248): WikiSkripta (Q9049250)|other prop (P1234): '123'"
ref_list = []
for ref in self.reference:
for snak in chain(*ref['snaks'].values()):
value = get_claim_value(snak)
prop = snak['property']
ref_list.append({'value': value, 'prop': prop, 'value_label': '', 'prop_label': ''})
self.ref_list = ref_list
def get_claim_value(claim):
mainsnak = claim
if 'datavalue' not in mainsnak:
print("no datavalue: {}".format(mainsnak))
return 'none'
if mainsnak['datavalue']['type'] in {'wikibase-entityid'}:
return mainsnak['datavalue']['value']['id']
elif mainsnak['datavalue']['type'] in {'external-id', 'string'}:
return mainsnak['datavalue']['value']
elif mainsnak['datavalue']['type'] in {'quantity'}:
v = mainsnak['datavalue']['value']
if 'lowerBound' in v:
return '^'.join((v['amount'], v['lowerBound'], v['upperBound'], v['unit']))
else:
return '^'.join((v['amount'], v['unit']))
elif mainsnak['datavalue']['type'] in {'monolingualtext'}:
return mainsnak['datavalue']['value']['text']
elif mainsnak['datavalue']['type'] in {'globe-coordinate', 'time', 'commonsMedia'}:
# print(mainsnak)
return 'none'
else:
print(mainsnak)
return 'none'
def detect_claim_change(claimsx, claimsy):
s = []
if len(claimsx) == 0:
claimsx = dict()
if len(claimsy) == 0:
claimsy = dict()
# props in x but not in y
props_missing_y = set(claimsx.keys()) - set(claimsy.keys())
for prop in props_missing_y:
for claim in claimsx[prop]:
s.append(Change("REMOVE", pid=prop, value=get_claim_value(claim['mainsnak']),
reference=claim.get('references', [])))
# props in y but not in x
props_missing_x = set(claimsy.keys()) - set(claimsx.keys())
for prop in props_missing_x:
for claim in claimsy[prop]:
s.append(Change("ADD", pid=prop, value=get_claim_value(claim['mainsnak']),
reference=claim.get('references', [])))
# for props in both, get the values
props_in_both = set(claimsx.keys()) & set(claimsy.keys())
for prop in props_in_both:
values_x = set(get_claim_value(claim['mainsnak']) for claim in claimsx[prop])
values_y = set(get_claim_value(claim['mainsnak']) for claim in claimsy[prop])
# values in x but not in y
missing_y = values_x - values_y
# values in y but not in x
missing_x = values_y - values_x
for m in missing_y:
s.append(Change("REMOVE", pid=prop, value=m))
for m in missing_x:
ref = [x.get('references', []) for x in claimsy[prop] if m == get_claim_value(x['mainsnak'])][0]
s.append(Change("ADD", pid=prop, value=m, reference=ref))
return s
def detect_changes(revisions, qid):
c = []
for idx in range(len(revisions) - 1):
y = revisions[idx]
x = revisions[idx + 1]
claimsx = x['claims']
claimsy = y['claims']
changes = detect_claim_change(claimsx, claimsy)
changes = [x for x in changes if x]
for change in changes:
change.qid = qid
change.user = revisions[idx]['user']
change.timestamp = revisions[idx]['timestamp']
change.metadata = revisions[0]['metadata'] if 'metadata' in revisions[0] else dict()
change.revid = revisions[idx]['revid']
change.comment = revisions[idx]['comment']
if changes:
c.append(changes)
return list(chain(*c))
def process_changes(changes):
# if a user adds a value to a prop, and then another user removes it, cancel out both revisions
# example: https://www.wikidata.org/w/index.php?title=Q27869338&action=history
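# Illustrative case (hypothetical values): an ADD of (Q1, P699, '123') followed by a REMOVE of the
# same qid/pid/value pair causes both Change objects to be dropped from the returned list.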
changes = sorted(changes, key=lambda x: x.timestamp)
for c in changes:
for other in changes:
if (c != other) and (c.qid == other.qid) and (c.pid == other.pid) and (c.value == other.value):
if c.change_type == "ADD" and other.change_type == "REMOVE":
changes = [x for x in changes if x not in {c, other}]
return changes
def process_ld_changes(changes):
# only show the label changes if the first and last values are different
changes = sorted(changes, key=lambda x: x.timestamp)
if changes[0].value != changes[-1].value:
return [changes[0], changes[-1]]
else:
return []
def process_alias_changes(changes):
# only show the alias changes if the first and last values are different
changes = sorted(changes, key=lambda x: x.timestamp)
if changes[0].value != changes[-1].value:
return [changes[0], changes[-1]]
else:
return []
def store_revision(coll, rev, metadata):
if '*' not in rev:
# this revision was deleted
return None
d = json.loads(rev['*'])
del rev['*']
d.update(rev)
d['_id'] = d['revid']
d['metadata'] = metadata if metadata else dict()
if isinstance(d['timestamp'], time.struct_time):
d['timestamp'] = datetime.datetime.fromtimestamp(mktime(d['timestamp']))
elif not isinstance(d['timestamp'], str):
d['timestamp'] = time.strftime(d['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
try:
coll.insert_one(d)
except DuplicateKeyError:
pass
def get_revisions_past_weeks(qids, weeks):
"""
Get the revision IDs for revisions on `qids` items in the past `weeks` weeks
:param qids: set of qids
:param weeks: int
:return:
"""
revisions = set()
qids_str = '"' + '","'.join(qids) + '"'
for week in tqdm(range(weeks)):
query = '''select rev_id, rev_page, rev_timestamp, page_id, page_namespace, page_title, page_touched FROM revision
inner join page on revision.rev_page = page.page_id WHERE
rev_timestamp > DATE_FORMAT(DATE_SUB(DATE_SUB(NOW(),INTERVAL {week} WEEK), INTERVAL 1 WEEK),'%Y%m%d%H%i%s') AND
rev_timestamp < DATE_FORMAT(DATE_SUB(NOW(), INTERVAL {week} WEEK),'%Y%m%d%H%i%s') AND
page_content_model = "wikibase-item" AND
page.page_title IN({qids});
'''.format(qids=qids_str, week=week)
revision_df = query_wikidata_mysql(query)
print(len(revision_df))
print(revision_df.head(2))
print(revision_df.tail(2))
revisions.update(set(revision_df.rev_id))
return revisions
def get_merges(qids, weeks):
# input: a list of revision IDs
# output: list of revision IDs that are tagged as a merge
# comment example: /* wbcreateredirect:0||Q20948851|Q9410367 */
qids_str = '"' + '","'.join(qids) + '"'
query = '''select page.*, redirect.* FROM page
inner join redirect on page.page_id = redirect.rd_from WHERE
(page.page_title IN({qids}) OR
redirect.rd_title IN ({qids}));'''.format(qids=qids_str)
redirect_df = query_wikidata_mysql(query)
redirect_df.page_title = redirect_df.page_title.apply(bytes.decode)
redirect_df.rd_title = redirect_df.rd_title.apply(bytes.decode)
redirect_df.page_links_updated = redirect_df.page_links_updated.apply(
lambda x: datetime.datetime.strptime(x.decode(), '%Y%m%d%H%M%S'))
redirect_df2 = redirect_df[redirect_df.page_links_updated > datetime.datetime.now() - datetime.timedelta(weeks=weeks)]
names = ['page_id', 'page_namespace', 'page_title', 'page_links_updated', 'page_latest', 'rd_from', 'rd_title']
redirect_df2 = redirect_df2[names]
return redirect_df2
def get_revision_ids_needed(coll, qids, weeks=1):
# Get the revision IDs for revisions on `qids` items in the past `weeks` weeks
# excluding the ones we already have in `coll`
revisions = get_revisions_past_weeks(qids, weeks)
have_revisions = set([x['_id'] for x in coll.find({}, {'id': True})])
print(len(have_revisions))
need_revisions = revisions - have_revisions
print(len(need_revisions))
return need_revisions
def download_revisions(coll, revision_ids, pid, qid_extid_map):
for chunk in tqdm(chunks(revision_ids, 100), total=len(revision_ids) / 100):
revs = site.revisions(chunk, prop='ids|timestamp|flags|comment|user|content')
for rev in revs:
qid = rev['pagetitle']
if rev.get('contentmodel') != "wikibase-item":
continue
store_revision(coll, rev, {pid: qid_extid_map.get(qid, '')})
def process_revisions(coll, qids, weeks):
# process the changes for each qid
last_updated = datetime.datetime.now() - datetime.timedelta(weeks=weeks)
changes = []
for qid in tqdm(list(qids)[:]):
revisions = sorted(coll.find({'id': qid}), key=lambda x: x['timestamp'], reverse=True)
revisions = [r for r in revisions if r['timestamp'] > last_updated]
c = detect_changes(revisions, qid)
c = process_changes(c)
changes.extend(c)
return changes
def process_lda_revisions(coll, qids, weeks):
# we only care about what happened between the first and last revision
# not capturing intermediate changes
last_updated = datetime.datetime.now() - datetime.timedelta(weeks=weeks)
changes = []
for qid in tqdm(list(qids)[:]):
revisions = sorted(coll.find({'id': qid}), key=lambda x: x['timestamp'], reverse=True)
revisions = [r for r in revisions if r['timestamp'] > last_updated]
if not revisions:
continue
x = revisions[0]
y = revisions[-1]
user = x['user']
timestamp = x['timestamp']
revid = x['revid']
comment = x['comment']
xl = x['labels']['en']['value'] if 'en' in x['labels'] else ''
yl = y['labels']['en']['value'] if 'en' in y['labels'] else ''
if xl != yl:
changes.append(Change(change_type="labels", value=xl, qid=qid,
user=user, timestamp=timestamp, revid=revid, comment=comment))
xd = x['descriptions']['en']['value'] if 'en' in x['descriptions'] else ''
yd = y['descriptions']['en']['value'] if 'en' in y['descriptions'] else ''
if xd != yd:
changes.append(Change(change_type="descriptions", value=xd, qid=qid,
user=user, timestamp=timestamp, revid=revid, comment=comment))
x_aliases = set(a['value'] for a in x['aliases']['en']) if 'en' in x['aliases'] else set()
y_aliases = set(a['value'] for a in y['aliases']['en']) if 'en' in y['aliases'] else set()
for change in y_aliases - x_aliases:
changes.append(Change("remove_alias", value=change, qid=qid,
user=user, timestamp=timestamp, revid=revid, comment=comment))
for change in x_aliases - y_aliases:
changes.append(Change("add_alias", value=change, qid=qid,
user=user, timestamp=timestamp, revid=revid, comment=comment))
return changes
@click.command()
@click.option('--pid', default="P699", help='property filter')
@click.option('--idfilter', default='', help='additional filter. example "P703:Q15978631;P1057:Q847102"')
@click.option('--weeks', default=2, help='number of weeks ago')
@click.option('--force-update', is_flag=True, help='skip checking for existing revision')
@click.option('--filter-user', default="ProteinBoxBot", help='filter out changes by this user')
def main(pid, weeks, idfilter, force_update, filter_user):
"""
from tracker import *
pid="P699"
idfilter=""
weeks=52
force_update=False
filter_user="ProteinBoxBot"
"""
coll_name = pid + "_" + idfilter if idfilter else pid
save_name = coll_name + "_" + str(datetime.date.today()) + "_{}weeks".format(weeks) + ".xls"
writer = pd.ExcelWriter(save_name)
coll = MongoClient().wikidata[coll_name]
coll.create_index("id")
idfilter = [(k.split(":")[0], k.split(":")[1]) for k in idfilter.split(";")] if idfilter else []
extid_qid = id_mapper(pid, idfilter)
qid_extid = {v: k for k, v in extid_qid.items()}
qids = extid_qid.values()
"""
# what are the date extents of these items?
# get the most recent timestamp and figure out how many weeks ago it was
# warning, only checks the most recent timestamp!
# as in, if you request one week, and then one year, it won't get anything before one week ago
# unless force_update=True
weeks_to_dl = weeks
if not force_update:
timestamps = set(x['timestamp'] for x in coll.find({'id': {'$in': list(qids)}}, {'timestamp': True}))
if timestamps:
if datetime.date.today() == max(timestamps).date():
print("most recent revision is today, skipping")
weeks_to_dl = 0
else:
weeks_to_dl = math.ceil(abs((max(timestamps) - datetime.datetime.now()).days / 7)) + 1
print("Most recent revision stored: {}".format(max(timestamps)))
print("Getting revisions from the past {} weeks".format(weeks_to_dl))
need_revisions = get_revision_ids_needed(coll, qids, weeks=weeks_to_dl)
print("Downloading revisions")
download_revisions(coll, need_revisions, pid, qid_extid)
print("Processing changes in the past {} weeks".format(weeks))
changes = process_revisions(coll, qids, weeks)
for change in changes:
change.pretty_refs()
Change.lookupLabels(changes)
df = pd.DataFrame([x.to_dict() for x in changes])
# reorder columns
if not df.empty:
df = df[["revid", "url", "timestamp", "user", "change_type", "comment", "has_ref", "merge",
"metadata", "qid", "qid_label", "pid", "pid_label", "value", "value_label", "ref_str"]]
df.to_excel(writer, sheet_name="changes")
if not df.empty and filter_user:
df = df.query("user != @filter_user")
if not df.empty:
df = df.query("user != 'KrBot'")
df.to_excel(writer, sheet_name="changes_filtered")
print("Processing label changes in the past {} weeks".format(weeks))
lda_changes = process_lda_revisions(coll, qids, weeks)
Change.lookupLabels(lda_changes)
lda_df = pd.DataFrame([x.to_dict() for x in lda_changes])
if not lda_df.empty:
lda_df = lda_df[["revid", "url", "timestamp", "user", "change_type", "comment",
"merge", "qid", "qid_label", "value"]]
lda_df.to_excel(writer, sheet_name="labels")
"""
print("Getting redirects")
redirect_df = get_merges(qids, weeks)
redirect_df['history_url'] = redirect_df.page_title.apply(lambda x: "https://www.wikidata.org/w/index.php?title={}&action=history".format(x))
redirect_df['url'] = redirect_df.page_latest.apply(lambda x: "https://www.wikidata.org/w/index.php?diff={}".format(x))
redirect_df.to_excel(writer, sheet_name="redirects")
writer.save()
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
"""
Copyright 2010-2017 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# batch_run_bbp.py
# v1.2, 20110819
Process a folder of XML files with the broadband platform in batch mode
"""
import optparse
import os
import sys
import shutil
import time
import datetime
from install_cfg import InstallCfg
import bband_utils
import seqnum
# LOG run commands only
LOG_ONLY = True
def findXML():
parser = optparse.OptionParser()
parser.add_option("-i", "--in-dir", dest="inDir",
help="Input folder with XML files", metavar="IN_DIR")
parser.add_option("-r", "--resume", action="store_true", dest="resume",
help="Resume batch processing of XML files",
metavar="RESUME")
parser.add_option("-o", "--out-dir", dest="outDir",
help="Output folder for simulation data (indata, outdata, log, tmpdata) with simID",
metavar="OUT_DIR")
parser.add_option("-f", "--force", action="store_true", dest="force",
help="Force overwrite of BBP folders (indata, outdata, log, tmpdata) with same simID",
metavar="FORCE")
(options, args) = parser.parse_args()
if not options.inDir:
parser.error("Folder with XML files is required!")
if os.path.exists(options.inDir):
files = os.listdir(options.inDir)
else:
print "Invalid input dir: %s" % options.inDir
sys.exit(1)
if not files:
print "No XML files were found in input dir: %s" % options.inDir
sys.exit(1)
if options.outDir:
if not os.path.exists(options.outDir):
try:
os.mkdir(options.outDir)
except:
print ("Failed to create output dir: %s! Aborting..." %
(options.outDir))
sys.exit(1)
if not os.path.exists("%s/indata" % options.outDir):
try:
os.mkdir("%s/indata" % options.outDir)
except:
print ("Failed to create dir: %s/indata! Aborting..." %
(options.outDir))
sys.exit(1)
if not os.path.exists("%s/outdata" % options.outDir):
try:
os.mkdir("%s/outdata" % options.outDir)
except:
print ("Failed to create dir: %s/outdata! Aborting..." %
(options.outDir))
sys.exit(1)
if not os.path.exists("%s/tmpdata" % options.outDir):
try:
os.mkdir("%s/tmpdata" % options.outDir)
except:
print ("Failed to create dir: %s/tmpdata! Aborting..." %
(options.outDir))
sys.exit(1)
if not os.path.exists("%s/logs" % options.outDir):
try:
os.mkdir("%s/logs" % options.outDir)
except:
print ("Failed to create dir: %s/logs! Aborting..." %
(options.outDir))
sys.exit(1)
if options.force:
print "WARNING: Force overwrite is ON!"
print "Some existing BBP data folders will be overwritten!"
install = InstallCfg()
num_sims = 0
total_sims = 0
if options.inDir[-1:] == os.path.sep:
options.inDir = options.inDir[0:-1]
files = sorted([f for f in os.listdir(options.inDir) if os.path.isfile(options.inDir + os.path.sep + f)])
resume_list = ""
if options.resume==True:
if os.path.exists("%s/batch_resume.txt" % install.A_OUT_LOG_DIR):
resume_fp = open("%s/batch_resume.txt" % install.A_OUT_LOG_DIR,
'r')
resume_list = resume_fp.readlines()
resume_fp.close()
else:
if os.path.exists("%s/batch_resume.txt" % install.A_OUT_LOG_DIR):
os.remove("%s/batch_resume.txt" % install.A_OUT_LOG_DIR)
run_list = []
for file in files:
if file.endswith(".xml"):
total_sims += 1
if options.resume == True:
match = False
for i in resume_list:
if file == i.strip():
match = True
print "Skipping %s" % file
break
if match == True:
continue
run_list.append(os.path.abspath(os.path.join(options.inDir, file)))
num_sims += 1
if not num_sims == total_sims:
sims = "%d/%d" % (num_sims, total_sims)
else:
sims = str(num_sims)
# Setup the simlist and movelist for logging
simlist = []
mvlist = []
print "Preparing to run %s simulations from %s" % (sims, options.inDir)
run_count = 1
for file in run_list:
filename = os.path.basename(file)
file_base = filename[0:filename.find(".xml")]
pieces = file_base.split("_")
simID = -1
if len(pieces) > 1:
try:
simID = int(pieces[0])
except ValueError:
print "Failed to fetch simID from XML file name: %s" % file
if simID==-1:
simID = int(seqnum.getSeqNum())
print "Running with generated simID: %d" % simID
t0 = time.time()
start_time = time.strftime("%Y/%m/%d-%H:%M:%S", time.localtime())
indatadir = "%s/%d" % (install.A_IN_DATA_DIR, simID)
outdatadir = "%s/%d" % (install.A_OUT_DATA_DIR, simID)
tmpdatadir = "%s/%d" % (install.A_TMP_DATA_DIR, simID)
logdir = "%s/%d" % (install.A_OUT_LOG_DIR, simID)
logfiledir = "%s/logs/%d" % (options.outDir, simID)
log_file = "%s/%s.log" % (logfiledir, file_base)
# Make sure we have an absolute path for log_file
log_file = os.path.abspath(log_file)
if not os.path.exists(logfiledir):
try:
os.mkdir(logfiledir)
except:
print ("Failed to create dir: %s! Aborting..." %
(logfiledir))
sys.exit(1)
dir_exists = False
if os.path.exists(indatadir):
if options.force:
shutil.rmtree(indatadir)
else:
dir_exists = True
if os.path.exists(tmpdatadir):
if options.force:
shutil.rmtree(tmpdatadir)
else:
dir_exists = True
if os.path.exists(outdatadir):
if options.force:
shutil.rmtree(outdatadir)
else:
dir_exists = True
if os.path.exists(logdir):
if options.force:
shutil.rmtree(logdir)
else:
dir_exists = True
if dir_exists:
print "BBP folders with simID %d exists!"
print "Force overwrite is OFF, skipping %s" % (simID, file)
continue
print "Processing file (%d/%d): %s" % (run_count, num_sims, file)
cmd = "%s/run_bbp.py -x %s -s %d -l %s" % (install.A_COMP_DIR,
file, simID, log_file)
if (LOG_ONLY == True):
simlist.append("%s\n" % (cmd))
if options.outDir:
# Notes:
# 1) force option not currently supported while
# logging sims
# 2) assumption is that dir outdir/simid
# does not already exist
od_indatadir = "%s/indata" % (options.outDir)
od_outdatadir = "%s/outdata" % (options.outDir)
od_tmpdatadir = "%s/tmpdata" % (options.outDir)
od_logdir = "%s/logs" % (options.outDir)
mvlist.append("mv %s %s\n" % (indatadir, od_indatadir))
mvlist.append("mv %s %s\n" % (tmpdatadir, od_tmpdatadir))
mvlist.append("mv %s %s\n" % (outdatadir, od_outdatadir))
mvlist.append("mv %s %s\n" % (logdir, od_logdir))
run_count += 1
continue
bband_utils.runprog(cmd, False)
if options.outDir:
od_indatadir = "%s/indata/%d" % (options.outDir, simID)
od_outdatadir = "%s/outdata/%d" % (options.outDir, simID)
od_tmpdatadir = "%s/tmpdata/%d" % (options.outDir, simID)
od_logdir = "%s/logs/%d" % (options.outDir, simID)
od_dir_exists = False
if os.path.exists(od_indatadir):
if options.force:
shutil.rmtree(od_indatadir)
else:
od_dir_exists = True
if os.path.exists(od_tmpdatadir):
if options.force:
shutil.rmtree(od_tmpdatadir)
else:
od_dir_exists = True
if os.path.exists(od_outdatadir):
if options.force:
shutil.rmtree(od_outdatadir)
else:
od_dir_exists = True
if os.path.exists(od_logdir):
if options.force:
shutil.rmtree(od_logdir)
else:
od_dir_exists = True
if od_dir_exists:
print "Warning: Folder(s) with simID %d exists in output folder %s! Force overwrite is OFF, output will be left in BBP folders!" % (simID, options.outDir)
else:
if os.path.exists(indatadir):
shutil.move(indatadir, od_indatadir)
if os.path.exists(tmpdatadir):
shutil.move(tmpdatadir, od_tmpdatadir)
if os.path.exists(outdatadir):
shutil.move(outdatadir, od_outdatadir)
if os.path.exists(logdir):
shutil.move(logdir, od_logdir)
run_time = time.time() - t0
run_time_str = str(datetime.timedelta(seconds=run_time))
end_time = time.strftime("%Y/%m/%d-%H:%M:%S", time.localtime())
if options.outDir:
if not od_dir_exists:
out_data_dir = od_outdatadir
else:
out_data_dir = "%s/%d" % (install.A_OUT_DATA_DIR, simID)
files = os.listdir(out_data_dir)
if files:
try:
run_log_fp = open("%s/batch_run.log" % options.inDir, 'a')
except IOError:
run_log_fp = open("%s/batch_run.log" % install.A_OUT_LOG_DIR,
'a')
run_log_fp.write("%d\t%s\t%s\t%s\t%s\t%s\n" %
(simID, os.path.abspath(file),
os.path.abspath(out_data_dir),
start_time, end_time, run_time_str))
run_log_fp.flush()
run_log_fp.close()
resume_fp = open("%s/batch_resume.txt" % install.A_OUT_LOG_DIR,
'a')
resume_fp.write("%s\n" % filename)
resume_fp.flush()
resume_fp.close()
run_count +=1
# Write the sims to the execlog
print "Opening %s/batch_run_bbp_sims.log" % (options.inDir)
execlog = open("%s/batch_run_bbp_sims.log" % (options.inDir), 'w')
for sim in simlist:
execlog.write(sim)
execlog.close()
# Write the moves to the execlog
print "Opening %s/batch_run_bbp_moves.log" % (options.inDir)
execlog = open("%s/batch_run_bbp_moves.log" % (options.inDir), 'w')
for mv in mvlist:
execlog.write(mv)
execlog.close()
if __name__ == '__main__':
findXML()
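# Usage sketch (hypothetical paths, assuming the platform's run_bbp.py is
# installed under install.A_COMP_DIR): process every XML file found in
# ./xml_runs, collect the per-simulation BBP folders under ./batch_out, and
# skip any file already listed in batch_resume.txt:
#
#     python batch_run_bbp.py -i ./xml_runs -o ./batch_out --resume
#
# With LOG_ONLY set to True the script only writes the run_bbp.py commands to
# batch_run_bbp_sims.log and the corresponding mv commands to
# batch_run_bbp_moves.log instead of executing them.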
|
|
"""
Form classes
"""
from __future__ import unicode_literals
import copy
import datetime
from collections import OrderedDict
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
from django.forms.fields import Field, FileField
from django.forms.utils import ErrorDict, ErrorList, flatatt
from django.forms.widgets import Media, MediaDefiningClass, Textarea, TextInput
from django.utils import six
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
__all__ = ('BaseForm', 'Form')
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
class DeclarativeFieldsMetaclass(MediaDefiningClass):
"""
Metaclass that collects Fields declared on the base classes.
"""
def __new__(mcs, name, bases, attrs):
# Collect fields from current class.
current_fields = []
for key, value in list(attrs.items()):
if isinstance(value, Field):
current_fields.append((key, value))
attrs.pop(key)
current_fields.sort(key=lambda x: x[1].creation_counter)
attrs['declared_fields'] = OrderedDict(current_fields)
new_class = (super(DeclarativeFieldsMetaclass, mcs)
.__new__(mcs, name, bases, attrs))
# Walk through the MRO.
declared_fields = OrderedDict()
for base in reversed(new_class.__mro__):
# Collect fields from base class.
if hasattr(base, 'declared_fields'):
declared_fields.update(base.declared_fields)
# Field shadowing.
for attr, value in base.__dict__.items():
if value is None and attr in declared_fields:
declared_fields.pop(attr)
new_class.base_fields = declared_fields
new_class.declared_fields = declared_fields
return new_class
@python_2_unicode_compatible
class BaseForm(object):
# This is the main implementation of all the Form logic. Note that this
# class is different than Form. See the comments by the Form class for more
# information. Any improvements to the form API should be made to *this*
# class, not to the Form class.
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False):
self.is_bound = data is not None or files is not None
self.data = data or {}
self.files = files or {}
self.auto_id = auto_id
self.prefix = prefix
self.initial = initial or {}
self.error_class = error_class
# Translators: This is the default suffix added to form field labels
self.label_suffix = label_suffix if label_suffix is not None else _(':')
self.empty_permitted = empty_permitted
self._errors = None # Stores the errors after clean() has been called.
self._changed_data = None
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = copy.deepcopy(self.base_fields)
def __str__(self):
return self.as_table()
def __repr__(self):
if self._errors is None:
is_valid = "Unknown"
else:
is_valid = self.is_bound and not bool(self._errors)
return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % {
'cls': self.__class__.__name__,
'bound': self.is_bound,
'valid': is_valid,
'fields': ';'.join(self.fields),
}
def __iter__(self):
for name in self.fields:
yield self[name]
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError(
"Key %r not found in '%s'" % (name, self.__class__.__name__))
return BoundField(self, field, name)
@property
def errors(self):
"Returns an ErrorDict for the data provided for the form"
if self._errors is None:
self.full_clean()
return self._errors
def is_valid(self):
"""
Returns True if the form has no errors. Otherwise, False. If errors are
being ignored, returns False.
"""
return self.is_bound and not self.errors
def add_prefix(self, field_name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name
def add_initial_prefix(self, field_name):
"""
Add an 'initial' prefix for checking dynamic initial values
"""
return 'initial-%s' % self.add_prefix(field_name)
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in self.fields.items():
html_class_attr = ''
bf = self[name]
# Escape and cache in local variable.
bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])
if bf.is_hidden:
if bf_errors:
top_errors.extend(
[_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)}
for e in bf_errors])
hidden_fields.append(six.text_type(bf))
else:
# Create a 'class="..."' attribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row and bf_errors:
output.append(error_row % force_text(bf_errors))
if bf.label:
label = conditional_escape(force_text(bf.label))
label = bf.label_tag(label) or ''
else:
label = ''
if field.help_text:
help_text = help_text_html % force_text(field.help_text)
else:
help_text = ''
output.append(normal_row % {
'errors': force_text(bf_errors),
'label': force_text(label),
'field': six.text_type(bf),
'help_text': help_text,
'html_class_attr': html_class_attr,
'field_name': bf.html_name,
})
if top_errors:
output.insert(0, error_row % force_text(top_errors))
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = ''.join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {
'errors': '',
'label': '',
'field': '',
'help_text': '',
'html_class_attr': html_class_attr,
'field_name': '',
})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return mark_safe('\n'.join(output))
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row='<tr><td colspan="2">%s</td></tr>',
row_ender='</td></tr>',
help_text_html='<br /><span class="helptext">%s</span>',
errors_on_separate_row=False)
def as_ul(self):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(
normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row='<li>%s</li>',
row_ender='</li>',
help_text_html=' <span class="helptext">%s</span>',
errors_on_separate_row=False)
def as_p(self):
"Returns this form rendered as HTML <p>s."
return self._html_output(
normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row='%s',
row_ender='</p>',
help_text_html=' <span class="helptext">%s</span>',
errors_on_separate_row=True)
def non_field_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Returns an empty ErrorList if there
are none.
"""
return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield'))
def _raw_value(self, fieldname):
"""
Returns the raw_value for a particular field name. This is just a
convenient wrapper around widget.value_from_datadict.
"""
field = self.fields[fieldname]
prefix = self.add_prefix(fieldname)
return field.widget.value_from_datadict(self.data, self.files, prefix)
def add_error(self, field, error):
"""
Update the content of `self._errors`.
The `field` argument is the name of the field to which the errors
should be added. If its value is None the errors will be treated as
NON_FIELD_ERRORS.
The `error` argument can be a single error, a list of errors, or a
dictionary that maps field names to lists of errors. What we define as
an "error" can be either a simple string or an instance of
ValidationError with its message attribute set and what we define as
list or dictionary can be an actual `list` or `dict` or an instance
of ValidationError with its `error_list` or `error_dict` attribute set.
If `error` is a dictionary, the `field` argument *must* be None and
errors will be added to the fields that correspond to the keys of the
dictionary.
"""
if not isinstance(error, ValidationError):
# Normalize to ValidationError and let its constructor
# do the hard work of making sense of the input.
error = ValidationError(error)
if hasattr(error, 'error_dict'):
if field is not None:
raise TypeError(
"The argument `field` must be `None` when the `error` "
"argument contains errors for multiple fields."
)
else:
error = error.error_dict
else:
error = {field or NON_FIELD_ERRORS: error.error_list}
for field, error_list in error.items():
if field not in self.errors:
if field != NON_FIELD_ERRORS and field not in self.fields:
raise ValueError(
"'%s' has no field named '%s'." % (self.__class__.__name__, field))
if field == NON_FIELD_ERRORS:
self._errors[field] = self.error_class(error_class='nonfield')
else:
self._errors[field] = self.error_class()
self._errors[field].extend(error_list)
if field in self.cleaned_data:
del self.cleaned_data[field]
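# Illustrative sketch (not part of the original module): the three shapes
# accepted by add_error(), assuming a bound form instance `form` with a
# field named 'name'.
#
#     form.add_error('name', 'This name is taken.')               # single message
#     form.add_error(None, ValidationError('Form-wide problem.')) # non-field error
#     form.add_error(None, {'name': ['Too short.', 'Too plain.']})
#
# In the dictionary case `field` must be None; the keys select the target
# fields, and NON_FIELD_ERRORS collects anything not tied to a field.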
def has_error(self, field, code=None):
if code is None:
return field in self.errors
if field in self.errors:
for error in self.errors.as_data()[field]:
if error.code == code:
return True
return False
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
def _clean_fields(self):
for name, field in self.fields.items():
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
try:
if isinstance(field, FileField):
initial = self.initial.get(name, field.initial)
value = field.clean(value, initial)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.cleaned_data[name] = value
except ValidationError as e:
self.add_error(name, e)
def _clean_form(self):
try:
cleaned_data = self.clean()
except ValidationError as e:
self.add_error(None, e)
else:
if cleaned_data is not None:
self.cleaned_data = cleaned_data
def _post_clean(self):
"""
An internal hook for performing additional cleaning after form cleaning
is complete. Used for model validation in model forms.
"""
pass
def clean(self):
"""
Hook for doing any extra form-wide cleaning after Field.clean() has been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.cleaned_data
def has_changed(self):
"""
Returns True if data differs from initial.
"""
return bool(self.changed_data)
@property
def changed_data(self):
if self._changed_data is None:
self._changed_data = []
# XXX: For now we're asking the individual fields whether or not the
# data has changed. It would probably be more efficient to hash the
# initial data, store it in a hidden field, and compare a hash of the
# submitted data, but we'd need a way to easily get the string value
# for a given field. Right now, that logic is embedded in the render
# method of each widget.
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if not field.show_hidden_initial:
initial_value = self.initial.get(name, field.initial)
if callable(initial_value):
initial_value = initial_value()
else:
initial_prefixed_name = self.add_initial_prefix(name)
hidden_widget = field.hidden_widget()
try:
initial_value = field.to_python(hidden_widget.value_from_datadict(
self.data, self.files, initial_prefixed_name))
except ValidationError:
# Always assume data has changed if validation fails.
self._changed_data.append(name)
continue
if field.has_changed(initial_value, data_value):
self._changed_data.append(name)
return self._changed_data
@property
def media(self):
"""
Provide a description of all media required to render the widgets on this form
"""
media = Media()
for field in self.fields.values():
media = media + field.widget.media
return media
def is_multipart(self):
"""
Returns True if the form needs to be multipart-encoded, i.e. it has
FileInput. Otherwise, False.
"""
for field in self.fields.values():
if field.widget.needs_multipart_form:
return True
return False
def hidden_fields(self):
"""
Returns a list of all the BoundField objects that are hidden fields.
Useful for manual form layout in templates.
"""
return [field for field in self if field.is_hidden]
def visible_fields(self):
"""
Returns a list of BoundField objects that aren't hidden fields.
The opposite of the hidden_fields() method.
"""
return [field for field in self if not field.is_hidden]
class Form(six.with_metaclass(DeclarativeFieldsMetaclass, BaseForm)):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
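# Illustrative sketch (not part of the original module): how the declarative
# syntax provided by DeclarativeFieldsMetaclass is typically used. CharField
# is assumed to be importable from django.forms.fields and a configured
# Django settings module is assumed to be active.
#
#     from django.forms.fields import CharField
#
#     class ContactForm(Form):
#         name = CharField()
#         message = CharField(widget=Textarea)
#
#     form = ContactForm(data={'name': 'Ada', 'message': 'Hello'})
#     form.is_valid()        # runs full_clean() and populates cleaned_data
#     form.cleaned_data      # {'name': 'Ada', 'message': 'Hello'}
#
# The metaclass collects the two CharField instances into base_fields, and
# BaseForm.__init__ deep-copies them into self.fields for each instance.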
@python_2_unicode_compatible
class BoundField(object):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
def __str__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def __iter__(self):
"""
Yields rendered strings that comprise all widgets in this BoundField.
This really is only useful for RadioSelect widgets, so that you can
iterate over individual radio buttons in a template.
"""
id_ = self.field.widget.attrs.get('id') or self.auto_id
attrs = {'id': id_} if id_ else {}
for subwidget in self.field.widget.subwidgets(self.html_name, self.value(), attrs):
yield subwidget
def __len__(self):
return len(list(self.__iter__()))
def __getitem__(self, idx):
# Prevent unnecessary reevaluation when accessing BoundField's attrs
# from templates.
if not isinstance(idx, six.integer_types):
raise TypeError
return list(self.__iter__())[idx]
@property
def errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, self.form.error_class())
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
if self.field.localize:
widget.is_localized = True
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
if not only_initial:
name = self.html_name
else:
name = self.html_initial_name
return force_text(widget.render(name, self.value(), attrs=attrs))
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
if callable(data):
data = data()
# If this is an auto-generated default date, nix the
# microseconds for standardized handling. See #22502.
if (isinstance(data, (datetime.datetime, datetime.time)) and
not getattr(self.field.widget, 'supports_microseconds', True)):
data = data.replace(microsecond=0)
else:
data = self.field.bound_data(
self.data, self.form.initial.get(self.name, self.field.initial)
)
return self.field.prepare_value(data)
def label_tag(self, contents=None, attrs=None, label_suffix=None):
"""
Wraps the given contents in a <label>, if the field has an ID attribute.
contents should be 'mark_safe'd to avoid HTML escaping. If contents
aren't given, uses the field's HTML-escaped label.
If attrs are given, they're used as HTML attributes on the <label> tag.
label_suffix allows overriding the form's label_suffix.
"""
contents = contents or self.label
if label_suffix is None:
label_suffix = (self.field.label_suffix if self.field.label_suffix is not None
else self.form.label_suffix)
# Only add the suffix if the label does not end in punctuation.
# Translators: If found as last label character, these punctuation
# characters will prevent the default label_suffix from being appended to the label
if label_suffix and contents and contents[-1] not in _(':?.!'):
contents = format_html('{}{}', contents, label_suffix)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
id_for_label = widget.id_for_label(id_)
if id_for_label:
attrs = dict(attrs or {}, **{'for': id_for_label})
if self.field.required and hasattr(self.form, 'required_css_class'):
attrs = attrs or {}
if 'class' in attrs:
attrs['class'] += ' ' + self.form.required_css_class
else:
attrs['class'] = self.form.required_css_class
attrs = flatatt(attrs) if attrs else ''
contents = format_html('<label{}>{}</label>', attrs, contents)
else:
contents = conditional_escape(contents)
return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Returns a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
@property
def is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in smart_text(auto_id):
return smart_text(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
return widget.id_for_label(id_)
|
|
from string import rfind
from string import digits
from Evaluation import Evaluation
class Algorithm:
"""Gives a cluster list after running HAC"""
def __init__(self,similarity_table,unordered_filelist, list_of_files):
self._list_of_files = list_of_files
#given as parameter, this is result of document representation
self.table=similarity_table
#the clusters are represented as regular expressions
#final regular expression is the string representation of cluster_list
self.cluster_list = list()
#a cluster is represented as [[1,2],3]
#all the hierarchy that group at a certain level is maintained in this cluster_dict
#this keeps height as key and cluster formed at that height as its value
self.cluster_dict = dict()
#contains the indexed filenames
#each filename is given a particular id
self.unordered_filelist = unordered_filelist
#contains the classes we have in our dataset
self.classes = list()
for name in self.unordered_filelist:
cls = name[:rfind(name, "/")]
cls = cls[rfind(cls, "/")+1:]
if cls not in self.classes:
self.classes.append(cls)
self.class_count = len(self.classes)
def hierarchical_agglomerative_clustering(self):
""" implements hac and returns a regular expression of clustering result"""
is_done = False #boolean to check if algo has completed
clust_id = 1 #actually works as cluster height; this is used as the key in cluster_dict
while not is_done:
#self.print_table()
largest = -1
#a tuple contains pair of x, y at which largest value is found
index=()
for x in range(len(self.table)):
for y in range(len(self.table)):
#checks that the two documents are not the same
#and that they have not already been merged
#and whether the current similarity is greater than the largest found so far
if (self.table[x][y]!=1 and self.table[y][x]!=1) and self.table[x][y] > largest:
largest = self.table[x][y]
index = (x,y)
#if valid index is available
if index:
x,y = index
#these variables are used to find the clusters formed so far
#in which document y and document x already exist
#and store the matching cluster_list index in the respective variables
x_index = y_index = -1
#one shows that two docs are now combined as one cluster in table
self.table[x][y] = self.table[y][x] = 1
#finds y in clusters
for i in range(len(self.cluster_list)):
if self._is_value_in_cluster(self.cluster_list[i], index[0]) == True:
y_index = i
break
#finds x in clusters
for i in range(len(self.cluster_list)):
if self._is_value_in_cluster(self.cluster_list[i], index[1]) == True:
x_index = i
break
#if both documents are still not in the cluster
#create their cluster and add that to clust_dict
if y_index == -1 and x_index == -1:
self.cluster_list.append([x,y])
self.cluster_dict.update({clust_id:[x,y]})
clust_id += 1
#x and y are closest docs
#x is already part of some cluster
#find that cluster and add y to it at a new height
elif y_index == -1:
self.cluster_list[x_index] = [self.cluster_list[x_index],[x]]
self.cluster_dict.update({clust_id:self.cluster_list[x_index]})
clust_id += 1
#y and x are closest docs
#y is already part of some cluster
#find that cluster and add x to it at a new height
elif x_index == -1:
self.cluster_list[y_index] = [self.cluster_list[y_index],[y]]
self.cluster_dict.update({clust_id:self.cluster_list[y_index]})
clust_id += 1
#if both of them are part of different clusters
#combine those clusters in one at a new height
elif not x_index == -1 and not y_index == -1:
if not x_index == y_index:
#merge the two clusters if both are already selected before
a = self.cluster_list[x_index]
b = self.cluster_list[y_index]
self.cluster_list.remove(a)
self.cluster_list.remove(b)
self.cluster_list.append([a,b])
self.cluster_dict.update({clust_id:[a,b]})
clust_id += 1
#making all the columns and rows of x,y the same
#so that they can be treated as identical docs
#because they are combined into a single cluster now
for i in range(len(self.table)):
if(self.table[x][i] > self.table[y][i]):
if not self.table[y][i] == 1: self.table[y][i] = self.table[x][i]
if not self.table[i][y] == 1:self.table[i][y] = self.table[x][i]
else:
if not self.table[x][i] == 1:self.table[x][i] = self.table[y][i]
if not self.table[i][x] == 1:self.table[i][x] = self.table[y][i]
#if no valid index is available then all docs are combined
else: is_done = True
#add the final cluster as the last level in the clust_dict
self.cluster_dict.update({clust_id:self.cluster_list})
return self.cluster_list
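# Illustrative trace (hypothetical 4-document similarity table): if the
# closest pairs are found in the order (0,1), then (2,3), and the two
# resulting clusters are merged last, a possible final state is
#
#     cluster_list == [[[0, 1], [2, 3]]]
#     cluster_dict == {1: [0, 1], 2: [2, 3], 3: [[0, 1], [2, 3]],
#                      4: [[[0, 1], [2, 3]]]}
#
# i.e. each key in cluster_dict is the height at which that merge happened,
# and the final key holds the whole cluster_list.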
def get_clusters_by_class_count(self):
"""gets clusters at each level and return the list which maches the number of classes in the dataset"""
#for every class in classes
for i in range(self.class_count):
#cuts the dendrogram at certain level and return the cluster list for that level
lst = self.get_cluster_list_at_level(i+1)
#getting the number of different clusters
clusters = list()
for doc in lst:
if doc[1] not in clusters: clusters.append(doc[1])
if len(clusters) >= self.class_count: return lst
def get_clusters_by_max_purity(self):
purity = 0
final_list = list()
#for every level in the dendrogram
for i in range(len(self.cluster_dict)):
#cuts the dendrogram at certain level and return the cluster list for that level
lst = self.get_cluster_list_at_level(i+1)
EvaluationMainObject = Evaluation()
EvaluationMainObject.SetCluterList(lst)
EvaluationMainObject.SetValueOfR()
EvaluationMainObject.SetDocumentDictionary(self._list_of_files)
EvaluationMainObject.CalculateIndividualPurity()
new_purity = EvaluationMainObject.GetTotalPurity()
if purity < new_purity and purity<1:
purity = new_purity
final_list = lst
return final_list
def get_clusters_at_all_levels(self):
"returns a dictionary with level as key, and tuple as value containing clusters, purity and entropy"
cluster_list = dict()
for i in range(len(self.cluster_dict)):
#cuts the dendrogram at certain level and return the cluster list for that level
lst = self.get_cluster_list_at_level(i+1)
EvaluationMainObject = Evaluation()
EvaluationMainObject.SetCluterList(lst)
EvaluationMainObject.SetValueOfR()
EvaluationMainObject.SetDocumentDictionary(self._list_of_files)
EvaluationMainObject.CalculateIndividualPurity()
purity = EvaluationMainObject.GetTotalPurity()
EvaluationMainObject.CalculateEntropy()
entropy = EvaluationMainObject.GetTotalEntropy()
cluster_list.update({i+1:(lst,purity,entropy)})
return cluster_list
def get_cluster_list_at_level(self,level_c):
#for c in self.cluster_dict:
# print str(c) + " " + str(self.cluster_dict[c])
level = level_c
level = len(self.cluster_dict)-(level-1)
docs = list()
#cut at a particular level
clusters_at_level = list()
if not isinstance(self.cluster_dict[level][0],list):
#clusters_at_level.append(self.cluster_dict[level])
for x in self.cluster_dict[level]:
clusters_at_level.append([x])
docs.extend(self.cluster_dict[level])
else:
clusters_at_level.extend(self.cluster_dict[level])
print self._get_docs_in_cluster(self.cluster_list)
for i in range(level-1,0,-1):
#get all docs at that particular level
docs = self._get_docs_in_cluster(clusters_at_level)
#search clusters on levels below
#if docs in them are not in docs
#add those clusters in clusters
new_clust = self.cluster_dict[i+1]
docs_in_new_clust = self._get_docs_in_cluster(new_clust)
for doc in docs_in_new_clust:
if doc not in docs:
clusters_at_level.append(new_clust)
docs.extend(docs_in_new_clust)
all_docs = self._get_docs_in_cluster(self.cluster_list)
for doc in all_docs:
if doc not in docs:
docs.append(doc)
clusters_at_level.append([doc])
print ""
lst = []
cluster_num = 0
for cluster in clusters_at_level:
cluster_num += 1
documents = self._get_docs_in_cluster(cluster)
for doc in documents:
name = self.unordered_filelist[int(doc)]
cls = name[:rfind(name, "/")]
cls = cls[rfind(cls, "/")+1:]
name = name[rfind(name, "/")+1:]
lst.append((name,cluster_num,cls))
return lst
def _is_value_in_cluster(self, cluster, value):
''' function checks whether the current document has already been selected for the cluster set. '''
if value in cluster:
return True
else:
return any(self._is_value_in_cluster(element,value) for element in cluster if isinstance(element, list))
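# For example (hypothetical nested cluster): _is_value_in_cluster([[0, 1], [2]], 2)
# returns True because the recursion descends into the inner lists, while
# _is_value_in_cluster([[0, 1], [2]], 3) returns False.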
def _get_docs_in_cluster(self, cluster):
docs = list()
reg_ex = str(cluster)
i = 0
while i in range(len(reg_ex)):
if reg_ex[i] in digits:
index = ""
while reg_ex[i] in digits:
index += reg_ex[i]
i+=1
docs.append(int(index))
else: i+=1
return docs
|
|
# Copyright 2020 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti db migrator."""
from __future__ import print_function
from builtins import object
import os
import sys
# Importing migrate.changeset adds some new methods to existing SQLAlchemy
# objects but we will not be calling the library directly.
import migrate.changeset # noqa: F401, pylint: disable=unused-import
from sqlalchemy.exc import OperationalError
from google.cloud.forseti.common.util import logger
import google.cloud.forseti.services.scanner.dao as scanner_dao
import google.cloud.forseti.services.inventory.storage as inventory_dao
import google.cloud.forseti.services.dao as general_dao
DB_NAME = os.environ.get('FORSETI_DB_NAME', 'forseti_security')
DB_USER = os.environ.get('SQL_DB_USER', '')
DB_PASSWORD = os.environ.get('SQL_DB_PASSWORD', '')
DEFAULT_DB_CONN_STR = (f'mysql+pymysql://{DB_USER}:{DB_PASSWORD}@'
f'127.0.0.1:3306/{DB_NAME}')
LOGGER = logger.get_logger(__name__)
class ColumnAction(object):
"""Column action class."""
DROP = 'DROP'
CREATE = 'CREATE'
ALTER = 'ALTER'
def alter_column(table, old_column, new_column):
"""Alter Column.
Args:
table (sqlalchemy.schema.Table): The sql alchemy table object.
old_column (sqlalchemy.schema.Column): The sql alchemy column object,
this is the column to be modified.
new_column (sqlalchemy.schema.Column): The sql alchemy column object,
this is the column to update to.
"""
LOGGER.info(f'Attempting to alter column {table.name}.{old_column.name}.')
# bind the old column with the corresponding table.
old_column.table = table
old_column.alter(name=new_column.name,
type=new_column.type,
nullable=new_column.nullable)
def create_column(table, column):
"""Create Column.
Args:
table (sqlalchemy.schema.Table): The sql alchemy table object.
column (sqlalchemy.schema.Column): The sql alchemy column object.
"""
if column.name not in table.columns:
LOGGER.info(f'Attempting to create column {table.name}.{column.name}')
column.create(table, populate_default=True)
def drop_column(table, column):
"""Create Column.
Args:
table (sqlalchemy.schema.Table): The sql alchemy table object.
column (sqlalchemy.schema.Column): The sql alchemy column object.
"""
LOGGER.info(f'Attempting to drop column {table.name}.{column.name}')
column.drop(table)
COLUMN_ACTION_MAPPING = {ColumnAction.DROP: drop_column,
ColumnAction.CREATE: create_column,
ColumnAction.ALTER: alter_column}
def migrate_schema(base, dao_classes):
"""Migrate database schema.
Args:
base (Base): Declarative base.
dao_classes (list): A list of dao classes.
"""
# Find all the Table objects for each of the classes.
# The format of tables is: {table_name: Table object}.
tables = base.metadata.tables
schema_update_actions_method = 'get_schema_update_actions'
for dao_class in dao_classes:
get_schema_update_actions = getattr(dao_class,
schema_update_actions_method,
None)
if not callable(get_schema_update_actions):
LOGGER.info(f'Table {dao_class.__tablename__} has not implemented '
f'the get_schema_update_actions method.')
continue
if dao_class.__tablename__ not in tables:
LOGGER.info(f'Table {dao_class.__tablename__} not found in '
f'existing database tables.')
continue
LOGGER.info('Updating table %s', dao_class.__tablename__)
table = tables.get(dao_class.__tablename__)
schema_update_actions = get_schema_update_actions()
for column_action, columns in schema_update_actions.items():
if column_action in [ColumnAction.CREATE, ColumnAction.DROP]:
_create_or_drop_columns(column_action, columns, table)
elif column_action in [ColumnAction.ALTER]:
_alter_columns(column_action, columns, table)
else:
LOGGER.warning('Unknown column action: %s', column_action)
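# Illustrative sketch (hypothetical dao class, not part of Forseti): the shape
# migrate_schema() expects get_schema_update_actions() to return, based on how
# _create_or_drop_columns() and _alter_columns() consume it:
#
#     from sqlalchemy import Column, String, Text
#
#     class SomeDao(scanner_dao.BASE):
#         __tablename__ = 'some_table'
#         ...
#         @classmethod
#         def get_schema_update_actions(cls):
#             return {
#                 ColumnAction.CREATE: [Column('new_col', Text())],
#                 ColumnAction.DROP: [Column('obsolete_col', String(256))],
#                 ColumnAction.ALTER: {
#                     Column('old_name', String(256)):
#                         Column('new_name', String(512), nullable=True),
#                 },
#             }
#
# CREATE and DROP map to lists of columns; ALTER maps old columns to the new
# column definitions they should be renamed/retyped to.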
def _alter_columns(column_action, columns, table):
"""Alter columns.
Args:
column_action (str): Column Action.
columns (dict): A dictionary of old_column: new_column.
table (sqlalchemy.schema.Table): The sql alchemy table object.
"""
column_action = column_action.upper()
for old_column, new_column in columns.items():
try:
COLUMN_ACTION_MAPPING.get(column_action)(table,
old_column,
new_column)
except OperationalError:
LOGGER.info('Failed to update db schema, table=%s',
table.name)
except Exception: # pylint: disable=broad-except
LOGGER.exception(
'Unexpected error happened when attempting '
'to update database schema, table: %s',
table.name)
def _create_or_drop_columns(column_action, columns, table):
"""Create or drop columns.
Args:
column_action (str): Column Action.
columns (list): A list of columns.
table (sqlalchemy.schema.Table): The sql alchemy table object.
"""
column_action = column_action.upper()
for column in columns:
try:
COLUMN_ACTION_MAPPING.get(column_action)(table,
column)
except OperationalError:
LOGGER.info('Failed to update db schema, table=%s',
table.name)
except Exception: # pylint: disable=broad-except
LOGGER.exception(
'Unexpected error happened when attempting '
'to update database schema, table: %s',
table.name)
def _find_subclasses(cls):
"""Find all the subclasses of a class.
Args:
cls (class): The parent class.
Returns:
list: Subclasses of the given parent class.
"""
results = []
for subclass in cls.__subclasses__():
results.append(subclass)
return results
if __name__ == '__main__':
# If the DB connection string is passed in, use that, otherwise
# fall back to the default DB connection string.
print(sys.argv)
DB_CONN_STR = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_DB_CONN_STR
SQL_ENGINE = general_dao.create_engine(DB_CONN_STR,
pool_recycle=3600)
# Create tables if not exists.
inventory_dao.initialize(SQL_ENGINE)
scanner_dao.initialize(SQL_ENGINE)
# Find all the child classes inherited from declarative base class.
SCANNER_DAO_CLASSES = _find_subclasses(scanner_dao.BASE)
INVENTORY_DAO_CLASSES = _find_subclasses(inventory_dao.BASE)
DECLARATIVE_BASE_MAPPING = {
scanner_dao.BASE: SCANNER_DAO_CLASSES,
inventory_dao.BASE: INVENTORY_DAO_CLASSES}
for declarative_base, classes in DECLARATIVE_BASE_MAPPING.items():
declarative_base.metadata.bind = SQL_ENGINE
migrate_schema(declarative_base, classes)
|
|
"""Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE, ASCII
from re import escape as re_escape
from datetime import date as datetime_date
try:
from _thread import allocate_lock as _thread_allocate_lock
except ImportError:
from _dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
The locale language is set at the offset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
Only other possible issue is if someone changed the timezone and did
not call time.tzset(). That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
def __pad(self, seq, front):
# Add '' to seq to either the front (is True), else the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (1, 22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
# 2005-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == time.tzname[1]
# and time.daylight; handle that in strptime .
try:
time.tzset()
except AttributeError:
pass
no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
if time.daylight:
has_saving = frozenset([time.tzname[1].lower()])
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super()
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P<f>[0-9]{1,6})",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
prevents the possibility of a match occurring for a value that is also
a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile(r'\s+')
format = whitespace_replacement.sub(r'\\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE | ASCII)
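# For example, TimeRE().pattern('%Y-%m-%d') expands (using the entries above)
# to roughly
#
#     (?P<Y>\d\d\d\d)-(?P<m>1[0-2]|0[1-9]|[1-9])-(?P<d>3[0-1]|[1-2]\d|...)
#
# which compile() then turns into a case-insensitive, ASCII-only regex object.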
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
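# Worked example (the %U case, week_starts_Mon=False): year=1999,
# week_of_year=1, day_of_week=6 (Sunday in datetime convention).
# Jan 1 1999 was a Friday (weekday 4), so first_weekday becomes (4+1)%7 = 5
# and day_of_week becomes (6+1)%7 = 0; week_0_length = (7-5)%7 = 2 and the
# result is 1 + 2 + 0 = 3, the Julian day of Sunday 1999-01-03, the first
# Sunday of that year.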
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the format string."""
global _TimeRE_cache, _regex_cache
with _cache_lock:
if _getlang() != _TimeRE_cache.locale_time.lang:
_TimeRE_cache = TimeRE()
_regex_cache.clear()
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
# KeyError raised when a bad format is found; can be specified as
# \\, in which case it was a stray % but with a space after it
except KeyError as err:
bad_directive = err.args[0]
if bad_directive == "\\":
bad_directive = "%"
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format))
# IndexError only occurs when the format string is "%"
except IndexError:
raise ValueError("stray %% in format '%s'" % format)
_regex_cache[format] = format_regex
found = format_regex.match(data_string)
if not found:
raise ValueError("time data %r does not match format %r" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = 1900
month = day = 1
hour = minute = second = fraction = 0
tz = -1
# Default to -1 to signify that values not known; not critical to have,
# though
week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to -1 so as to signal need to calculate
# values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.keys():
# Directives not explicitly handled below:
# c, x, X
# handled by making out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
# Open Group specification for strptime() states that a %y
#value in the range of [00, 68] is in the century 2000, while
#[69,99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'f':
s = found_dict['f']
# Pad to always return microseconds.
s += "0" * (6 - len(s))
fraction = int(s)
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday.
week_of_year_start = 6
else:
# W starts week on Monday.
week_of_year_start = 0
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian == -1 and week_of_year != -1 and weekday != -1:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the week
# calculation.
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
return (time.struct_time((year, month, day,
hour, minute, second,
weekday, julian, tz)), fraction)
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
return _strptime(data_string, format)[0]
|
|
# Copyright 2014 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines classes, that describe C++ types
"""
from . import compilers
from . import algorithms_cache
class type_t(object):
"""base class for all types"""
def __init__(self):
object.__init__(self)
self.cache = algorithms_cache.type_algs_cache_t()
self._byte_size = 0
self._byte_align = 0
self.compiler = None
def __str__(self):
res = self.decl_string
if res[:2] == "::":
res = res[2:]
return res
def __eq__(self, other):
if not isinstance(other, type_t):
return False
return self.decl_string == other.decl_string
def __hash__(self):
return hash(self.decl_string)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, self.__class__):
return self.__class__.__name__ < other.__class__.__name__
return self.decl_string < other.decl_string
def build_decl_string(self, with_defaults=True):
raise NotImplementedError()
@property
def decl_string(self):
return self.build_decl_string()
@property
def partial_decl_string(self):
return self.build_decl_string(False)
def _clone_impl(self):
raise NotImplementedError()
def clone(self):
"returns new instance of the type"
answer = self._clone_impl()
return answer
@property
def byte_size(self):
"Size of this type in bytes @type: int"
return self._byte_size
@byte_size.setter
def byte_size(self, new_byte_size):
self._byte_size = new_byte_size
@property
def byte_align(self):
"Alignment of this type in bytes @type: int"
return self._byte_align
@byte_align.setter
def byte_align(self, new_byte_align):
self._byte_align = new_byte_align
# There are cases when GCC-XML reports something like this
# <Unimplemented id="_9482" tree_code="188" \
# tree_code_name="template_type_parm" node="0xcc4d5b0"/>
# In this case I will use this as type
class dummy_type_t(type_t):
"""provides :class:`type_t` interface for a string, that defines C++ type.
This class could be very useful in the code generator.
"""
def __init__(self, decl_string):
type_t.__init__(self)
self._decl_string = decl_string
def build_decl_string(self, with_defaults=True):
return self._decl_string
def _clone_impl(self):
return dummy_type_t(self._decl_string)
class unknown_t(type_t):
"type, that represents all C++ types, that could not be parsed by GCC-XML"
def __init__(self):
type_t.__init__(self)
def build_decl_string(self, with_defaults=True):
return '?unknown?'
def _clone_impl(self):
return self
class ellipsis_t(type_t):
"""type, that represents "..." in function definition"""
def __init__(self):
type_t.__init__(self)
def build_decl_string(self, with_defaults=True):
return '...'
def _clone_impl(self):
return self
##########################################################################
# Fundamental types:
class fundamental_t(type_t):
"""base class for all fundamental, build-in types"""
def __init__(self, name):
type_t.__init__(self)
self._name = name
def build_decl_string(self, with_defaults=True):
return self._name
def _clone_impl(self):
return self
class java_fundamental_t(fundamental_t):
"""base class for all JNI defined fundamental types"""
def __init__(self, name):
fundamental_t.__init__(self, name)
class void_t(fundamental_t):
"""represents void type"""
CPPNAME = 'void'
def __init__(self):
fundamental_t.__init__(self, void_t.CPPNAME)
class char_t(fundamental_t):
"""represents char type"""
CPPNAME = 'char'
def __init__(self):
fundamental_t.__init__(self, char_t.CPPNAME)
class signed_char_t(fundamental_t):
"""represents signed char type"""
CPPNAME = 'signed char'
def __init__(self):
fundamental_t.__init__(self, signed_char_t.CPPNAME)
class unsigned_char_t(fundamental_t):
"""represents unsigned char type"""
CPPNAME = 'unsigned char'
def __init__(self):
fundamental_t.__init__(self, unsigned_char_t.CPPNAME)
class wchar_t(fundamental_t):
"""represents wchar_t type"""
CPPNAME = 'wchar_t'
def __init__(self):
fundamental_t.__init__(self, wchar_t.CPPNAME)
class short_int_t(fundamental_t):
"""represents short int type"""
CPPNAME = 'short int'
def __init__(self):
fundamental_t.__init__(self, short_int_t.CPPNAME)
class short_unsigned_int_t(fundamental_t):
"""represents short unsigned int type"""
CPPNAME = 'short unsigned int'
def __init__(self):
fundamental_t.__init__(self, short_unsigned_int_t.CPPNAME)
class bool_t(fundamental_t):
"""represents bool type"""
CPPNAME = 'bool'
def __init__(self):
fundamental_t.__init__(self, bool_t.CPPNAME)
class int_t(fundamental_t):
"""represents int type"""
CPPNAME = 'int'
def __init__(self):
fundamental_t.__init__(self, int_t.CPPNAME)
class unsigned_int_t(fundamental_t):
"""represents unsigned int type"""
CPPNAME = 'unsigned int'
def __init__(self):
fundamental_t.__init__(self, unsigned_int_t.CPPNAME)
class long_int_t(fundamental_t):
"""represents long int type"""
CPPNAME = 'long int'
def __init__(self):
fundamental_t.__init__(self, long_int_t.CPPNAME)
class long_unsigned_int_t(fundamental_t):
"""represents long unsigned int type"""
CPPNAME = 'long unsigned int'
def __init__(self):
fundamental_t.__init__(self, long_unsigned_int_t.CPPNAME)
class long_long_int_t(fundamental_t):
"""represents long long int type"""
CPPNAME = 'long long int'
def __init__(self):
fundamental_t.__init__(self, long_long_int_t.CPPNAME)
class long_long_unsigned_int_t(fundamental_t):
"""represents long long unsigned int type"""
CPPNAME = 'long long unsigned int'
def __init__(self):
fundamental_t.__init__(self, long_long_unsigned_int_t.CPPNAME)
class float_t(fundamental_t):
"""represents float type"""
CPPNAME = 'float'
def __init__(self):
fundamental_t.__init__(self, float_t.CPPNAME)
class double_t(fundamental_t):
"""represents double type"""
CPPNAME = 'double'
def __init__(self):
fundamental_t.__init__(self, double_t.CPPNAME)
class long_double_t(fundamental_t):
"""represents long double type"""
CPPNAME = 'long double'
def __init__(self):
fundamental_t.__init__(self, long_double_t.CPPNAME)
class complex_double_t(fundamental_t):
"""represents complex double type"""
CPPNAME = 'complex double'
def __init__(self):
fundamental_t.__init__(self, complex_double_t.CPPNAME)
class complex_long_double_t(fundamental_t):
"""represents complex long double type"""
CPPNAME = 'complex long double'
def __init__(self):
fundamental_t.__init__(self, complex_long_double_t.CPPNAME)
class complex_float_t(fundamental_t):
"""represents complex float type"""
CPPNAME = 'complex float'
def __init__(self):
fundamental_t.__init__(self, complex_float_t.CPPNAME)
class jbyte_t(java_fundamental_t):
"""represents jbyte type"""
JNAME = 'jbyte'
def __init__(self):
java_fundamental_t.__init__(self, jbyte_t.JNAME)
class jshort_t(java_fundamental_t):
"""represents jshort type"""
JNAME = 'jshort'
def __init__(self):
java_fundamental_t.__init__(self, jshort_t.JNAME)
class jint_t(java_fundamental_t):
"""represents jint type"""
JNAME = 'jint'
def __init__(self):
java_fundamental_t.__init__(self, jint_t.JNAME)
class jlong_t(java_fundamental_t):
"""represents jlong type"""
JNAME = 'jlong'
def __init__(self):
java_fundamental_t.__init__(self, jlong_t.JNAME)
class jfloat_t(java_fundamental_t):
"""represents jfloat type"""
JNAME = 'jfloat'
def __init__(self):
java_fundamental_t.__init__(self, jfloat_t.JNAME)
class jdouble_t(java_fundamental_t):
"""represents jdouble type"""
JNAME = 'jdouble'
def __init__(self):
java_fundamental_t.__init__(self, jdouble_t.JNAME)
class jchar_t(java_fundamental_t):
"""represents jchar type"""
JNAME = 'jchar'
def __init__(self):
java_fundamental_t.__init__(self, jchar_t.JNAME)
class jboolean_t(java_fundamental_t):
"""represents jboolean type"""
JNAME = 'jboolean'
def __init__(self):
java_fundamental_t.__init__(self, jboolean_t.JNAME)
class int128_t(fundamental_t):
"""represents __int128_t type"""
CPPNAME = '__int128_t'
def __init__(self):
fundamental_t.__init__(self, int128_t.CPPNAME)
class uint128_t(fundamental_t):
"""represents __uint128_t type"""
CPPNAME = '__uint128_t'
def __init__(self):
fundamental_t.__init__(self, uint128_t.CPPNAME)
FUNDAMENTAL_TYPES = {
# adding java types
void_t.CPPNAME: void_t(),
char_t.CPPNAME: char_t(),
signed_char_t.CPPNAME: signed_char_t(),
unsigned_char_t.CPPNAME: unsigned_char_t(),
wchar_t.CPPNAME: wchar_t(),
short_int_t.CPPNAME: short_int_t(),
'signed ' + short_int_t.CPPNAME: short_int_t(),
short_unsigned_int_t.CPPNAME: short_unsigned_int_t(),
bool_t.CPPNAME: bool_t(),
int_t.CPPNAME: int_t(),
'signed ' + int_t.CPPNAME: int_t(),
unsigned_int_t.CPPNAME: unsigned_int_t(),
long_int_t.CPPNAME: long_int_t(),
long_unsigned_int_t.CPPNAME: long_unsigned_int_t(),
long_long_int_t.CPPNAME: long_long_int_t(),
long_long_unsigned_int_t.CPPNAME: long_long_unsigned_int_t(),
int128_t.CPPNAME: int128_t(),
uint128_t.CPPNAME: uint128_t(),
float_t.CPPNAME: float_t(),
double_t.CPPNAME: double_t(),
long_double_t.CPPNAME: long_double_t(),
complex_long_double_t.CPPNAME: complex_long_double_t(),
complex_double_t.CPPNAME: complex_double_t(),
complex_float_t.CPPNAME: complex_float_t(),
jbyte_t.JNAME: jbyte_t(),
jshort_t.JNAME: jshort_t(),
jint_t.JNAME: jint_t(),
jlong_t.JNAME: jlong_t(),
jfloat_t.JNAME: jfloat_t(),
jdouble_t.JNAME: jdouble_t(),
jchar_t.JNAME: jchar_t(),
jboolean_t.JNAME: jboolean_t(),
'__java_byte': jbyte_t(),
'__java_short': jshort_t(),
'__java_int': jint_t(),
'__java_long': jlong_t(),
'__java_float': jfloat_t(),
'__java_double': jdouble_t(),
'__java_char': jchar_t(),
'__java_boolean': jboolean_t()
}
"""
maps the name of a fundamental type (and its registered synonyms) to the
instance of the class that describes that type
"""
##########################################################################
# Compound types:
class compound_t(type_t):
"""class that allows to represent compound types like `const int*`"""
def __init__(self, base):
type_t.__init__(self)
self._base = base
@property
def base(self):
"reference to internal/base class"
return self._base
@base.setter
def base(self, new_base):
self._base = new_base
class volatile_t(compound_t):
"""represents `volatile whatever` type"""
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return self.base.build_decl_string(with_defaults) + ' volatile'
def _clone_impl(self):
return volatile_t(self.base.clone())
class restrict_t(compound_t):
"""represents `restrict whatever` type"""
# The restrict keyword can be considered an extension to the strict
# aliasing rule. It allows the programmer to declare that pointers which
# share the same type (or were otherwise validly created) do not alias
    # each other. By using restrict the programmer can declare that any loads
# and stores through the qualified pointer (or through another pointer
# copied either directly or indirectly from the restricted pointer) are
# the only loads and stores to the same address during the lifetime of
# the pointer. In other words, the pointer is not aliased by any pointers
# other than its own copies.
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return '__restrict__ ' + self.base.build_decl_string(with_defaults)
def _clone_impl(self):
return restrict_t(self.base.clone())
class const_t(compound_t):
"""represents `whatever const` type"""
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return self.base.build_decl_string(with_defaults) + ' const'
def _clone_impl(self):
return const_t(self.base.clone())
class pointer_t(compound_t):
"""represents `whatever*` type"""
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return self.base.build_decl_string(with_defaults) + ' *'
def _clone_impl(self):
return pointer_t(self.base.clone())
class reference_t(compound_t):
"""represents `whatever&` type"""
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return self.base.build_decl_string(with_defaults) + ' &'
def _clone_impl(self):
return reference_t(self.base.clone())
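# Illustrative sketch (not part of the original module): compound types wrap a
# base type and append their qualifier to the base declaration string, so
# nesting them reproduces C++ spellings such as "char const *".
def _example_compound_composition():
    ptr_to_const_char = pointer_t(const_t(char_t()))
    assert ptr_to_const_char.decl_string == 'char const *'
    ref_to_int = reference_t(int_t())
    assert ref_to_int.decl_string == 'int &'
    return ptr_to_const_char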
class array_t(compound_t):
"""represents C++ array type"""
SIZE_UNKNOWN = -1
def __init__(self, base, size):
compound_t.__init__(self, base)
self._size = size
@property
def size(self):
"returns array size"
return self._size
@size.setter
def size(self, size):
"""sometimes there is a need to update the size of the array"""
self.cache.reset()
self._size = size
def build_decl_string(self, with_defaults=True):
        # return self.base.build_decl_string(with_defaults) + '[%d]' % self.size
return self.__bds_for_multi_dim_arrays(None, with_defaults)
def __bds_for_multi_dim_arrays(self, parent_dims=None, with_defaults=True):
if parent_dims:
parent_dims.append(self.size)
else:
parent_dims = [self.size]
if isinstance(self.base, array_t):
return self.base.__bds_for_multi_dim_arrays(
parent_dims,
with_defaults)
else:
tmp = []
for s in parent_dims:
tmp.append('[%d]' % s)
return self.base.build_decl_string(with_defaults) + ''.join(tmp)
def _clone_impl(self):
return array_t(self.base.clone(), self.size)
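# Illustrative sketch (not part of the original module): nested array_t
# instances collapse into a single multi-dimensional declaration string, with
# the outermost array contributing the first dimension.
def _example_multi_dim_array():
    arr = array_t(array_t(int_t(), 3), 2)
    assert arr.decl_string == 'int[2][3]'
    return arr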
class calldef_type_t(object):
"""base class for all types that describes "callable" declaration"""
def __init__(self, return_type=None, arguments_types=None):
object.__init__(self)
self._return_type = return_type
if arguments_types is None:
arguments_types = []
self._arguments_types = arguments_types
@property
def return_type(self):
"""reference to :class:`return type <type_t>`"""
return self._return_type
@return_type.setter
def return_type(self, new_return_type):
self._return_type = new_return_type
@property
def arguments_types(self):
"""list of argument :class:`types <type_t>`"""
return self._arguments_types
@arguments_types.setter
def arguments_types(self, new_arguments_types):
self._arguments_types = new_arguments_types
@property
def has_ellipsis(self):
return self.arguments_types and isinstance(
self.arguments_types[-1], ellipsis_t)
class free_function_type_t(type_t, calldef_type_t):
"""describes free function type"""
NAME_TEMPLATE = '%(return_type)s (*)( %(arguments)s )'
TYPEDEF_NAME_TEMPLATE = (
'%(return_type)s ( *%(typedef_name)s )( %(arguments)s )')
def __init__(self, return_type=None, arguments_types=None):
type_t.__init__(self)
calldef_type_t.__init__(self, return_type, arguments_types)
@staticmethod
def create_decl_string(return_type, arguments_types, with_defaults=True):
"""
returns free function type
:param return_type: function return type
:type return_type: :class:`type_t`
:param arguments_types: list of argument :class:`type <type_t>`
:rtype: :class:`free_function_type_t`
"""
f = lambda x: x.build_decl_string(with_defaults)
return free_function_type_t.NAME_TEMPLATE % {
'return_type': return_type.build_decl_string(with_defaults),
'arguments': ','.join(
map(
f,
arguments_types))}
def build_decl_string(self, with_defaults=True):
return self.create_decl_string(
self.return_type,
self.arguments_types,
with_defaults)
def _clone_impl(self):
rt_clone = None
if self.return_type:
rt_clone = self.return_type.clone()
return free_function_type_t(
rt_clone, [
arg.clone() for arg in self.arguments_types])
# TODO: create real typedef
def create_typedef(self, typedef_name, unused=None, with_defaults=True):
"""returns string, that contains valid C++ code, that defines typedef
to function type
:param name: the desired name of typedef
"""
# unused argument simplifies user code
f = lambda x: x.build_decl_string(with_defaults)
return free_function_type_t.TYPEDEF_NAME_TEMPLATE % {
'typedef_name': typedef_name,
'return_type': self.return_type.build_decl_string(with_defaults),
'arguments': ','.join(map(f, self.arguments_types))}
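# Illustrative sketch (not part of the original module): a free function type
# renders as "<return type> (*)( <comma-joined argument types> )".
def _example_free_function_type():
    fn = free_function_type_t(return_type=void_t(),
                              arguments_types=[int_t(), double_t()])
    assert fn.decl_string == 'void (*)( int,double )'
    return fn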
class member_function_type_t(type_t, calldef_type_t):
"""describes member function type"""
NAME_TEMPLATE = (
'%(return_type)s ( %(class)s::* )( %(arguments)s )%(has_const)s')
TYPEDEF_NAME_TEMPLATE = (
'%(return_type)s ( %(class)s::*%(typedef_name)s' +
')( %(arguments)s ) %(has_const)s')
def __init__(
self,
class_inst=None,
return_type=None,
arguments_types=None,
has_const=False):
type_t.__init__(self)
calldef_type_t.__init__(self, return_type, arguments_types)
self._has_const = has_const
self._class_inst = class_inst
@property
def has_const(self):
"""describes, whether function has const modifier"""
return self._has_const
@has_const.setter
def has_const(self, has_const):
self._has_const = has_const
@property
def class_inst(self):
"""reference to parent :class:`class <declaration_t>`"""
return self._class_inst
@class_inst.setter
def class_inst(self, class_inst):
self._class_inst = class_inst
# TODO: create real typedef
def create_typedef(
self,
typedef_name,
class_alias=None,
with_defaults=True):
"""creates typedef to the function type
:param typedef_name: desired type name
:rtype: string
"""
has_const_str = ''
if self.has_const:
has_const_str = 'const'
if None is class_alias:
if with_defaults:
class_alias = self.class_inst.decl_string
else:
class_alias = self.class_inst.partial_decl_string
f = lambda x: x.build_decl_string(with_defaults)
return member_function_type_t.TYPEDEF_NAME_TEMPLATE % {
'typedef_name': typedef_name,
'return_type': self.return_type.build_decl_string(with_defaults),
'class': class_alias,
'arguments': ','.join(
map(
f,
self.arguments_types)),
'has_const': has_const_str}
def create(self):
return self.build_decl_string(
self.return_type,
self.class_inst.decl_string,
self.arguments_types,
self.has_const)
@staticmethod
def create_decl_string(
return_type,
class_decl_string,
arguments_types,
has_const,
with_defaults=True):
has_const_str = ''
if has_const:
has_const_str = 'const'
return_type_decl_string = ''
if return_type:
return_type_decl_string = return_type.build_decl_string(
with_defaults)
f = lambda x: x.build_decl_string(with_defaults)
return member_function_type_t.NAME_TEMPLATE % {
'return_type': return_type_decl_string,
'class': class_decl_string,
'arguments': ','.join(
map(
f,
arguments_types)),
'has_const': has_const_str}
def build_decl_string(self, with_defaults=True):
return self.create_decl_string(
self.return_type,
self.class_inst.decl_string,
self.arguments_types,
self.has_const,
with_defaults)
def _clone_impl(self):
rt_clone = None
if self.return_type:
rt_clone = self.return_type.clone()
return member_function_type_t(
self.class_inst, rt_clone, [
arg.clone() for arg in self.arguments_types], self.has_const)
class member_variable_type_t(compound_t):
"""describes member variable type"""
NAME_TEMPLATE = '%(type)s ( %(class)s::* )'
def __init__(self, class_inst=None, variable_type=None):
compound_t.__init__(self, class_inst)
self._mv_type = variable_type
@property
def variable_type(self):
"""describes member variable :class:`type <type_t>`"""
return self._mv_type
@variable_type.setter
def variable_type(self, new_type):
self._mv_type = new_type
def build_decl_string(self, with_defaults=True):
return self.NAME_TEMPLATE % {
'type': self.variable_type.build_decl_string(with_defaults),
'class': self.base.build_decl_string(with_defaults)}
def _clone_impl(self):
return member_variable_type_t(
class_inst=self.base,
variable_type=self.variable_type.clone())
##########################################################################
# declarated types:
class declarated_t(type_t):
"""class that binds between to hierarchies: :class:`type_t`
and :class:`declaration_t`"""
def __init__(self, declaration):
type_t.__init__(self)
self._declaration = declaration
@property
def declaration(self):
"reference to :class:`declaration_t`"
return self._declaration
@declaration.setter
def declaration(self, new_declaration):
self._declaration = new_declaration
def build_decl_string(self, with_defaults=True):
if with_defaults:
return self._declaration.decl_string
else:
return self._declaration.partial_decl_string
def _clone_impl(self):
return declarated_t(self._declaration)
@property
def byte_size(self):
"Size of this type in bytes @type: int"
return self._declaration.byte_size
@property
def byte_align(self):
"alignment of this type in bytes @type: int"
return self._declaration.byte_align
class type_qualifiers_t(object):
"""contains additional information about type: mutable, static, extern"""
def __init__(self, has_static=False, has_mutable=False):
self._has_static = has_static
self._has_mutable = has_mutable
def __eq__(self, other):
if not isinstance(other, type_qualifiers_t):
return False
return self.has_static == other.has_static \
and self.has_mutable == other.has_mutable
    def __hash__(self):
        # hash on the same fields that __eq__ compares, so equal qualifier
        # sets hash equally
        return hash((self.has_static, self.has_mutable))
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, type_qualifiers_t):
return object.__lt__(self, other)
return self.has_static < other.has_static \
and self.has_mutable < other.has_mutable
@property
def has_static(self):
return self._has_static
@has_static.setter
def has_static(self, has_static):
self._has_static = has_static
@property
def has_extern(self):
"""synonym to static"""
return self.has_static
@has_extern.setter
def has_extern(self, has_extern):
self.has_static = has_extern
@property
def has_mutable(self):
return self._has_mutable
@has_mutable.setter
def has_mutable(self, has_mutable):
self._has_mutable = has_mutable
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import importlib.util
import logging
import math
import os
import sys
import warnings
from collections import defaultdict
from itertools import accumulate
from typing import Callable, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.logging.meters import safe_round
from fairseq.modules import gelu, gelu_accurate
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
logger = logging.getLogger(__name__)
def split_paths(paths: str) -> List[str]:
return paths.split(os.pathsep) if "://" not in paths else paths.split("|")
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
from fairseq import checkpoint_utils
deprecation_warning(
"utils.load_ensemble_for_inference is deprecated. "
"Please use checkpoint_utils.load_model_ensemble instead."
)
return checkpoint_utils.load_model_ensemble(
filenames, arg_overrides=model_arg_overrides, task=task
)
def apply_to_sample(f, sample):
if hasattr(sample, '__len__') and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample):
def _move_to_cuda(tensor):
return tensor.cuda()
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
def get_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
result = module.set_incremental_state(incremental_state, key, value)
if result is not None:
incremental_state = result
return incremental_state
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, "r") as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor(
[float(weight) for weight in pieces[1:]]
)
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return " ".join(hypo_tokens)
def post_process_prediction(
hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None
):
hypo_str = tgt_dict.string(hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore)
if align_dict is not None:
hypo_str = replace_unk(
hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
)
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
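# Illustrative sketch (not part of the original fairseq module): with
# padding_idx=1, non-padding symbols are numbered 2, 3, ... left to right,
# while padding positions keep the padding index itself.
def _example_make_positions():
    tokens = torch.tensor([[1, 5, 6], [1, 1, 7]])
    positions = make_positions(tokens, padding_idx=1)
    # expected: [[1, 2, 3], [1, 1, 2]]
    return positions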
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, "buf"):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
buffered_arange.buf.resize_(max)
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(
src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
buffered = torch.empty(0).long()
if max_len > 0:
torch.arange(max_len, out=buffered)
range = buffered.type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
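# Illustrative sketch (not part of the original fairseq module): move left
# padding to the right so that real tokens start at column 0 of every row.
def _example_convert_padding_direction():
    pad = 0
    left_padded = torch.tensor([[0, 0, 4, 5], [0, 6, 7, 8]])
    right_padded = convert_padding_direction(left_padded, pad, left_to_right=True)
    # expected: [[4, 5, 0, 0], [6, 7, 8, 0]]
    return right_padded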
def item(tensor):
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def multi_tensor_total_norm(grads, chunk_size=2048*32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
cur_device_grads = per_device_grads.get(device)
if cur_device_grads is None:
cur_device_grads = []
per_device_grads[device] = cur_device_grads
cur_device_grads.append(grad)
for device in per_device_grads.keys():
cur_device_grads = per_device_grads[device]
if device.type == "cuda":
# TODO(msb) return has_inf
has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
with torch.cuda.device(device):
norm = multi_tensor_l2norm(chunk_size, has_inf, [cur_device_grads], False)
norms.append(norm[0])
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
total_norm = torch.norm(torch.stack(norms))
return total_norm
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.)
else:
return torch.tensor(0.)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
total_norm = torch.norm(
torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in grads])
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads:
g.mul_(clip_coef)
return total_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
max_positions, arg = _match_types(max_positions, arg)
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
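# Illustrative sketch (not part of the original fairseq module): scalar limits
# are broadcast to match structured limits, and the element-wise minimum wins.
def _example_resolve_max_positions():
    resolved = resolve_max_positions(1024, (256, 512))
    # expected: (256, 512), i.e. min(1024, 256) and min(1024, 512)
    return resolved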
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path):
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
if loss is None:
return 0.
try:
return safe_round(base ** loss, round)
except OverflowError:
return float('inf')
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
""" Returns the activation function corresponding to `activation` """
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
deprecation_warning(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"gelu_fast", # deprecated
"gelu_accurate",
"tanh",
"linear",
]
@contextlib.contextmanager
def eval(model):
is_training = model.training
model.eval()
yield
model.train(is_training)
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def set_torch_seed(seed):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
assert isinstance(seed, int)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
@contextlib.contextmanager
def with_torch_seed(seed):
assert isinstance(seed, int)
rng_state = torch.get_rng_state()
cuda_rng_state = torch.cuda.get_rng_state()
set_torch_seed(seed)
yield
torch.set_rng_state(rng_state)
torch.cuda.set_rng_state(cuda_rng_state)
def parse_alignment(line):
"""
    Parses a single line from the alignment file.
Args:
line (str): String containing the alignment of the format:
<src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
<src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.
Returns:
torch.IntTensor: packed alignments of shape (2 * m).
"""
alignments = line.strip().split()
parsed_alignment = torch.IntTensor(2 * len(alignments))
for idx, alignment in enumerate(alignments):
src_idx, tgt_idx = alignment.split("-")
parsed_alignment[2 * idx] = int(src_idx)
parsed_alignment[2 * idx + 1] = int(tgt_idx)
return parsed_alignment
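# Illustrative sketch (not part of the original fairseq module): the line
# "0-0 1-2" packs into the flat IntTensor [0, 0, 1, 2], i.e. src/tgt index
# pairs laid out consecutively.
def _example_parse_alignment():
    packed = parse_alignment("0-0 1-2")
    # packed.tolist() == [0, 0, 1, 2]
    return packed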
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = ((tgt_sent != pad) & (tgt_sent != eos)).nonzero().squeeze(dim=-1)
src_invalid = ((src_sent == pad) | (src_sent == eos)).nonzero().squeeze(dim=-1)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float("-inf")
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append(
(
src_token_to_word[src_idx.item()] - 1,
tgt_token_to_word[tgt_idx.item()] - 1,
)
)
return alignment
def new_arange(x, *size):
"""
Return a Tensor of `size` filled with a range function on the device of x.
    If size is empty, use the size of the variable x.
"""
if len(size) == 0:
size = x.size()
return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
def get_tpu_device(args):
import torch_xla.core.xla_model as xm
return xm.xla_device()
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
        Given a list of CudaEnvironments, pretty print them
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
logger.info(first_line)
for r, env in enumerate(cuda_env_list):
logger.info(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
logger.info(first_line)
|
|
"""
kombu.utils
===========
Internal utilities.
"""
from __future__ import absolute_import, print_function
import importlib
import random
import sys
from contextlib import contextmanager
from itertools import count, repeat
from time import sleep
from uuid import UUID, uuid4 as _uuid4, _uuid_generate_random
from kombu.five import items, reraise, string_t
from .encoding import default_encode, safe_repr as _safe_repr
try:
import ctypes
except:
ctypes = None # noqa
__all__ = ['EqualityDict', 'say', 'uuid', 'kwdict', 'maybe_list',
'fxrange', 'fxrangemax', 'retry_over_time',
'emergency_dump_state', 'cached_property',
'reprkwargs', 'reprcall', 'nested']
def symbol_by_name(name, aliases={}, imp=None, package=None,
sep='.', default=None, **kwargs):
"""Get symbol by qualified name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
or using ':' to separate module and symbol::
celery.concurrency.processes:TaskPool
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> symbol_by_name('celery.concurrency.processes.TaskPool')
<class 'celery.concurrency.processes.TaskPool'>
>>> symbol_by_name('default', {
... 'default': 'celery.concurrency.processes.TaskPool'})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> symbol_by_name(TaskPool) is TaskPool
True
"""
if imp is None:
imp = importlib.import_module
if not isinstance(name, string_t):
return name # already a class
name = aliases.get(name) or name
sep = ':' if ':' in name else sep
module_name, _, cls_name = name.rpartition(sep)
if not module_name:
cls_name, module_name = None, package if package else cls_name
try:
try:
module = imp(module_name, package=package, **kwargs)
except ValueError as exc:
reraise(ValueError,
ValueError("Couldn't import {0!r}: {1}".format(name, exc)),
sys.exc_info()[2])
return getattr(module, cls_name) if cls_name else module
except (ImportError, AttributeError):
if default is None:
raise
return default
def eqhash(o):
try:
return o.__eqhash__()
except AttributeError:
return hash(o)
class EqualityDict(dict):
def __getitem__(self, key):
h = eqhash(key)
if h not in self:
return self.__missing__(key)
return dict.__getitem__(self, h)
def __setitem__(self, key, value):
return dict.__setitem__(self, eqhash(key), value)
def __delitem__(self, key):
return dict.__delitem__(self, eqhash(key))
def say(m, *fargs, **fkwargs):
print(str(m).format(*fargs, **fkwargs), file=sys.stderr)
def uuid4():
# Workaround for http://bugs.python.org/issue4607
if ctypes and _uuid_generate_random: # pragma: no cover
buffer = ctypes.create_string_buffer(16)
_uuid_generate_random(buffer)
return UUID(bytes=buffer.raw)
return _uuid4()
def uuid():
"""Generate a unique id, having - hopefully - a very small chance of
collision.
For now this is provided by :func:`uuid.uuid4`.
"""
return str(uuid4())
gen_unique_id = uuid
if sys.version_info >= (2, 6, 5):
def kwdict(kwargs):
return kwargs
else:
def kwdict(kwargs): # pragma: no cover # noqa
"""Make sure keyword arguments are not in Unicode.
This should be fixed in newer Python versions,
see: http://bugs.python.org/issue4978.
"""
return dict((key.encode('utf-8'), value)
for key, value in items(kwargs))
def maybe_list(v):
if v is None:
return []
if hasattr(v, '__iter__'):
return v
return [v]
def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False):
cur = start * 1.0
while 1:
if not stop or cur <= stop:
yield cur
cur += step
else:
if not repeatlast:
break
yield cur - step
def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0):
sum_, cur = 0, start * 1.0
while 1:
if sum_ >= max:
break
yield cur
if stop:
cur = min(cur + step, stop)
else:
cur += step
sum_ += cur
def retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
max_retries=None, interval_start=2, interval_step=2,
interval_max=30, callback=None):
"""Retry the function over and over until max retries is exceeded.
    For each retry we sleep for a while before we try again; this interval
is increased for every retry until the max seconds is reached.
:param fun: The function to try
:param catch: Exceptions to catch, can be either tuple or a single
exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword errback: Callback for when an exception in ``catch`` is raised.
The callback must take two arguments: ``exc`` and ``interval``, where
``exc`` is the exception instance, and ``interval`` is the time in
        seconds to sleep next.
:keyword max_retries: Maximum number of retries before we give up.
If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between
retries.
:keyword interval_step: By how much the interval is increased for each
retry.
:keyword interval_max: Maximum number of seconds to sleep between retries.
"""
retries = 0
interval_range = fxrange(interval_start,
interval_max + interval_start,
interval_step, repeatlast=True)
for retries in count():
try:
return fun(*args, **kwargs)
except catch as exc:
if max_retries is not None and retries >= max_retries:
raise
if callback:
callback()
tts = (errback(exc, interval_range, retries) if errback
else next(interval_range))
if tts:
for i in range(int(tts / interval_step)):
if callback:
callback()
sleep(interval_step)
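# Illustrative sketch (not part of the original kombu module): retry a flaky
# callable, sleeping between attempts according to the growing interval, and
# give up after max_retries by letting the final exception propagate.
def _example_retry_over_time():
    attempts = [0]
    def flaky():
        attempts[0] += 1
        if attempts[0] < 3:
            raise IOError('transient failure')
        return 'ok'
    return retry_over_time(flaky, IOError, max_retries=5,
                           interval_start=1, interval_step=1, interval_max=3)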
def emergency_dump_state(state, open_file=open, dump=None):
from pprint import pformat
from tempfile import mktemp
if dump is None:
import pickle
dump = pickle.dump
persist = mktemp()
say('EMERGENCY DUMP STATE TO FILE -> {0} <-', persist)
fh = open_file(persist, 'w')
try:
try:
dump(state, fh, protocol=0)
except Exception as exc:
say('Cannot pickle state: {0!r}. Fallback to pformat.', exc)
fh.write(default_encode(pformat(state)))
finally:
fh.flush()
fh.close()
return persist
class cached_property(object):
"""Property descriptor that caches the return value
of the get function.
*Examples*
.. code-block:: python
@cached_property
def connection(self):
return Connection()
@connection.setter # Prepares stored value
def connection(self, value):
if value is None:
raise TypeError('Connection must be a connection')
return value
@connection.deleter
def connection(self, value):
# Additional action to do at del(self.attr)
if value is not None:
                print('Connection {0!r} deleted'.format(value))
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.__get = fget
self.__set = fset
self.__del = fdel
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
def __get__(self, obj, type=None):
if obj is None:
return self
try:
return obj.__dict__[self.__name__]
except KeyError:
value = obj.__dict__[self.__name__] = self.__get(obj)
return value
def __set__(self, obj, value):
if obj is None:
return self
if self.__set is not None:
value = self.__set(obj, value)
obj.__dict__[self.__name__] = value
def __delete__(self, obj):
if obj is None:
return self
try:
value = obj.__dict__.pop(self.__name__)
except KeyError:
pass
else:
if self.__del is not None:
self.__del(obj, value)
def setter(self, fset):
return self.__class__(self.__get, fset, self.__del)
def deleter(self, fdel):
return self.__class__(self.__get, self.__set, fdel)
def reprkwargs(kwargs, sep=', ', fmt='{0}={1}'):
return sep.join(fmt.format(k, _safe_repr(v)) for k, v in items(kwargs))
def reprcall(name, args=(), kwargs={}, sep=', '):
return '{0}({1}{2}{3})'.format(
name, sep.join(map(_safe_repr, args or ())),
(args and kwargs) and sep or '',
reprkwargs(kwargs, sep),
)
@contextmanager
def nested(*managers): # pragma: no cover
# flake8: noqa
"""Combine multiple context managers into a single nested
context manager."""
exits = []
vars = []
exc = (None, None, None)
try:
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
reraise(exc[0], exc[1], exc[2])
finally:
del(exc)
def shufflecycle(it):
it = list(it) # don't modify callers list
shuffle = random.shuffle
for _ in repeat(None):
shuffle(it)
yield it[0]
def entrypoints(namespace):
try:
from pkg_resources import iter_entry_points
except ImportError:
return iter([])
return ((ep, ep.load()) for ep in iter_entry_points(namespace))
class ChannelPromise(object):
def __init__(self, contract):
self.__contract__ = contract
def __call__(self):
try:
return self.__value__
except AttributeError:
value = self.__value__ = self.__contract__()
return value
def __repr__(self):
return '<promise: %r>' % (self(), )
def escape_regex(p, white=''):
    # what's up with re.escape? that code must be neglected or something
return ''.join(c if c.isalnum() or c in white
else ('\\000' if c == '\000' else '\\' + c)
for c in p)
|
|
#!/usr/bin/python
"""
Mr Freeze
A simple Python-based backup script to make sure your websites are kept on ice (aka, backed up).
"""
import os
import imp
import time
import glob
import logging
import smtplib
import argparse
import subprocess
logger = logging.getLogger('mr_freeze')
def snapshot(interval, site, settings):
"""
Performs archival of the elements defined in the sites dictionary
:param interval: The frequency that this snapshot should be saved on [hourly, daily, weekly, monthly]
:param site: Key name of a single site in the sites dictionary to run on. If None, run on all sites.
:param settings: The settings module
:return: None
"""
# Build a temporary dictionary containing either the target site, or point to all the sites.
site_list = {site: settings.sites[site]} if site else settings.sites
for (key, site) in site_list.items():
if interval not in site:
logger.info("%s is not configured for %s archival" % (key, interval))
continue
logger.info("starting snapshot of %s" % key)
start_time = time.time()
target_path = os.path.join(site['archive_dir'], interval)
# Find the existing snapshots
dirs = glob.glob(os.path.join(site['archive_dir'], interval) + '*')
logger.debug('List of current snapshots: %s' % dirs)
if len(dirs) >= site[interval]['max_snaps']:
logger.debug("Deleting oldest snapshot: %s" % dirs[-1])
os.system('rm -rf "%s"' % dirs[-1])
del dirs[-1]
# Rotate all the directories down (hourly.3 => hourly.4, hourly.2 => hourly.3, etc)
for x in reversed(range(0, len(dirs))):
src_dir = target_path + '.%d' % x
dst_dir = target_path + '.%d' % (x + 1)
logger.debug('rotating "%s" to "%s"' % (src_dir, dst_dir))
os.system('mv "%s" "%s"' % (src_dir, dst_dir))
# Re-glob the directories after the rotate
dirs = glob.glob(os.path.join(site['archive_dir'], interval) + '*')
# Create the new snapshot directory
        os.system('mkdir "%s.0"' % target_path)
# Archive the source directory using rsync if this isn't the mysql backup
if key != 'mysql':
# Use the last snapshot as the hard-link src, if it exists.
# If it doesn't exist, use the site's src_dir as the hard-link source
link_dest = dirs[0] if len(dirs) else site['src_dir']
rsync_cmd = 'rsync -a --stats -h --delete --link-dest="%s" "%s" "%s.0"' % (link_dest, site['src_dir'], target_path)
logger.info(rsync_cmd)
proc = subprocess.Popen([rsync_cmd], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
logger.info('rsync output: %s' % out)
# Create a database snapshot
if 'sql_dump' in site[interval] and site[interval]['sql_dump']:
# Build the mysql command
mysql_cmd = "mysqldump -u '%s'" % settings.MYSQL_USER
# Make sure the DB is properly locked
mysql_cmd += " --single-transaction"
if settings.MYSQL_PASSWORD:
mysql_cmd += " --password='%s'" % settings.MYSQL_PASSWORD
# Export all DB's or just the site's?
if key == 'mysql':
mysql_cmd += ' --all-databases'
else:
mysql_cmd += " --databases '%s'" % site['db_name']
# gzip the results and plop the file right in the snapshot directory
if key != 'mysql':
sql_dump_file = os.path.join('%s.0' % target_path, '%s.sql.gz' % site['db_name'])
else:
sql_dump_file = os.path.join('%s.0' % target_path, 'all-databases.sql.gz')
mysql_cmd += " | gzip > '%s'" % sql_dump_file
proc = subprocess.Popen([mysql_cmd], stdout=subprocess.PIPE, shell=True)
proc.communicate()
logger.info('mysqldump saved to "%s"' % sql_dump_file)
# Save this for the summary
end_time = time.time() - start_time
site['snapshot_duration'] = end_time
logger.info('snapshot of %s completed in %0.2f seconds' % (key, end_time))
def verify_config(settings):
"""
Verify the configuration data.
:param settings: The settings module
:return: Raises ValueError if a configuration element is missing or not configured correctly
"""
if settings.sites is None:
raise ValueError("'sites' parameter not configured, or is empty.")
if os.path.exists(settings.BASE_ARCHIVE_DIR) is False:
raise ValueError('BASE_ARCHIVE_DIR (%s) does not exist' % settings.BASE_ARCHIVE_DIR)
for (key, site) in settings.sites.items():
# Verify the required keys are present for any configured intervals
for interval in ['hourly', 'daily', 'weekly', 'monthly']:
if interval in site:
if 'max_snaps' not in site[interval]:
raise ValueError('max_snaps not defined for %s: interval-%s' % (key, interval))
# Make sure the database is defined if sql_dump is set for this interval
if 'sql_dump' in site[interval] and site[interval]['sql_dump'] is True and key != 'mysql':
if 'db_name' not in site:
raise ValueError('sql_dump is set for %s and db_name not configured for %s' % (interval, key))
# Verify src_dir exists
if key != 'mysql' and os.path.exists(site['src_dir']) is False:
raise ValueError('%s: src_dir (%s) does not exist' % (key, site['src_dir']))
# If archive_dir doesn't exist, create it
if os.path.exists(site['archive_dir']) is False:
logger.info("%s: archive_dir (%s) does not exist: creating it for you!" % (key, site['archive_dir']))
os.mkdir(site['archive_dir'])
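# Illustrative sketch (not part of the original script) of a settings.py that
# verify_config() would accept; all paths, credentials and addresses below are
# placeholders, and only keys actually read by this script are shown.
#
#   BASE_ARCHIVE_DIR = '/backups'
#   MYSQL_USER = 'backup'
#   MYSQL_PASSWORD = 'secret'
#   SMTP_SERVER = 'smtp.example.com'
#   SMTP_PORT = 587
#   SMTP_LOGIN = 'backup@example.com'
#   SMTP_PASSWORD = 'secret'
#   EMAIL_SOURCE_ADDR = 'backup@example.com'
#   EMAIL_DEST_ADDR = 'admin@example.com'
#   EMAIL_SUBJECT_PREFIX = '[mr-freeze]'
#   EMAIL_NOTIFY = {'hourly': False, 'daily': True}
#   sites = {
#       'mysite': {
#           'src_dir': '/var/www/mysite',
#           'archive_dir': '/backups/mysite',
#           'db_name': 'mysite_db',
#           'daily': {'max_snaps': 7, 'sql_dump': True},
#       },
#       'mysql': {
#           'archive_dir': '/backups/mysql',
#           'daily': {'max_snaps': 7, 'sql_dump': True},
#       },
#   }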
def check_environment():
"""
Check the environment for required packages and software versions
:return: None
"""
logger.info('environment check')
logger.info('rsync version\n%s' % os.popen('rsync --version').read())
logger.info('mysqldump version\n%s' % os.popen('mysqldump --version').read())
def send_email_summary(interval, settings):
"""
Send an email summary of the snapshot
:param interval: The type of interval that was snapped
:param settings: The settings module
:return: None
"""
# Make sure we should even send this email, based on the timing interval
if interval in settings.EMAIL_NOTIFY and settings.EMAIL_NOTIFY[interval] is False:
return
logger.info('sending email summary to %s' % settings.EMAIL_DEST_ADDR)
server = smtplib.SMTP(settings.SMTP_SERVER, settings.SMTP_PORT)
server.starttls()
server.login(settings.SMTP_LOGIN, settings.SMTP_PASSWORD)
# Build the subject
subject = '%s %s backup summary' % (settings.EMAIL_SUBJECT_PREFIX, interval)
# Build a summary
summary = '%-30s %-20s\n' % ('site name', 'duration (seconds)')
for (key, site) in settings.sites.items():
if 'snapshot_duration' in site:
summary += '%-30s %-20.2f\n' % (key, site['snapshot_duration'])
summary += '\n'
msg = """From: <%s>
To: <%s>
Subject: %s
Summary
%s
Log Output
%s
""" % (settings.EMAIL_SOURCE_ADDR, settings.EMAIL_DEST_ADDR, subject, summary, open('last_run.log', 'r').read())
server.sendmail(settings.EMAIL_SOURCE_ADDR, settings.EMAIL_DEST_ADDR, msg)
server.quit()
def main():
"""
Main entry point for freeze.py
:return: None - will sys.exit() on failure
"""
# Parse CLI arguments
parser = argparse.ArgumentParser(description='Mr. Freeze - A backup script')
parser.add_argument('--log_level',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'],
help='The desired logging level', dest='log_level')
parser.add_argument('--verify', help='Only run verification check', default=False, action='store_true')
parser.add_argument('--email', help='Send a summary email', default=False, action='store_true')
parser.add_argument('--settings', help='Path to settings file', dest='settings')
parser.add_argument('--site', dest='site',
help='Only run the snapshot on the specified site')
parser.add_argument('--interval', dest='interval', default='hourly',
choices=['hourly', 'daily', 'weekly', 'monthly'],
help='The time interval for this snapshot')
args = vars(parser.parse_args())
# Import settings
settings_file = args['settings'] if args['settings'] else 'settings.py'
settings = imp.load_source('*', settings_file)
# Set the log level (defaults to INFO)
log_level = logging.INFO
if args['log_level']:
log_level = getattr(logging, args['log_level'], None)
if log_level is None:
print("*** ERROR: Invalid log level (%s) specified" % args['log_level'])
exit(1)
logging.basicConfig(level=log_level)
fh = logging.FileHandler('last_run.log', mode='w')
fh.setLevel(log_level)
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
check_environment()
try:
verify_config(settings=settings)
except ValueError as e:
logger.fatal(e)
exit(1)
if args['verify'] is False:
snapshot(interval=args['interval'], site=args['site'], settings=settings)
if args['email']:
send_email_summary(interval=args['interval'], settings=settings)
if __name__ == '__main__':
main()
|
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Transitions between Scenes"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import pyglet
from pyglet.gl import *
from cocos.actions import *
import cocos.scene as scene
from cocos.director import director
from cocos.layer import ColorLayer
from cocos.sprite import Sprite
__all__ = ['TransitionScene',
'RotoZoomTransition', 'JumpZoomTransition',
'MoveInLTransition', 'MoveInRTransition',
'MoveInBTransition', 'MoveInTTransition',
'SlideInLTransition', 'SlideInRTransition',
'SlideInBTransition', 'SlideInTTransition',
'FlipX3DTransition', 'FlipY3DTransition', 'FlipAngular3DTransition',
'ShuffleTransition',
'TurnOffTilesTransition',
'FadeTRTransition', 'FadeBLTransition',
'FadeUpTransition', 'FadeDownTransition',
'ShrinkGrowTransition',
'CornerMoveTransition',
'EnvelopeTransition',
'SplitRowsTransition', 'SplitColsTransition',
'FadeTransition',
'ZoomTransition', ]
class TransitionScene(scene.Scene):
"""TransitionScene
    A Scene that takes two scenes and makes a transition between them.
    The input scenes are wrapped in envelopes (plain Scenes) that are made
    children of the transition scene.
    Transitions are allowed to modify any parameter of the envelopes, but must
    not modify the input scenes directly; that would corrupt the input scenes
    in the general case.
"""
def __init__(self, dst, duration=1.25, src=None):
"""Initializes the transition
:Parameters:
`dst` : Scene
Incoming scene, the one that remains visible when the transition ends.
`duration` : float
Duration of the transition in seconds. Default: 1.25
`src` : Scene
Outgoing scene. Default: current scene
"""
super(TransitionScene, self).__init__()
if src is None:
src = director.scene
# if the director is already running a transition scene then terminate
# it so we may move on
if isinstance(src, TransitionScene):
tmp = src.in_scene.get('dst')
src.finish()
src = tmp
if src is dst:
raise Exception("Incoming scene must be different from outgoing scene")
envelope = scene.Scene()
envelope.add(dst, name='dst')
self.in_scene = envelope #: envelope with scene that will replace the old one
envelope = scene.Scene()
envelope.add(src, name='src')
self.out_scene = envelope #: envelope with scene that will be replaced
self.duration = duration #: duration in seconds of the transition
if not self.duration:
self.duration = 1.25
self.start()
def start(self):
"""Adds the incoming scene with z=1 and the outgoing scene with z=0"""
self.add(self.in_scene, z=1)
self.add(self.out_scene, z=0)
def finish(self):
"""Called when the time is over.
        Envelopes are discarded and the dst scene becomes the one run by the director.
"""
# devs:
# try to not override this method
        # if you do, try to remain compatible with the recipe TransitionsWithPop
        # if you can't, note in your class docstring that it is not usable for that
        # recipe, and bonus points if you also note in the recipe that your class
        # is not eligible for pop transitions
dst = self.in_scene.get('dst')
src = self.out_scene.get('src')
director.replace(dst)
def hide_out_show_in(self):
"""Hides the outgoing scene and shows the incoming scene"""
self.in_scene.visible = True
self.out_scene.visible = False
def hide_all(self):
"""Hides both the incoming and outgoing scenes"""
self.in_scene.visible = False
self.out_scene.visible = False
def visit(self):
# preserve modelview matrix
glPushMatrix()
super(TransitionScene, self).visit()
glPopMatrix()
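# Usage sketch (illustrative only; `next_scene` is a placeholder for any Scene
# already built by the application):
#
#     from cocos.scenes.transitions import FadeTransition
#     director.replace(FadeTransition(next_scene, duration=2))
#
# Any of the transition classes below can be substituted for FadeTransition.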
class RotoZoomTransition(TransitionScene):
"""Rotate and zoom out the outgoing scene, and then rotate and zoom in the incoming
"""
def __init__(self, *args, **kwargs):
super(RotoZoomTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.001
self.out_scene.scale = 1.0
self.in_scene.transform_anchor = (width // 2, height // 2)
self.out_scene.transform_anchor = (width // 2, height // 2)
rotozoom = (ScaleBy(0.001, duration=self.duration / 2.0) |
Rotate(360 * 2, duration=self.duration / 2.0)) + Delay(self.duration / 2.0)
self.out_scene.do(rotozoom)
self.in_scene.do(Reverse(rotozoom) + CallFunc(self.finish))
class JumpZoomTransition(TransitionScene):
"""Zoom out and jump the outgoing scene, and then jump and zoom in the incoming
"""
def __init__(self, *args, **kwargs):
super(JumpZoomTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.5
self.in_scene.position = (width, 0)
self.in_scene.transform_anchor = (width // 2, height // 2)
self.out_scene.transform_anchor = (width // 2, height // 2)
jump = JumpBy((-width, 0), width // 4, 2, duration=self.duration / 4.0)
scalein = ScaleTo(1, duration=self.duration / 4.0)
scaleout = ScaleTo(0.5, duration=self.duration / 4.0)
jumpzoomout = scaleout + jump
jumpzoomin = jump + scalein
delay = Delay(self.duration / 2.0)
self.out_scene.do(jumpzoomout)
self.in_scene.do(delay + jumpzoomin + CallFunc(self.finish))
class MoveInLTransition(TransitionScene):
"""Move in from to the left the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(MoveInLTransition, self).__init__(*args, **kwargs)
self.init()
a = self.get_action()
self.in_scene.do((Accelerate(a, 0.5)) + CallFunc(self.finish))
def init(self):
width, height = director.get_window_size()
self.in_scene.position = (-width, 0)
def get_action(self):
return MoveTo((0, 0), duration=self.duration)
class MoveInRTransition(MoveInLTransition):
"""Move in from to the right the incoming scene.
"""
def init(self):
width, height = director.get_window_size()
self.in_scene.position = (width, 0)
def get_action(self):
return MoveTo((0, 0), duration=self.duration)
class MoveInTTransition(MoveInLTransition):
"""Move in from to the top the incoming scene.
"""
def init(self):
width, height = director.get_window_size()
self.in_scene.position = (0, height)
def get_action(self):
return MoveTo((0, 0), duration=self.duration)
class MoveInBTransition(MoveInLTransition):
"""Move in from to the bottom the incoming scene.
"""
def init(self):
width, height = director.get_window_size()
self.in_scene.position = (0, -height)
def get_action(self):
return MoveTo((0, 0), duration=self.duration)
class SlideInLTransition(TransitionScene):
"""Slide in the incoming scene from the left border.
"""
def __init__(self, *args, **kwargs):
super(SlideInLTransition, self).__init__(*args, **kwargs)
self.width, self.height = director.get_window_size()
self.init()
move = self.get_action()
self.in_scene.do(Accelerate(move, 0.5))
self.out_scene.do(Accelerate(move, 0.5) + CallFunc(self.finish))
def init(self):
self.in_scene.position = (-self.width, 0)
def get_action(self):
return MoveBy((self.width, 0), duration=self.duration)
class SlideInRTransition(SlideInLTransition):
"""Slide in the incoming scene from the right border.
"""
def init(self):
self.in_scene.position = (self.width, 0)
def get_action(self):
return MoveBy((-self.width, 0), duration=self.duration)
class SlideInTTransition(SlideInLTransition):
"""Slide in the incoming scene from the top border.
"""
def init(self):
self.in_scene.position = (0, self.height)
def get_action(self):
return MoveBy((0, -self.height), duration=self.duration)
class SlideInBTransition(SlideInLTransition):
"""Slide in the incoming scene from the bottom border.
"""
def init(self):
self.in_scene.position = (0, -self.height)
def get_action(self):
return MoveBy((0, self.height), duration=self.duration)
class FlipX3DTransition(TransitionScene):
"""Flips the screen horizontally.
The front face is the outgoing scene and the back face is the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(FlipX3DTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
flip90 = OrbitCamera(angle_x=0, delta_z=90, duration=self.duration / 2.0)
flipback90 = OrbitCamera(angle_x=0, angle_z=90, delta_z=90, duration=self.duration / 2.0)
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc(self.hide_out_show_in) + \
flipback90
self.do(flip +
CallFunc(self.finish) +
StopGrid())
class FlipY3DTransition(TransitionScene):
"""Flips the screen vertically.
The front face is the outgoing scene and the back face is the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(FlipY3DTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
flip90 = OrbitCamera(angle_x=90, delta_z=-90, duration=self.duration / 2.0)
flipback90 = OrbitCamera(angle_x=90, angle_z=90, delta_z=90, duration=self.duration / 2.0)
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc(self.hide_out_show_in) + \
flipback90
self.do(flip +
CallFunc(self.finish) +
StopGrid())
class FlipAngular3DTransition(TransitionScene):
"""Flips the screen half horizontally and half vertically.
The front face is the outgoing scene and the back face is the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(FlipAngular3DTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D(amplitude=0, duration=0, grid=(1, 1), waves=2)
flip90 = OrbitCamera(angle_x=45, delta_z=90, duration=self.duration / 2.0)
flipback90 = OrbitCamera(angle_x=45, angle_z=90, delta_z=90, duration=self.duration / 2.0)
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc(self.hide_out_show_in) + \
flipback90
self.do(flip +
CallFunc(self.finish) +
StopGrid())
class ShuffleTransition(TransitionScene):
"""Shuffle the outgoing scene, and then reorder the tiles with the incoming scene.
"""
def __init__(self, *args, **kwargs):
super(ShuffleTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
aspect = width / height
x, y = int(12*aspect), 12
shuffle = ShuffleTiles(grid=(x, y), duration=self.duration / 2.0, seed=15)
self.in_scene.visible = False
self.do(shuffle +
CallFunc(self.hide_out_show_in) +
Reverse(shuffle) +
CallFunc(self.finish) +
StopGrid())
class ShrinkGrowTransition(TransitionScene):
"""Shrink the outgoing scene while grow the incoming scene
"""
def __init__(self, *args, **kwargs):
super(ShrinkGrowTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.001
self.out_scene.scale = 1
self.in_scene.transform_anchor = (2*width / 3.0, height / 2.0)
self.out_scene.transform_anchor = (width / 3.0, height / 2.0)
scale_out = ScaleTo(0.01, duration=self.duration)
scale_in = ScaleTo(1.0, duration=self.duration)
self.in_scene.do(Accelerate(scale_in, 0.5))
self.out_scene.do(Accelerate(scale_out, 0.5) + CallFunc(self.finish))
class CornerMoveTransition(TransitionScene):
"""Moves the bottom-right corner of the incoming scene to the top-left corner
"""
def __init__(self, *args, **kwargs):
super(CornerMoveTransition, self).__init__(*args, **kwargs)
self.out_scene.do(MoveCornerUp(duration=self.duration) +
CallFunc(self.finish) +
StopGrid())
def start(self):
# don't call super. overriding order
self.add(self.in_scene, z=0)
self.add(self.out_scene, z=1)
class EnvelopeTransition(TransitionScene):
"""From the outgoing scene:
- moves the top-right corner to the center
- moves the bottom-left corner to the center
From the incoming scene:
- performs the reverse action of the outgoing scene
"""
def __init__(self, *args, **kwargs):
super(EnvelopeTransition, self).__init__(*args, **kwargs)
self.in_scene.visible = False
move = QuadMoveBy(delta0=(320, 240), delta1=(-630, 0),
delta2=(-320, -240), delta3=(630, 0),
duration=self.duration / 2.0)
# move = Accelerate(move)
self.do(move +
CallFunc(self.hide_out_show_in) +
Reverse(move) +
CallFunc(self.finish) +
StopGrid())
class FadeTRTransition(TransitionScene):
"""Fade the tiles of the outgoing scene from the left-bottom corner the to top-right corner.
"""
def __init__(self, *args, **kwargs):
super(FadeTRTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
aspect = width / height
x, y = int(12 * aspect), 12
a = self.get_action(x, y)
# a = Accelerate(a)
self.out_scene.do(a +
CallFunc(self.finish) +
StopGrid())
def start(self):
# don't call super. overriding order
self.add(self.in_scene, z=0)
self.add(self.out_scene, z=1)
def get_action(self, x, y):
return FadeOutTRTiles(grid=(x, y), duration=self.duration)
class FadeBLTransition(FadeTRTransition):
"""Fade the tiles of the outgoing scene from the top-right corner to the bottom-left corner.
"""
def get_action(self, x, y):
return FadeOutBLTiles(grid=(x, y), duration=self.duration)
class FadeUpTransition(FadeTRTransition):
"""Fade the tiles of the outgoing scene from the bottom to the top.
"""
def get_action(self, x, y):
return FadeOutUpTiles(grid=(x, y), duration=self.duration)
class FadeDownTransition(FadeTRTransition):
"""Fade the tiles of the outgoing scene from the top to the bottom.
"""
def get_action(self, x, y):
return FadeOutDownTiles(grid=(x, y), duration=self.duration)
class TurnOffTilesTransition(TransitionScene):
"""Turn off the tiles of the outgoing scene in random order
"""
def __init__(self, *args, **kwargs):
super(TurnOffTilesTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
aspect = width / height
x, y = int(12 * aspect), 12
a = TurnOffTiles(grid=(x, y), duration=self.duration)
# a = Accelerate(a)
self.out_scene.do(a +
CallFunc(self.finish) +
StopGrid())
def start(self):
# don't call super. overriding order
self.add(self.in_scene, z=0)
self.add(self.out_scene, z=1)
class FadeTransition(TransitionScene):
"""Fade out the outgoing scene and then fade in the incoming scene.
Optionally supply the color to fade to in-between as an RGB color tuple.
"""
def __init__(self, *args, **kwargs):
color = kwargs.pop('color', (0, 0, 0)) + (0,)
super(FadeTransition, self).__init__(*args, **kwargs)
self.fadelayer = ColorLayer(*color)
self.in_scene.visible = False
self.add(self.fadelayer, z=2)
def on_enter(self):
super(FadeTransition, self).on_enter()
self.fadelayer.do(FadeIn(duration=self.duration / 2.0) +
CallFunc(self.hide_out_show_in) +
FadeOut(duration=self.duration / 2.0) +
CallFunc(self.finish))
def on_exit(self):
super(FadeTransition, self).on_exit()
self.remove(self.fadelayer)
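# Sketch of the optional `color` keyword (illustrative; `next_scene` is a placeholder):
#
#     director.replace(FadeTransition(next_scene, duration=1.5, color=(255, 255, 255)))
#
# fades through white instead of the default black.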
class SplitColsTransition(TransitionScene):
"""Splits the screen in columns.
    The odd columns go upwards while the even columns go downwards.
"""
def __init__(self, *args, **kwargs):
super(SplitColsTransition, self).__init__(*args, **kwargs)
width, height = director.get_window_size()
self.in_scene.visible = False
flip_a = self.get_action()
flip = flip_a + \
CallFunc(self.hide_out_show_in) + \
Reverse(flip_a)
self.do(AccelDeccel(flip) +
CallFunc(self.finish) +
StopGrid())
def get_action(self):
return SplitCols(cols=3, duration=self.duration / 2.0)
class SplitRowsTransition(SplitColsTransition):
"""Splits the screen in rows.
    The odd rows go to the left while the even rows go to the right.
"""
def get_action(self):
return SplitRows(rows=3, duration=self.duration / 2.0)
class ZoomTransition(TransitionScene):
"""Zoom and FadeOut the outgoing scene."""
def __init__(self, *args, **kwargs):
if 'src' in kwargs or len(args) == 3:
raise Exception("ZoomTransition does not accept 'src' parameter.")
super(ZoomTransition, self).__init__(*args, **kwargs)
        # fixme: if the scene was never run and some drawable needs to initialize
        # itself in on_enter, the next line will render badly
self.out_scene.visit()
def start(self):
screensprite = self._create_out_screenshot()
zoom = ScaleBy(2, self.duration) | FadeOut(self.duration)
restore = CallFunc(self.finish)
screensprite.do(zoom + restore)
self.add(screensprite, z=1)
self.add(self.in_scene, z=0)
def finish(self):
# tested with the recipe TransitionsWithPop, works.
dst = self.in_scene.get('dst')
director.replace(dst)
def _create_out_screenshot(self):
# TODO: try to use `pyglet.image.get_buffer_manager().get_color_buffer()`
        # instead of creating a new BufferManager... note that pyglet uses
        # a BufferManager singleton that fails when you change the window
        # size.
buffer = pyglet.image.BufferManager()
image = buffer.get_color_buffer()
width, height = director.window.width, director.window.height
actual_width, actual_height = director.get_window_size()
out = Sprite(image)
out.position = actual_width // 2, actual_height // 2
out.scale = max(actual_width / width, actual_height / height)
return out
|
|
#
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Toaster Implementation
#
# Copyright (C) 2013 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from django.db import models
from django.db.models import F
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
class ToasterSetting(models.Model):
name = models.CharField(max_length=63)
helptext = models.TextField()
value = models.CharField(max_length=255)
class ToasterSettingDefaultLayer(models.Model):
layer_version = models.ForeignKey('Layer_Version')
class ProjectManager(models.Manager):
def create_project(self, name, release):
prj = self.model(name = name, bitbake_version = release.bitbake_version, release = release)
prj.save()
for defaultconf in ToasterSetting.objects.filter(name__startswith="DEFCONF_"):
name = defaultconf.name[8:]
ProjectVariable.objects.create( project = prj,
name = name,
value = defaultconf.value)
for layer in map(lambda x: x.layer, ReleaseDefaultLayer.objects.filter(release = release)):
for branches in Branch.objects.filter(name = release.branch):
for lv in Layer_Version.objects.filter(layer = layer, up_branch = branches ):
ProjectLayer.objects.create( project = prj,
layercommit = lv,
optional = False )
return prj
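    # Usage sketch (names are illustrative): projects must go through this helper,
    # e.g. Project.objects.create_project(name="myproject", release=some_release);
    # plain create()/get_or_create() are disabled below on purpose.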
def create(self, *args, **kwargs):
raise Exception("Invalid call to Project.objects.create. Use Project.objects.create_project() to create a project")
def get_or_create(self, *args, **kwargs):
raise Exception("Invalid call to Project.objects.get_or_create. Use Project.objects.create_project() to create a project")
class Project(models.Model):
name = models.CharField(max_length=100)
short_description = models.CharField(max_length=50, blank=True)
bitbake_version = models.ForeignKey('BitbakeVersion')
release = models.ForeignKey("Release")
created = models.DateTimeField(auto_now_add = True)
updated = models.DateTimeField(auto_now = True)
# This is a horrible hack; since Toaster has no "User" model available when
# running in interactive mode, we can't reference the field here directly
    # Instead, we keep a possibly-null reference to the User id, so as not to force
    # hard links to possibly missing models
user_id = models.IntegerField(null = True)
objects = ProjectManager()
def schedule_build(self):
from bldcontrol.models import BuildRequest, BRTarget, BRLayer, BRVariable, BRBitbake
br = BuildRequest.objects.create(project = self)
BRBitbake.objects.create(req = br,
giturl = self.bitbake_version.giturl,
commit = self.bitbake_version.branch,
dirpath = self.bitbake_version.dirpath)
for l in self.projectlayer_set.all():
BRLayer.objects.create(req = br, name = l.layercommit.layer.name, giturl = l.layercommit.layer.vcs_url, commit = l.layercommit.commit, dirpath = l.layercommit.dirpath)
for t in self.projecttarget_set.all():
BRTarget.objects.create(req = br, target = t.target, task = t.task)
for v in self.projectvariable_set.all():
BRVariable.objects.create(req = br, name = v.name, value = v.value)
br.state = BuildRequest.REQ_QUEUED
br.save()
return br
class Build(models.Model):
SUCCEEDED = 0
FAILED = 1
IN_PROGRESS = 2
BUILD_OUTCOME = (
(SUCCEEDED, 'Succeeded'),
(FAILED, 'Failed'),
(IN_PROGRESS, 'In Progress'),
)
search_allowed_fields = ['machine', 'cooker_log_path', "target__target", "target__target_image_file__file_name"]
project = models.ForeignKey(Project, null = True)
machine = models.CharField(max_length=100)
distro = models.CharField(max_length=100)
distro_version = models.CharField(max_length=100)
started_on = models.DateTimeField()
completed_on = models.DateTimeField()
timespent = models.IntegerField(default=0)
outcome = models.IntegerField(choices=BUILD_OUTCOME, default=IN_PROGRESS)
errors_no = models.IntegerField(default=0)
warnings_no = models.IntegerField(default=0)
cooker_log_path = models.CharField(max_length=500)
build_name = models.CharField(max_length=100)
bitbake_version = models.CharField(max_length=50)
def completeper(self):
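        # Rough completion percentage: the share of this build's tasks that already
        # have an `order` assigned (i.e. have been processed).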
tf = Task.objects.filter(build = self)
tfc = tf.count()
if tfc > 0:
completeper = tf.exclude(order__isnull=True).count()*100/tf.count()
else:
completeper = 0
return completeper
def eta(self):
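        # Linear extrapolation: remaining time is assumed proportional to the
        # remaining percentage, scaled by the time already spent on the build.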
from django.utils import timezone
eta = 0
completeper = self.completeper()
if self.completeper() > 0:
eta = timezone.now() + ((timezone.now() - self.started_on)*(100-completeper)/completeper)
return eta
def get_sorted_target_list(self):
        tgts = Target.objects.filter(build_id = self.id).order_by('target')
        return tgts
class ProjectTarget(models.Model):
project = models.ForeignKey(Project)
target = models.CharField(max_length=100)
task = models.CharField(max_length=100, null=True)
@python_2_unicode_compatible
class Target(models.Model):
search_allowed_fields = ['target', 'file_name']
build = models.ForeignKey(Build)
target = models.CharField(max_length=100)
is_image = models.BooleanField(default = False)
image_size = models.IntegerField(default=0)
license_manifest_path = models.CharField(max_length=500, null=True)
def package_count(self):
return Target_Installed_Package.objects.filter(target_id__exact=self.id).count()
def __str__(self):
return self.target
class Target_Image_File(models.Model):
target = models.ForeignKey(Target)
file_name = models.FilePathField(max_length=254)
file_size = models.IntegerField()
class Target_File(models.Model):
ITYPE_REGULAR = 1
ITYPE_DIRECTORY = 2
ITYPE_SYMLINK = 3
ITYPE_SOCKET = 4
ITYPE_FIFO = 5
ITYPE_CHARACTER = 6
ITYPE_BLOCK = 7
ITYPES = ( (ITYPE_REGULAR ,'regular'),
( ITYPE_DIRECTORY ,'directory'),
( ITYPE_SYMLINK ,'symlink'),
( ITYPE_SOCKET ,'socket'),
( ITYPE_FIFO ,'fifo'),
( ITYPE_CHARACTER ,'character'),
( ITYPE_BLOCK ,'block'),
)
target = models.ForeignKey(Target)
path = models.FilePathField()
size = models.IntegerField()
inodetype = models.IntegerField(choices = ITYPES)
permission = models.CharField(max_length=16)
owner = models.CharField(max_length=128)
group = models.CharField(max_length=128)
directory = models.ForeignKey('Target_File', related_name="directory_set", null=True)
sym_target = models.ForeignKey('Target_File', related_name="symlink_set", null=True)
class TaskManager(models.Manager):
def related_setscene(self, task_object):
return Task.objects.filter(task_executed=True, build = task_object.build, recipe = task_object.recipe, task_name=task_object.task_name+"_setscene")
class Task(models.Model):
SSTATE_NA = 0
SSTATE_MISS = 1
SSTATE_FAILED = 2
SSTATE_RESTORED = 3
SSTATE_RESULT = (
        (SSTATE_NA, 'Not Applicable'), # for the rest of the tasks, but they still need checking
(SSTATE_MISS, 'File not in cache'), # the sstate object was not found
(SSTATE_FAILED, 'Failed'), # there was a pkg, but the script failed
(SSTATE_RESTORED, 'Succeeded'), # successfully restored
)
CODING_NA = 0
CODING_PYTHON = 2
CODING_SHELL = 3
TASK_CODING = (
(CODING_NA, 'N/A'),
(CODING_PYTHON, 'Python'),
(CODING_SHELL, 'Shell'),
)
OUTCOME_NA = -1
OUTCOME_SUCCESS = 0
OUTCOME_COVERED = 1
OUTCOME_CACHED = 2
OUTCOME_PREBUILT = 3
OUTCOME_FAILED = 4
OUTCOME_EMPTY = 5
TASK_OUTCOME = (
(OUTCOME_NA, 'Not Available'),
(OUTCOME_SUCCESS, 'Succeeded'),
(OUTCOME_COVERED, 'Covered'),
(OUTCOME_CACHED, 'Cached'),
(OUTCOME_PREBUILT, 'Prebuilt'),
(OUTCOME_FAILED, 'Failed'),
(OUTCOME_EMPTY, 'Empty'),
)
TASK_OUTCOME_HELP = (
(OUTCOME_SUCCESS, 'This task successfully completed'),
(OUTCOME_COVERED, 'This task did not run because its output is provided by another task'),
(OUTCOME_CACHED, 'This task restored output from the sstate-cache directory or mirrors'),
(OUTCOME_PREBUILT, 'This task did not run because its outcome was reused from a previous build'),
(OUTCOME_FAILED, 'This task did not complete'),
(OUTCOME_EMPTY, 'This task has no executable content'),
(OUTCOME_NA, ''),
)
search_allowed_fields = [ "recipe__name", "recipe__version", "task_name", "logfile" ]
objects = TaskManager()
def get_related_setscene(self):
return Task.objects.related_setscene(self)
def get_outcome_text(self):
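        # TASK_OUTCOME is ordered starting with OUTCOME_NA (== -1), so shifting the
        # outcome by one indexes the matching (value, label) pair.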
return Task.TASK_OUTCOME[self.outcome + 1][1]
def get_outcome_help(self):
return Task.TASK_OUTCOME_HELP[self.outcome][1]
def get_sstate_text(self):
if self.sstate_result==Task.SSTATE_NA:
return ''
else:
return Task.SSTATE_RESULT[self.sstate_result][1]
def get_executed_display(self):
if self.task_executed:
return "Executed"
return "Not Executed"
def get_description(self):
helptext = HelpText.objects.filter(key=self.task_name, area=HelpText.VARIABLE, build=self.build)
try:
return helptext[0].text
except IndexError:
return ''
build = models.ForeignKey(Build, related_name='task_build')
order = models.IntegerField(null=True)
    task_executed = models.BooleanField(default=False) # True means Executed, False means Not Executed
outcome = models.IntegerField(choices=TASK_OUTCOME, default=OUTCOME_NA)
sstate_checksum = models.CharField(max_length=100, blank=True)
path_to_sstate_obj = models.FilePathField(max_length=500, blank=True)
recipe = models.ForeignKey('Recipe', related_name='build_recipe')
task_name = models.CharField(max_length=100)
source_url = models.FilePathField(max_length=255, blank=True)
work_directory = models.FilePathField(max_length=255, blank=True)
script_type = models.IntegerField(choices=TASK_CODING, default=CODING_NA)
line_number = models.IntegerField(default=0)
disk_io = models.IntegerField(null=True)
cpu_usage = models.DecimalField(max_digits=6, decimal_places=2, null=True)
elapsed_time = models.DecimalField(max_digits=6, decimal_places=2, null=True)
sstate_result = models.IntegerField(choices=SSTATE_RESULT, default=SSTATE_NA)
message = models.CharField(max_length=240)
logfile = models.FilePathField(max_length=255, blank=True)
outcome_text = property(get_outcome_text)
sstate_text = property(get_sstate_text)
class Meta:
ordering = ('order', 'recipe' ,)
unique_together = ('build', 'recipe', 'task_name', )
class Task_Dependency(models.Model):
task = models.ForeignKey(Task, related_name='task_dependencies_task')
depends_on = models.ForeignKey(Task, related_name='task_dependencies_depends')
class Package(models.Model):
search_allowed_fields = ['name', 'version', 'revision', 'recipe__name', 'recipe__version', 'recipe__license', 'recipe__layer_version__layer__name', 'recipe__layer_version__branch', 'recipe__layer_version__commit', 'recipe__layer_version__layer__local_path', 'installed_name']
build = models.ForeignKey('Build')
recipe = models.ForeignKey('Recipe', null=True)
name = models.CharField(max_length=100)
installed_name = models.CharField(max_length=100, default='')
version = models.CharField(max_length=100, blank=True)
revision = models.CharField(max_length=32, blank=True)
summary = models.CharField(max_length=200, blank=True)
description = models.TextField(blank=True)
size = models.IntegerField(default=0)
installed_size = models.IntegerField(default=0)
section = models.CharField(max_length=80, blank=True)
license = models.CharField(max_length=80, blank=True)
class Package_DependencyManager(models.Manager):
use_for_related_fields = True
def get_query_set(self):
return super(Package_DependencyManager, self).get_query_set().exclude(package_id = F('depends_on__id'))
class Package_Dependency(models.Model):
TYPE_RDEPENDS = 0
TYPE_TRDEPENDS = 1
TYPE_RRECOMMENDS = 2
TYPE_TRECOMMENDS = 3
TYPE_RSUGGESTS = 4
TYPE_RPROVIDES = 5
TYPE_RREPLACES = 6
TYPE_RCONFLICTS = 7
    # TODO: bpackage should be changed to remove the DEPENDS_TYPE access
DEPENDS_TYPE = (
(TYPE_RDEPENDS, "depends"),
(TYPE_TRDEPENDS, "depends"),
(TYPE_TRECOMMENDS, "recommends"),
(TYPE_RRECOMMENDS, "recommends"),
(TYPE_RSUGGESTS, "suggests"),
(TYPE_RPROVIDES, "provides"),
(TYPE_RREPLACES, "replaces"),
(TYPE_RCONFLICTS, "conflicts"),
)
    ''' Indexed by dep_type, in view order; each entry maps to a short name and a
        help description that is %-formatted with the package names when
        displayed.
    '''
DEPENDS_DICT = {
TYPE_RDEPENDS : ("depends", "%s is required to run %s"),
TYPE_TRDEPENDS : ("depends", "%s is required to run %s"),
TYPE_TRECOMMENDS : ("recommends", "%s extends the usability of %s"),
TYPE_RRECOMMENDS : ("recommends", "%s extends the usability of %s"),
TYPE_RSUGGESTS : ("suggests", "%s is suggested for installation with %s"),
TYPE_RPROVIDES : ("provides", "%s is provided by %s"),
TYPE_RREPLACES : ("replaces", "%s is replaced by %s"),
TYPE_RCONFLICTS : ("conflicts", "%s conflicts with %s, which will not be installed if this package is not first removed"),
}
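    # Formatting sketch (package names and argument order are illustrative):
    #     _, help_tpl = Package_Dependency.DEPENDS_DICT[Package_Dependency.TYPE_RDEPENDS]
    #     help_tpl % ("libfoo", "bar")   # e.g. "libfoo is required to run bar"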
package = models.ForeignKey(Package, related_name='package_dependencies_source')
depends_on = models.ForeignKey(Package, related_name='package_dependencies_target') # soft dependency
dep_type = models.IntegerField(choices=DEPENDS_TYPE)
target = models.ForeignKey(Target, null=True)
objects = Package_DependencyManager()
class Target_Installed_Package(models.Model):
target = models.ForeignKey(Target)
package = models.ForeignKey(Package, related_name='buildtargetlist_package')
class Package_File(models.Model):
package = models.ForeignKey(Package, related_name='buildfilelist_package')
path = models.FilePathField(max_length=255, blank=True)
size = models.IntegerField()
class Recipe(models.Model):
search_allowed_fields = ['name', 'version', 'file_path', 'section', 'license', 'layer_version__layer__name', 'layer_version__branch', 'layer_version__commit', 'layer_version__layer__local_path']
layer_source = models.ForeignKey('LayerSource', default = None, null = True) # from where did we get this recipe
up_id = models.IntegerField(null = True, default = None) # id of entry in the source
up_date = models.DateTimeField(null = True, default = None)
name = models.CharField(max_length=100, blank=True) # pn
version = models.CharField(max_length=100, blank=True) # pv
layer_version = models.ForeignKey('Layer_Version', related_name='recipe_layer_version')
summary = models.CharField(max_length=100, blank=True)
description = models.TextField(blank=True)
section = models.CharField(max_length=100, blank=True)
license = models.CharField(max_length=200, blank=True)
homepage = models.URLField(blank=True)
bugtracker = models.URLField(blank=True)
file_path = models.FilePathField(max_length=255)
def get_vcs_link_url(self):
if self.layer_version.layer.vcs_web_file_base_url is None:
return ""
return self.layer_version.layer.vcs_web_file_base_url.replace('%path%', self.file_path).replace('%branch%', self.layer_version.up_branch.name)
def get_layersource_view_url(self):
if self.layer_source is None:
return ""
url = self.layer_source.get_object_view(self.layer_version.up_branch, "recipes", self.name)
return url
def __unicode__(self):
return "Recipe " + self.name + ":" + self.version
class Recipe_DependencyManager(models.Manager):
use_for_related_fields = True
def get_query_set(self):
return super(Recipe_DependencyManager, self).get_query_set().exclude(recipe_id = F('depends_on__id'))
class Recipe_Dependency(models.Model):
TYPE_DEPENDS = 0
TYPE_RDEPENDS = 1
DEPENDS_TYPE = (
(TYPE_DEPENDS, "depends"),
(TYPE_RDEPENDS, "rdepends"),
)
recipe = models.ForeignKey(Recipe, related_name='r_dependencies_recipe')
depends_on = models.ForeignKey(Recipe, related_name='r_dependencies_depends')
dep_type = models.IntegerField(choices=DEPENDS_TYPE)
objects = Recipe_DependencyManager()
class Machine(models.Model):
layer_source = models.ForeignKey('LayerSource', default = None, null = True) # from where did we get this machine
up_id = models.IntegerField(null = True, default = None) # id of entry in the source
up_date = models.DateTimeField(null = True, default = None)
layer_version = models.ForeignKey('Layer_Version')
name = models.CharField(max_length=255)
description = models.CharField(max_length=255)
def __unicode__(self):
return "Machine " + self.name + "(" + self.description + ")"
class Meta:
unique_together = ("layer_source", "up_id")
from django.db.models.base import ModelBase
class InheritanceMetaclass(ModelBase):
def __call__(cls, *args, **kwargs):
obj = super(InheritanceMetaclass, cls).__call__(*args, **kwargs)
return obj.get_object()
class LayerSource(models.Model):
__metaclass__ = InheritanceMetaclass
class Meta:
unique_together = (('sourcetype', 'apiurl'), )
TYPE_LOCAL = 0
TYPE_LAYERINDEX = 1
SOURCE_TYPE = (
(TYPE_LOCAL, "local"),
(TYPE_LAYERINDEX, "layerindex"),
)
name = models.CharField(max_length=63)
sourcetype = models.IntegerField(choices=SOURCE_TYPE)
apiurl = models.CharField(max_length=255, null=True, default=None)
def save(self, *args, **kwargs):
if isinstance(self, LocalLayerSource):
self.sourcetype = LayerSource.TYPE_LOCAL
elif isinstance(self, LayerIndexLayerSource):
self.sourcetype = LayerSource.TYPE_LAYERINDEX
        elif self.sourcetype is None:
raise Exception("Invalid LayerSource type")
return super(LayerSource, self).save(*args, **kwargs)
def get_object(self):
if self.sourcetype is not None:
if self.sourcetype == LayerSource.TYPE_LOCAL:
self.__class__ = LocalLayerSource
if self.sourcetype == LayerSource.TYPE_LAYERINDEX:
self.__class__ = LayerIndexLayerSource
return self
return "LS " + self.sourcetype + " " + self.name
class LocalLayerSource(LayerSource):
class Meta(LayerSource._meta.__class__):
proxy = True
def __init__(self, *args, **kwargs):
        super(LocalLayerSource, self).__init__(*args, **kwargs)
self.sourcetype = LayerSource.TYPE_LOCAL
def update(self):
'''
Fetches layer, recipe and machine information from local repository
'''
pass
class LayerIndexLayerSource(LayerSource):
class Meta(LayerSource._meta.__class__):
proxy = True
def __init__(self, *args, **kwargs):
        super(LayerIndexLayerSource, self).__init__(*args, **kwargs)
self.sourcetype = LayerSource.TYPE_LAYERINDEX
def get_object_view(self, branch, objectype, upid):
if self != branch.layer_source:
raise Exception("Invalid branch specification")
return self.apiurl + "../branch/" + branch.name + "/" + objectype + "/?q=" + str(upid)
def update(self):
'''
Fetches layer, recipe and machine information from remote repository
'''
assert self.apiurl is not None
def _get_json_response(apiurl = self.apiurl):
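            # Fetch `apiurl` over plain HTTP and decode the body as JSON; raises on
            # any non-200 response.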
import httplib, urlparse, json
parsedurl = urlparse.urlparse(apiurl)
(host, port) = parsedurl.netloc.split(":")
if port is None:
port = 80
else:
port = int(port)
#print "-- connect to: http://%s:%s%s?%s" % (host, port, parsedurl.path, parsedurl.query)
conn = httplib.HTTPConnection(host, port)
conn.request("GET", parsedurl.path + "?" + parsedurl.query)
r = conn.getresponse()
if r.status != 200:
raise Exception("Failed to read " + parsedurl.path + ": %d %s" % (r.status, r.reason))
return json.loads(r.read())
# verify we can get the basic api
try:
apilinks = _get_json_response()
except:
print "EE: could not connect to %s, skipping update" % self.apiurl
return
        # update branches; only those whose names are already listed in the database
whitelist_branch_names = map(lambda x: x.name, Branch.objects.all())
branches_info = _get_json_response(apilinks['branches']
+ "?filter=name:%s" % "OR".join(whitelist_branch_names))
for bi in branches_info:
b, created = Branch.objects.get_or_create(layer_source = self, name = bi['name'])
b.up_id = bi['id']
b.up_date = bi['updated']
b.name = bi['name']
b.bitbake_branch = bi['bitbake_branch']
b.short_description = bi['short_description']
b.save()
# update layers
layers_info = _get_json_response(apilinks['layerItems'])
for li in layers_info:
l, created = Layer.objects.get_or_create(layer_source = self, up_id = li['id'])
l.up_date = li['updated']
l.name = li['name']
l.vcs_url = li['vcs_url']
l.vcs_web_file_base_url = li['vcs_web_file_base_url']
l.summary = li['summary']
l.description = li['description']
l.save()
# update layerbranches/layer_versions
layerbranches_info = _get_json_response(apilinks['layerBranches']
+ "?filter=branch:%s" % "OR".join(map(lambda x: str(x.up_id), Branch.objects.filter(layer_source = self)))
)
for lbi in layerbranches_info:
lv, created = Layer_Version.objects.get_or_create(layer_source = self, up_id = lbi['id'])
lv.up_date = lbi['updated']
lv.layer = Layer.objects.get(layer_source = self, up_id = lbi['layer'])
lv.up_branch = Branch.objects.get(layer_source = self, up_id = lbi['branch'])
lv.branch = lbi['actual_branch']
lv.commit = lbi['vcs_last_rev']
lv.dirpath = lbi['vcs_subdir']
lv.save()
# update machines
machines_info = _get_json_response(apilinks['machines']
+ "?filter=layerbranch:%s" % "OR".join(map(lambda x: str(x.up_id), Layer_Version.objects.filter(layer_source = self)))
)
for mi in machines_info:
mo, created = Machine.objects.get_or_create(layer_source = self, up_id = mi['id'])
mo.up_date = mi['updated']
mo.layer_version = Layer_Version.objects.get(layer_source = self, up_id = mi['layerbranch'])
mo.name = mi['name']
mo.description = mi['description']
mo.save()
# update recipes; paginate by layer version / layer branch
recipes_info = _get_json_response(apilinks['recipes']
+ "?filter=layerbranch:%s" % "OR".join(map(lambda x: str(x.up_id), Layer_Version.objects.filter(layer_source = self)))
)
for ri in recipes_info:
ro, created = Recipe.objects.get_or_create(layer_source = self, up_id = ri['id'])
ro.up_date = ri['updated']
            ro.layer_version = Layer_Version.objects.get(layer_source = self, up_id = ri['layerbranch'])
ro.name = ri['pn']
ro.version = ri['pv']
ro.summary = ri['summary']
ro.description = ri['description']
ro.section = ri['section']
ro.license = ri['license']
ro.homepage = ri['homepage']
ro.bugtracker = ri['bugtracker']
ro.file_path = ri['filepath'] + ri['filename']
ro.save()
pass
class BitbakeVersion(models.Model):
name = models.CharField(max_length=32, unique = True)
giturl = models.URLField()
branch = models.CharField(max_length=32)
dirpath = models.CharField(max_length=255)
class Release(models.Model):
name = models.CharField(max_length=32, unique = True)
description = models.CharField(max_length=255)
bitbake_version = models.ForeignKey(BitbakeVersion)
branch = models.CharField(max_length=32)
class ReleaseDefaultLayer(models.Model):
release = models.ForeignKey(Release)
layer = models.ForeignKey('Layer')
# Branch class is synced with layerindex.Branch, branches can only come from remote layer indexes
class Branch(models.Model):
    layer_source = models.ForeignKey('LayerSource', null = True, default = None)
up_id = models.IntegerField(null = True, default = None) # id of branch in the source
up_date = models.DateTimeField(null = True, default = None)
name = models.CharField(max_length=50)
bitbake_branch = models.CharField(max_length=50, blank=True)
short_description = models.CharField(max_length=50, blank=True)
class Meta:
verbose_name_plural = "Branches"
unique_together = (('layer_source', 'name'),('layer_source', 'up_id'))
def __unicode__(self):
return self.name
# Layer class synced with layerindex.LayerItem
class Layer(models.Model):
layer_source = models.ForeignKey(LayerSource, null = True, default = None) # from where did we got this layer
up_id = models.IntegerField(null = True, default = None) # id of layer in the remote source
up_date = models.DateTimeField(null = True, default = None)
name = models.CharField(max_length=100)
local_path = models.FilePathField(max_length=255, null = True, default = None)
layer_index_url = models.URLField()
vcs_url = models.URLField(default = None, null = True)
vcs_web_file_base_url = models.URLField(null = True, default = None)
summary = models.CharField(max_length=200, help_text='One-line description of the layer', null = True, default = None)
description = models.TextField(null = True, default = None)
def __unicode__(self):
return "L " + self.name
class Meta:
unique_together = (("layer_source", "up_id"), ("layer_source", "name"))
# LayerCommit class is synced with layerindex.LayerBranch
class Layer_Version(models.Model):
search_allowed_fields = ["layer__name", "layer__summary",]
build = models.ForeignKey(Build, related_name='layer_version_build', default = None, null = True)
layer = models.ForeignKey(Layer, related_name='layer_version_layer')
layer_source = models.ForeignKey(LayerSource, null = True, default = None) # from where did we get this Layer Version
up_id = models.IntegerField(null = True, default = None) # id of layerbranch in the remote source
up_date = models.DateTimeField(null = True, default = None)
up_branch = models.ForeignKey(Branch, null = True, default = None)
branch = models.CharField(max_length=80) # LayerBranch.actual_branch
commit = models.CharField(max_length=100) # LayerBranch.vcs_last_rev
dirpath = models.CharField(max_length=255, null = True, default = None) # LayerBranch.vcs_subdir
priority = models.IntegerField(default = 0) # if -1, this is a default layer
def __unicode__(self):
return "LV " + str(self.layer) + " " + self.commit
class Meta:
unique_together = ("layer_source", "up_id")
class LayerVersionDependency(models.Model):
layer_source = models.ForeignKey(LayerSource, null = True, default = None) # from where did we got this layer
up_id = models.IntegerField(null = True, default = None) # id of layerbranch in the remote source
layer_version = models.ForeignKey(Layer_Version, related_name="dependencies")
depends_on = models.ForeignKey(Layer_Version, related_name="dependees")
class Meta:
unique_together = ("layer_source", "up_id")
class ProjectLayer(models.Model):
project = models.ForeignKey(Project)
layercommit = models.ForeignKey(Layer_Version, null=True)
optional = models.BooleanField(default = True)
class ProjectVariable(models.Model):
project = models.ForeignKey(Project)
name = models.CharField(max_length=100)
value = models.TextField(blank = True)
class Variable(models.Model):
search_allowed_fields = ['variable_name', 'variable_value',
'vhistory__file_name', "description"]
build = models.ForeignKey(Build, related_name='variable_build')
variable_name = models.CharField(max_length=100)
variable_value = models.TextField(blank=True)
changed = models.BooleanField(default=False)
human_readable_name = models.CharField(max_length=200)
description = models.TextField(blank=True)
class VariableHistory(models.Model):
variable = models.ForeignKey(Variable, related_name='vhistory')
value = models.TextField(blank=True)
file_name = models.FilePathField(max_length=255)
line_number = models.IntegerField(null=True)
operation = models.CharField(max_length=64)
class HelpText(models.Model):
VARIABLE = 0
HELPTEXT_AREA = ((VARIABLE, 'variable'), )
build = models.ForeignKey(Build, related_name='helptext_build')
area = models.IntegerField(choices=HELPTEXT_AREA)
key = models.CharField(max_length=100)
text = models.TextField()
class LogMessage(models.Model):
INFO = 0
WARNING = 1
ERROR = 2
LOG_LEVEL = ( (INFO, "info"),
(WARNING, "warn"),
(ERROR, "error") )
build = models.ForeignKey(Build)
task = models.ForeignKey(Task, blank = True, null=True)
level = models.IntegerField(choices=LOG_LEVEL, default=INFO)
message=models.CharField(max_length=240)
pathname = models.FilePathField(max_length=255, blank=True)
lineno = models.IntegerField(null=True)
|
|
"""Tests for disco_config utilities."""
from unittest import TestCase
from copy import deepcopy
from ConfigParser import NoOptionError
from mock import patch, Mock
from disco_aws_automation import disco_config, exceptions
from tests.helpers.patch_disco_aws import MockAsiaqConfig
@patch("disco_aws_automation.disco_config.ASIAQ_CONFIG", "FAKE_CONFIG_DIR")
class TestNormalizePath(TestCase):
"""Tests for the normalize_path utility function."""
@patch('os.path.exists')
def test__no_such_path__exception(self, path_exists):
"Normalize path finds no such path - exception."
path_exists.return_value = False
self.assertRaises(exceptions.AsiaqConfigError, disco_config.normalize_path, "yabba", "dabba")
path_exists.assert_called_once_with("FAKE_CONFIG_DIR/yabba/dabba")
@patch('os.path.exists')
def test__path_exists__path_returned(self, path_exists):
"Normalize path thinks the path exists - path is returned"
path_exists.return_value = True
found = disco_config.normalize_path("yabba", "dabba")
self.assertEqual(found, "FAKE_CONFIG_DIR/yabba/dabba")
path_exists.assert_called_once_with(found)
@patch('os.path.exists')
def test__list_arg__correct_path_returned(self, path_exists):
"Normalize path works with a single list-typed argument"
path_exists.return_value = True
found = disco_config.normalize_path(["yabba", "dabba"])
self.assertEqual(found, "FAKE_CONFIG_DIR/yabba/dabba")
path_exists.assert_called_once_with(found)
@patch('os.path.exists')
def test__tuple_arg__correct_path_returned(self, path_exists):
"Normalize path with a single tuple-typed argument"
path_exists.return_value = True
found = disco_config.normalize_path(("yabba", "dabba"))
self.assertEqual(found, "FAKE_CONFIG_DIR/yabba/dabba")
path_exists.assert_called_once_with(found)
@patch("disco_aws_automation.disco_config.ASIAQ_CONFIG", "FAKE_CONFIG_DIR")
@patch('disco_aws_automation.disco_config.AsiaqConfig')
class TestReadConfig(TestCase):
"""Tests for the read_config utility function."""
@patch('os.path.exists', Mock(return_value=True))
def test__no_arg__default_behavior(self, configparser_constructor):
"Default argument for read_config works"
parser = Mock()
configparser_constructor.return_value = parser
parsed = disco_config.read_config()
self.assertIs(parsed, parser)
parser.read.assert_called_once_with("FAKE_CONFIG_DIR/disco_aws.ini")
@patch('os.path.exists', Mock(return_value=True))
def test__named_arg__expected_behavior(self, configparser_constructor):
"Keyword argument for read_config works"
parser = Mock()
configparser_constructor.return_value = parser
parsed = disco_config.read_config(config_file="Foobar")
self.assertIs(parsed, parser)
parser.read.assert_called_once_with("FAKE_CONFIG_DIR/Foobar")
@patch('os.path.exists', Mock(return_value=True))
def test__arglist__expected_behavior(self, configparser_constructor):
"Unnamed argument list for read_config works"
parser = Mock()
configparser_constructor.return_value = parser
parsed = disco_config.read_config("foo", "bar")
self.assertIs(parsed, parser)
parser.read.assert_called_once_with("FAKE_CONFIG_DIR/foo/bar")
@patch('os.path.exists', Mock(return_value=True))
def test__arg_combo__named_arg_last(self, configparser_constructor):
"Combined keyword and listed args for read_config work"
parser = Mock()
configparser_constructor.return_value = parser
parsed = disco_config.read_config("foo", "bar", config_file="baz.ini")
self.assertIs(parsed, parser)
parser.read.assert_called_once_with("FAKE_CONFIG_DIR/foo/bar/baz.ini")
@patch("disco_aws_automation.disco_config.ASIAQ_CONFIG", "FAKE_CONFIG_DIR")
class TestOpenNormalized(TestCase):
"""Tests for the open_normalized utility function."""
@patch('os.path.exists', Mock(return_value=True))
@patch('disco_aws_automation.disco_config.open')
def test__path_exists__passthrough_successful(self, open_mock):
"Valid path for open_normalized - 'open' called"
expected = Mock()
open_mock.return_value = expected
found = disco_config.open_normalized("path", "to", "file", mode="moody")
self.assertIs(expected, found)
open_mock.assert_called_once_with("FAKE_CONFIG_DIR/path/to/file", mode="moody")
class TestAsiaqConfig(TestCase):
"""Tests for the AsiaqConfig object."""
# allow long method names
# pylint: disable=invalid-name
BASE_CONFIG_DICT = {
disco_config.DEFAULT_CONFIG_SECTION: {
'default_environment': 'fake-build',
'default_unused_option': 'fall-all-the-way-back',
},
'mhcfoobar': {
'easy_option': 'easy_answer',
'envy_option': 'fallback_answer',
'envy_option@fake-build': 'default_env_answer',
'envy_option@ci': 'ci_answer'
}
}
S3_BUCKET_CONFIG = {
's3_bucket_base': 'bucket-base',
's3_bucket_suffix': 'blah',
's3_bucket_suffix@production': 'danger'
}
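    # The dictionaries above exercise the "option@environment" convention: a key such
    # as "envy_option@ci" is expected to override the bare "envy_option" when the
    # active environment is "ci" (see the tests below).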
def test__get_asiaq_option__no_env_options(self):
"Option exists in desired section: found it"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertEqual('easy_answer', config.get_asiaq_option(option='easy_option', section='mhcfoobar'))
def test__get_asiaq_option__default_env(self):
"Env-specific option with default environment"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertEqual('default_env_answer',
config.get_asiaq_option(option='envy_option', section='mhcfoobar'))
def test__get_asiaq_option__env_in_constructor(self):
"Env-specific option with environment passed in at construction time"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT), environment='ci')
self.assertEqual('ci_answer',
config.get_asiaq_option('envy_option', section='mhcfoobar'))
def test__get_asiaq_option__env_in_call(self):
"Env-specific option with environment passed in at call time"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertEqual('ci_answer',
config.get_asiaq_option('envy_option', section='mhcfoobar', environment='ci'))
def test__get_asiaq_option__env_in_constructor_and_call(self):
"Env-specific option with environment passed in at call time"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT), environment="bad_env")
self.assertEqual('ci_answer',
config.get_asiaq_option('envy_option', section='mhcfoobar', environment='ci'))
def test__get_asiaq_option__bad_env_in_call(self):
"Env-specific option with unused environment passed in at call time"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertEqual('fallback_answer',
config.get_asiaq_option('envy_option', section='mhcfoobar', environment='nope'))
def test__get_asiaq_option__default_section(self):
"Option found in defaults as fallback"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertEqual('fall-all-the-way-back', config.get_asiaq_option('unused_option'))
self.assertEqual('fall-all-the-way-back',
config.get_asiaq_option('unused_option', section='mhcfoobar'))
def test__get_asiaq_option__missing__exception(self):
"Missing option with required=True"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertRaises(NoOptionError,
config.get_asiaq_option, 'nobody-cares-about-this', section='mhcfoobar')
def test__get_asiaq_option__missing_not_required__default(self):
"Missing option with required=False and default"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertEqual("passed-in-default",
config.get_asiaq_option('nobody-cares-about-this', section='mhcfoobar',
required=False, default="passed-in-default"))
def test__get_asiaq_option__missing_not_required_no_default__none(self):
"Missing option with required=False and default"
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertIsNone(config.get_asiaq_option('nobody-cares-about-this',
section='mhcfoobar', required=False))
def test__get_asiaq_option__nonsense_args__error(self):
"Invalid arguments to get_asiaq_option produce an error."
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertRaises(exceptions.ProgrammerError, config.get_asiaq_option, 'immaterial',
required=True, default=12345)
def test__get_asiaq_s3_bucket_name__no_prefix__error(self):
"Missing bucket prefix should make bucket-name method raise an exception."
config = MockAsiaqConfig(deepcopy(self.BASE_CONFIG_DICT))
self.assertRaises(NoOptionError, config.get_asiaq_s3_bucket_name, 'foobar')
def test__get_asiaq_s3_bucket_name__no_suffix(self):
"Missing suffix should not produce a problem for the bucket-name method."
config_dict = deepcopy(self.BASE_CONFIG_DICT)
config_dict[disco_config.DEFAULT_CONFIG_SECTION]['s3_bucket_base'] = 'bucket-base'
config = MockAsiaqConfig(config_dict)
self.assertEqual("bucket-base--foobar", config.get_asiaq_s3_bucket_name('foobar'))
def test__get_asiaq_s3_bucket_name__defaults(self):
"Base behavior of get_asiaq_s3_bucket_name works as expected."
config_dict = deepcopy(self.BASE_CONFIG_DICT)
config_dict[disco_config.DEFAULT_CONFIG_SECTION].update(self.S3_BUCKET_CONFIG)
config = MockAsiaqConfig(config_dict)
self.assertEqual("bucket-base--foobar--blah", config.get_asiaq_s3_bucket_name('foobar'))
def test__get_asiaq_s3_bucket_name__real_env_specified(self):
"Environment-specific behavior of get_asiaq_s3_bucket_name with a configured env works as expected"
config_dict = deepcopy(self.BASE_CONFIG_DICT)
config_dict[disco_config.DEFAULT_CONFIG_SECTION].update(self.S3_BUCKET_CONFIG)
config = MockAsiaqConfig(config_dict, environment="production")
self.assertEqual("bucket-base--foobar--danger", config.get_asiaq_s3_bucket_name('foobar'))
def test__get_asiaq_s3_bucket_name__bad_env_specified(self):
"Environment-specific behavior of get_asiaq_s3_bucket_name with a nonsense env works as expected"
config_dict = deepcopy(self.BASE_CONFIG_DICT)
config_dict[disco_config.DEFAULT_CONFIG_SECTION].update(self.S3_BUCKET_CONFIG)
config = MockAsiaqConfig(config_dict, environment="nope")
self.assertEqual("bucket-base--foobar--blah", config.get_asiaq_s3_bucket_name('foobar'))
|
|
'''
update:
2014/09/03:
softmax in the last layer
'''
import theano
import theano.tensor as T
import gzip
import cPickle
import numpy
import time
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
self.input = input
if W is None:
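            # Uniform initialization in [-sqrt(6/(n_in+n_out)), sqrt(6/(n_in+n_out))]
            # (Glorot-style); scaled up by 4 below when the activation is a sigmoid.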
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
class ANN(object):
def __init__(self, n_in, n_out, lmbd = 0.01, hiddens = [10]):
x = T.matrix('x')
y = T.ivector('y')
lr = T.scalar('lr')
rng = numpy.random.RandomState(numpy.random.randint(2 ** 30))
params = []
hid_layers = []
L2 = .0
n_hid = hiddens + [n_out]
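        # Stack the hidden layers (sigmoid) followed by a final softmax output layer;
        # L2 accumulates the squared weights of every layer for regularization.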
for ind, ele in enumerate(n_hid):
if ind == 0:
input = x
n_in = n_in
else:
input = hid_layers[-1].output
n_in = n_hid[ind-1]
if ind == len(n_hid) - 1:
activation = T.nnet.softmax
else:
activation = T.nnet.sigmoid
layer = HiddenLayer(rng, input = input, n_in = n_in, n_out = ele, activation = activation)
hid_layers.append( layer)
L2 += T.sum(layer.W ** 2)
params.extend([layer.W, layer.b])
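        # Mean negative log-likelihood of the correct classes under the softmax output.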
nl = -T.mean(T.log(hid_layers[-1].output)[T.arange(y.shape[0]), y])
cost = nl + L2 * lmbd
grads = T.grad(cost, params)
updates = []
for param_i, grad_i in zip(params, grads):
updates.append((param_i, param_i - lr * grad_i))
y_pred = T.argmax(hid_layers[-1].output, 1)
errors = T.mean(T.neq(y_pred, y))
self.n_in = n_in
self.n_out = n_out
self.hiddens = hiddens
self.hid_layers = hid_layers
self.x = x
self.y = y
self.lr = lr
self.cost = cost
self.errors = errors
self.updates = updates
self.pred = y_pred
self.time = []
def fit(self, datasets, batch_size = 500, n_epochs = 200, lr = 0.01):
''' without validation'''
index = T.lscalar()
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_test_batches /= batch_size
train_model = theano.function([index], self.cost,
updates = self.updates,
givens = {
self.x: train_set_x[index * batch_size: (index + 1) * batch_size],
self.y: train_set_y[index * batch_size: (index + 1) * batch_size],
self.lr: lr})
test_model = theano.function([], self.errors,
givens = {
self.x: test_set_x,
self.y: test_set_y})
train_error = theano.function([], self.errors,
givens = {
self.x: train_set_x,
self.y: train_set_y})
debug_f = theano.function([index], self.errors,
givens = {
self.x: test_set_x[index * batch_size : (index+1) * batch_size],
self.y: test_set_y[index * batch_size : (index+1) * batch_size]})
# print numpy.mean([debug_f(i) for i in xrange(n_test_batches)])
print(test_model())
print '...training'
maxiter = n_epochs
iteration = 0
while iteration < maxiter:
start_time = time.time()
iteration += 1
print 'iteration %d' % iteration
for minibatch_index in xrange(n_train_batches):
print '\tL of (%03d/%03d) = %f\r' % (minibatch_index, n_train_batches, train_model(minibatch_index)),
print ''
print '\ttrain error = %f' % train_error()
print '\ttest error = %f' % test_model()
self.time.append(time.time()-start_time)
def pred(self, x):
return theano.function([], T.argmax(self.hid_layers[-1].output, 1),
givens = {self.x: x})()
def prob(self, x):
return theano.function([], self.hid_layers[-1].output,
givens = {self.x: x})()
def __repr__(self):
return '<ANN:%r-%r-%r>' % (self.n_in, self.hiddens, self.n_out)
def load_data(dataset, num = None):
print '... loading data'
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
train_set = (numpy.concatenate([train_set[0], valid_set[0]], 0), numpy.concatenate([train_set[1], valid_set[1]], 0))
f.close()
def shared_dataset(data_xy, borrow=True, num = None):
data_x, data_y = data_xy
if num:
data_x = data_x[:num]
data_y = data_y[:num]
# data_y = boarden(10, data_y)
size = int(data_x.shape[1]**.5)
# data_x = data_x.reshape(data_x.shape[0], -1)
print data_x.shape, data_y.shape
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set, num = num)
# valid_set_x, valid_set_y = shared_dataset(valid_set, num = num)
train_set_x, train_set_y = shared_dataset(train_set, num = num)
rval = [(train_set_x, train_set_y), #(valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
if __name__ == '__main__':
theano.config.exception_verbosity='high'
theano.config.on_unused_input='ignore'
datasets = load_data('../../Data/mnist/mnist.pkl.gz')
cl = ANN(28 * 28, 10, hiddens = [1])
cl.fit(datasets, lr = 0.1)
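    # Illustrative usage sketch (not part of the original script): after
    # training, prob() compiles a small Theano function that substitutes the
    # given (shared) data for the symbolic input and returns class
    # probabilities as a numpy array.
    test_x, _ = datasets[1]
    probs = cl.prob(test_x)
    print 'predicted labels for the first 10 test digits:', probs[:10].argmax(1)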
|
|
import random
from unittest import TestCase
import mock
import werkzeug
from steinie import app
from steinie import routing
from . import utils
def generate_example_environ(method="GET"):
return {
'HTTP_HOST': 'example.com',
'PATH_INFO': '/',
'REQUEST_METHOD': method,
'wsgi.url_scheme': ('http', '80'),
}
def generate_mock_request(environ=None):
if environ is None:
environ = generate_example_environ()
return mock.Mock(path="/bar/foo", environ=environ)
class DecoratedFunctionsWithRawMethodTestCase(TestCase):
def test_can_decorate_with_multiples(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.method("/", methods=["GET", "POST"])
def index(request, response):
return r
environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=environ)
get_response = router.handle(request, mock.Mock())
self.assertEqual(r, get_response)
        environ = generate_example_environ(method='POST')
        request = mock.Mock(path='/', environ=environ)
        post_response = router.handle(request, mock.Mock())
self.assertEqual(r, post_response)
self.assertEqual(get_response, post_response)
class NestedRoutingTestCase(TestCase):
    def test_allows_nested_router(self):
r1 = routing.Router()
@r1.get("/foo")
def handle_foo(request, response):
return "\n".join([
"request.path: %s" % request.path,
"request.original_path: %s" % request.original_path,
])
r2 = routing.Router()
r2.use("/bar", r1)
request = mock.Mock(path="/bar/foo", environ=generate_example_environ())
response = r2.handle(request, mock.Mock())
expected = "\n".join([
"request.path: /foo",
"request.original_path: /bar/foo",
])
self.assertEqual(expected, response)
    def test_middleware_is_instantiated_with_route(self):
Middleware = mock.Mock()
r = routing.Router()
r.use(Middleware)
@r.get("/foo")
def handler(*args):
pass
a = app.Steinie()
a.use("/bar", r)
a.handle(generate_mock_request(), mock.Mock())
Middleware.assert_called_once_with(r)
def test_parameters_are_not_shared_with_parents_or_siblings(self):
r1 = routing.Router()
@r1.param("foo")
def foo_param(param):
return "foo"
@r1.get("/<foo:foo>")
def foo_handler(request, response):
return request.params
r2 = routing.Router()
@r2.param("bar")
def bar_param(param):
return "bar"
@r2.get("/<bar:bar>")
def bar_handler(request, response):
return request.params
a = app.Steinie()
a.use("/foo", r1)
a.use("/bar", r2)
@a.get("/")
def handler(request, response):
return request.params
request = mock.Mock(path="/", environ=generate_example_environ())
response = a.handle(request, mock.Mock())
self.assertEqual({}, response)
request.path = "/foo/bar"
self.assertEqual({"foo": "foo"}, a.handle(request, mock.Mock()))
request.path = "/bar/foo"
self.assertEqual({"bar": "bar"}, a.handle(request, mock.Mock()))
class ParamFunctionTestCase(TestCase):
def test_basic_router(self):
num = random.randint(1000, 2000)
router = routing.Router()
expected = "foo{}".format(random.randint(100, 200))
call_count = []
@router.param("bar")
def bar_to_upper(param):
return param.upper()
@router.get("/<bar:baz>/")
def parameter(request):
call_count.append(num)
self.assertIn('baz', request.params)
self.assertEqual(request.params['baz'], expected.upper())
path = "/{0}/".format(expected)
request = mock.Mock(path=path, environ=generate_example_environ())
router.handle(request, mock.Mock())
self.assert_(len(call_count) == 1)
self.assertIn(num, call_count)
def test_wraps_existing_func(self):
router = routing.Router()
@router.param("bar")
def bar_to_upper(param):
return param.upper()
self.assertEqual(bar_to_upper("foo"), "FOO")
self.assertEqual(bar_to_upper.__name__, "bar_to_upper")
def test_supports_nested_params(self):
num = random.randint(1000, 2000)
router = routing.Router()
expected = "foo{}".format(random.randint(100, 200))
call_count = []
@router.param("bar")
def bar_to_upper(param):
return param.upper()
@router.get("/<bar:baz>/")
def parameter(request):
call_count.append(num)
self.assertIn('baz', request.params)
self.assertEqual(request.params['baz'], expected.upper())
path = "/{0}/".format(expected)
request = mock.Mock(path=path, environ=generate_example_environ())
router.handle(request, mock.Mock())
self.assert_(len(call_count) == 1)
self.assertIn(num, call_count)
router2 = routing.Router()
router2.use("/", router)
router2.handle(request, mock.Mock())
self.assert_(len(call_count) == 2)
class DecoratedDeleteFunctionsTestCase(TestCase):
    def test_wraps_existing_func(self):
router = routing.Router()
@router.delete("/")
def index(request, response):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request, mock.Mock()), random_path)
self.assertEqual(index.__name__, "index")
def test_is_dispatched_to_via_handle(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.delete("/")
def index(request, response):
return r
post_environ = generate_example_environ(method='DELETE')
request = mock.Mock(path='/', environ=post_environ)
response = router.handle(request, mock.Mock())
self.assertEqual(r, response)
def test_does_not_match_on_get_or_post(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.delete("/")
def index(request, response):
return r
get_environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=get_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path='/', environ=post_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class DecoratedHeadFunctionsTestCase(TestCase):
    def test_wraps_existing_func(self):
router = routing.Router()
@router.head("/")
def index(request, response):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request, mock.Mock()), random_path)
self.assertEqual(index.__name__, "index")
def test_is_dispatched_to_via_handle(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.head("/")
def index(request, response):
return r
post_environ = generate_example_environ(method='HEAD')
request = mock.Mock(path='/', environ=post_environ)
response = router.handle(request, mock.Mock())
self.assertEqual(r, response)
def test_does_not_match_on_get_or_post(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.head("/")
def index(request, response):
return r
get_environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=get_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path='/', environ=post_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class DecoratedInfoFunctionsTestCase(TestCase):
    def test_wraps_existing_func(self):
router = routing.Router()
@router.info("/")
def index(request, response):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request, mock.Mock()), random_path)
self.assertEqual(index.__name__, "index")
def test_is_dispatched_to_via_handle(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.info("/")
def index(request, response):
return r
post_environ = generate_example_environ(method='INFO')
request = mock.Mock(path='/', environ=post_environ)
response = router.handle(request, mock.Mock())
self.assertEqual(r, response)
def test_does_not_match_on_get_or_post(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.info("/")
def index(request, response):
return r
get_environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=get_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path='/', environ=post_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class DecoratedOptionFunctionsTestCase(TestCase):
    def test_wraps_existing_func(self):
router = routing.Router()
@router.options("/")
def index(request, response):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request, mock.Mock()), random_path)
self.assertEqual(index.__name__, "index")
def test_is_dispatched_to_via_handle(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.options("/")
def index(request, response):
return r
environ = generate_example_environ(method='OPTIONS')
request = mock.Mock(path='/', environ=environ)
response = router.handle(request, mock.Mock())
self.assertEqual(r, response)
def test_does_not_match_on_get_or_post(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.options("/")
def index(request, response):
return r
get_environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=get_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path='/', environ=post_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class DecoratedPatchFunctionsTestCase(TestCase):
    def test_wraps_existing_func(self):
router = routing.Router()
@router.patch("/")
def index(request, response):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request, mock.Mock()), random_path)
self.assertEqual(index.__name__, "index")
def test_is_dispatched_to_via_handle(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.patch("/")
def index(request, response):
return r
environ = generate_example_environ(method='PATCH')
request = mock.Mock(path='/', environ=environ)
response = router.handle(request, mock.Mock())
self.assertEqual(r, response)
def test_does_not_match_on_get_or_post(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.patch("/")
def index(request, response):
return r
get_environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=get_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path='/', environ=post_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class DecoratedPutFunctionsTestCase(TestCase):
    def test_wraps_existing_func(self):
router = routing.Router()
@router.put("/")
def index(request, response):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request, mock.Mock()), random_path)
self.assertEqual(index.__name__, "index")
def test_is_dispatched_to_via_handle(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.put("/")
def index(request, response):
return r
environ = generate_example_environ(method='PUT')
request = mock.Mock(path='/', environ=environ)
response = router.handle(request, mock.Mock())
self.assertEqual(r, response)
def test_does_not_match_on_get_or_post(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.put("/")
def index(request, response):
return r
get_environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=get_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path='/', environ=post_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class DecoratedPostFunctionsTestCase(TestCase):
def test_wraps_existing_func(self):
router = routing.Router()
@router.post("/")
def index(request):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request), random_path)
self.assertEqual(index.__name__, "index")
def test_is_dispatched_to_via_handle(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.post("/")
def index(request):
return r
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path='/', environ=post_environ)
response = router.handle(request, mock.Mock())
self.assertEqual(r, response)
def test_does_not_match_on_get(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.post("/")
def index(request, response):
return r
post_environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=post_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class DecoratedTraceFunctionsTestCase(TestCase):
    def test_wraps_existing_func(self):
router = routing.Router()
@router.trace("/")
def index(request, response):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request, mock.Mock()), random_path)
self.assertEqual(index.__name__, "index")
def test_is_dispatched_to_via_handle(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.trace("/")
def index(request, response):
return r
environ = generate_example_environ(method='TRACE')
request = mock.Mock(path='/', environ=environ)
response = router.handle(request, mock.Mock())
self.assertEqual(r, response)
def test_does_not_match_on_get_or_post(self):
r = random.randint(1000, 2000)
router = routing.Router()
@router.trace("/")
def index(request, response):
return r
get_environ = generate_example_environ(method='GET')
request = mock.Mock(path='/', environ=get_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path='/', environ=post_environ)
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class DecoratedGetFunctionsTestCase(TestCase):
def test_wraps_existing_func(self):
router = routing.Router()
@router.get("/")
def index(request):
return request.path
random_path = "/foo/bar/%s" % random.randint(100, 200)
request = mock.Mock(path=random_path)
self.assertEqual(index(request), random_path)
self.assertEqual(index.__name__, "index")
def test_does_not_match_on_post(self):
router = routing.Router()
@router.get("/")
def index(request):
return request.path
post_environ = generate_example_environ(method='POST')
request = mock.Mock(path="/", environ=post_environ, method='POST')
with self.assertRaises(werkzeug.exceptions.MethodNotAllowed):
router.handle(request, mock.Mock())
class MiddlewareTestCase(TestCase):
def test_allows_using_middleware(self):
class Middleware(object):
def __init__(self, app):
pass
def __call__(self, request, response, _next):
response.data = "MIDDLEWARE INVOKED"
return response
a = app.Steinie()
a.use(Middleware)
with utils.run_app(a):
response = utils.get("http://localhost:5151/baz")
self.assertIn("MIDDLEWARE INVOKED", response.content)
def test_allows_using_middleware_from_nested_routers(self):
class Middleware(object):
def __init__(self, app):
pass
def __call__(self, request, response, _next):
response.data = "MIDDLEWARE INVOKED"
return response
r = routing.Router()
r.use(Middleware)
@r.get("/baz")
def get(request):
pass
a = app.Steinie()
a.use('/', r)
with utils.run_app(a):
response = utils.get("http://localhost:5151/baz")
self.assertIn("MIDDLEWARE INVOKED", response.content)
def test_dispatches_if_next_is_called(self):
class Middleware(object):
def __init__(self, app):
pass
def __call__(self, request, response, _next):
return _next(request, response)
a = app.Steinie()
a.use(Middleware)
@a.get("/foo")
def get(request, response):
return "Hello from the route"
with utils.run_app(a):
response = utils.get("http://localhost:5151/foo")
self.assertIn("Hello from the route", response.content)
def test_does_not_call_root_if_next_is_not_called(self):
class Middleware(object):
def __init__(self, app):
pass
def __call__(self, request, response, _next):
pass
a = app.Steinie()
a.use(Middleware)
@a.get("/foo")
def get(request, response):
return "Should never see this"
with utils.run_app(a):
response = utils.get("http://localhost:5151/foo")
self.assertEqual('', response.content)
|
|
#!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import struct
import subprocess
import sys
import tempfile
import test_format
BUNDLE_SIZE = 32
def CreateElfContent(bits, text_segment):
e_ident = {
32: '\177ELF\1',
64: '\177ELF\2'}[bits]
e_machine = {
32: 3,
64: 62}[bits]
e_phoff = 256
e_phnum = 1
e_phentsize = 0
elf_header_fmt = {
32: '<16sHHIIIIIHHHHHH',
64: '<16sHHIQQQIHHHHHH'}[bits]
elf_header = struct.pack(
elf_header_fmt,
e_ident, 0, e_machine, 0, 0, e_phoff, 0, 0, 0,
e_phentsize, e_phnum, 0, 0, 0)
p_type = 1 # PT_LOAD
p_flags = 5 # r-x
p_filesz = len(text_segment)
p_memsz = p_filesz
p_vaddr = 0
p_offset = 512
p_align = 0
p_paddr = 0
pheader_fmt = {
32: '<IIIIIIII',
64: '<IIQQQQQQ'}[bits]
pheader_fields = {
32: (p_type, p_offset, p_vaddr, p_paddr,
p_filesz, p_memsz, p_flags, p_align),
64: (p_type, p_flags, p_offset, p_vaddr,
p_paddr, p_filesz, p_memsz, p_align)}[bits]
pheader = struct.pack(pheader_fmt, *pheader_fields)
result = elf_header
assert len(result) <= e_phoff
result += '\0' * (e_phoff - len(result))
result += pheader
assert len(result) <= p_offset
result += '\0' * (p_offset - len(result))
result += text_segment
return result
def RunRdfaValidator(options, data):
# Add nops to make it bundle-sized.
data += (-len(data) % BUNDLE_SIZE) * '\x90'
assert len(data) % BUNDLE_SIZE == 0
tmp = tempfile.NamedTemporaryFile(mode='wb', delete=False)
try:
tmp.write(CreateElfContent(options.bits, data))
tmp.close()
proc = subprocess.Popen([options.rdfaval, tmp.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
assert stderr == '', stderr
return_code = proc.wait()
finally:
tmp.close()
os.remove(tmp.name)
# Remove the carriage return characters that we get on Windows.
stdout = stdout.replace('\r', '')
return return_code, stdout
def ParseRdfaMessages(stdout):
"""Get (offset, message) pairs from rdfa validator output.
Args:
stdout: Output of rdfa validator as string.
Yields:
Pairs (offset, message).
"""
for line in stdout.split('\n'):
line = line.strip()
if line == '':
continue
if re.match(r"(Valid|Invalid)\.$", line):
continue
m = re.match(r'([0-9a-f]+): (.*)$', line, re.IGNORECASE)
assert m is not None, "can't parse line '%s'" % line
offset = int(m.group(1), 16)
message = m.group(2)
if not message.startswith('warning - '):
yield offset, message
def CheckValidJumpTargets(options, data_chunks):
"""
Check that the validator infers valid jump targets correctly.
This test checks that the validator identifies instruction boundaries and
  superinstructions correctly. In order to do that, it appends to the end of the
  given code a jump targeting each byte offset in turn. The jump should be valid
  if and only if it targets a boundary between data chunks.
Note that the same chunks as in RunRdfaWithNopPatching are used, but here they
play a different role. In RunRdfaWithNopPatching the partitioning into chunks
is only relevant when the whole snippet is invalid. Here, on the other hand,
we only care about valid snippets, and we use chunks to mark valid jump
targets.
Args:
options: Options as produced by optparse.
data_chunks: List of strings containing binary data. Each such chunk is
expected to correspond to indivisible instruction or superinstruction.
Returns:
None.
"""
data = ''.join(data_chunks)
# Add nops to make it bundle-sized.
data += (-len(data) % BUNDLE_SIZE) * '\x90'
assert len(data) % BUNDLE_SIZE == 0
# Since we check validity of jump target by adding jump and validating
# resulting piece, we rely on validity of original snippet.
return_code, _ = RunRdfaValidator(options, data)
assert return_code == 0, 'Can only validate jump targets on valid snippet'
valid_jump_targets = set()
pos = 0
for data_chunk in data_chunks:
valid_jump_targets.add(pos)
pos += len(data_chunk)
valid_jump_targets.add(pos)
for i in range(pos + 1):
# Encode JMP with 32-bit relative target.
jump = '\xe9' + struct.pack('<i', i - (len(data) + 5))
return_code, _ = RunRdfaValidator(options, data + jump)
if return_code == 0:
assert i in valid_jump_targets, (
'Offset 0x%x was reported valid jump target' % i)
else:
assert i not in valid_jump_targets, (
'Offset 0x%x was reported invalid jump target' % i)
class RdfaTestRunner(test_format.TestRunner):
SECTION_NAME = 'rdfa_output'
def CommandLineOptions(self, parser):
parser.add_option('--rdfaval', default='validator_test',
help='Path to the ncval validator executable')
def GetSectionContent(self, options, sections):
data_chunks = list(test_format.ParseHex(sections['hex']))
return_code, stdout = RunRdfaValidator(options, ''.join(data_chunks))
result = ''.join('%x: %s\n' % (offset, message)
for offset, message in ParseRdfaMessages(stdout))
result += 'return code: %d\n' % return_code
if return_code == 0:
print ' Checking jump targets...'
CheckValidJumpTargets(options, data_chunks)
return result
def main(argv):
RdfaTestRunner().Run(argv)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
# -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module represents flavors of remotely accessible objects.
Currently this is only objects accessible through Perspective Broker, but will
hopefully encompass all forms of remote access which can emulate subsets of PB
(such as XMLRPC or SOAP).
Future Plans: Optimization. Exploitation of new-style object model.
Optimizations to this module should not affect external-use semantics at all,
but may have a small impact on users who subclass and override methods.
@author: Glyph Lefkowitz
"""
# NOTE: this module should NOT import pb; it is supposed to be a module which
# abstractly defines remotely accessible types. Many of these types expect to
# be serialized by Jelly, but they ought to be accessible through other
# mechanisms (like XMLRPC)
# system imports
import sys
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, reflect
# sibling imports
from jelly import setUnjellyableForClass, setUnjellyableForClassTree, setUnjellyableFactoryForClass, unjellyableRegistry
from jelly import Jellyable, Unjellyable, _newDummyLike
from jelly import setInstanceState, getInstanceState
# compatibility
setCopierForClass = setUnjellyableForClass
setCopierForClassTree = setUnjellyableForClassTree
setFactoryForClass = setUnjellyableFactoryForClass
copyTags = unjellyableRegistry
copy_atom = "copy"
cache_atom = "cache"
cached_atom = "cached"
remote_atom = "remote"
class NoSuchMethod(AttributeError):
"""Raised if there is no such remote method"""
class IPBRoot(Interface):
"""Factory for root Referenceable objects for PB servers."""
def rootObject(broker):
"""Return root Referenceable for broker."""
class Serializable(Jellyable):
"""An object that can be passed remotely.
I am a style of object which can be serialized by Perspective
Broker. Objects which wish to be referenceable or copied remotely
have to subclass Serializable. However, clients of Perspective
Broker will probably not want to directly subclass Serializable; the
Flavors of transferable objects are listed below.
What it means to be \"Serializable\" is that an object can be
passed to or returned from a remote method. Certain basic types
(dictionaries, lists, tuples, numbers, strings) are serializable by
default; however, classes need to choose a specific serialization
style: L{Referenceable}, L{Viewable}, L{Copyable} or L{Cacheable}.
You may also pass C{[lists, dictionaries, tuples]} of L{Serializable}
instances to or return them from remote methods, as many levels deep
as you like.
"""
def processUniqueID(self):
"""Return an ID which uniquely represents this object for this process.
By default, this uses the 'id' builtin, but can be overridden to
indicate that two values are identity-equivalent (such as proxies
for the same object).
"""
return id(self)
class Referenceable(Serializable):
    """I am an object sent remotely as a direct reference.
When one of my subclasses is sent as an argument to or returned
from a remote method call, I will be serialized by default as a
direct reference.
This means that the peer will be able to call methods on me;
a method call xxx() from my peer will be resolved to methods
of the name remote_xxx.
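    For example (an illustrative sketch; C{Echoer} is a placeholder name,
    not part of this module)::
        | class Echoer(Referenceable):
        |     def remote_echo(self, text):
        |         return text
    A peer holding a L{pb.RemoteReference} to an C{Echoer} could then call
    C{ref.callRemote("echo", "hello")} and receive C{"hello"} back.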
"""
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'remote_messagename' and call it with the same arguments.
"""
args = broker.unserialize(args)
kw = broker.unserialize(kw)
method = getattr(self, "remote_%s" % message, None)
if method is None:
raise NoSuchMethod("No such method: remote_%s" % (message,))
try:
state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, self.perspective)
def jellyFor(self, jellier):
"""(internal)
Return a tuple which will be used as the s-expression to
serialize this to a peer.
"""
return ["remote", jellier.invoker.registerReference(self)]
class Root(Referenceable):
"""I provide a root object to L{pb.Broker}s for a L{pb.BrokerFactory}.
When a L{pb.BrokerFactory} produces a L{pb.Broker}, it supplies that
L{pb.Broker} with an object named \"root\". That object is obtained
by calling my rootObject method.
"""
implements(IPBRoot)
def rootObject(self, broker):
"""A L{pb.BrokerFactory} is requesting to publish me as a root object.
When a L{pb.BrokerFactory} is sending me as the root object, this
method will be invoked to allow per-broker versions of an
object. By default I return myself.
"""
return self
class ViewPoint(Referenceable):
"""
I act as an indirect reference to an object accessed through a
L{pb.Perspective}.
Simply put, I combine an object with a perspective so that when a
peer calls methods on the object I refer to, the method will be
invoked with that perspective as a first argument, so that it can
know who is calling it.
While L{Viewable} objects will be converted to ViewPoints by default
when they are returned from or sent as arguments to a remote
method, any object may be manually proxied as well. (XXX: Now that
    this class is no longer named C{Proxy}, this is the only occurrence
of the term 'proxied' in this docstring, and may be unclear.)
This can be useful when dealing with L{pb.Perspective}s, L{Copyable}s,
and L{Cacheable}s. It is legal to implement a method as such on
a perspective::
| def perspective_getViewPointForOther(self, name):
| defr = self.service.getPerspectiveRequest(name)
| defr.addCallbacks(lambda x, self=self: ViewPoint(self, x), log.msg)
| return defr
This will allow you to have references to Perspective objects in two
different ways. One is through the initial 'attach' call -- each
peer will have a L{pb.RemoteReference} to their perspective directly. The
other is through this method; each peer can get a L{pb.RemoteReference} to
all other perspectives in the service; but that L{pb.RemoteReference} will
be to a L{ViewPoint}, not directly to the object.
The practical offshoot of this is that you can implement 2 varieties
of remotely callable methods on this Perspective; view_xxx and
C{perspective_xxx}. C{view_xxx} methods will follow the rules for
ViewPoint methods (see ViewPoint.L{remoteMessageReceived}), and
C{perspective_xxx} methods will follow the rules for Perspective
methods.
"""
def __init__(self, perspective, object):
"""Initialize me with a Perspective and an Object.
"""
self.perspective = perspective
self.object = object
def processUniqueID(self):
"""Return an ID unique to a proxy for this perspective+object combination.
"""
return (id(self.perspective), id(self.object))
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'C{view_messagename}' to my Object and call it on my object with
the same arguments, modified by inserting my Perspective as
the first argument.
"""
args = broker.unserialize(args, self.perspective)
kw = broker.unserialize(kw, self.perspective)
method = getattr(self.object, "view_%s" % message)
try:
            state = method(self.perspective, *args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
rv = broker.serialize(state, self.perspective, method, args, kw)
return rv
class Viewable(Serializable):
"""I will be converted to a L{ViewPoint} when passed to or returned from a remote method.
The beginning of a peer's interaction with a PB Service is always
through a perspective. However, if a C{perspective_xxx} method returns
a Viewable, it will be serialized to the peer as a response to that
method.
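    For example (an illustrative sketch; C{ChatRoom} and C{view_say} are
    placeholder names)::
        | class ChatRoom(Viewable):
        |     def view_say(self, perspective, text):
        |         print "%s said: %s" % (perspective, text)
    When the peer calls C{say} on the resulting L{ViewPoint}, its own
    perspective is automatically inserted as the first argument.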
"""
def jellyFor(self, jellier):
"""Serialize a L{ViewPoint} for me and the perspective of the given broker.
"""
return ViewPoint(jellier.invoker.serializingPerspective, self).jellyFor(jellier)
class Copyable(Serializable):
"""Subclass me to get copied each time you are returned from or passed to a remote method.
When I am returned from or passed to a remote method call, I will be
converted into data via a set of callbacks (see my methods for more
info). That data will then be serialized using Jelly, and sent to
the peer.
The peer will then look up the type to represent this with; see
L{RemoteCopy} for details.
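    A minimal sketch of the two halves (C{FrogPond} and C{ReceiverPond} are
    placeholder names)::
        | class FrogPond(Copyable):
        |     def __init__(self, numFrogs):
        |         self.numFrogs = numFrogs
        | class ReceiverPond(RemoteCopy):
        |     pass
        | setUnjellyableForClass(FrogPond, ReceiverPond)
    A C{FrogPond} passed to a remote method then shows up on the peer as a
    C{ReceiverPond} whose C{__dict__} holds the copied state.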
"""
def getStateToCopy(self):
"""Gather state to send when I am serialized for a peer.
I will default to returning self.__dict__. Override this to
customize this behavior.
"""
return self.__dict__
def getStateToCopyFor(self, perspective):
"""
Gather state to send when I am serialized for a particular
perspective.
I will default to calling L{getStateToCopy}. Override this to
customize this behavior.
"""
return self.getStateToCopy()
def getTypeToCopy(self):
"""Determine what type tag to send for me.
By default, send the string representation of my class
(package.module.Class); normally this is adequate, but
you may override this to change it.
"""
return reflect.qual(self.__class__)
def getTypeToCopyFor(self, perspective):
"""Determine what type tag to send for me.
By default, defer to self.L{getTypeToCopy}() normally this is
adequate, but you may override this to change it.
"""
return self.getTypeToCopy()
def jellyFor(self, jellier):
"""Assemble type tag and state to copy for this broker.
This will call L{getTypeToCopyFor} and L{getStateToCopy}, and
return an appropriate s-expression to represent me.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
p = jellier.invoker.serializingPerspective
t = self.getTypeToCopyFor(p)
state = self.getStateToCopyFor(p)
sxp = jellier.prepare(self)
sxp.extend([t, jellier.jelly(state)])
return jellier.preserve(self, sxp)
class Cacheable(Copyable):
"""A cached instance.
This means that it's copied; but there is some logic to make sure
that it's only copied once. Additionally, when state is retrieved,
it is passed a "proto-reference" to the state as it will exist on
the client.
XXX: The documentation for this class needs work, but it's the most
complex part of PB and it is inherently difficult to explain.
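    A rough sketch of typical usage (C{Counter} and C{setValue} are
    placeholder names)::
        | class Counter(Cacheable):
        |     def __init__(self):
        |         self.value = 0
        |         self.observers = []
        |     def getStateToCacheAndObserveFor(self, perspective, observer):
        |         self.observers.append(observer)
        |         return {'value': self.value}
        |     def increment(self):
        |         self.value += 1
        |         for o in self.observers:
        |             o.callRemote('setValue', self.value)
    Each peer receives the state once; later changes are pushed through the
    stored observers (see L{RemoteCacheObserver}).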
"""
def getStateToCacheAndObserveFor(self, perspective, observer):
"""
Get state to cache on the client and client-cache reference
to observe locally.
        This is similar to getStateToCopyFor, but it additionally
passes in a reference to the client-side RemoteCache instance
that will be created when it is unserialized. This allows
Cacheable instances to keep their RemoteCaches up to date when
        they change, so that any change occurring between the point at
        which the state is initially copied and the moment the client
        receives it is still propagated.
"""
return self.getStateToCopyFor(perspective)
def jellyFor(self, jellier):
"""Return an appropriate tuple to serialize me.
Depending on whether this broker has cached me or not, this may
return either a full state or a reference to an existing cache.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
luid = jellier.invoker.cachedRemotelyAs(self, 1)
if luid is None:
luid = jellier.invoker.cacheRemotely(self)
p = jellier.invoker.serializingPerspective
type_ = self.getTypeToCopyFor(p)
observer = RemoteCacheObserver(jellier.invoker, self, p)
state = self.getStateToCacheAndObserveFor(p, observer)
l = jellier.prepare(self)
jstate = jellier.jelly(state)
l.extend([type_, luid, jstate])
return jellier.preserve(self, l)
else:
return cached_atom, luid
def stoppedObserving(self, perspective, observer):
"""This method is called when a client has stopped observing me.
The 'observer' argument is the same as that passed in to
getStateToCacheAndObserveFor.
"""
class RemoteCopy(Unjellyable):
"""I am a remote copy of a Copyable object.
When the state from a L{Copyable} object is received, an instance will
be created based on the copy tags table (see setUnjellyableForClass) and
sent the L{setCopyableState} message. I provide a reasonable default
implementation of that message; subclass me if you wish to serve as
a copier for remote data.
NOTE: copiers are invoked with no arguments. Do not implement a
constructor which requires args in a subclass of L{RemoteCopy}!
"""
def setCopyableState(self, state):
"""I will be invoked with the state to copy locally.
'state' is the data returned from the remote object's
'getStateToCopyFor' method, which will often be the remote
object's dictionary (or a filtered approximation of it depending
on my peer's perspective).
"""
self.__dict__ = state
def unjellyFor(self, unjellier, jellyList):
if unjellier.invoker is None:
return setInstanceState(self, unjellier, jellyList)
self.setCopyableState(unjellier.unjelly(jellyList[1]))
return self
class RemoteCache(RemoteCopy, Serializable):
"""A cache is a local representation of a remote L{Cacheable} object.
This represents the last known state of this object. It may
also have methods invoked on it -- in order to update caches,
the cached class generates a L{pb.RemoteReference} to this object as
it is originally sent.
Much like copy, I will be invoked with no arguments. Do not
implement a constructor that requires arguments in one of my
subclasses.
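    Continuing the sketch from L{Cacheable} (names are placeholders)::
        | class RemoteCounter(RemoteCache):
        |     def observe_setValue(self, value):
        |         self.value = value
        | setUnjellyableForClass(Counter, RemoteCounter)
    Messages sent through the serving side's observer arrive here as calls
    to the matching C{observe_*} methods.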
"""
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'C{observe_messagename}' and call it on my with the same arguments.
"""
args = broker.unserialize(args)
kw = broker.unserialize(kw)
method = getattr(self, "observe_%s" % message)
try:
            state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, None, method, args, kw)
def jellyFor(self, jellier):
"""serialize me (only for the broker I'm for) as the original cached reference
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
assert jellier.invoker is self.broker, "You cannot exchange cached proxies between brokers."
return 'lcache', self.luid
def unjellyFor(self, unjellier, jellyList):
if unjellier.invoker is None:
return setInstanceState(self, unjellier, jellyList)
self.broker = unjellier.invoker
self.luid = jellyList[1]
cProxy = _newDummyLike(self)
# XXX questionable whether this was a good design idea...
init = getattr(cProxy, "__init__", None)
if init:
init()
unjellier.invoker.cacheLocally(jellyList[1], self)
cProxy.setCopyableState(unjellier.unjelly(jellyList[2]))
# Might have changed due to setCopyableState method; we'll assume that
# it's bad form to do so afterwards.
self.__dict__ = cProxy.__dict__
# chomp, chomp -- some existing code uses "self.__dict__ =", some uses
# "__dict__.update". This is here in order to handle both cases.
self.broker = unjellier.invoker
self.luid = jellyList[1]
return cProxy
## def __really_del__(self):
## """Final finalization call, made after all remote references have been lost.
## """
def __cmp__(self, other):
"""Compare me [to another RemoteCache.
"""
if isinstance(other, self.__class__):
return cmp(id(self.__dict__), id(other.__dict__))
else:
return cmp(id(self.__dict__), other)
def __hash__(self):
"""Hash me.
"""
return int(id(self.__dict__) % sys.maxint)
broker = None
luid = None
def __del__(self):
"""Do distributed reference counting on finalize.
"""
try:
# log.msg( ' --- decache: %s %s' % (self, self.luid) )
if self.broker:
self.broker.decCacheRef(self.luid)
except:
log.deferr()
def unjellyCached(unjellier, unjellyList):
luid = unjellyList[1]
cNotProxy = unjellier.invoker.cachedLocallyAs(luid)
cProxy = _newDummyLike(cNotProxy)
return cProxy
setUnjellyableForClass("cached", unjellyCached)
def unjellyLCache(unjellier, unjellyList):
luid = unjellyList[1]
obj = unjellier.invoker.remotelyCachedForLUID(luid)
return obj
setUnjellyableForClass("lcache", unjellyLCache)
def unjellyLocal(unjellier, unjellyList):
obj = unjellier.invoker.localObjectForID(unjellyList[1])
return obj
setUnjellyableForClass("local", unjellyLocal)
class RemoteCacheMethod:
"""A method on a reference to a L{RemoteCache}.
"""
def __init__(self, name, broker, cached, perspective):
"""(internal) initialize.
"""
self.name = name
self.broker = broker
self.perspective = perspective
self.cached = cached
def __cmp__(self, other):
return cmp((self.name, self.broker, self.perspective, self.cached), other)
def __hash__(self):
return hash((self.name, self.broker, self.perspective, self.cached))
def __call__(self, *args, **kw):
"""(internal) action method.
"""
cacheID = self.broker.cachedRemotelyAs(self.cached)
if cacheID is None:
from pb import ProtocolError
raise ProtocolError("You can't call a cached method when the object hasn't been given to the peer yet.")
return self.broker._sendMessage('cache', self.perspective, cacheID, self.name, args, kw)
class RemoteCacheObserver:
"""I am a reverse-reference to the peer's L{RemoteCache}.
I am generated automatically when a cache is serialized. I
represent a reference to the client's L{RemoteCache} object that
will represent a particular L{Cacheable}; I am the additional
object passed to getStateToCacheAndObserveFor.
"""
def __init__(self, broker, cached, perspective):
"""(internal) Initialize me.
@param broker: a L{pb.Broker} instance.
@param cached: a L{Cacheable} instance that this L{RemoteCacheObserver}
corresponds to.
@param perspective: a reference to the perspective who is observing this.
"""
self.broker = broker
self.cached = cached
self.perspective = perspective
def __repr__(self):
return "<RemoteCacheObserver(%s, %s, %s) at %s>" % (
self.broker, self.cached, self.perspective, id(self))
def __hash__(self):
"""Generate a hash unique to all L{RemoteCacheObserver}s for this broker/perspective/cached triplet
"""
return ( (hash(self.broker) % 2**10)
+ (hash(self.perspective) % 2**10)
+ (hash(self.cached) % 2**10))
def __cmp__(self, other):
"""Compare me to another L{RemoteCacheObserver}.
"""
return cmp((self.broker, self.perspective, self.cached), other)
def callRemote(self, _name, *args, **kw):
"""(internal) action method.
"""
cacheID = self.broker.cachedRemotelyAs(self.cached)
if cacheID is None:
from pb import ProtocolError
raise ProtocolError("You can't call a cached method when the "
"object hasn't been given to the peer yet.")
return self.broker._sendMessage('cache', self.perspective, cacheID,
_name, args, kw)
def remoteMethod(self, key):
"""Get a L{pb.RemoteMethod} for this key.
"""
return RemoteCacheMethod(key, self.broker, self.cached, self.perspective)
|
|
# -*- coding: utf-8 -*-
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# License: Simplified BSD
from os import path as op
import numpy as np
from scipy.sparse import csc_matrix
from .open import read_tag, fiff_open
from .tree import dir_tree_find
from .write import (start_block, end_block, write_int, write_float,
write_string, write_float_matrix, write_int_matrix,
write_float_sparse_rcs, write_id)
from .tag import find_tag
from .constants import FIFF
from ..externals.six import text_type, string_types
from ..utils import warn
_proc_keys = ['parent_file_id', 'block_id', 'parent_block_id',
'date', 'experimenter', 'creator']
_proc_ids = [FIFF.FIFF_PARENT_FILE_ID,
FIFF.FIFF_BLOCK_ID,
FIFF.FIFF_PARENT_BLOCK_ID,
FIFF.FIFF_MEAS_DATE,
FIFF.FIFF_EXPERIMENTER,
FIFF.FIFF_CREATOR]
_proc_writers = [write_id, write_id, write_id,
write_int, write_string, write_string]
_proc_casters = [dict, dict, dict,
np.array, text_type, text_type]
def _read_proc_history(fid, tree, info):
"""Read processing history from fiff file
    This function reads the SSS info, the CTC correction and the
    calibrations from the SSS processing logs inside of a raw file
    (cf. Maxfilter v2.2 manual (October 2010), page 21):
104 = { 900 = proc. history
104 = { 901 = proc. record
103 = block ID
204 = date
212 = scientist
113 = creator program
104 = { 502 = SSS info
264 = SSS task
263 = SSS coord frame
265 = SSS origin
266 = SSS ins.order
267 = SSS outs.order
268 = SSS nr chnls
269 = SSS components
278 = SSS nfree
243 = HPI g limit 0.98
244 = HPI dist limit 0.005
105 = } 502 = SSS info
104 = { 504 = MaxST info
264 = SSS task
272 = SSST subspace correlation
279 = SSST buffer length
105 = }
104 = { 501 = CTC correction
103 = block ID
204 = date
113 = creator program
800 = CTC matrix
3417 = proj item chs
105 = } 501 = CTC correction
104 = { 503 = SSS finecalib.
270 = SSS cal chnls
271 = SSS cal coeff
105 = } 503 = SSS finecalib.
105 = } 901 = proc. record
105 = } 900 = proc. history
"""
proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY)
out = list()
if len(proc_history) > 0:
proc_history = proc_history[0]
proc_records = dir_tree_find(proc_history,
FIFF.FIFFB_PROCESSING_RECORD)
for proc_record in proc_records:
record = dict()
for i_ent in range(proc_record['nent']):
kind = proc_record['directory'][i_ent].kind
pos = proc_record['directory'][i_ent].pos
for key, id_, cast in zip(_proc_keys, _proc_ids,
_proc_casters):
if kind == id_:
tag = read_tag(fid, pos)
record[key] = cast(tag.data)
break
else:
warn('Unknown processing history item %s' % kind)
record['max_info'] = _read_maxfilter_record(fid, proc_record)
smartshields = dir_tree_find(proc_record,
FIFF.FIFFB_SMARTSHIELD)
if len(smartshields) > 0:
# XXX should eventually populate this
ss = [dict() for _ in range(len(smartshields))]
record['smartshield'] = ss
if len(record['max_info']) > 0:
out.append(record)
if len(proc_records) > 0:
info['proc_history'] = out
def _write_proc_history(fid, info):
"""Write processing history to file"""
if 'proc_history' not in info:
return
if len(info['proc_history']) > 0:
start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
for record in info['proc_history']:
start_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
for key, id_, writer in zip(_proc_keys, _proc_ids, _proc_writers):
if key in record:
writer(fid, id_, record[key])
_write_maxfilter_record(fid, record['max_info'])
if 'smartshield' in record:
for ss in record['smartshield']:
start_block(fid, FIFF.FIFFB_SMARTSHIELD)
# XXX should eventually populate this
end_block(fid, FIFF.FIFFB_SMARTSHIELD)
end_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
_sss_info_keys = ('job', 'frame', 'origin', 'in_order',
'out_order', 'nchan', 'components', 'nfree',
'hpi_g_limit', 'hpi_dist_limit')
_sss_info_ids = (FIFF.FIFF_SSS_JOB,
FIFF.FIFF_SSS_FRAME,
FIFF.FIFF_SSS_ORIGIN,
FIFF.FIFF_SSS_ORD_IN,
FIFF.FIFF_SSS_ORD_OUT,
FIFF.FIFF_SSS_NMAG,
FIFF.FIFF_SSS_COMPONENTS,
FIFF.FIFF_SSS_NFREE,
FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
FIFF.FIFF_HPI_FIT_DIST_LIMIT)
_sss_info_writers = (write_int, write_int, write_float, write_int,
write_int, write_int, write_int, write_int,
write_float, write_float)
_sss_info_casters = (int, int, np.array, int,
int, int, np.array, int,
float, float)
_max_st_keys = ('job', 'subspcorr', 'buflen')
_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR,
FIFF.FIFF_SSS_ST_LENGTH)
_max_st_writers = (write_int, write_float, write_float)
_max_st_casters = (int, float, float)
_sss_ctc_keys = ('block_id', 'date', 'creator', 'decoupler')
_sss_ctc_ids = (FIFF.FIFF_BLOCK_ID,
FIFF.FIFF_MEAS_DATE,
FIFF.FIFF_CREATOR,
FIFF.FIFF_DECOUPLER_MATRIX)
_sss_ctc_writers = (write_id, write_int, write_string, write_float_sparse_rcs)
_sss_ctc_casters = (dict, np.array, text_type, csc_matrix)
_sss_cal_keys = ('cal_chans', 'cal_corrs')
_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS)
_sss_cal_writers = (write_int_matrix, write_float_matrix)
_sss_cal_casters = (np.array, np.array)
def _read_ctc(fname):
"""Read cross-talk correction matrix"""
if not isinstance(fname, string_types) or not op.isfile(fname):
raise ValueError('fname must be a file that exists, not %s' % fname)
f, tree, _ = fiff_open(fname)
with f as fid:
sss_ctc = _read_maxfilter_record(fid, tree)['sss_ctc']
bad_str = 'Invalid cross-talk FIF: %s' % fname
if len(sss_ctc) == 0:
raise ValueError(bad_str)
node = dir_tree_find(tree, FIFF.FIFFB_DATA_CORRECTION)[0]
comment = find_tag(fid, node, FIFF.FIFF_COMMENT).data
if comment != 'cross-talk compensation matrix':
raise ValueError(bad_str)
sss_ctc['creator'] = find_tag(fid, node, FIFF.FIFF_CREATOR).data
sss_ctc['date'] = find_tag(fid, node, FIFF.FIFF_MEAS_DATE).data
return sss_ctc
def _read_maxfilter_record(fid, tree):
"""Read maxfilter processing record from file"""
sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO) # 502
sss_info = dict()
if len(sss_info_block) > 0:
sss_info_block = sss_info_block[0]
for i_ent in range(sss_info_block['nent']):
kind = sss_info_block['directory'][i_ent].kind
pos = sss_info_block['directory'][i_ent].pos
for key, id_, cast in zip(_sss_info_keys, _sss_info_ids,
_sss_info_casters):
if kind == id_:
tag = read_tag(fid, pos)
sss_info[key] = cast(tag.data)
break
max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO) # 504
max_st = dict()
if len(max_st_block) > 0:
max_st_block = max_st_block[0]
for i_ent in range(max_st_block['nent']):
kind = max_st_block['directory'][i_ent].kind
pos = max_st_block['directory'][i_ent].pos
for key, id_, cast in zip(_max_st_keys, _max_st_ids,
_max_st_casters):
if kind == id_:
tag = read_tag(fid, pos)
max_st[key] = cast(tag.data)
break
sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER) # 501
sss_ctc = dict()
if len(sss_ctc_block) > 0:
sss_ctc_block = sss_ctc_block[0]
for i_ent in range(sss_ctc_block['nent']):
kind = sss_ctc_block['directory'][i_ent].kind
pos = sss_ctc_block['directory'][i_ent].pos
for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids,
_sss_ctc_casters):
if kind == id_:
tag = read_tag(fid, pos)
sss_ctc[key] = cast(tag.data)
break
else:
if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST:
tag = read_tag(fid, pos)
chs = tag.data.split(':')
# XXX for some reason this list can have a bunch of junk
# in the last entry, e.g.:
# [..., u'MEG2642', u'MEG2643', u'MEG2641\x00 ... \x00']
chs[-1] = chs[-1].split('\x00')[0]
sss_ctc['proj_items_chs'] = chs
sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL) # 503
sss_cal = dict()
if len(sss_cal_block) > 0:
sss_cal_block = sss_cal_block[0]
for i_ent in range(sss_cal_block['nent']):
kind = sss_cal_block['directory'][i_ent].kind
pos = sss_cal_block['directory'][i_ent].pos
for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids,
_sss_cal_casters):
if kind == id_:
tag = read_tag(fid, pos)
sss_cal[key] = cast(tag.data)
break
max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc,
sss_cal=sss_cal, max_st=max_st)
return max_info
def _write_maxfilter_record(fid, record):
"""Write maxfilter processing record to file"""
sss_info = record['sss_info']
if len(sss_info) > 0:
start_block(fid, FIFF.FIFFB_SSS_INFO)
for key, id_, writer in zip(_sss_info_keys, _sss_info_ids,
_sss_info_writers):
if key in sss_info:
writer(fid, id_, sss_info[key])
end_block(fid, FIFF.FIFFB_SSS_INFO)
max_st = record['max_st']
if len(max_st) > 0:
start_block(fid, FIFF.FIFFB_SSS_ST_INFO)
for key, id_, writer in zip(_max_st_keys, _max_st_ids,
_max_st_writers):
if key in max_st:
writer(fid, id_, max_st[key])
end_block(fid, FIFF.FIFFB_SSS_ST_INFO)
sss_ctc = record['sss_ctc']
if len(sss_ctc) > 0: # dict has entries
start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids,
_sss_ctc_writers):
if key in sss_ctc:
writer(fid, id_, sss_ctc[key])
if 'proj_items_chs' in sss_ctc:
write_string(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
':'.join(sss_ctc['proj_items_chs']))
end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
sss_cal = record['sss_cal']
if len(sss_cal) > 0:
start_block(fid, FIFF.FIFFB_SSS_CAL)
for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids,
_sss_cal_writers):
if key in sss_cal:
writer(fid, id_, sss_cal[key])
end_block(fid, FIFF.FIFFB_SSS_CAL)
def _get_sss_rank(sss):
"""Get SSS rank"""
    inside = sss['sss_info']['in_order']
    # an internal expansion of order L has (L + 1) ** 2 - 1 moments in
    # total; components flagged 0 (switched off) do not count towards rank
    nfree = (inside + 1) ** 2 - 1
nfree -= (len(sss['sss_info']['components'][:nfree]) -
sss['sss_info']['components'][:nfree].sum())
return nfree
|
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes api."""
import ast
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
def _translate_attachment_detail_view(_context, vol):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(_context, vol)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(_context, vol):
"""Maps keys for attachment summary view."""
d = {}
volume_id = vol['id']
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volume_id'] = volume_id
d['server_id'] = vol['instance_uuid']
d['host_name'] = vol['attached_host']
if vol.get('mountpoint'):
d['device'] = vol['mountpoint']
return d
def _translate_volume_detail_view(context, vol, image_id=None):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol, image_id)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol, image_id=None):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availability_zone'] = vol['availability_zone']
d['created_at'] = vol['created_at']
# Need to form the string true/false explicitly here to
# maintain our API contract
if vol['bootable']:
d['bootable'] = 'true'
else:
d['bootable'] = 'false'
d['attachments'] = []
if vol['attach_status'] == 'attached':
attachment = _translate_attachment_detail_view(context, vol)
d['attachments'].append(attachment)
d['display_name'] = vol['display_name']
d['display_description'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volume_type'] = vol['volume_type']['name']
else:
# TODO(bcwaldon): remove str cast once we use uuids
d['volume_type'] = str(vol['volume_type_id'])
d['snapshot_id'] = vol['snapshot_id']
d['source_volid'] = vol['source_volid']
d['encrypted'] = vol['encryption_key_id'] is not None
if image_id:
d['image_id'] = image_id
LOG.audit(_("vol=%s"), vol, context=context)
if vol.get('volume_metadata'):
metadata = vol.get('volume_metadata')
d['metadata'] = dict((item['key'], item['value']) for item in metadata)
# avoid circular ref when vol is a Volume instance
elif vol.get('metadata') and isinstance(vol.get('metadata'), dict):
d['metadata'] = vol['metadata']
else:
d['metadata'] = {}
return d
def make_attachment(elem):
elem.set('id')
elem.set('server_id')
elem.set('host_name')
elem.set('volume_id')
elem.set('device')
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availability_zone')
elem.set('created_at')
elem.set('display_name')
elem.set('bootable')
elem.set('display_description')
elem.set('volume_type')
elem.set('snapshot_id')
elem.set('source_volid')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V1, 'atom': xmlutil.XMLNS_ATOM}
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
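    An illustrative request body that _extract_volume below can marshal
    (element and attribute names follow the v1 volume XML format)::
        <volume display_name="vol-001" size="1">
            <metadata>
                <meta key="purpose">scratch</meta>
            </metadata>
        </volume>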
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
volume = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone', 'imageRef',
'snapshot_id', 'source_volid']
for attr in attributes:
if volume_node.getAttribute(attr):
volume[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
volume['metadata'] = self.extract_metadata(metadata_node)
return volume
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = utils.safe_minidom_parse_string(string)
volume = self._extract_volume(dom)
return {'body': {'volume': volume}}
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def __init__(self, ext_mgr):
self.volume_api = cinder_volume.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
def _add_visible_admin_metadata(self, context, volume):
if context is None:
return
visible_admin_meta = {}
if context.is_admin:
volume_tmp = volume
else:
try:
volume_tmp = self.volume_api.get(context.elevated(),
volume['id'])
except Exception:
return
if volume_tmp.get('volume_admin_metadata'):
for item in volume_tmp['volume_admin_metadata']:
if item['key'] in self._visible_admin_metadata_keys:
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume_tmp.get('admin_metadata') and
isinstance(volume_tmp.get('admin_metadata'), dict)):
for key in self._visible_admin_metadata_keys:
if key in volume_tmp['admin_metadata'].keys():
visible_admin_meta[key] = volume_tmp['admin_metadata'][key]
if not visible_admin_meta:
return
# NOTE(zhiyan): update visible administration metadata to
# volume metadata, administration metadata will rewrite existing key.
if volume.get('volume_metadata'):
orig_meta = list(volume.get('volume_metadata'))
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.iteritems():
orig_meta.append({'key': key, 'value': value})
volume['volume_metadata'] = orig_meta
# avoid circular ref when vol is a Volume instance
elif (volume.get('metadata') and
isinstance(volume.get('metadata'), dict)):
volume['metadata'].update(visible_admin_meta)
else:
volume['metadata'] = visible_admin_meta
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['cinder.context']
try:
vol = self.volume_api.get(context, id)
req.cache_resource(vol)
except exception.NotFound:
raise exc.HTTPNotFound()
self._add_visible_admin_metadata(context, vol)
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['cinder.context']
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
volume = self.volume_api.get(context, id)
self.volume_api.delete(context, volume)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
        # pop out limit and offset; they are not search_opts
search_opts = req.GET.copy()
search_opts.pop('limit', None)
search_opts.pop('offset', None)
if 'metadata' in search_opts:
search_opts['metadata'] = ast.literal_eval(search_opts['metadata'])
context = req.environ['cinder.context']
remove_invalid_options(context,
search_opts, self._get_volume_search_options())
volumes = self.volume_api.get_all(context, marker=None, limit=None,
sort_key='created_at',
sort_dir='desc', filters=search_opts)
volumes = [dict(vol.iteritems()) for vol in volumes]
for volume in volumes:
self._add_visible_admin_metadata(context, volume)
limited_list = common.limited(volumes, req)
req.cache_resource(limited_list)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id.
try:
image_uuid = image_href.split('/').pop()
except (TypeError, AttributeError):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
if not self.is_valid_body(body, 'volume'):
raise exc.HTTPUnprocessableEntity()
LOG.debug('Create volume request body: %s', body)
context = req.environ['cinder.context']
volume = body['volume']
kwargs = {}
req_volume_type = volume.get('volume_type', None)
if req_volume_type:
try:
if not uuidutils.is_uuid_like(req_volume_type):
kwargs['volume_type'] = \
volume_types.get_volume_type_by_name(
context, req_volume_type)
else:
kwargs['volume_type'] = volume_types.get_volume_type(
context, req_volume_type)
except exception.VolumeTypeNotFound:
explanation = 'Volume type not found.'
raise exc.HTTPNotFound(explanation=explanation)
kwargs['metadata'] = volume.get('metadata', None)
snapshot_id = volume.get('snapshot_id')
if snapshot_id is not None:
try:
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
snapshot_id)
except exception.NotFound:
explanation = _('snapshot id:%s not found') % snapshot_id
raise exc.HTTPNotFound(explanation=explanation)
else:
kwargs['snapshot'] = None
source_volid = volume.get('source_volid')
if source_volid is not None:
try:
kwargs['source_volume'] = \
self.volume_api.get_volume(context,
source_volid)
except exception.NotFound:
explanation = _('source vol id:%s not found') % source_volid
raise exc.HTTPNotFound(explanation=explanation)
else:
kwargs['source_volume'] = None
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
image_href = None
image_uuid = None
if self.ext_mgr.is_loaded('os-image-create'):
# NOTE(jdg): misleading name "imageRef" as it's an image-id
image_href = volume.get('imageRef')
if image_href is not None:
image_uuid = self._image_uuid_from_href(image_href)
kwargs['image_id'] = image_uuid
kwargs['availability_zone'] = volume.get('availability_zone', None)
new_volume = self.volume_api.create(context,
size,
volume.get('display_name'),
volume.get('display_description'),
**kwargs)
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
new_volume = dict(new_volume.iteritems())
self._add_visible_admin_metadata(context, new_volume)
retval = _translate_volume_detail_view(context, new_volume, image_uuid)
return {'volume': retval}
def _get_volume_search_options(self):
"""Return volume search options allowed by non-admin."""
return ('display_name', 'status', 'metadata')
@wsgi.serializers(xml=VolumeTemplate)
def update(self, req, id, body):
"""Update a volume."""
context = req.environ['cinder.context']
if not body:
raise exc.HTTPUnprocessableEntity()
if 'volume' not in body:
raise exc.HTTPUnprocessableEntity()
volume = body['volume']
update_dict = {}
valid_update_keys = (
'display_name',
'display_description',
'metadata',
)
for key in valid_update_keys:
if key in volume:
update_dict[key] = volume[key]
try:
volume = self.volume_api.get(context, id)
volume_utils.notify_about_volume_usage(context, volume,
'update.start')
self.volume_api.update(context, volume, update_dict)
except exception.NotFound:
raise exc.HTTPNotFound()
volume.update(update_dict)
self._add_visible_admin_metadata(context, volume)
volume_utils.notify_about_volume_usage(context, volume,
'update.end')
return {'volume': _translate_volume_detail_view(context, volume)}
def create_resource(ext_mgr):
return wsgi.Resource(VolumeController(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
log_msg = _("Removing options '%(bad_options)s'"
" from query") % {'bad_options': bad_options}
LOG.debug(log_msg)
for opt in unknown_options:
del search_options[opt]
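# Illustrative sketch (not part of the original module): shows how remove_invalid_options
# strips unknown filters for a non-admin caller. _ExampleContext is hypothetical and only
# exists to make the sketch self-contained.
def _example_remove_invalid_options():
    class _ExampleContext(object):
        is_admin = False
    search_opts = {'display_name': 'vol1', 'host': 'node1'}
    remove_invalid_options(_ExampleContext(), search_opts,
                           ('display_name', 'status', 'metadata'))
    # 'host' is not an allowed non-admin option, so it has been stripped.
    return search_opts  # -> {'display_name': 'vol1'}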
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import webob.exc as webexc
import webtest
from neutron.api import extensions
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import servicetype_db as st_db
from neutron.extensions import servicetype
from neutron.plugins.common import constants
from neutron.services import provider_configuration as provconf
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit import dummy_plugin as dp
from neutron.tests.unit import testlib_api
DEFAULT_SERVICE_DEFS = [{'service_class': constants.DUMMY,
'plugin': dp.DUMMY_PLUGIN_NAME}]
_uuid = test_base._uuid
_get_path = test_base._get_path
class ServiceTypeManagerTestCase(testlib_api.SqlTestCase):
def setUp(self):
super(ServiceTypeManagerTestCase, self).setUp()
st_db.ServiceTypeManager._instance = None
self.manager = st_db.ServiceTypeManager.get_instance()
self.ctx = context.get_admin_context()
def test_service_provider_driver_not_unique(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver'],
'service_providers')
prov = {'service_type': constants.LOADBALANCER,
'name': 'name2',
'driver': 'driver',
'default': False}
self.manager._load_conf()
self.assertRaises(
n_exc.Invalid, self.manager.conf.add_provider, prov)
def test_get_service_providers(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
constants.DUMMY + ':dummy:dummy_dr'],
'service_providers')
ctx = context.get_admin_context()
provconf.parse_service_provider_opt()
self.manager._load_conf()
res = self.manager.get_service_providers(ctx)
self.assertEqual(len(res), 2)
res = self.manager.get_service_providers(
ctx,
filters=dict(service_type=[constants.DUMMY])
)
self.assertEqual(len(res), 1)
res = self.manager.get_service_providers(
ctx,
filters=dict(service_type=[constants.LOADBALANCER])
)
self.assertEqual(len(res), 1)
def test_multiple_default_providers_specified_for_service(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.LOADBALANCER +
':lbaas2:driver_path:default'],
'service_providers')
self.assertRaises(n_exc.Invalid, self.manager._load_conf)
def test_get_default_provider(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
# can pass None as a context
p = self.manager.get_default_service_provider(None,
constants.LOADBALANCER)
self.assertEqual(p, {'service_type': constants.LOADBALANCER,
'name': 'lbaas1',
'driver': 'driver_path',
'default': True})
self.assertRaises(
provconf.DefaultServiceProviderNotFound,
self.manager.get_default_service_provider,
None, constants.DUMMY
)
def test_add_resource_association(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
ctx = context.get_admin_context()
self.manager.add_resource_association(ctx,
constants.LOADBALANCER,
'lbaas1', '123-123')
self.assertEqual(ctx.session.
query(st_db.ProviderResourceAssociation).count(),
1)
assoc = ctx.session.query(st_db.ProviderResourceAssociation).one()
ctx.session.delete(assoc)
def test_invalid_resource_association(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
ctx = context.get_admin_context()
self.assertRaises(provconf.ServiceProviderNotFound,
self.manager.add_resource_association,
ctx, 'BLABLA_svc', 'name', '123-123')
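# Illustrative sketch (not part of the original tests): the service_provider entries used
# above follow the "<service_type>:<name>:<driver>[:default]" format. The helper below is
# hypothetical and only shows how such an entry is assembled.
def _example_provider_entry(service_type, name, driver, default=False):
    entry = ':'.join([service_type, name, driver])
    if default:
        entry += ':default'
    return entry
# _example_provider_entry(constants.LOADBALANCER, 'lbaas1', 'driver_path', default=True)
# -> 'LOADBALANCER:lbaas1:driver_path:default'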
class TestServiceTypeExtensionManager(object):
"""Mock extensions manager."""
def get_resources(self):
return (servicetype.Servicetype.get_resources() +
dp.Dummy.get_resources())
def get_actions(self):
return []
def get_request_extensions(self):
return []
class ServiceTypeExtensionTestCaseBase(testlib_api.WebTestCase):
fmt = 'json'
def setUp(self):
# This is needed because otherwise a failure will occur due to
        # a nonexistent core_plugin
self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS)
cfg.CONF.set_override('service_plugins',
["%s.%s" % (dp.__name__,
dp.DummyServicePlugin.__name__)])
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
ext_mgr = TestServiceTypeExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
self.resource_name = servicetype.RESOURCE_NAME.replace('-', '_')
super(ServiceTypeExtensionTestCaseBase, self).setUp()
class ServiceTypeExtensionTestCase(ServiceTypeExtensionTestCaseBase):
def setUp(self):
self._patcher = mock.patch(
"neutron.db.servicetype_db.ServiceTypeManager",
autospec=True)
self.mock_mgr = self._patcher.start()
self.mock_mgr.get_instance.return_value = self.mock_mgr.return_value
super(ServiceTypeExtensionTestCase, self).setUp()
def test_service_provider_list(self):
instance = self.mock_mgr.return_value
res = self.api.get(_get_path('service-providers', fmt=self.fmt))
instance.get_service_providers.assert_called_with(mock.ANY,
filters={},
fields=[])
self.assertEqual(res.status_int, webexc.HTTPOk.code)
class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase):
"""Tests ServiceTypemanager as a public API."""
def setUp(self):
# Blank out service type manager instance
st_db.ServiceTypeManager._instance = None
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
constants.DUMMY + ':dummy:dummy_dr'],
'service_providers')
super(ServiceTypeManagerExtTestCase, self).setUp()
def _list_service_providers(self):
return self.api.get(_get_path('service-providers', fmt=self.fmt))
def test_list_service_providers(self):
res = self._list_service_providers()
self.assertEqual(res.status_int, webexc.HTTPOk.code)
data = self.deserialize(res)
self.assertIn('service_providers', data)
self.assertEqual(len(data['service_providers']), 2)
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
"""
Abstract base class.
Note that the methods all take string arguments, not Task objects...
"""""
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
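# Illustrative sketch (not part of the original module): the scheduler summarizes a set of
# upstream statuses by picking the most severe one, using the ordering above as the sort
# key (see _upstream_status below for the real use).
def _example_worst_upstream_status():
    statuses = ['', UPSTREAM_RUNNING, UPSTREAM_FAILED]
    # UPSTREAM_FAILED ranks above UPSTREAM_RUNNING in UPSTREAM_SEVERITY_ORDER.
    return max(statuses, key=UPSTREAM_SEVERITY_KEY)  # -> 'UPSTREAM_FAILED'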
class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
# at some point (in particular this would force users to replace all dashes with underscores in the config)
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
prune_done_tasks = parameter.BoolParameter(default=False)
record_task_history = parameter.BoolParameter(default=False)
def fix_time(x):
# Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects
# Let's remove this function soon
if isinstance(x, datetime.datetime):
return time.mktime(x.timetuple())
else:
return x
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and fix_time(self.failures[0]) < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
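# Illustrative sketch (not part of the original module): Failures only counts events that
# fall inside the configured sliding window, so old failures age out automatically.
def _example_failures_window():
    tracker = Failures(window=10.0)  # ten-second sliding window
    tracker.add_failure()
    tracker.add_failure()
    # Both events are recent, so both still fall inside the window.
    return tracker.num_failures()  # -> 2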
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None):
self.id = task_id
        self.stakeholders = set()  # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
        self.workers = set()  # worker ids that can perform the task - the task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.scheduler_disable_time = None
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
excessive_failures = False
if (self.failures.first_failure_time is not None and
self.disable_hard_timeout):
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
excessive_failures = True
if self.failures.num_failures() >= self.disable_failures:
excessive_failures = True
return excessive_failures
def can_disable(self):
return (self.disable_failures is not None or
self.disable_hard_timeout is not None)
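# Illustrative sketch (not part of the original module): how excessive failures are detected.
# A task qualifies for disabling once num_failures() reaches disable_failures (or once
# disable_hard_timeout has elapsed since the first failure). The task id is hypothetical.
def _example_excessive_failures():
    task = Task('ExampleTask()', PENDING, deps=None,
                disable_failures=2, disable_window=3600)
    task.add_failure()
    task.add_failure()
    return task.has_excessive_failures()  # -> True (2 failures >= disable_failures)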
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active # seconds since epoch
self.started = time.time() # seconds since epoch
self.info = {}
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
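# Illustrative sketch (not part of the original module): a worker is pruned once it has
# been silent for longer than worker_disconnect_delay. _ExampleConfig is hypothetical and
# stands in for the scheduler Config object.
def _example_worker_prune():
    class _ExampleConfig(object):
        worker_disconnect_delay = 60.0
    stale = Worker('worker-1', last_active=time.time() - 120)
    return stale.prune(_ExampleConfig())  # -> True (silent for 120s > 60s)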
class SimpleTaskState(object):
"""
    Keep track of the current state and handle persistence.
    The point of this class is to enable other ways to keep state, e.g. by using a database.
These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def dump(self):
state = (self._tasks, self._active_workers)
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(state, fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
    # Prone to crashes when old state is unpickled with updated code. TODO: some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from clean slate.")
return
self._tasks, self._active_workers = state
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
# Convert from old format
# TODO: this is really ugly, we need something more future-proof
# Every time we add an attribute to the Worker class, this code needs to be updated
for k, v in six.iteritems(self._active_workers):
if isinstance(v, float):
self._active_workers[k] = Worker(worker_id=k, last_active=v)
else:
logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
# not sure why we have SUSPENDED, as it can never be set
if new_status == SUSPENDED:
new_status = PENDING
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None:
return
if new_status == FAILED and task.can_disable():
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
def prune(self, task, config, assistants):
remove = False
# Mark tasks with no remaining active stakeholders for deletion
if not task.stakeholders:
if task.remove is None:
logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
"task in %s seconds", task.id, task.stakeholders, config.remove_delay)
task.remove = time.time() + config.remove_delay
        # If a running worker disconnects, tag all its jobs as FAILED and subject them to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time:
if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
self.re_enable(task, config)
# Remove tasks that have no stakeholders
if task.remove and time.time() > task.remove:
logger.info("Removing task %r (no connected stakeholders)", task.id)
remove = True
        # Reset FAILED tasks to PENDING once the retry delay has passed (only if retry_delay >= 0)
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
return remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
# remove workers from tasks
for task in self.get_active_tasks():
task.stakeholders.difference_update(delete_workers)
task.workers.difference_update(delete_workers)
def get_necessary_tasks(self):
necessary_tasks = set()
for task in self.get_active_tasks():
if task.status not in (DONE, DISABLED) or \
getattr(task, 'scheduler_disable_time', None) is not None:
necessary_tasks.update(task.deps)
necessary_tasks.add(task.id)
return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
        :param config: an object of class "scheduler" or None (in which case the global configuration will be used)
:param resources: a dict of str->int constraints
        :param task_history_impl: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
if task.id not in necessary_tasks and self._state.prune(task, self._config, assistant_ids):
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, worker, task_id, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
self.update(worker)
task = self._state.get_task(task_id, setdefault=self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params))
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if not (task.status == RUNNING and status == PENDING):
            # don't allow re-scheduling of a task while it is running; it must either fail or succeed first
if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING because that's the default value
                # (so checking for status != task.status would lie)
self._update_task_history(task_id, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = time.time() + self._config.retry_delay
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if not assistant:
task.stakeholders.add(worker)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker)
self._update_priority(task, priority, worker)
if runnable:
task.workers.add(worker)
if expl is not None:
task.expl = expl
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self):
"""
        Return a sort key function used to rank pending tasks for scheduling.
        Tasks are ordered by priority, then by the number of pending tasks that depend on them, then by age (older first).
"""
dependents = collections.defaultdict(int)
def not_done(t):
task = self._state.get_task(t, default=None)
return task is None or task.status != DONE
for task in self._state.get_pending_tasks():
if task.status != DONE:
deps = list(filter(not_done, task.deps))
inverse_num_deps = 1.0 / max(len(deps), 1)
for dep in deps:
dependents[dep] += inverse_num_deps
return lambda task: (task.priority, dependents[task.id], -task.time)
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def get_work(self, worker, host=None, assistant=False, **kwargs):
# TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest priority node with no dependencies and available
# resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
        # Return remaining tasks that have no FAILED descendants
self.update(worker, {'host': host})
if assistant:
self.add_worker(worker, [('assistant', assistant)])
best_task = None
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
used_resources = self._used_resources()
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in self._state.get_active_workers())
tasks = list(self._state.get_pending_tasks())
tasks.sort(key=self._rank(), reverse=True)
for task in tasks:
upstream_status = self._upstream_status(task.id, upstream_table)
in_workers = (assistant and task.workers) or worker in task.workers
if task.status == 'RUNNING' and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if not best_task and self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker
best_task.time_running = time.time()
self._update_task_history(best_task.id, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, worker, **kwargs):
self.update(worker)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
if self._state.has_task(dep_id):
dep = self._state.get_task(dep_id)
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack = task_stack + [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
upstream_status = [upstream_status_table.get(task_id, '') for task_id in dep.deps]
upstream_status.append('') # to handle empty list
status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True):
task = self._state.get_task(task_id)
ret = {
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
for task in self._state.get_active_tasks():
serialized[task.id] = self._serialize_task(task.id)
return serialized
def _recurse_deps(self, task_id, serialized):
if task_id not in serialized:
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.warn('Missing task for id [%s]', task_id)
# try to infer family and params from task_id
try:
family, _, param_str = task_id.rstrip(')').partition('(')
params = dict(param.split('=') for param in param_str.split(', '))
except BaseException:
family, params = '', {}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'priority': 0,
}
else:
serialized[task_id] = self._serialize_task(task_id)
for dep in task.deps:
self._recurse_deps(dep, serialized)
def dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._recurse_deps(task_id, serialized)
return serialized
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
filter_func = lambda _: True
else:
terms = search.split()
filter_func = lambda t: all(term in t.id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def inverse_dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._traverse_inverse_deps(task_id, serialized)
return serialized
def _traverse_inverse_deps(self, task_id, serialized):
stack = [task_id]
serialized[task_id] = self._serialize_task(task_id)
while len(stack) > 0:
curr_id = stack.pop()
for task in self._state.get_active_tasks():
if curr_id in task.deps:
serialized[curr_id]["deps"].append(task.id)
if task.id not in serialized:
serialized[task.id] = self._serialize_task(task.id)
serialized[task.id]["deps"] = []
stack.append(task.id)
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str:
:return:
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
return {"taskId": task_id, "error": self._state.get_task(task_id).expl}
else:
return {"taskId": task_id, "error": ""}
def _update_task_history(self, task_id, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task_id, successful)
elif status == PENDING:
self._task_history.task_scheduled(task_id)
elif status == RUNNING:
self._task_history.task_started(task_id, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
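# Illustrative sketch (not part of the original module): a minimal add_task/get_work round
# trip against an in-process scheduler using default configuration. The worker and task ids
# are hypothetical.
def _example_add_task_get_work():
    sched = CentralPlannerScheduler()
    sched.add_task(worker='worker-1', task_id='ExampleTask()', status=PENDING)
    reply = sched.get_work(worker='worker-1')
    return reply['task_id']  # -> 'ExampleTask()' once the task has been handed out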
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class ProfilesOperations(object):
"""ProfilesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. Current version is 2017-04-02. Constant value: "2017-04-02".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-04-02"
self.config = config
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all of the CDN profiles within an Azure subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Profile
:rtype:
~azure.mgmt.cdn.models.ProfilePaged[~azure.mgmt.cdn.models.Profile]
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Cdn/profiles'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.ProfilePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ProfilePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all of the CDN profiles within a resource group.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Profile
:rtype:
~azure.mgmt.cdn.models.ProfilePaged[~azure.mgmt.cdn.models.Profile]
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.ProfilePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ProfilePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get(
self, resource_group_name, profile_name, custom_headers=None, raw=False, **operation_config):
"""Gets a CDN profile with the specified profile name under the specified
subscription and resource group.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within
the resource group.
:type profile_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Profile or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.cdn.models.Profile or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Profile', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, profile_name, profile, custom_headers=None, raw=False, **operation_config):
"""Creates a new CDN profile with a profile name under the specified
subscription and resource group.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within
the resource group.
:type profile_name: str
:param profile: Profile properties needed to create a new profile.
:type profile: ~azure.mgmt.cdn.models.Profile
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Profile or
ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.cdn.models.Profile]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(profile, 'Profile')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Profile', response)
if response.status_code == 201:
deserialized = self._deserialize('Profile', response)
if response.status_code == 202:
deserialized = self._deserialize('Profile', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def update(
self, resource_group_name, profile_name, tags, custom_headers=None, raw=False, **operation_config):
"""Updates an existing CDN profile with the specified profile name under
the specified subscription and resource group.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within
the resource group.
:type profile_name: str
:param tags: Profile tags
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Profile or
ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.cdn.models.Profile]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
profile_update_parameters = models.ProfileUpdateParameters(tags=tags)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(profile_update_parameters, 'ProfileUpdateParameters')
# Construct and send request
def long_running_send():
request = self._client.patch(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Profile', response)
if response.status_code == 202:
deserialized = self._deserialize('Profile', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, profile_name, custom_headers=None, raw=False, **operation_config):
"""Deletes an existing CDN profile with the specified parameters. Deleting
a profile will result in the deletion of all of the sub-resources
including endpoints, origins and custom domains.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within
the resource group.
:type profile_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def generate_sso_uri(
self, resource_group_name, profile_name, custom_headers=None, raw=False, **operation_config):
"""Generates a dynamic SSO URI used to sign in to the CDN supplemental
portal. Supplemental portal is used to configure advanced feature
capabilities that are not yet available in the Azure portal, such as
core reports in a standard profile; rules engine, advanced HTTP
reports, and real-time stats and alerts in a premium profile. The SSO
URI changes approximately every 10 minutes.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within
the resource group.
:type profile_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SsoUri or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.cdn.models.SsoUri or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/generateSsoUri'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SsoUri', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_supported_optimization_types(
self, resource_group_name, profile_name, custom_headers=None, raw=False, **operation_config):
"""Gets the supported optimization types for the current profile. A user
can create an endpoint with an optimization type from the listed
values.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within
the resource group.
:type profile_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SupportedOptimizationTypesListResult or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.cdn.models.SupportedOptimizationTypesListResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/getSupportedOptimizationTypes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SupportedOptimizationTypesListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_resource_usage(
self, resource_group_name, profile_name, custom_headers=None, raw=False, **operation_config):
"""Checks the quota and actual usage of endpoints under the given CDN
profile.
:param resource_group_name: Name of the Resource group within the
Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within
the resource group.
:type profile_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ResourceUsage
:rtype:
~azure.mgmt.cdn.models.ResourceUsagePaged[~azure.mgmt.cdn.models.ResourceUsage]
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/checkResourceUsage'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.ResourceUsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ResourceUsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
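# Hedged usage sketch, not part of the generated SDK: how the operations above
# are typically driven. It assumes an instance of this operations class is
# available (e.g. as `client.profiles` on the CDN management client); the
# resource group, profile name and tag values are placeholders.
def _example_profiles_usage(profiles_operations):
    # update() returns an AzureOperationPoller; result() blocks until the
    # PATCH completes and yields the deserialized Profile.
    poller = profiles_operations.update(
        'example-rg', 'example-profile', tags={'env': 'test'})
    profile = poller.result()

    # generate_sso_uri() is a plain synchronous call returning an SsoUri model.
    sso = profiles_operations.generate_sso_uri('example-rg', 'example-profile')

    # list_resource_usage() returns a paged iterator that follows next links
    # transparently.
    usage = list(profiles_operations.list_resource_usage(
        'example-rg', 'example-profile'))
    return profile, sso, usage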
|
|
# -*- coding: utf-8 -*-
import datetime
import os
import random
import subprocess
import time
import traceback
from urllib.parse import urlparse
import github3
import unidiff
from ..interfaces.base import InterfaceBase
from ..util import git, system
class GitHubInterface(InterfaceBase):
def __init__(
self,
owner,
repo,
pr=None,
branch=None,
token=None,
url=None,
commit=None,
ignore_paths=None,
prefix=None,
autofix=False,
set_status=False,
):
"""
GitHubInterface lets us post messages to GitHub.
owner and repo are the repository owner/organization and repo name respectively.
pr is the ID number of the pull request. branch is the branch name. either pr OR branch
must be populated.
token is your GitHub API token.
url is the base URL of your GitHub instance, such as https://github.com
commit is the commit hash we're running against
ignore_paths are paths to ignore comments from
"""
self.start = datetime.datetime.now()
self.github = None
self.stopped_early = False
self.autofixed = False
self.prefix = prefix
self.autofix = autofix
self.ignore_paths = set(ignore_paths or [])
self.token = token
self.set_status = set_status
url = url or "https://github.com"
print("url={}".format(url))
self.netloc = urlparse(url).netloc.strip()
print("urlparse={}".format(urlparse(url)))
if not url or url == "https://github.com":
self.github = github3.GitHub(token=token)
else:
self.github = github3.GitHubEnterprise(url, token=token)
try:
self.github_user = self.github.me().as_dict()
except (TypeError, AttributeError):
# github.py == 0.9.6
self.github_user = self.github.user().to_json()
self.username = ""
self.email = ""
try:
self.username = self.github_user["login"]
for email in self.github.emails():
try:
email_obj = email.as_dict()
except (TypeError, AttributeError):
# github.py == 0.9.6
email_obj = email.to_json()
if email_obj["primary"]:
self.email = email_obj["email"]
except Exception: # NOQA
traceback.print_exc()
self.owner = owner
self.repo = repo
self.github_repo = self.github.repository(self.owner, self.repo)
print("Branch: {0}".format(branch))
self.branch = branch
self.pull_request_number = None
if branch and not pr:
for github_repo in [self.github_repo, self.github_repo.parent]:
if pr:
break
if not github_repo:
continue
try:
# github.py == 0.9.6
pulls = github_repo.iter_pulls()
except AttributeError:
pulls = github_repo.pull_requests()
for pull_request in pulls:
print(
"Branch: {} - Pull Request Head Ref: {}".format(
branch, pull_request.head.ref
)
)
if pull_request.head.ref == branch:
pr = pull_request.number
self.github_repo = github_repo
break
self.owner = self.github_repo.owner
self.repo = self.github_repo.name
# TODO: support non-PR runs
try:
pr = int(pr)
except (ValueError, TypeError):
print("{0} is not a valid pull request ID".format(pr))
self.github = None
return
print("PR ID: {0}".format(pr))
self.pull_request_number = pr
self.pull_request = self.github.pull_request(self.owner, self.repo, pr)
self.target_sha = self.pull_request.base.sha
self.target_branch = self.pull_request.base.ref
self.sha = self.pull_request.head.sha
self.branch = self.pull_request.head.ref
try:
# github.py == 0.9.6
try:
git.fetch(self.pull_request.base.to_json()["repo"]["clone_url"])
except subprocess.CalledProcessError:
git.fetch(self.pull_request.base.to_json()["repo"]["ssh_url"])
except AttributeError:
# latest github.py
try:
git.fetch(self.pull_request.base.repository.as_dict()["clone_url"])
except subprocess.CalledProcessError:
git.fetch(self.pull_request.base.repository.as_dict()["ssh_url"])
print("Target SHA: {0}".format(self.target_sha))
print("Target Branch: {0}".format(self.target_branch))
print("Head SHA: {0}".format(self.sha))
print("Head Branch: {0}".format(self.branch))
self.last_sha = commit or git.current_sha()
print("Last SHA: {0}".format(self.last_sha))
self.diff = git.diff(self.target_sha, self.last_sha)
self.patch = unidiff.PatchSet(self.diff.split("\n"))
self.review_comments = list(self.pull_request.review_comments())
self.last_update = time.time()
self.messages_in_files = dict()
self.filenames = set()
try:
try:
pr_files = self.pull_request.files()
except AttributeError:
# github.py == 0.9.6
pr_files = self.pull_request.iter_files()
self.filenames = set(
os.path.relpath(pr_file.filename).replace("\\", "/").strip()
for pr_file in pr_files
)
print("Files in PR: {}".format(self.filenames))
except Exception:
traceback.print_exc()
def is_valid(self):
return self.pull_request_number is not None
@staticmethod
def pr_commits(pull_request, number=-1):
# github3 has naming/compatibility issues
try:
return [c for c in pull_request.commits(number=number)]
except (AttributeError, TypeError):
return [c for c in pull_request.iter_commits(number=number)]
@staticmethod
def repo_commits(repo, sha, number):
# github3 has naming/compatibility issues
try:
return [c for c in repo.commits(sha=sha, number=number)]
except (AttributeError, TypeError):
return [c for c in repo.iter_commits(sha=sha, number=number)]
def start_review(self):
"""Mark our review as started."""
if self.set_status:
self.github_repo.create_status(
state="pending",
description="Static analysis in progress.",
context="inline-plz",
sha=self.last_sha,
)
def finish_review(self, success=True, error=False):
"""Mark our review as finished."""
if self.set_status:
if error:
self.github_repo.create_status(
state="error",
description="Static analysis error! inline-plz failed to run.",
context="inline-plz",
sha=self.last_sha,
)
elif success:
self.github_repo.create_status(
state="success",
description="Static analysis complete! No errors found in your PR.",
context="inline-plz",
sha=self.last_sha,
)
else:
self.github_repo.create_status(
state="failure",
description="Static analysis complete! Found errors in your PR.",
context="inline-plz",
sha=self.last_sha,
)
def out_of_date(self):
"""Check if our local latest sha matches the remote latest sha"""
try:
latest_remote_sha = self.pr_commits(self.pull_request.refresh(True))[-1].sha
print("Latest remote sha: {}".format(latest_remote_sha))
try:
print("Ratelimit remaining: {}".format(self.github.ratelimit_remaining))
except Exception:
print("Failed to look up ratelimit remaining")
return self.last_sha != latest_remote_sha
except IndexError:
return False
def post_messages(self, messages, max_comments):
if not self.github:
print("Github connection is invalid.")
return
if (
self.autofix
and git.files_changed(self.filenames)
and not self.out_of_date()
):
print("Files changed: attempting to push fixes")
print(git.files_changed(self.filenames))
if self.username:
git.command("config", "--global", "user.name", self.username)
if self.email:
git.command("config", "--global", "user.email", self.email)
git.command("checkout", self.branch)
files_added = 0
for filename in self.filenames:
if os.path.getsize(filename) > 10:
print("Adding {}".format(filename))
git.add(filename)
files_added += 1
if files_added:
git.commit("Autofix by inline-plz")
print("Git pushing to {}".format(self.branch))
try:
git.push(self.branch)
except subprocess.CalledProcessError:
git.set_remote(
"https://{}@{}/{}/{}.git".format(
self.token, self.netloc, self.owner, self.repo
)
)
git.push(self.branch)
print("Successfully pushed - skipping message posting")
self.autofixed = True
return 1
valid_errors = 0
messages_posted = 0
paths = dict()
# randomize message order to more evenly distribute messages across different files
messages = list(messages)
random.shuffle(messages)
if self.out_of_date():
print("This run is out of date because the PR has been updated.")
messages = []
self.stopped_early = True
print("Considering {} messages for posting.".format(len(messages)))
for msg in messages:
if not msg.comments:
continue
msg_position = self.position(msg)
if not msg_position:
continue
if (
msg.path not in self.filenames
or msg.path.split("/")[0] in self.ignore_paths
):
continue
paths.setdefault(msg.path, 0)
valid_errors += 1
self.messages_in_files.setdefault(msg.path, []).append((msg, msg_position))
if self.is_duplicate(msg, msg_position):
print("Dupe comment at {}:{}".format(msg.path, msg_position))
msg.status = "DUPLICATE"
continue
msg_at_position = self.message_at_position(msg, msg_position)
if msg_at_position:
try:
print(
"Trying to edit comment at {}:{}".format(msg.path, msg_position)
)
msg_at_position.edit(self.format_message(msg))
print("Comment edited successfully: {0}".format(msg))
msg.status = "EDITED"
paths[msg.path] += 1
messages_posted += 1
time.sleep(0.1)
continue
except github3.GitHubError:
pass
try:
print("Trying to post comment at {}:{}".format(msg.path, msg_position))
self.pull_request.create_review_comment(
self.format_message(msg), self.last_sha, msg.path, msg_position
)
msg.status = "POSTED"
except github3.GitHubError:
# workaround for our diff not entirely matching up with github's diff
# we can end up with a mismatched diff if the branch is old
valid_errors -= 1
continue
print("Comment posted successfully: {0}".format(msg))
paths[msg.path] += 1
messages_posted += 1
time.sleep(0.1)
if (
(max_comments and messages_posted > max_comments)
or system.should_stop()
or self.out_of_date()
):
print("Stopping early.")
self.stopped_early = True
break
print("\n{} messages posted to Github.".format(messages_posted))
print("\n{} valid errors.".format(valid_errors))
return valid_errors
def is_duplicate(self, message, position):
msg = self.message_at_position(message, position)
if msg and msg.body.strip() == self.format_message(message).strip():
return msg
return None
def message_at_position(self, message, position):
# update our list of review comments about once a second
# to reduce dupes without hitting the API too hard
if time.time() - self.last_update > 1:
self.review_comments = list(self.pull_request.review_comments())
self.last_update = time.time()
for comment in self.review_comments:
if comment.original_position == position and comment.path == message.path:
if not comment.body.startswith(self.prefix):
continue
return comment
return None
def format_message(self, message):
if not message.comments:
return ""
if len(message.comments) > 1 or any("\n" in c for c in message.comments):
return (
"{0}: \n```\n".format(self.prefix)
+ "\n".join(sorted(list(message.comments)))
+ "\n```"
)
return "{0}: `{1}`".format(self.prefix, list(message.comments)[0].strip())
def clear_outdated_messages(self):
obsolete_message = (
"*This message is obsolete but is preserved because it has replies.*"
)
if self.stopped_early or self.autofixed:
return
comments_to_delete = []
in_reply_to = set()
for comment in self.pull_request.review_comments():
try:
# github3 0.9.6 compat
try:
in_reply_to.add(comment.to_json().get("in_reply_to_id"))
except AttributeError:
in_reply_to.add(comment.as_dict().get("in_reply_to_id"))
should_delete = True
if not comment.body.startswith(self.prefix):
continue
for msg, msg_position in self.messages_in_files.get(comment.path, []):
if (
self.format_message(msg).strip() == comment.body.strip()
and msg_position == comment.position
):
should_delete = False
if should_delete:
comments_to_delete.append(comment)
except Exception:
traceback.print_exc()
for comment in comments_to_delete:
try:
if comment.id not in in_reply_to:
comment.delete()
print("Deleted comment: {}".format(comment.body))
elif obsolete_message not in comment.body:
comment.edit(comment.body + "\n" + obsolete_message)
print("Edited obsolete comment: {}".format(comment.body))
except Exception:
traceback.print_exc()
def position(self, message):
"""Calculate position within the PR, which is not the line number"""
if not message.line_number:
message.line_number = 1
for patched_file in self.patch:
# strip the "b/" prefix; str.lstrip("b/") would remove any run of leading
# 'b' and '/' characters, not just the prefix
target = patched_file.target_file
if target.startswith("b/"):
    target = target[2:]
if target == message.path:
offset = 1
for hunk in patched_file:
for position, hunk_line in enumerate(hunk):
if hunk_line.target_line_no == message.line_number:
if not hunk_line.is_added:
# if the line isn't an added line, we don't want to comment on it
return
return position + offset
offset += len(hunk) + 1
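# Hedged usage sketch, not part of inline-plz itself: the intended call
# sequence for this interface from a CI job. All argument values are
# placeholders, and `messages` stands for whatever linter message objects the
# surrounding tooling produces.
def _example_post_review(owner, repo, pr_number, token, messages):
    interface = GitHubInterface(
        owner=owner,
        repo=repo,
        pr=pr_number,
        token=token,
        prefix="inline-plz",
        set_status=True,
    )
    if not interface.is_valid():
        return None
    interface.start_review()
    try:
        posted = interface.post_messages(messages, max_comments=25)
        interface.clear_outdated_messages()
        interface.finish_review(success=(posted == 0))
        return posted
    except Exception:
        interface.finish_review(error=True)
        raise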
|
|
# 2005/08/28
# v1.4.0
# listquote.py
# Lists 'n' Quotes
# Handling lists and quoted strings
# Can be used for parsing/creating lists - or lines in a CSV file
# And also quoting or unquoting elements.
# Homepage : http://www.voidspace.org.uk/python/modules.shtml
# Copyright Michael Foord, 2004 & 2005.
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# For information about bugfixes, updates and support, please join the Pythonutils mailing list.
# http://groups.google.com/group/pythonutils/
# Comments, suggestions and bug reports welcome.
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# E-mail fuzzyman@voidspace.org.uk
"""
Having written modules to handle turning a string representation of a list back
into a list (including nested lists) and also a very simple CSV parser, I
realised I needed a more solid set of functions for handling lists (comma
delimited lines) and quoting/unquoting elements of lists.
The test stuff provides useful examples of how the functions work.
"""
import sys
if sys.version_info[0] < 3:
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
else:
basestring = str
import re
inquotes = re.compile(r'''\s*(".*?"|'.*?')(.*)''')
badchars = re.compile(r'''^[^'," \[\]\(\)#]+$''')
##commented_line = re.compile(r'''\s*([^#]*)\s*(#.*)''')
paramfinder = re.compile(r'''(?:'.*?')|(?:".*?")|(?:[^'",\s][^,]*)''')
unquoted = re.compile(r'''
([^\#,"'\(\)\[\]][^\#,\]\)]*) # value
\s* # whitespace - XXX not caught
([\#,\)\]].*)? # rest of the line
$''', re.VERBOSE)
__all__ = [
'elem_quote',
'unquote',
'ListQuoteError',
'QuoteError',
'UnQuoteError',
'BadLineError',
'CommentError',
'quote_escape',
'quote_unescape',
'simplelist',
'LineParser',
'lineparse',
'csvread',
'csvwrite',
'list_stringify',
'makelist'
]
class ListQuoteError(SyntaxError):
"""Base class for errors raised by the listquote module."""
class QuoteError(ListQuoteError):
"""This value can't be quoted."""
class UnQuoteError(ListQuoteError):
"""The value is badly quoted."""
class BadLineError(ListQuoteError):
"""A line is badly built."""
class CommentError(BadLineError):
"""A line contains a disallowed comment."""
class CSVError(ListQuoteError):
"""The CSV File contained errors."""
#################################################################
# functions for quoting and unquoting
def elem_quote(member, nonquote=True, stringify=False, encoding=None):
"""
Simple method to add the most appropriate quote to an element - either single
quotes or double quotes.
If member contains ``\n`` a ``QuoteError`` is raised - multiline values
can't be quoted by elem_quote.
If ``nonquote`` is set to ``True`` (the default), then if member contains none
of ``'," []()#;`` then it isn't quoted at all.
If member contains both single quotes *and* double quotes then all double
quotes (``"``) will be escaped as ``&mjf-quot;`` and member will then be quoted
with double quotes.
If ``stringify`` is set to ``True`` (the default is ``False``) then non string
(unicode or byte-string) values will be first converted to strings using the
``str`` function. Otherwise elem_quote raises a ``TypeError``.
If ``encoding`` is not ``None`` and member is a byte string, then it will be
decoded into unicode using this encoding.
>>> elem_quote('hello')
'hello'
>>> elem_quote('hello', nonquote=False)
'"hello"'
>>> elem_quote('"hello"')
'\\'"hello"\\''
>>> elem_quote(3)
Traceback (most recent call last):
TypeError: Can only quote strings. "3"
>>> elem_quote(3, stringify=True)
'3'
>>> elem_quote('hello', encoding='ascii')
u'hello'
>>> elem_quote('\\n')
Traceback (most recent call last):
QuoteError: Multiline values can't be quoted.
"
"
"""
if not isinstance(member, basestring):
if stringify:
member = str(member)
else:
# FIXME: is this the appropriate error message ?
raise TypeError('Can only quote strings. "%s"' % str(member))
if encoding and isinstance(member, str):
# from string to unicode
member = unicode(member, encoding)
if '\n' in member:
raise QuoteError('Multiline values can\'t be quoted.\n"%s"' % str(member))
#
if nonquote and badchars.match(member) is not None:
return member
# this ordering of tests determines which quote character will be used in
# preference - here we have \" first...
elif member.find('"') == -1:
return '"%s"' % member
# but we will use either... which may not suit some people
elif member.find("'") == -1:
return "'%s'" % member
else:
raise QuoteError('Value can\'t be quoted : "%s"' % member)
def unquote(inline, fullquote=True, retain=False):
"""
Unquote a value.
If the value isn't quoted it returns the value.
If the value is badly quoted it raises ``UnQuoteError``.
If retain is ``True`` (default is ``False``) then the quotes are left
around the value (but leading or trailing whitespace will have been
removed).
If fullquote is ``False`` (default is ``True``) then unquote will only
unquote the first part of the ``inline``. If there is anything after the
quoted element, this will be returned as well (instead of raising an
error).
In this case the return value is ``(value, rest)``.
>>> unquote('hello')
'hello'
>>> unquote('"hello"')
'hello'
>>> unquote('"hello')
Traceback (most recent call last):
UnQuoteError: Value is badly quoted: ""hello"
>>> unquote('"hello" fish')
Traceback (most recent call last):
UnQuoteError: Value is badly quoted: ""hello" fish"
>>> unquote("'hello'", retain=True)
"'hello'"
>>> unquote('"hello" fish', fullquote=False)
('hello', ' fish')
"""
mat = inquotes.match(inline)
if mat is None:
if inline.strip()[0] not in '\'\"': # not quoted
return inline
else:
# badly quoted
raise UnQuoteError('Value is badly quoted: "%s"' % inline)
quoted, rest = mat.groups()
if fullquote and rest.strip():
# badly quoted
raise UnQuoteError('Value is badly quoted: "%s"' % inline)
if not retain:
quoted = quoted[1:-1]
if not fullquote:
return quoted, rest
else:
return quoted
def quote_escape(value, lf='&mjf-lf;', quot='&mjf-quot;'):
"""
Escape a string so that it can safely be quoted. You should use this if the
value to be quoted *may* contain line-feeds or both single quotes and double
quotes.
If the value contains ``\n`` then it will be escaped using ``lf``. By
default this is ``&mjf-lf;``.
If the value contains single quotes *and* double quotes, then all double
quotes will be escaped using ``quot``. By default this is ``&mjf-quot;``.
>>> quote_escape('hello')
'hello'
>>> quote_escape('hello\\n')
'hello&mjf-lf;'
>>> quote_escape('hello"')
'hello"'
>>> quote_escape('hello"\\'')
"hello&mjf-quot;'"
>>> quote_escape('hello"\\'\\n', '&fish;', '&wobble;')
"hello&wobble;'&fish;"
"""
if '\n' in value:
value = value.replace('\n', lf)
if '\'' in value and '\"' in value:
value = value.replace('"', quot)
return value
def quote_unescape(value, lf='&mjf-lf;', quot='&mjf-quot;'):
"""
Unescape a string escaped by ``quote_escape``.
If it was escaped using anything other than the defaults for ``lf`` and
``quot`` you must pass them to this function.
>>> quote_unescape("hello&wobble;'&fish;", '&fish;', '&wobble;')
'hello"\\'\\n'
>>> quote_unescape('hello')
'hello'
>>> quote_unescape('hello&mjf-lf;')
'hello\\n'
>>> quote_unescape("'hello'")
"'hello'"
>>> quote_unescape('hello"')
'hello"'
>>> quote_unescape("hello&mjf-quot;'")
'hello"\\''
>>> quote_unescape("hello&wobble;'&fish;", '&fish;', '&wobble;')
'hello"\\'\\n'
"""
return value.replace(lf, '\n').replace(quot, '"')
def simplelist(inline):
"""
Parse a string to a list.
A simple regex that extracts quoted items from a list.
It retains quotes around elements. (So unquote each element)
>>> simplelist('''hello, goodbye, 'title', "name", "I can't"''')
['hello', 'goodbye', "'title'", '"name"', '"I can\\'t"']
FIXME: This doesn't work fully (allows some badly formed lists):
e.g.
>>> simplelist('hello, fish, "wobble" bottom hooray')
['hello', 'fish', '"wobble"', 'bottom hooray']
"""
return paramfinder.findall(inline)
##############################################
# LineParser - a multi purpose line parser
# handles lines with comma separated values on them, optionally followed by a comment
# correctly handles quoting
# *and* can handle nested lists - marked between '[...]' or '(...)'
# See the docstring for how this works
# by default it returns a (list, comment) tuple !
# There are several keyword arguments that control how LineParser works.
class LineParser(object):
"""An object to parse nested lists from strings."""
liststart = { '[' : ']', '(' : ')' }
quotes = ['\'', '"']
def __init__(self, options=None, **keywargs):
"""Initialise the LineParser."""
self.reset(options, **keywargs)
def reset(self, options=None, **keywargs):
"""Reset the parser with the specified options."""
if options is None:
options = {}
options.update(keywargs)
#
defaults = {
'recursive': True,
'comment': True,
'retain': False,
'force_list': False,
'csv': False
}
defaults.update(options)
if defaults['csv']:
defaults.update({
'recursive': False,
'force_list': True,
'comment': False,
})
# check all the options are valid
for entry in defaults.keys():
if entry not in ['comment',
'retain',
'csv',
'recursive',
'force_list']:
raise TypeError("'%s' is an invalid keyword argument for "
"this function" % entry)
#
self.recursive = defaults['recursive']
self.comment = defaults['comment']
self.retain = defaults['retain']
self.force_list = defaults['force_list']
def feed(self, inline, endchar=None):
"""
Parse a single line (or fragment).
Uses the options set in the parser object.
Can parse lists - including nested lists. (If ``recursive`` is
``False`` then nested lists will cause a ``BadLineError``).
Return value depends on options.
If ``comment`` is ``False`` it returns ``outvalue``
If ``comment`` is ``True`` it returns ``(outvalue, comment)``. (Even if
comment is just ``''``).
If ``force_list`` is ``False`` then ``outvalue`` may be a list or a
single item.
If ``force_list`` is ``True`` then ``outvalue`` will always be a list -
even if it has just one member.
List syntax :
* Comma separated lines ``a, b, c, d``
* Lists can optionally be between square or ordinary brackets
- ``[a, b, c, d]``
- ``(a, b, c, d)``
* Nested lists *must* be between brackets - ``a, [a, b, c, d], c``
* A single element list can be shown by a trailing quote - ``a,``
* An empty list is shown by ``()`` or ``[]``
Elements can be quoted with single or double quotes (but can't contain
both).
The line can optionally end with a comment (preceded by a '#').
This depends on the ``comment`` attribute.
If the line is badly built then this method will raise one of : ::
CommentError, BadLineError, UnQuoteError
Using the ``csv`` option is the same as setting : ::
'recursive': False
'force_list': True
'comment': False
"""
# preserve the original line
# for error messages
if endchar is None:
self.origline = inline
inline = inline.lstrip()
#
outlist = []
comma_needed = False
found_comma = False
while inline:
# NOTE: this sort of operation would be quicker
# with lists - but then can't use regexes
thischar = inline[0]
if thischar == '#':
# reached a comment
# end of the line...
break
#
if thischar == endchar:
return outlist, inline[1:]
#
if comma_needed:
if thischar == ',':
inline = inline[1:].lstrip()
comma_needed = False
found_comma = True
continue
raise BadLineError('Line is badly built :\n%s' % self.origline)
#
try:
# the character that marks the end of the list
listend = self.liststart[thischar]
except KeyError:
pass
else:
if not self.recursive and endchar is not None:
raise BadLineError('Line is badly built :\n%s' % self.origline)
newlist, inline = self.feed(inline[1:], endchar=listend)
outlist.append(newlist)
inline = inline.lstrip()
comma_needed = True
continue
#
if thischar in self.quotes:
# this might raise an error
# FIXME: trap the error and raise a more appropriate one ?
element, inline = unquote(inline, fullquote=False,
retain=self.retain)
inline = inline.lstrip()
outlist.append(element)
comma_needed = True
continue
#
# must be an unquoted element
mat = unquoted.match(inline)
if mat is not None:
# FIXME: if the regex was better we wouldn't need an rstrip
element = mat.group(1).rstrip()
# group 2 will be ``None`` if we reach the end of the line
inline = mat.group(2) or ''
outlist.append(element)
comma_needed = True
continue
# or it's a badly built line
raise BadLineError('Line is badly built :\n%s' % self.origline)
#
# if we've been called recursively
# we shouldn't have got this far
if endchar is not None:
raise BadLineError('Line is badly built :\n%s' % self.origline)
#
if not found_comma:
# if we didn't find a comma
# the value could be a nested list
if outlist:
outlist = outlist[0]
else:
outlist = ''
if self.force_list and not isinstance(outlist, list):
if outlist:
outlist = [outlist]
else:
outlist = []
if not self.comment:
if inline:
raise CommentError('Comment not allowed :\n%s' % self.origline)
return outlist
return outlist, inline
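# Hedged illustration, not part of the original module: with the ``csv``
# option, feed() returns a plain list with comments disallowed and nesting
# disabled, which is exactly what csvread (below) relies on. The input line
# is made up.
def _example_lineparser_csv():
    parser = LineParser(csv=True)
    return parser.feed('''"object 1", 'object 2', object 3''')
    # -> ['object 1', 'object 2', 'object 3']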
def lineparse(inline, options=None, **keywargs):
"""
A compatibility function that mimics the old lineparse.
Also more convenient for single line use.
Note: It still uses the new ``LineParser`` - and so takes the same
keyword arguments as that.
>>> lineparse('''"hello", 'goodbye', "I can't do that", 'You "can" !' # a comment''')
(['hello', 'goodbye', "I can't do that", 'You "can" !'], '# a comment')
>>> lineparse('''"hello", 'goodbye', "I can't do that", 'You "can" !' # a comment''', comment=False)
Traceback (most recent call last):
CommentError: Comment not allowed :
"hello", 'goodbye', "I can't do that", 'You "can" !' # a comment
>>> lineparse('''"hello", 'goodbye', "I can't do that", 'You "can" !' # a comment''', recursive=False)
(['hello', 'goodbye', "I can't do that", 'You "can" !'], '# a comment')
>>> lineparse('''"hello", 'goodbye', "I can't do that", 'You "can" !' # a comment''', csv=True)
Traceback (most recent call last):
CommentError: Comment not allowed :
"hello", 'goodbye', "I can't do that", 'You "can" !' # a comment
>>> lineparse('''"hello", 'goodbye', "I can't do that", 'You "can" !' ''', comment=False)
['hello', 'goodbye', "I can't do that", 'You "can" !']
>>> lineparse('')
('', '')
>>> lineparse('', force_list=True)
([], '')
>>> lineparse('[]')
([], '')
>>> lineparse('()')
([], '')
>>> lineparse('()', force_list=True)
([], '')
>>> lineparse('1,')
(['1'], '')
>>> lineparse('"Yo"')
('Yo', '')
>>> lineparse('"Yo"', force_list=True)
(['Yo'], '')
>>> lineparse('''h, i, j, (h, i, ['hello', "f"], [], ([]),), k''')
(['h', 'i', 'j', ['h', 'i', ['hello', 'f'], [], [[]]], 'k'], '')
>>> lineparse('''h, i, j, (h, i, ['hello', "f"], [], ([]),), k''', recursive=False)
Traceback (most recent call last):
BadLineError: Line is badly built :
h, i, j, (h, i, ['hello', "f"], [], ([]),), k
>>> lineparse('fish#dog')
('fish', '#dog')
>>> lineparse('"fish"#dog')
('fish', '#dog')
>>> lineparse('(((())))')
([[[[]]]], '')
>>> lineparse('((((,))))')
Traceback (most recent call last):
BadLineError: Line is badly built :
((((,))))
>>> lineparse('hi, ()')
(['hi', []], '')
>>> lineparse('"hello", "",')
(['hello', ''], '')
>>> lineparse('"hello", ,')
Traceback (most recent call last):
BadLineError: Line is badly built :
"hello", ,
>>> lineparse('"hello", ["hi", ""], ""')
(['hello', ['hi', ''], ''], '')
>>> lineparse('''"member 1", "member 2", ["nest 1", ("nest 2", 'nest 2b', ['nest 3', 'value'], nest 2c), nest1b]''')
(['member 1', 'member 2', ['nest 1', ['nest 2', 'nest 2b', ['nest 3', 'value'], 'nest 2c'], 'nest1b']], '')
>>> lineparse('''"member 1", "member 2", ["nest 1", ("nest 2", 'nest 2b', ['nest 3', 'value'], nest 2c), nest1b]]''')
Traceback (most recent call last):
BadLineError: Line is badly built :
"member 1", "member 2", ["nest 1", ("nest 2", 'nest 2b', ['nest 3', 'value'], nest 2c), nest1b]]
"""
p = LineParser(options, **keywargs)
return p.feed(inline)
############################################################################
# a couple of functions to help build lists
def list_stringify(inlist):
"""
Recursively rebuilds a list - making sure all the members are strings.
Can take any iterable or a sequence as the argument and always
returns a list.
Useful before writing out lists.
Used by makelist if stringify is set.
Uses the ``str`` function for stringification.
Every element will be a string or a unicode object.
Doesn't handle decoding strings into unicode objects (or vice-versa).
>>> list_stringify([2, 2, 2, 2, (3, 3, 2.9)])
['2', '2', '2', '2', ['3', '3', '2.9']]
>>> list_stringify(None)
Traceback (most recent call last):
TypeError: iteration over non-sequence
>>> list_stringify([])
[]
FIXME: can receive any iterable - e.g. a sequence
>>> list_stringify('')
[]
>>> list_stringify('Hello There')
['H', 'e', 'l', 'l', 'o', ' ', 'T', 'h', 'e', 'r', 'e']
"""
outlist = []
for item in inlist:
if not isinstance(item, (tuple, list)):
if not isinstance(item, basestring):
item = str(item)
else:
item = list_stringify(item)
outlist.append(item)
return outlist
def makelist(inlist, listchar='', stringify=False, escape=False, encoding=None):
"""
Given a list - turn it into a string that represents that list. (Suitable
for parsing by ``LineParser``).
listchar should be ``'['``, ``'('`` or ``''``. This is the type of bracket
used to enclose the list. (``''`` meaning no bracket of course).
If you have nested lists and listchar is ``''``, makelist will
automatically use ``'['`` for the nested lists.
If stringify is ``True`` (default is ``False``) makelist will stringify the
inlist first (using ``list_stringify``).
If ``escape`` is ``True`` (default is ``False``) makelist will call
``quote_escape`` on each element before passing them to ``elem_quote`` to
be quoted.
If encoding keyword is not ``None``, all strings are decoded to unicode
with the specified encoding. Each item will then be a unicode object
instead of a string.
>>> makelist([])
'[]'
>>> makelist(['a', 'b', 'I can\\'t do it', 'Yes you "can" !'])
'a, b, "I can\\'t do it", \\'Yes you "can" !\\''
>>> makelist([3, 4, 5, [6, 7, 8]], stringify=True)
'3, 4, 5, [6, 7, 8]'
>>> makelist([3, 4, 5, [6, 7, 8]])
Traceback (most recent call last):
TypeError: Can only quote strings. "3"
>>> makelist(['a', 'b', 'c', ('d', 'e'), ('f', 'g')], listchar='(')
'(a, b, c, (d, e), (f, g))'
>>> makelist(['hi\\n', 'Quote "heck\\''], escape=True)
'hi&mjf-lf;, "Quote &mjf-quot;heck\\'"'
>>> makelist(['a', 'b', 'c', ('d', 'e'), ('f', 'g')], encoding='UTF8')
u'a, b, c, [d, e], [f, g]'
"""
if stringify:
inlist = list_stringify(inlist)
listdict = {'[' : '[%s]', '(' : '(%s)', '' : '%s'}
outline = []
# this makes '[' the default for empty or single value lists
if len(inlist) < 2:
listchar = listchar or '['
for item in inlist:
if not isinstance(item, (list, tuple)):
if escape:
item = quote_escape(item)
outline.append(elem_quote(item, encoding=encoding))
else:
# recursive for nested lists
outline.append(makelist(item, listchar or '[',
stringify, escape, encoding))
return listdict[listchar] % (', '.join(outline))
############################################################################
# CSV functions
# csvread, csvwrite
def csvread(infile):
"""
Given an infile as an iterable, return the CSV as a list of lists.
infile can be an open file object or a list of lines.
If any of the lines are badly built then a ``CSVError`` will be raised.
This has a ``csv`` attribute - which is a reference to the parsed CSV.
Every line that couldn't be parsed will have ``[]`` for its entry.
The error *also* has an ``errors`` attribute. This is a list of all the
errors raised. Each error in this list has an ``index`` attribute, which is the
line number, and a ``line`` attribute - which is the actual line that
caused the error.
Example of usage :
.. raw:: html
{+coloring}
handle = open(filename)
# remove the trailing '\n' from each line
the_file = [line.rstrip('\n') for line in handle.readlines()]
csv = csvread(the_file)
{-coloring}
>>> a = '''"object 1", 'object 2', object 3
... test 1 , "test 2" ,'test 3'
... 'obj 1',obj 2,"obj 3"'''
>>> csvread(a.splitlines())
[['object 1', 'object 2', 'object 3'], ['test 1', 'test 2', 'test 3'], ['obj 1', 'obj 2', 'obj 3']]
>>> csvread(['object 1,'])
[['object 1']]
>>> try:
... csvread(['object 1, "hello', 'object 1, # a comment in a csv ?'])
... except CSVError, e:
... for entry in e.errors:
... print entry.index, entry
0 Value is badly quoted: ""hello"
1 Comment not allowed :
object 1, # a comment in a csv ?
"""
out_csv = []
errors = []
index = -1
p = LineParser(csv=True)
for line in infile:
index += 1
try:
values = p.feed(line)
except ListQuoteError as e:
values = []
e.line = line
e.index = index
errors.append(e)
#
out_csv.append(values)
#
if errors:
e = CSVError("Parsing CSV failed. See 'errors' attribute.")
e.csv = out_csv
e.errors = errors
raise e
return out_csv
def csvwrite(inlist, stringify=False):
"""
Given a list of lists it turns each entry into a line in a CSV.
(Given a list of lists it returns a list of strings).
The lines will *not* be ``\n`` terminated.
Set stringify to ``True`` (default is ``False``) to convert entries to
strings before creating the line.
If stringify is ``False`` then any non string value will raise a
``TypeError``.
Every member will be quoted using ``elem_quote``, but no escaping is done.
Example of usage :
.. raw:: html
{+coloring}
# escape each entry in each line (optional)
for index in range(len(the_list)):
the_list[index] = [quote_escape(val) for val in the_list[index]]
#
the_file = csvwrite(the_list)
# add a '\n' to each line - ready to write to file
the_file = [line + '\n' for line in the_file]
{-coloring}
>>> csvwrite([['object 1', 'object 2', 'object 3'], ['test 1', 'test 2', 'test 3'], ['obj 1', 'obj 2', 'obj 3']])
['"object 1", "object 2", "object 3"', '"test 1", "test 2", "test 3"', '"obj 1", "obj 2", "obj 3"']
>>> csvwrite([[3, 3, 3]])
Traceback (most recent call last):
TypeError: Can only quote strings. "3"
>>> csvwrite([[3, 3, 3]], True)
['3, 3, 3']
"""
out_list = []
for entry in inlist:
if stringify:
new_entry = []
for val in entry:
if not isinstance(val, basestring):
val = str(val)
new_entry.append(val)
entry = new_entry
this_line = ', '.join([elem_quote(val) for val in entry])
out_list.append(this_line)
return out_list
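# Hedged round-trip sketch, not part of the original module: write a list of
# lists out with csvwrite, then parse the resulting lines back with csvread.
# The data values are made up for illustration.
def _example_csv_roundtrip():
    data = [['object 1', "I can't"], ['a', 'b']]
    lines = csvwrite(data)           # ['"object 1", "I can\'t"', 'a, b']
    return csvread(lines) == data    # True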
############################################################################
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
"""
ISSUES/TODO
===========
Fix bug in simplelist
Triple quote multiline values ?
Doesn't allow Python style string escaping (but has '&mjf-quot;' and '&mjf-lf;').
Uses both \' and \" as quotes and sometimes doesn't quote at all - see
elem_quote - may not *always* be compatible with other programs.
Allow space separated lists ? e.g. 10 5 100 20
Lineparser could create tuples.
Allow ',' as an empty list ?
CHANGELOG
=========
2005/08/28 - Version 1.4.0
--------------------------
* Greater use of regular expressions for added speed
* Re-implemented ``lineparse`` as the ``LineParser`` object
* Added doctests
* Custom exceptions
* Changed the behaviour of ``csvread`` and ``csvwrite``
* Removed the CSV ``compare`` function and the ``uncomment`` function
* Only ``'#'`` allowed for comments
* ``elem_quote`` raises exceptions
* Changed behaviour of ``unquote``
* Added ``quote_escape`` and ``quote_unescape``
* Removed the ``uni_conv`` option in the CSV functions
.. note::
These changes are quite extensive. If any of them cause you problems then
let me know. I can provide a workaround in the next release.
2005/06/01 Version 1.3.0
Fixed bug in lineparse handling of empty list members.
Thanks to bug report and fix by Par Pandit <ppandit@yahoo.com>
The 'unquote' function is now regex based.
(bugfix it now doesn't return a tuple if fullquote is 0)
Added the simplelist regex/function.
elem_quote and uncomment use a regex for clarity and speed.
Added a bunch of asserts to the tests.
2005/03/07 Version 1.2.1
makelist improved - better handling of empty or single member lists
2005/02/23 Version 1.2.0
Added uncomment for ConfigObj 3.3.0
Optimised unquote - not a character by character search any more.
lineparse does full '&mjf..;' escape conversions - even when unquote isn't used
makelist and elem_quote take an 'encoding' keyword for string members to be used to decode strings to unicode
optimised makelist (including a minor bugfix)
Change to lineparse - it wouldn't allow '[' or '(' inside elements unless they were quoted.
2004/12/04 Version 1.1.2
Changed the license (*again* - now OSI compatible).
Empty values are now quoted by elem_quote.
30-08-04 Version 1.1.1
Removed the unicode hammer in csvread.
Improved docs.
16-08-04 Version 1.1.0
Added handling for non-string elements in elem_quote (optional).
Replaced some old += with lists and ''.join() for speed improvements...
Using basestring and hasattr('__getitem__') tests instead of isinstance(list) and str in a couple of places.
Changed license text.
Made the tests useful.
19-06-04 Version 1.0.0
Seems to work ok. A worthy successor to listparse and csv_s - although not as elegant as it could be.
"""
|
|
"""
Allows for the computation of the PQ-Gram edit distance of two trees. To calculate the distance,
a Profile object must first be created for each tree, then the edit_distance function can be called.
For more information on the PQ-Gram algorithm, please see the README.
"""
import tree, copy
class Profile(object):
"""
Represents a PQ-Gram Profile, which is a list of PQ-Grams. Each PQ-Gram is represented by a
ShiftRegister. This class relies on both the ShiftRegister and tree.Node classes.
"""
def __init__(self, root, p=2, q=3):
"""
Builds the PQ-Gram Profile of the given tree, using the p and q parameters specified.
The p and q parameters do not need to be specified, however, different values will have
an effect on the distribution of the calculated edit distance. In general, smaller values
of p and q are better, though a value of (1, 1) is not recommended, and anything lower is
invalid.
"""
super(Profile, self).__init__()
ancestors = ShiftRegister(p)
self.list = list()
self.profile(root, p, q, ancestors)
self.sort()
def profile(self, root, p, q, ancestors):
"""
Recursively builds the PQ-Gram profile of the given subtree. This method should not be called
directly and is called from __init__.
"""
ancestors.shift(root.label)
siblings = ShiftRegister(q)
if(len(root.children) == 0):
self.append(ancestors.concatenate(siblings))
else:
for child in root.children:
siblings.shift(child.label)
self.append(ancestors.concatenate(siblings))
self.profile(child, p, q, copy.deepcopy(ancestors))
for i in range(q-1):
siblings.shift("*")
self.append(ancestors.concatenate(siblings))
def edit_distance(self, other):
"""
Computes the edit distance between two PQ-Gram Profiles. This value should always
be between 0.0 and 1.0. This calculation is reliant on the intersection method.
"""
union = len(self) + len(other)
return 1.0 - 2.0*(self.intersection(other)/union)
def intersection(self, other):
"""
Computes the set intersection of two PQ-Gram Profiles and returns the number of
elements in the intersection.
"""
intersect = 0.0
i = j = 0
while i < len(self) and j < len(other):
intersect += self.gram_edit_distance(self[i], other[j])
if self[i] == other[j]:
i += 1
j += 1
elif self[i] < other[j]:
i += 1
else:
j += 1
return intersect
def gram_edit_distance(self, gram1, gram2):
"""
Computes the edit distance between two different PQ-Grams. If the two PQ-Grams are the same
then the distance is 1.0, otherwise the distance is 0.0. Changing this will break the
metrics of the algorithm.
"""
distance = 0.0
if gram1 == gram2:
distance = 1.0
return distance
def sort(self):
"""
Sorts the PQ-Grams by the concatenation of their labels. This step is automatically performed
when a PQ-Gram Profile is created to ensure the intersection algorithm functions properly and
efficiently.
"""
self.list.sort(key=lambda x: ''.join(x))
def append(self, value):
self.list.append(value)
def __len__(self):
return len(self.list)
def __repr__(self):
return str(self.list)
def __str__(self):
return str(self.list)
def __getitem__(self, key):
return self.list[key]
def __iter__(self):
for x in self.list: yield x
class ShiftRegister(object):
"""
Represents a register which acts as a fixed size queue. There are only two valid
operations on a ShiftRegister: shift and concatenate. Shifting results in a new
value being pushed onto the end of the list and the value at the beginning of the list being
removed. Note that you cannot recover this value, nor do you need to for generating
PQ-Gram Profiles.
"""
def __init__(self, size):
"""
Creates an internal list of the specified size and fills it with the default value
of "*". Once a ShiftRegister is created you cannot change the size without
concatenating another ShiftRegister.
"""
self.register = list()
for i in range(size):
self.register.append("*")
def concatenate(self, reg):
"""
Concatenates two ShiftRegisters and returns the resulting ShiftRegister.
"""
temp = list(self.register)
temp.extend(reg.register)
return temp
def shift(self, el):
"""
Shift is the primary operation on a ShiftRegister. The new item given is pushed onto
the end of the ShiftRegister, the first value is removed, and all items in between shift
to accommodate the new value.
"""
self.register.pop(0)
self.register.append(el)
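# Hedged illustration, not part of the original module: how a ShiftRegister of
# size 3 evolves under shift() and what concatenate() returns. The labels are
# made up.
def _example_shift_register():
    reg = ShiftRegister(3)                     # ['*', '*', '*']
    reg.shift("a")                             # ['*', '*', 'a']
    reg.shift("b")                             # ['*', 'a', 'b']
    return reg.concatenate(ShiftRegister(2))   # ['*', 'a', 'b', '*', '*']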
"""
The following methods are provided for visualization of the PQ-Gram Profile structure. They
are NOT intended for other use, and play no role in using the PQ-Gram algorithm.
"""
def build_extended_tree(root, p=1, q=1):
"""
This method will take a normal tree structure and the given values for p and q, returning
a new tree which represents the so-called PQ-Extended-Tree.
To do this, the following algorithm is used:
1) Add p-1 null ancestors to the root
2) Traverse tree, add q-1 null children before the first and
after the last child of every non-leaf node
3) For each leaf node add q null children
"""
original_root = root # store for later
# Step 1
for i in range(p-1):
node = tree.Node(label="*")
node.addkid(root)
root = node
# Steps 2 and 3
list_of_children = original_root.children
if(len(list_of_children) == 0):
q_append_leaf(original_root, q)
else:
q_append_non_leaf(original_root, q)
while(len(list_of_children) > 0):
temp_list = list()
for child in list_of_children:
if(child.label != "*"):
if(len(child.children) == 0):
q_append_leaf(child, q)
else:
q_append_non_leaf(child, q)
temp_list.extend(child.children)
list_of_children = temp_list
return root
##### Extended Tree Functions #####
def q_append_non_leaf(node, q):
"""
This method will append null node children to the given node. (Step 2)
When adding null nodes to a non-leaf node, the null nodes should exist on both sides of
the real children. This is why the first of each pair of children added sets the flag
'before=True', ensuring that on the left and right (or start and end) of the list of
children a node is added.
"""
for i in range(q-1):
node.addkid(tree.Node("*"), before=True)
node.addkid(tree.Node("*"))
def q_append_leaf(node, q):
"""
This method will append q null node children to the given node. (Step 3)
"""
for i in range(q): node.addkid(tree.Node("*"))
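# --- Illustrative sketch (not part of the algorithm) --------------------------
# A minimal, self-contained demo of the ShiftRegister defined above. The
# Profile and tree.Node APIs live elsewhere in this module, so only the
# register is exercised here.
if __name__ == "__main__":
    reg = ShiftRegister(3)
    print(reg.register)                        # ['*', '*', '*']
    reg.shift("a")
    reg.shift("b")
    print(reg.register)                        # ['*', 'a', 'b']
    print(reg.concatenate(ShiftRegister(2)))   # ['*', 'a', 'b', '*', '*']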
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import signal
import time
import traceback
from builtins import object, str
from contextlib import contextmanager
import psutil
from pants.base.build_environment import get_buildroot
from pants.init.subprocess import Subprocess
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.util.dirutil import read_file, rm_rf, safe_file_dump, safe_mkdir
from pants.util.memo import memoized_property
from pants.util.process_handler import subprocess
logger = logging.getLogger(__name__)
@contextmanager
def swallow_psutil_exceptions():
"""A contextmanager that swallows standard psutil access exceptions."""
try:
yield
except (psutil.AccessDenied, psutil.NoSuchProcess):
# This masks common, but usually benign psutil process access exceptions that might be seen
# when accessing attributes/methods on psutil.Process objects.
pass
class ProcessGroup(object):
"""Wraps a logical group of processes and provides convenient access to ProcessManager objects."""
def __init__(self, name, metadata_base_dir=None):
self._name = name
self._metadata_base_dir = metadata_base_dir
def _instance_from_process(self, process):
"""Default converter from psutil.Process to process instance classes for subclassing."""
return ProcessManager(name=process.name(),
pid=process.pid,
process_name=process.name(),
metadata_base_dir=self._metadata_base_dir)
def iter_processes(self, proc_filter=None):
"""Yields processes from psutil.process_iter with an optional filter and swallows psutil errors.
If a psutil exception is raised during execution of the filter, that process will not be
yielded but subsequent processes will. On the other hand, if psutil.process_iter raises
an exception, no more processes will be yielded.
"""
with swallow_psutil_exceptions(): # process_iter may raise
for proc in psutil.process_iter():
with swallow_psutil_exceptions(): # proc_filter may raise
if (proc_filter is None) or proc_filter(proc):
yield proc
def iter_instances(self, *args, **kwargs):
for item in self.iter_processes(*args, **kwargs):
yield self._instance_from_process(item)
class ProcessMetadataManager(object):
""""Manages contextual, on-disk process metadata."""
class MetadataError(Exception): pass
class Timeout(Exception): pass
FAIL_WAIT_SEC = 10
INFO_INTERVAL_SEC = 5
WAIT_INTERVAL_SEC = .1
def __init__(self, metadata_base_dir=None):
"""
:param str metadata_base_dir: The base directory for process metadata.
"""
super(ProcessMetadataManager, self).__init__()
self._metadata_base_dir = (
metadata_base_dir or
Subprocess.Factory.global_instance().create().get_subprocess_dir()
)
@staticmethod
def _maybe_cast(item, caster):
"""Given a casting function, attempt to cast to that type while masking common cast exceptions.
N.B. This is mostly suitable for casting string types to numeric types - e.g. a port number
read from disk into an int.
:param func caster: A casting callable (e.g. `int`).
:returns: The result of caster(item) or item if TypeError or ValueError are raised during cast.
"""
try:
return caster(item)
except (TypeError, ValueError):
# N.B. the TypeError catch here (already) protects against the case that caster is None.
return item
@classmethod
def _deadline_until(cls, closure, action_msg, timeout=FAIL_WAIT_SEC,
wait_interval=WAIT_INTERVAL_SEC, info_interval=INFO_INTERVAL_SEC):
"""Execute a function/closure repeatedly until a True condition or timeout is met.
:param func closure: the function/closure to execute (should not block for long periods of time
and must return True on success).
:param str action_msg: a description of the action that is being executed, to be rendered as
info while we wait, and as part of any rendered exception.
:param float timeout: the maximum amount of time to wait for a true result from the closure in
seconds. N.B. this is timing based, so won't be exact if the runtime of
the closure exceeds the timeout.
:param float wait_interval: the amount of time to sleep between closure invocations.
:param float info_interval: the amount of time to wait before and between reports via info
logging that we're still waiting for the closure to succeed.
:raises: :class:`ProcessManager.Timeout` on execution timeout.
"""
now = time.time()
deadline = now + timeout
info_deadline = now + info_interval
while 1:
if closure():
return True
now = time.time()
if now > deadline:
raise cls.Timeout('exceeded timeout of {} seconds while waiting for {}'.format(timeout, action_msg))
if now > info_deadline:
logger.info('waiting for {}...'.format(action_msg))
info_deadline = info_deadline + info_interval
elif wait_interval:
time.sleep(wait_interval)
@classmethod
def _wait_for_file(cls, filename, timeout=FAIL_WAIT_SEC, want_content=True):
"""Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
def file_waiter():
return os.path.exists(filename) and (not want_content or os.path.getsize(filename))
action_msg = 'file {} to appear'.format(filename)
return cls._deadline_until(file_waiter, action_msg, timeout=timeout)
@staticmethod
def _get_metadata_dir_by_name(name, metadata_base_dir):
"""Retrieve the metadata dir by name.
This should always live outside of the workdir to survive a clean-all.
"""
return os.path.join(metadata_base_dir, name)
def _maybe_init_metadata_dir_by_name(self, name):
"""Initialize the metadata directory for a named identity if it doesn't exist."""
safe_mkdir(self.__class__._get_metadata_dir_by_name(name, self._metadata_base_dir))
def _metadata_file_path(self, name, metadata_key):
return self.metadata_file_path(name, metadata_key, self._metadata_base_dir)
@classmethod
def metadata_file_path(cls, name, metadata_key, metadata_base_dir):
return os.path.join(cls._get_metadata_dir_by_name(name, metadata_base_dir), metadata_key)
def read_metadata_by_name(self, name, metadata_key, caster=None):
"""Read process metadata using a named identity.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param func caster: A casting callable to apply to the read value (e.g. `int`).
"""
file_path = self._metadata_file_path(name, metadata_key)
try:
return self._maybe_cast(read_file(file_path).strip(), caster)
except (IOError, OSError):
return None
def write_metadata_by_name(self, name, metadata_key, metadata_value):
"""Write process metadata using a named identity.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param string metadata_value: The metadata value (e.g. '1729').
"""
self._maybe_init_metadata_dir_by_name(name)
file_path = self._metadata_file_path(name, metadata_key)
safe_file_dump(file_path, metadata_value)
def await_metadata_by_name(self, name, metadata_key, timeout, caster=None):
"""Block up to a timeout for process metadata to arrive on disk.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param int timeout: The deadline to write metadata.
:param type caster: A type-casting callable to apply to the read value (e.g. int, str).
:returns: The value of the metadata key (read from disk post-write).
:raises: :class:`ProcessMetadataManager.Timeout` on timeout.
"""
file_path = self._metadata_file_path(name, metadata_key)
self._wait_for_file(file_path, timeout=timeout)
return self.read_metadata_by_name(name, metadata_key, caster)
def purge_metadata_by_name(self, name):
"""Purge a processes metadata directory.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
logger.debug('purging metadata directory: {}'.format(meta_dir))
try:
rm_rf(meta_dir)
except OSError as e:
raise self.MetadataError('failed to purge metadata directory {}: {!r}'.format(meta_dir, e))
class ProcessManager(ProcessMetadataManager):
"""Subprocess/daemon management mixin/superclass. Not intended to be thread-safe."""
class InvalidCommandOutput(Exception): pass
class NonResponsiveProcess(Exception): pass
class ExecutionError(Exception):
def __init__(self, message, output=None):
super(ProcessManager.ExecutionError, self).__init__(message)
self.message = message
self.output = output
def __repr__(self):
return '{}(message={!r}, output={!r})'.format(type(self).__name__, self.message, self.output)
KILL_WAIT_SEC = 5
KILL_CHAIN = (signal.SIGTERM, signal.SIGKILL)
def __init__(self, name, pid=None, socket=None, process_name=None, socket_type=int,
metadata_base_dir=None):
"""
:param string name: The process identity/name (e.g. 'pantsd' or 'ng_Zinc').
:param int pid: The process pid. Overrides fetching of the self.pid @property.
:param string socket: The socket metadata. Overrides fetching of the self.socket @property.
:param string process_name: The process name for cmdline executable name matching.
:param type socket_type: The type to be used for socket type casting (e.g. int).
:param str metadata_base_dir: The overridden base directory for process metadata.
"""
super(ProcessManager, self).__init__(metadata_base_dir)
self._name = name.lower().strip()
self._pid = pid
self._socket = socket
self._socket_type = socket_type
self._process_name = process_name
self._buildroot = get_buildroot()
self._process = None
@property
def name(self):
"""The logical name/label of the process."""
return self._name
@property
def process_name(self):
"""The logical process name. If defined, this is compared to exe_name for stale pid checking."""
return self._process_name
@memoized_property
def lifecycle_lock(self):
"""An identity-keyed inter-process lock for safeguarding lifecycle and other operations."""
safe_mkdir(self._metadata_base_dir)
return OwnerPrintingInterProcessFileLock(
# N.B. This lock can't key into the actual named metadata dir (e.g. `.pids/pantsd/lock`
# via `ProcessMetadataManager._get_metadata_dir_by_name()`) because of a need to purge
# the named metadata dir on startup to avoid stale metadata reads.
os.path.join(self._metadata_base_dir, '.lock.{}'.format(self._name))
)
@property
def cmdline(self):
"""The process commandline. e.g. ['/usr/bin/python2.7', 'pants.pex'].
:returns: The command line or else `None` if the underlying process has died.
"""
with swallow_psutil_exceptions():
process = self._as_process()
if process:
return process.cmdline()
return None
@property
def cmd(self):
"""The first element of the process commandline e.g. '/usr/bin/python2.7'.
:returns: The first element of the process command line or else `None` if the underlying
process has died.
"""
return (self.cmdline or [None])[0]
@property
def pid(self):
"""The running processes pid (or None)."""
return self._pid or self.read_metadata_by_name(self._name, 'pid', int)
@property
def socket(self):
"""The running processes socket/port information (or None)."""
return self._socket or self.read_metadata_by_name(self._name, 'socket', self._socket_type)
@classmethod
def get_subprocess_output(cls, command, ignore_stderr=True, **kwargs):
"""Get the output of an executed command.
:param command: An iterable representing the command to execute (e.g. ['ls', '-al']).
:param ignore_stderr: Whether or not to ignore stderr output vs interleave it with stdout.
:raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
:returns: The output of the command.
"""
if ignore_stderr is False:
kwargs.setdefault('stderr', subprocess.STDOUT)
try:
return subprocess.check_output(command, **kwargs)
except (OSError, subprocess.CalledProcessError) as e:
subprocess_output = getattr(e, 'output', '').strip()
raise cls.ExecutionError(str(e), subprocess_output)
def await_pid(self, timeout):
"""Wait up to a given timeout for a process to write pid metadata."""
return self.await_metadata_by_name(self._name, 'pid', timeout, int)
def await_socket(self, timeout):
"""Wait up to a given timeout for a process to write socket info."""
return self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)
def write_pid(self, pid=None):
"""Write the current processes PID to the pidfile location"""
pid = pid or os.getpid()
self.write_metadata_by_name(self._name, 'pid', str(pid))
def write_socket(self, socket_info):
"""Write the local processes socket information (TCP port or UNIX socket)."""
self.write_metadata_by_name(self._name, 'socket', str(socket_info))
def write_named_socket(self, socket_name, socket_info):
"""A multi-tenant, named alternative to ProcessManager.write_socket()."""
self.write_metadata_by_name(self._name, 'socket_{}'.format(socket_name), str(socket_info))
def read_named_socket(self, socket_name, socket_type):
"""A multi-tenant, named alternative to ProcessManager.socket."""
return self.read_metadata_by_name(self._name, 'socket_{}'.format(socket_name), socket_type)
def _as_process(self):
"""Returns a psutil `Process` object wrapping our pid.
NB: Even with a process object in hand, subsequent method calls against it can always raise
`NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
do something sensible for the API.
:returns: a psutil Process object or else None if we have no pid.
:rtype: :class:`psutil.Process`
:raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
"""
if self._process is None and self.pid:
self._process = psutil.Process(self.pid)
return self._process
def is_dead(self):
"""Return a boolean indicating whether the process is dead or not."""
return not self.is_alive()
def is_alive(self, extended_check=None):
"""Return a boolean indicating whether the process is running or not.
:param func extended_check: An additional callable that will be invoked to perform an extended
liveness check. This callable should take a single argument of a
`psutil.Process` instance representing the context-local process
and return a boolean True/False to indicate alive vs not alive.
"""
try:
process = self._as_process()
return not (
# Can happen if we don't find our pid.
(not process) or
# Check for walkers.
(process.status() == psutil.STATUS_ZOMBIE) or
# Check for stale pids.
(self.process_name and self.process_name != process.name()) or
# Extended checking.
(extended_check and not extended_check(process))
)
except (psutil.NoSuchProcess, psutil.AccessDenied):
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
return False
def purge_metadata(self, force=False):
"""Instance-based version of ProcessMetadataManager.purge_metadata_by_name() that checks
for process liveness before purging metadata.
:param bool force: If True, skip process liveness check before purging metadata.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
if not force and self.is_alive():
raise self.MetadataError('cannot purge metadata for a running process!')
super(ProcessManager, self).purge_metadata_by_name(self._name)
def _kill(self, kill_sig):
"""Send a signal to the current process."""
if self.pid:
os.kill(self.pid, kill_sig)
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
"""Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
alive = self.is_alive()
if alive:
logger.debug('terminating {}'.format(self._name))
for signal_type in signal_chain:
pid = self.pid
try:
logger.debug('sending signal {} to pid {}'.format(signal_type, pid))
self._kill(signal_type)
except OSError as e:
logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
.format(e=e, signal=signal_type, pid=pid))
# Wait up to kill_wait seconds to terminate or move onto the next signal.
try:
if self._deadline_until(self.is_dead, 'daemon to exit', timeout=kill_wait):
alive = False
logger.debug('successfully terminated pid {}'.format(pid))
break
except self.Timeout:
# Loop to the next kill signal on timeout.
pass
if alive:
raise self.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
.format(pid=self.pid, chain=signal_chain))
if purge:
self.purge_metadata(force=True)
def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,
write_pid=True):
"""Perform a double-fork, execute callbacks and write the child pid file.
The double-fork here is necessary to truly daemonize the subprocess such that it can never
take control of a tty. The initial fork and setsid() creates a new, isolated process group
and also makes the first child a session leader (which can still acquire a tty). By forking a
second time, we ensure that the second child can never acquire a controlling terminal because
it's no longer a session leader - but it now has its own separate process group.
Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn
below) due to the fact that the daemons that pants would run are typically personal user
daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to
differ in their permissions without good reason - in this case, we want to inherit the umask.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
logger.debug('forking %s', self)
pid = os.fork()
if pid == 0:
os.setsid()
second_pid = os.fork()
if second_pid == 0:
try:
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
try:
if write_pid: self.write_pid(second_pid)
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
# This prevents un-reaped, throw-away parent processes from lingering in the process table.
os.waitpid(pid, 0)
def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):
"""Perform a single-fork to run a subprocess and write the child pid file.
Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
to the caller to allow for library-agnostic flexibility in subprocess execution.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
try:
os.setsid()
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
try:
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logger.critical(traceback.format_exc())
def pre_fork(self):
"""Pre-fork callback for subclasses."""
def post_fork_child(self):
"""Pre-fork child callback for subclasses."""
def post_fork_parent(self):
"""Post-fork parent callback for subclasses."""
class FingerprintedProcessManager(ProcessManager):
"""A `ProcessManager` subclass that provides a general strategy for process fingerprinting."""
FINGERPRINT_KEY = 'fingerprint'
FINGERPRINT_CMD_KEY = None
FINGERPRINT_CMD_SEP = '='
@property
def fingerprint(self):
"""The fingerprint of the current process.
This can either read the current fingerprint from the running process's psutil.Process.cmdline
(if the managed process supports that) or from the `ProcessManager` metadata.
:returns: The fingerprint of the running process as read from the process table, ProcessManager
metadata or `None`.
:rtype: string
"""
return (
self.parse_fingerprint(self.cmdline) or
self.read_metadata_by_name(self.name, self.FINGERPRINT_KEY)
)
def parse_fingerprint(self, cmdline, key=None, sep=None):
"""Given a psutil.Process.cmdline, parse and return a fingerprint.
:param list cmdline: The psutil.Process.cmdline of the current process.
:param string key: The key for fingerprint discovery.
:param string sep: The key/value separator for fingerprint discovery.
:returns: The parsed fingerprint or `None`.
:rtype: string or `None`
"""
key = key or self.FINGERPRINT_CMD_KEY
if key:
sep = sep or self.FINGERPRINT_CMD_SEP
cmdline = cmdline or []
for cmd_part in cmdline:
if cmd_part.startswith('{}{}'.format(key, sep)):
return cmd_part.split(sep)[1]
def has_current_fingerprint(self, fingerprint):
"""Determines if a new fingerprint is the current fingerprint of the running process.
:param string fingerprint: The new fingerprint to compare to.
:rtype: bool
"""
return fingerprint == self.fingerprint
def needs_restart(self, fingerprint):
"""Determines if the current ProcessManager needs to be started or restarted.
:param string fingerprint: The new fingerprint to compare to.
:rtype: bool
"""
return self.is_dead() or not self.has_current_fingerprint(fingerprint)
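# --- Illustrative sketch (not part of the pants API) --------------------------
# A minimal ProcessManager subclass showing how daemon_spawn(), write_pid() and
# await_pid() fit together. The 'sleep' command is a hypothetical stand-in for
# a real daemon; this class is not referenced anywhere else.
class _ExampleSleeperProcess(ProcessManager):
  def post_fork_child(self):
    # daemon_spawn() leaves pid writing to the caller: the child starts the
    # real work via Popen() and records its pid for the parent to discover.
    handle = subprocess.Popen(['sleep', '60'])
    self.write_pid(handle.pid)
# Hypothetical driver code:
#   mgr = _ExampleSleeperProcess(name='example_sleeper')
#   mgr.daemon_spawn()
#   pid = mgr.await_pid(timeout=10)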
|
|
#!/usr/bin/env python2.1
"""
Convert Intel HEX files to C code to embed
USAGE: ihex2c.py input.a43 outname [const]
This generates a file named "outname.ci" which contains an array
named "unsigned short funclet_outname[]". That array contains the
machine code from the input file.
The optional argument "const" changes the array type to "const
unsigned short" if present.
Actually it can read TI-Text too. Specifying a "-" as filename makes
it read from the standard input, but then only Intel-Hex format
is supported.
(C) 2002 Chris Liechti <cliechti@gmx.net>
This is distributed under a Python style license.
Requires Python 2+
"""
import sys, string
VERSION = "1.0"
#for the use with memread
def hexdump( (adr, memstr) ):
"""Print a hex dump of data collected with memread
arg1: tuple with adress, memory
return None"""
count = 0
ascii = ''
for value in map(ord, memstr):
if not count: print "%04x: " % adr,
print "%02x" % value,
ascii = ascii + ((32 < value < 127) and chr(value) or '.')
count = count + 1
adr = adr + 1
if count == 16:
count = 0
print " ", ascii
ascii = ''
if count < 16: print " "*(16-count), " ", ascii
class Segment:
"store a string with memory contents along with its startaddress"
def __init__(self, startaddress = 0, data=None):
if data is None:
self.data = ''
else:
self.data = data
self.startaddress = startaddress
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def __repr__(self):
return "Segment(startaddress = 0x%04x, data=%r)" % (self.startaddress, self.data)
class Memory:
"represent memory contents. with functions to load files"
def __init__(self, filename=None):
self.segments = []
if filename:
self.filename = filename
self.loadFile(filename)
def append(self, seg):
self.segments.append(seg)
def __getitem__(self, index):
return self.segments[index]
def __len__(self):
return len(self.segments)
def loadIHex(self, file):
"load data from a (opened) file in Intel-HEX format"
segmentdata = []
currentAddr = 0
startAddr = 0
lines = file.readlines()
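# Each Intel-HEX record has the form ":LLAAAATT<data...>CC" where LL is the
# byte count, AAAA the 16 bit load address, TT the record type (0x00 data,
# 0x01 end-of-file) and CC the checksum; the slices below pick those fields
# out of every line.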
for l in lines:
if l[0] != ':': raise Exception("File Format Error\n")
l = l.strip() #fix CR-LF issues...
length = int(l[1:3],16)
address = int(l[3:7],16)
type = int(l[7:9],16)
check = int(l[-2:],16)
if type == 0x00:
if currentAddr != address:
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
startAddr = currentAddr = address
segmentdata = []
for i in range(length):
segmentdata.append( chr(int(l[9+2*i:11+2*i],16)) )
currentAddr = length + currentAddr
elif type == 0x01:
pass
else:
sys.stderr.write("Ignored unknown field (type 0x%02x) in ihex file.\n" % type)
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
def loadTIText(self, file):
"load data from a (opened) file in TI-Text format"
next = 1
currentAddr = 0
startAddr = 0
segmentdata = []
#Convert data for MSP430, TXT-File is parsed line by line
while next >= 1:
#Read one line
l = file.readline()
if not l: break #EOF
l = l.strip()
if l[0] == 'q': break
elif l[0] == '@': #if @ => new address => send frame and set new addr.
#create a new segment
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
startAddr = currentAddr = int(l[1:],16)
segmentdata = []
else:
for i in string.split(l):
segmentdata.append(chr(int(i,16)))
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
def loadFile(self, filename):
"fill memory with the contents of a file. file type is determined from extension"
if filename[-4:].lower() == '.txt':
self.loadTIText(open(filename, "rb"))
else:
self.loadIHex(open(filename, "rb"))
def getMemrange(self, fromadr, toadr):
"get a range of bytes from the memory. unavailable values are filled with 0xff."
res = ''
toadr = toadr + 1 #python indexes exclude the end, so include it
while fromadr < toadr:
for seg in self.segments:
segend = seg.startaddress + len(seg.data)
if seg.startaddress <= fromadr and fromadr < segend:
if toadr > segend: #not all data in segment
catchlength = segend-fromadr
else:
catchlength = toadr-fromadr
res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]
fromadr = fromadr + catchlength #adjust start
if len(res) >= toadr-fromadr:
break #return res
else: #undefined memory is filled with 0xff
res = res + chr(255)
fromadr = fromadr + 1 #adjust start
return res
def main():
if len(sys.argv) < 3:
sys.stderr.write(__doc__)
sys.exit(2)
filename = sys.argv[1]
outname = sys.argv[2]
opts = sys.argv[3:]
mem = Memory() #prepare downloaded data
if filename == '-': #for stdin:
mem.loadIHex(sys.stdin) #assume intel hex
elif filename:
mem.loadFile(filename) #autodetect otherwise
if len(mem) != 1:
sys.stderr.write("a file with exactly one segment is required!\n")
sys.exit(1)
output = open(outname+".ci", "w")
bytes = 0
for seg in mem:
hexdump((seg.startaddress, seg.data))
bytes = bytes + len(seg.data)
if 'const' in opts:
output.write("const ")
output.write("unsigned short funclet_%s[] = {\n\t" % outname)
output.write(',\n\t'.join([("0x%04x" % (ord(seg.data[i]) + (ord(seg.data[i+1])<<8)))
for i in range(0,len(seg.data),2)]))
output.write("\n};\n")
sys.stderr.write("%i bytes.\n" % bytes)
output.close()
if __name__ == '__main__':
if sys.hexversion < 0x2010000:
sys.stderr.write("Python 2.1 or newer required\n")
sys.exit(1)
main()
|
|
"""The WaveBlocks Project
IOM plugin providing functions for handling various
overlap matrices of linear combinations of general
wavepackets.
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
import numpy as np
def add_overlaplcwp(self, parameters, timeslots=None, matrixsize=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Add storage for various overlap matrices. We can store one matrix type
per key.
========= ======
Key name Matrix
========= ======
``ov`` :math:`\langle\Upsilon | \Upsilon\rangle`
``ovkin`` :math:`\langle\Upsilon | T | \Upsilon\rangle`
``ovpot`` :math:`\langle\Upsilon | V(\underline{x}) | \Upsilon\rangle`
========= ======
Note that 'strange' errors occur if we later try to load or save
matrices for a key we did not initialise with this function.
:param parameters: A :py:class:`ParameterProvider` instance. It can
be empty and is not used at the moment.
:param timeslots: The number of time slots we need. Can be set to ``None``
to get automatically growing datasets.
:param matrixsize: The (maximal) size of each of the overlap matrices. If specified
this remains fixed for all timeslots. Can be set to ``None`` (default)
to get automatically growing datasets.
:type matrixsize: Pair of integers or ``None``.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
valid_keys = ("ov", "ovkin", "ovpot")
# Create the dataset with appropriate parameters
grp_ov = self._srf[self._prefixb + str(blockid)].create_group("overlaplcwp")
if timeslots is None:
T = 0
Ts = None
csTs = 128
else:
T = timeslots
Ts = timeslots
csTs = min(128, Ts)
if matrixsize is None:
Jr = 0
Jc = 0
Jrs = None
Jcs = None
csJrs = 128
csJcs = 128
else:
Jr, Jc = matrixsize
Jrs, Jcs = matrixsize
csJrs = min(128, Jrs)
csJcs = min(128, Jcs)
for k in key:
if k not in valid_keys:
raise ValueError("Unknown key value " + str(k))
name = k[2:]
daset_tg = grp_ov.create_dataset("timegrid" + name, (T,), dtype=np.integer, chunks=True, maxshape=(Ts,), fillvalue=-1)
grp_ov.create_dataset("shape" + name, (T, 2), dtype=np.integer, chunks=(csTs, 2), maxshape=(Ts, 2))
grp_ov.create_dataset("overlap" + name, (T, Jr, Jc), dtype=np.complexfloating, chunks=(1, csJrs, csJcs), maxshape=(Ts, Jrs, Jcs))
daset_tg.attrs["pointer"] = 0
def delete_overlaplcwp(self, blockid=0):
r"""Remove the stored overlap matrices.
:param blockid: The ID of the data block to operate on.
"""
try:
del self._srf[self._prefixb + str(blockid) + "/overlaplcwp"]
except KeyError:
pass
def has_overlaplcwp(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Ask if the specified data block has the desired data tensor.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
r = True
r &= ("overlaplcwp" in self._srf[self._prefixb + str(blockid)].keys())
if r and "ov" in key:
r &= ("overlap" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
if r and "ovpot" in key:
r &= ("overlappot" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
if r and "ovkin" in key:
r &= ("overlapkin" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
return r
def save_overlaplcwp(self, data, timestep=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Save overlap matrices of linear combinations of general wavepackets.
In principle this function also supports non-square matrices.
:param data: The data matrices to save.
:type data: A list of :py:class:`ndarray` entries.
:param timestep: The timestep at which we save the data.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
for item, datum in zip(key, data):
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlap"
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlapkin"
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlappot"
else:
raise ValueError("Unknown key value {}".format(item))
timeslot = self._srf[pathtg].attrs["pointer"]
# Write the data
self.must_resize(pathd, timeslot)
datum = np.atleast_2d(np.squeeze(datum))
rows, cols = datum.shape
self.must_resize(pathd, rows - 1, axis=1)
self.must_resize(pathd, cols - 1, axis=2)
self._srf[pathd][timeslot, :rows, :cols] = datum
self.must_resize(pathsh, timeslot)
self._srf[pathsh][timeslot, :] = np.array([rows, cols])
# Write the timestep to which the stored values belong into the timegrid
self.must_resize(pathtg, timeslot)
self._srf[pathtg][timeslot] = timestep
# Update the pointer
self._srf[pathtg].attrs["pointer"] += 1
def load_overlaplcwp_timegrid(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load the timegrid corresponding to the overlap matrices specified.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to load. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` each having one column.
"""
tg = []
for item in key:
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
tg.append(self._srf[pathtg][:])
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
tg.append(self._srf[pathtg][:])
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
tg.append(self._srf[pathtg][:])
else:
raise ValueError("Unknown key value {}".format(item))
if len(tg) == 1:
return tg[0]
else:
return tuple(tg)
def load_overlaplcwp_shape(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load the shape of the overlap matrices specified.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` each having two columns.
"""
tg = []
for item in key:
if item == "ov":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
tg.append(self._srf[pathsh][:])
elif item == "ovkin":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
tg.append(self._srf[pathsh][:])
elif item == "ovpot":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
tg.append(self._srf[pathsh][:])
else:
raise ValueError("Unknown key value {}".format(item))
if len(tg) == 1:
return tg[0]
else:
return tuple(tg)
def load_overlaplcwp(self, timestep=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load overlap matrices of linear combinations of general wavepackets.
:param timestep: Load only the data of this timestep.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` items. Their shapes depend on the
exact value of the above arguments.
"""
result = []
for item in key:
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlap"
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlapkin"
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlappot"
else:
raise ValueError("Unknown key value {}".format(item))
if timestep is not None:
index = self.find_timestep_index(pathtg, timestep)
shape = self._srf[pathsh][index, :]
datum = self._srf[pathd][index, :shape[0], :shape[1]]
else:
datum = self._srf[pathd][:, :, :]
result.append(datum)
if len(result) == 1:
return result[0]
else:
return tuple(result)
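# --- Illustrative sketch -------------------------------------------------------
# These functions are bound onto an IOManager instance as a plugin. A typical
# round trip for the plain overlap matrix only (variable names here are
# assumptions, not taken from this module) might look like:
#
#     iom.add_overlaplcwp(parameters, matrixsize=(J, J), blockid=0, key=("ov",))
#     iom.save_overlaplcwp([M], timestep=0, blockid=0, key=("ov",))
#     M_back = iom.load_overlaplcwp(timestep=0, blockid=0, key=("ov",))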
|
|
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Configuration for libvirt objects.
Classes to represent the configuration of various libvirt objects
and support conversion to/from XML. These classes are solely concerned
by providing direct Object <-> XML document conversions. No policy or
operational decisions should be made by code in these classes. Such
policy belongs in the 'designer.py' module which provides simplified
helpers for populating config object instances.
"""
import time
import six
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.pci import pci_utils
from nova.virt import hardware
from lxml import etree
LOG = logging.getLogger(__name__)
# Namespace to use for Nova specific metadata items in XML
NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0"
class LibvirtConfigObject(object):
def __init__(self, **kwargs):
super(LibvirtConfigObject, self).__init__()
self.root_name = kwargs.get("root_name")
self.ns_prefix = kwargs.get('ns_prefix')
self.ns_uri = kwargs.get('ns_uri')
def _new_node(self, name, **kwargs):
if self.ns_uri is None:
return etree.Element(name, **kwargs)
else:
return etree.Element("{" + self.ns_uri + "}" + name,
nsmap={self.ns_prefix: self.ns_uri},
**kwargs)
def _text_node(self, name, value, **kwargs):
child = self._new_node(name, **kwargs)
child.text = six.text_type(value)
return child
def format_dom(self):
return self._new_node(self.root_name)
def parse_str(self, xmlstr):
self.parse_dom(etree.fromstring(xmlstr))
def parse_dom(self, xmldoc):
if self.root_name != xmldoc.tag:
raise exception.InvalidInput(
"Root element name should be '%s' not '%s'"
% (self.root_name, xmldoc.tag))
def to_xml(self, pretty_print=True):
root = self.format_dom()
xml_str = etree.tostring(root, pretty_print=pretty_print)
LOG.debug("Generated XML %s ", (xml_str,))
return xml_str
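# Illustrative sketch: every config class below follows the same
# parse_str()/to_xml() round trip, e.g. (the variable and values here are
# assumptions for illustration):
#
#     cpu = LibvirtConfigCPU()
#     cpu.parse_str("<cpu><arch>x86_64</arch><model>Westmere</model></cpu>")
#     xml = cpu.to_xml()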
class LibvirtConfigCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCaps, self).__init__(root_name="capabilities",
**kwargs)
self.host = None
self.guests = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCaps, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "host":
host = LibvirtConfigCapsHost()
host.parse_dom(c)
self.host = host
elif c.tag == "guest":
guest = LibvirtConfigCapsGuest()
guest.parse_dom(c)
self.guests.append(guest)
def format_dom(self):
caps = super(LibvirtConfigCaps, self).format_dom()
if self.host:
caps.append(self.host.format_dom())
for g in self.guests:
caps.append(g.format_dom())
return caps
class LibvirtConfigCapsNUMATopology(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMATopology, self).__init__(
root_name="topology",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc)
xmlcells = xmldoc.getchildren()[0]
for xmlcell in xmlcells.getchildren():
cell = LibvirtConfigCapsNUMACell()
cell.parse_dom(xmlcell)
self.cells.append(cell)
def format_dom(self):
topo = super(LibvirtConfigCapsNUMATopology, self).format_dom()
cells = etree.Element("cells")
cells.set("num", str(len(self.cells)))
topo.append(cells)
for cell in self.cells:
cells.append(cell.format_dom())
return topo
class LibvirtConfigCapsNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.memory = 0
self.cpus = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
for c in xmldoc.getchildren():
if c.tag == "memory":
self.memory = int(c.text)
elif c.tag == "cpus":
for c2 in c.getchildren():
cpu = LibvirtConfigCapsNUMACPU()
cpu.parse_dom(c2)
self.cpus.append(cpu)
def format_dom(self):
cell = super(LibvirtConfigCapsNUMACell, self).format_dom()
cell.set("id", str(self.id))
mem = etree.Element("memory")
mem.set("unit", "KiB")
mem.text = str(self.memory)
cell.append(mem)
cpus = etree.Element("cpus")
cpus.set("num", str(len(self.cpus)))
for cpu in self.cpus:
cpus.append(cpu.format_dom())
cell.append(cpus)
return cell
class LibvirtConfigCapsNUMACPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu",
**kwargs)
self.id = None
self.socket_id = None
self.core_id = None
self.siblings = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
if xmldoc.get("socket_id") is not None:
self.socket_id = int(xmldoc.get("socket_id"))
if xmldoc.get("core_id") is not None:
self.core_id = int(xmldoc.get("core_id"))
if xmldoc.get("siblings") is not None:
self.siblings = hardware.parse_cpu_spec(
xmldoc.get("siblings"))
def format_dom(self):
cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom()
cpu.set("id", str(self.id))
if self.socket_id is not None:
cpu.set("socket_id", str(self.socket_id))
if self.core_id is not None:
cpu.set("core_id", str(self.core_id))
if self.siblings is not None:
cpu.set("siblings",
hardware.format_cpu_spec(self.siblings))
return cpu
class LibvirtConfigCapsHost(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsHost, self).__init__(root_name="host",
**kwargs)
self.cpu = None
self.uuid = None
self.topology = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "cpu":
cpu = LibvirtConfigCPU()
cpu.parse_dom(c)
self.cpu = cpu
elif c.tag == "uuid":
self.uuid = c.text
elif c.tag == "topology":
self.topology = LibvirtConfigCapsNUMATopology()
self.topology.parse_dom(c)
def format_dom(self):
caps = super(LibvirtConfigCapsHost, self).format_dom()
if self.uuid:
caps.append(self._text_node("uuid", self.uuid))
if self.cpu:
caps.append(self.cpu.format_dom())
if self.topology:
caps.append(self.topology.format_dom())
return caps
class LibvirtConfigCapsGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuest, self).__init__(root_name="guest",
**kwargs)
self.arch = None
self.ostype = None
self.domtype = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "os_type":
self.ostype = c.text
elif c.tag == "arch":
self.arch = c.get("name")
for sc in c.getchildren():
if sc.tag == "domain":
self.domtype.append(sc.get("type"))
def format_dom(self):
caps = super(LibvirtConfigCapsGuest, self).format_dom()
if self.ostype is not None:
caps.append(self._text_node("os_type", self.ostype))
if self.arch:
arch = etree.Element("arch", name=self.arch)
for dt in self.domtype:
dte = etree.Element("domain")
dte.set("type", dt)
arch.append(dte)
caps.append(arch)
return caps
class LibvirtConfigGuestTimer(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestTimer, self).__init__(root_name="timer",
**kwargs)
self.name = "platform"
self.track = None
self.tickpolicy = None
self.present = None
def format_dom(self):
tm = super(LibvirtConfigGuestTimer, self).format_dom()
tm.set("name", self.name)
if self.track is not None:
tm.set("track", self.track)
if self.tickpolicy is not None:
tm.set("tickpolicy", self.tickpolicy)
if self.present is not None:
if self.present:
tm.set("present", "yes")
else:
tm.set("present", "no")
return tm
class LibvirtConfigGuestClock(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestClock, self).__init__(root_name="clock",
**kwargs)
self.offset = "utc"
self.adjustment = None
self.timezone = None
self.timers = []
def format_dom(self):
clk = super(LibvirtConfigGuestClock, self).format_dom()
clk.set("offset", self.offset)
if self.adjustment:
clk.set("adjustment", self.adjustment)
elif self.timezone:
clk.set("timezone", self.timezone)
for tm in self.timers:
clk.append(tm.format_dom())
return clk
def add_timer(self, tm):
self.timers.append(tm)
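# Illustrative sketch (values are assumptions): composing a clock with a timer
# and serialising it:
#
#     clk = LibvirtConfigGuestClock()
#     tm = LibvirtConfigGuestTimer()
#     tm.name = "pit"
#     tm.tickpolicy = "delay"
#     clk.add_timer(tm)
#     xml = clk.to_xml()   # <clock offset="utc"><timer name="pit" tickpolicy="delay"/></clock>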
class LibvirtConfigCPUFeature(LibvirtConfigObject):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigCPUFeature, self).__init__(root_name='feature',
**kwargs)
self.name = name
def parse_dom(self, xmldoc):
super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc)
self.name = xmldoc.get("name")
def format_dom(self):
ft = super(LibvirtConfigCPUFeature, self).format_dom()
ft.set("name", self.name)
return ft
def __eq__(self, obj):
return obj.name == self.name
def __ne__(self, obj):
return obj.name != self.name
def __hash__(self):
return hash(self.name)
class LibvirtConfigCPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCPU, self).__init__(root_name='cpu',
**kwargs)
self.arch = None
self.vendor = None
self.model = None
self.sockets = None
self.cores = None
self.threads = None
self.features = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigCPU, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "arch":
self.arch = c.text
elif c.tag == "model":
self.model = c.text
elif c.tag == "vendor":
self.vendor = c.text
elif c.tag == "topology":
self.sockets = int(c.get("sockets"))
self.cores = int(c.get("cores"))
self.threads = int(c.get("threads"))
elif c.tag == "feature":
f = LibvirtConfigCPUFeature()
f.parse_dom(c)
self.add_feature(f)
def format_dom(self):
cpu = super(LibvirtConfigCPU, self).format_dom()
if self.arch is not None:
cpu.append(self._text_node("arch", self.arch))
if self.model is not None:
cpu.append(self._text_node("model", self.model))
if self.vendor is not None:
cpu.append(self._text_node("vendor", self.vendor))
if (self.sockets is not None and
self.cores is not None and
self.threads is not None):
top = etree.Element("topology")
top.set("sockets", str(self.sockets))
top.set("cores", str(self.cores))
top.set("threads", str(self.threads))
cpu.append(top)
# sorting the features to allow more predictable tests
for f in sorted(self.features, key=lambda x: x.name):
cpu.append(f.format_dom())
return cpu
def add_feature(self, feat):
self.features.add(feat)
class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs)
self.policy = "require"
def format_dom(self):
ft = super(LibvirtConfigGuestCPUFeature, self).format_dom()
ft.set("policy", self.policy)
return ft
class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.cpus = None
self.memory = None
def parse_dom(self, xmldoc):
if xmldoc.get("id") is not None:
self.id = int(xmldoc.get("id"))
if xmldoc.get("memory") is not None:
self.memory = int(xmldoc.get("memory"))
if xmldoc.get("cpus") is not None:
self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus"))
def format_dom(self):
cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom()
if self.id is not None:
cell.set("id", str(self.id))
if self.cpus is not None:
cell.set("cpus",
hardware.format_cpu_spec(self.cpus))
if self.memory is not None:
cell.set("memory", str(self.memory))
return cell
class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPUNUMA, self).parse_dom(xmldoc)
for child in xmldoc.getchildren():
if child.tag == "cell":
cell = LibvirtConfigGuestCPUNUMACell()
cell.parse_dom(child)
self.cells.append(cell)
def format_dom(self):
numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom()
for cell in self.cells:
numa.append(cell.format_dom())
return numa
class LibvirtConfigGuestCPU(LibvirtConfigCPU):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPU, self).__init__(**kwargs)
self.mode = None
self.match = "exact"
self.numa = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.match = xmldoc.get('match')
for child in xmldoc.getchildren():
if child.tag == "numa":
numa = LibvirtConfigGuestCPUNUMA()
numa.parse_dom(child)
self.numa = numa
def format_dom(self):
cpu = super(LibvirtConfigGuestCPU, self).format_dom()
if self.mode:
cpu.set("mode", self.mode)
cpu.set("match", self.match)
if self.numa is not None:
cpu.append(self.numa.format_dom())
return cpu
class LibvirtConfigGuestSMBIOS(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios",
**kwargs)
self.mode = "sysinfo"
def format_dom(self):
smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom()
smbios.set("mode", self.mode)
return smbios
class LibvirtConfigGuestSysinfo(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo",
**kwargs)
self.type = "smbios"
self.bios_vendor = None
self.bios_version = None
self.system_manufacturer = None
self.system_product = None
self.system_version = None
self.system_serial = None
self.system_uuid = None
def format_dom(self):
sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom()
sysinfo.set("type", self.type)
bios = None
system = None
if self.bios_vendor is not None:
if bios is None:
bios = etree.Element("bios")
info = etree.Element("entry", name="vendor")
info.text = self.bios_vendor
bios.append(info)
if self.bios_version is not None:
if bios is None:
bios = etree.Element("bios")
info = etree.Element("entry", name="version")
info.text = self.bios_version
bios.append(info)
if self.system_manufacturer is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="manufacturer")
info.text = self.system_manufacturer
system.append(info)
if self.system_product is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="product")
info.text = self.system_product
system.append(info)
if self.system_version is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="version")
info.text = self.system_version
system.append(info)
if self.system_serial is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="serial")
info.text = self.system_serial
system.append(info)
if self.system_uuid is not None:
if system is None:
system = etree.Element("system")
info = etree.Element("entry", name="uuid")
info.text = self.system_uuid
system.append(info)
if bios is not None:
sysinfo.append(bios)
if system is not None:
sysinfo.append(system)
return sysinfo
class LibvirtConfigGuestDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDevice, self).__init__(**kwargs)
class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = "file"
self.source_device = "disk"
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.driver_discard = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
self.disk_read_bytes_sec = None
self.disk_read_iops_sec = None
self.disk_write_bytes_sec = None
self.disk_write_iops_sec = None
self.disk_total_bytes_sec = None
self.disk_total_iops_sec = None
self.logical_block_size = None
self.physical_block_size = None
self.readonly = False
self.snapshot = None
self.backing_store = None
def format_dom(self):
dev = super(LibvirtConfigGuestDisk, self).format_dom()
dev.set("type", self.source_type)
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None or
self.driver_discard is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
if self.driver_discard is not None:
drv.set("discard", self.driver_discard)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
if self.serial is not None:
dev.append(self._text_node("serial", self.serial))
iotune = etree.Element("iotune")
if self.disk_read_bytes_sec is not None:
iotune.append(self._text_node("read_bytes_sec",
self.disk_read_bytes_sec))
if self.disk_read_iops_sec is not None:
iotune.append(self._text_node("read_iops_sec",
self.disk_read_iops_sec))
if self.disk_write_bytes_sec is not None:
iotune.append(self._text_node("write_bytes_sec",
self.disk_write_bytes_sec))
if self.disk_write_iops_sec is not None:
iotune.append(self._text_node("write_iops_sec",
self.disk_write_iops_sec))
if self.disk_total_bytes_sec is not None:
iotune.append(self._text_node("total_bytes_sec",
self.disk_total_bytes_sec))
if self.disk_total_iops_sec is not None:
iotune.append(self._text_node("total_iops_sec",
self.disk_total_iops_sec))
if len(iotune) > 0:
dev.append(iotune)
# Block size tuning
if (self.logical_block_size is not None or
self.physical_block_size is not None):
blockio = etree.Element("blockio")
if self.logical_block_size is not None:
blockio.set('logical_block_size', self.logical_block_size)
if self.physical_block_size is not None:
blockio.set('physical_block_size', self.physical_block_size)
dev.append(blockio)
if self.readonly:
dev.append(etree.Element("readonly"))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
self.driver_discard = c.get('discard')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c.getchildren():
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
elif c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
elif c.tag == 'backingStore':
b = LibvirtConfigGuestDiskBackingStore()
b.parse_dom(c)
self.backing_store = b
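# Illustrative sketch (paths and device names are assumptions): a minimal
# file-backed disk relying on the defaults source_type="file" and
# source_device="disk" set above:
#
#     disk = LibvirtConfigGuestDisk()
#     disk.source_path = "/var/lib/nova/instances/some-uuid/disk"
#     disk.target_dev = "vda"
#     disk.target_bus = "virtio"
#     xml = disk.to_xml()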
class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskBackingStore, self).__init__(
root_name="backingStore", **kwargs)
self.index = None
self.source_type = None
self.source_file = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.driver_name = None
self.driver_format = None
self.backing_store = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.index = xmldoc.get('index')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
elif c.tag == 'source':
self.source_file = c.get('file')
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for d in c.getchildren():
if d.tag == 'host':
self.source_hosts.append(d.get('name'))
self.source_ports.append(d.get('port'))
elif c.tag == 'backingStore':
if c.getchildren():
self.backing_store = LibvirtConfigGuestDiskBackingStore()
self.backing_store.parse_dom(c)
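# Illustrative sketch of the XML parsed by LibvirtConfigGuestDiskBackingStore
# above (all values hypothetical):
#   <backingStore type="file" index="1">
#     <driver name="qemu" type="qcow2"/>
#     <source file="/var/lib/libvirt/images/base.qcow2"/>
#     <backingStore/>
#   </backingStore>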
class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject):
"""Disk class for handling disk information in snapshots.
Similar to LibvirtConfigGuestDisk, but used to represent
disk entities in <domainsnapshot> structures rather than
real devices. These typically have fewer members, and
different expectations for which fields are required.
"""
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshotDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = None
self.source_device = None
self.name = None
self.snapshot = None
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
def format_dom(self):
dev = super(LibvirtConfigGuestSnapshotDisk, self).format_dom()
if self.name:
dev.attrib['name'] = self.name
if self.snapshot:
dev.attrib['snapshot'] = self.snapshot
if self.source_type:
dev.set("type", self.source_type)
if self.source_device:
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
if self.target_bus and self.target_dev:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
return dev
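    # Illustrative sketch: with hypothetical values name='vda',
    # snapshot='external', source_type='file' and
    # source_path='/path/new.qcow2', format_dom() above yields roughly
    #   <disk name="vda" snapshot="external" type="file">
    #     <source file="/path/new.qcow2"/>
    #   </disk>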
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestSnapshotDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc.getchildren():
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c.getchildren():
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
for c in xmldoc.getchildren():
if c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem",
**kwargs)
self.source_type = "mount"
self.source_dir = None
self.target_dir = "/"
def format_dom(self):
dev = super(LibvirtConfigGuestFilesys, self).format_dom()
dev.set("type", self.source_type)
dev.append(etree.Element("source", dir=self.source_dir))
dev.append(etree.Element("target", dir=self.target_dir))
return dev
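# Illustrative sketch: LibvirtConfigGuestFilesys.format_dom() above produces
# roughly the following (source_dir value hypothetical):
#   <filesystem type="mount">
#     <source dir="/var/lib/nova/instances/instance-1/rootfs"/>
#     <target dir="/"/>
#   </filesystem>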
class LibvirtConfigGuestIDMap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestIDMap, self).__init__(**kwargs)
self.start = 0
self.target = 0
self.count = 10000
def parse_dom(self, xmldoc):
self.start = int(xmldoc.get('start'))
self.target = int(xmldoc.get('target'))
self.count = int(xmldoc.get('count'))
def format_dom(self):
obj = super(LibvirtConfigGuestIDMap, self).format_dom()
obj.set("start", str(self.start))
obj.set("target", str(self.target))
obj.set("count", str(self.count))
return obj
class LibvirtConfigGuestUIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUIDMap, self).__init__(root_name="uid",
**kwargs)
class LibvirtConfigGuestGIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGIDMap, self).__init__(root_name="gid",
**kwargs)
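# Illustrative sketch: the UID/GID map classes above serialise to elements of
# the form <uid start="0" target="1000" count="10000"/> and
# <gid start="0" target="1000" count="10000"/> (target value hypothetical);
# LibvirtConfigGuest collects them under a single <idmap> element.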
class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInterface, self).__init__(
root_name="interface",
**kwargs)
self.net_type = None
self.target_dev = None
self.model = None
self.mac_addr = None
self.script = None
self.source_dev = None
self.source_mode = "private"
self.vporttype = None
self.vportparams = []
self.filtername = None
self.filterparams = []
self.driver_name = None
self.vif_inbound_peak = None
self.vif_inbound_burst = None
self.vif_inbound_average = None
self.vif_outbound_peak = None
self.vif_outbound_burst = None
self.vif_outbound_average = None
self.vlan = None
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
dev.set("type", self.net_type)
if self.net_type == "hostdev":
dev.set("managed", "yes")
dev.append(etree.Element("mac", address=self.mac_addr))
if self.model:
dev.append(etree.Element("model", type=self.model))
if self.driver_name:
dev.append(etree.Element("driver", name=self.driver_name))
if self.net_type == "ethernet":
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode=self.source_mode))
elif self.net_type == "hostdev":
source_elem = etree.Element("source")
domain, bus, slot, func = \
pci_utils.get_pci_address_fields(self.source_dev)
addr_elem = etree.Element("address", type='pci')
addr_elem.set("domain", "0x%s" % (domain))
addr_elem.set("bus", "0x%s" % (bus))
addr_elem.set("slot", "0x%s" % (slot))
addr_elem.set("function", "0x%s" % (func))
source_elem.append(addr_elem)
dev.append(source_elem)
else:
dev.append(etree.Element("source", bridge=self.source_dev))
if self.vlan and self.net_type in ("direct", "hostdev"):
vlan_elem = etree.Element("vlan")
tag_elem = etree.Element("tag", id=self.vlan)
vlan_elem.append(tag_elem)
dev.append(vlan_elem)
if self.target_dev is not None:
dev.append(etree.Element("target", dev=self.target_dev))
if self.vporttype is not None:
vport = etree.Element("virtualport", type=self.vporttype)
for p in self.vportparams:
param = etree.Element("parameters")
param.set(p['key'], p['value'])
vport.append(param)
dev.append(vport)
if self.filtername is not None:
filter = etree.Element("filterref", filter=self.filtername)
for p in self.filterparams:
filter.append(etree.Element("parameter",
name=p['key'],
value=p['value']))
dev.append(filter)
if self.vif_inbound_average or self.vif_outbound_average:
bandwidth = etree.Element("bandwidth")
if self.vif_inbound_average is not None:
vif_inbound = etree.Element("inbound",
average=str(self.vif_inbound_average))
if self.vif_inbound_peak is not None:
vif_inbound.set("peak", str(self.vif_inbound_peak))
if self.vif_inbound_burst is not None:
vif_inbound.set("burst", str(self.vif_inbound_burst))
bandwidth.append(vif_inbound)
if self.vif_outbound_average is not None:
vif_outbound = etree.Element("outbound",
average=str(self.vif_outbound_average))
if self.vif_outbound_peak is not None:
vif_outbound.set("peak", str(self.vif_outbound_peak))
if self.vif_outbound_burst is not None:
vif_outbound.set("burst", str(self.vif_outbound_burst))
bandwidth.append(vif_outbound)
dev.append(bandwidth)
return dev
def add_filter_param(self, key, value):
self.filterparams.append({'key': key, 'value': value})
def add_vport_param(self, key, value):
self.vportparams.append({'key': key, 'value': value})
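# Illustrative sketch: with a hypothetical filter name and one parameter added
# via add_filter_param(), LibvirtConfigGuestInterface.format_dom() above emits
# a child such as
#   <filterref filter="nova-instance-filter">
#     <parameter name="IP" value="192.0.2.10"/>
#   </filterref>
# alongside the usual <mac>, <source> and optional <bandwidth> elements.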
class LibvirtConfigGuestInput(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInput, self).__init__(root_name="input",
**kwargs)
self.type = "tablet"
self.bus = "usb"
def format_dom(self):
dev = super(LibvirtConfigGuestInput, self).format_dom()
dev.set("type", self.type)
dev.set("bus", self.bus)
return dev
class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics",
**kwargs)
self.type = "vnc"
self.autoport = True
self.keymap = None
self.listen = None
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
dev.set("type", self.type)
if self.autoport:
dev.set("autoport", "yes")
else:
dev.set("autoport", "no")
if self.keymap:
dev.set("keymap", self.keymap)
if self.listen:
dev.set("listen", self.listen)
return dev
class LibvirtConfigSeclabel(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigSeclabel, self).__init__(root_name="seclabel",
**kwargs)
self.type = 'dynamic'
self.baselabel = None
def format_dom(self):
seclabel = super(LibvirtConfigSeclabel, self).format_dom()
seclabel.set('type', self.type)
if self.baselabel:
seclabel.append(self._text_node("baselabel", self.baselabel))
return seclabel
class LibvirtConfigGuestVideo(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVideo, self).__init__(root_name="video",
**kwargs)
self.type = 'cirrus'
self.vram = None
self.heads = None
def format_dom(self):
dev = super(LibvirtConfigGuestVideo, self).format_dom()
model = etree.Element("model")
model.set("type", self.type)
if self.vram:
model.set("vram", str(self.vram))
if self.heads:
model.set("heads", str(self.heads))
dev.append(model)
return dev
class LibvirtConfigMemoryBalloon(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigMemoryBalloon, self).__init__(
root_name='memballoon',
**kwargs)
self.model = None
self.period = None
def format_dom(self):
dev = super(LibvirtConfigMemoryBalloon, self).format_dom()
dev.set('model', str(self.model))
dev.append(etree.Element('stats', period=str(self.period)))
return dev
class LibvirtConfigGuestController(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestController,
self).__init__(root_name="controller", **kwargs)
self.type = None
self.index = None
self.model = None
def format_dom(self):
controller = super(LibvirtConfigGuestController, self).format_dom()
controller.set("type", self.type)
if self.index is not None:
controller.set("index", str(self.index))
if self.model:
controller.set("model", str(self.model))
return controller
class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdev, self).\
__init__(root_name="hostdev", **kwargs)
self.mode = kwargs.get('mode')
self.type = kwargs.get('type')
self.managed = 'yes'
def format_dom(self):
dev = super(LibvirtConfigGuestHostdev, self).format_dom()
dev.set("mode", self.mode)
dev.set("type", self.type)
dev.set("managed", self.managed)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestHostdev, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.type = xmldoc.get('type')
self.managed = xmldoc.get('managed')
return xmldoc.getchildren()
class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevPCI, self).\
__init__(mode='subsystem', type='pci',
**kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom()
address = etree.Element("address",
domain='0x' + self.domain,
bus='0x' + self.bus,
slot='0x' + self.slot,
function='0x' + self.function)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
childs = super(LibvirtConfigGuestHostdevPCI, self).parse_dom(xmldoc)
for c in childs:
if c.tag == "source":
for sub in c.getchildren():
if sub.tag == 'address':
self.domain = sub.get('domain')
self.bus = sub.get('bus')
self.slot = sub.get('slot')
self.function = sub.get('function')
class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharBase, self).__init__(**kwargs)
self.type = "pty"
self.source_path = None
self.listen_port = None
self.listen_host = None
def format_dom(self):
dev = super(LibvirtConfigGuestCharBase, self).format_dom()
dev.set("type", self.type)
if self.type == "file":
dev.append(etree.Element("source", path=self.source_path))
elif self.type == "unix":
dev.append(etree.Element("source", mode="bind",
path=self.source_path))
elif self.type == "tcp":
dev.append(etree.Element("source", mode="bind",
host=self.listen_host,
service=str(self.listen_port)))
return dev
class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChar, self).__init__(**kwargs)
self.target_port = None
def format_dom(self):
dev = super(LibvirtConfigGuestChar, self).format_dom()
if self.target_port is not None:
dev.append(etree.Element("target", port=str(self.target_port)))
return dev
class LibvirtConfigGuestSerial(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSerial, self).__init__(root_name="serial",
**kwargs)
class LibvirtConfigGuestConsole(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestConsole, self).__init__(root_name="console",
**kwargs)
class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChannel, self).__init__(root_name="channel",
**kwargs)
self.target_type = "virtio"
self.target_name = None
def format_dom(self):
dev = super(LibvirtConfigGuestChannel, self).format_dom()
target = etree.Element("target", type=self.target_type)
if self.target_name is not None:
target.set("name", self.target_name)
dev.append(target)
return dev
class LibvirtConfigGuestWatchdog(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestWatchdog, self).__init__(root_name="watchdog",
**kwargs)
self.model = 'i6300esb'
self.action = 'reset'
def format_dom(self):
dev = super(LibvirtConfigGuestWatchdog, self).format_dom()
dev.set('model', self.model)
dev.set('action', self.action)
return dev
class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__(
root_name="vcpupin",
**kwargs)
self.id = None
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom()
root.set("vcpu", str(self.id))
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTune, self).__init__(root_name="cputune",
**kwargs)
self.shares = None
self.quota = None
self.period = None
self.vcpupin = []
def format_dom(self):
root = super(LibvirtConfigGuestCPUTune, self).format_dom()
if self.shares is not None:
root.append(self._text_node("shares", str(self.shares)))
if self.quota is not None:
root.append(self._text_node("quota", str(self.quota)))
if self.period is not None:
root.append(self._text_node("period", str(self.period)))
for vcpu in self.vcpupin:
root.append(vcpu.format_dom())
return root
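# Illustrative sketch: with hypothetical values shares=1024, quota=-1,
# period=100000 and one vcpupin entry, LibvirtConfigGuestCPUTune.format_dom()
# above yields roughly
#   <cputune>
#     <shares>1024</shares>
#     <quota>-1</quota>
#     <period>100000</period>
#     <vcpupin vcpu="0" cpuset="0-3"/>
#   </cputune>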
class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBacking, self).__init__(
root_name="memoryBacking", **kwargs)
self.hugepages = False
self.sharedpages = True
self.locked = False
def format_dom(self):
root = super(LibvirtConfigGuestMemoryBacking, self).format_dom()
if self.hugepages:
root.append(etree.Element("hugepages"))
if not self.sharedpages:
root.append(etree.Element("nosharedpages"))
if self.locked:
root.append(etree.Element("locked"))
return root
class LibvirtConfigGuestMemoryTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryTune, self).__init__(
root_name="memtune", **kwargs)
self.hard_limit = None
self.soft_limit = None
self.swap_hard_limit = None
self.min_guarantee = None
def format_dom(self):
root = super(LibvirtConfigGuestMemoryTune, self).format_dom()
if self.hard_limit is not None:
root.append(self._text_node("hard_limit",
str(self.hard_limit),
units="K"))
if self.soft_limit is not None:
root.append(self._text_node("soft_limit",
str(self.soft_limit),
units="K"))
if self.swap_hard_limit is not None:
root.append(self._text_node("swap_hard_limit",
str(self.swap_hard_limit),
units="K"))
if self.min_guarantee is not None:
root.append(self._text_node("min_guarantee",
str(self.min_guarantee),
units="K"))
return root
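# Illustrative sketch (assuming _text_node forwards extra keyword arguments as
# XML attributes, as the units="K" calls above suggest): a hard limit of
# 512000 KiB renders as
#   <memtune>
#     <hard_limit units="K">512000</hard_limit>
#   </memtune>
# (the numeric value is hypothetical).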
class LibvirtConfigGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuest, self).__init__(root_name="domain",
**kwargs)
self.virt_type = None
self.uuid = None
self.name = None
self.memory = 500 * units.Mi
self.membacking = None
self.memtune = None
self.vcpus = 1
self.cpuset = None
self.cpu = None
self.cputune = None
self.acpi = False
self.apic = False
self.pae = False
self.clock = None
self.sysinfo = None
self.os_type = None
self.os_loader = None
self.os_kernel = None
self.os_initrd = None
self.os_cmdline = None
self.os_root = None
self.os_init_path = None
self.os_boot_dev = []
self.os_smbios = None
self.os_mach_type = None
self.devices = []
self.metadata = []
self.idmaps = []
def _format_basic_props(self, root):
root.append(self._text_node("uuid", self.uuid))
root.append(self._text_node("name", self.name))
root.append(self._text_node("memory", self.memory))
if self.membacking is not None:
root.append(self.membacking.format_dom())
if self.memtune is not None:
root.append(self.memtune.format_dom())
if self.cpuset is not None:
vcpu = self._text_node("vcpu", self.vcpus)
vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset))
root.append(vcpu)
else:
root.append(self._text_node("vcpu", self.vcpus))
if len(self.metadata) > 0:
metadata = etree.Element("metadata")
for m in self.metadata:
metadata.append(m.format_dom())
root.append(metadata)
def _format_os(self, root):
os = etree.Element("os")
type_node = self._text_node("type", self.os_type)
if self.os_mach_type is not None:
type_node.set("machine", self.os_mach_type)
os.append(type_node)
if self.os_kernel is not None:
os.append(self._text_node("kernel", self.os_kernel))
if self.os_loader is not None:
os.append(self._text_node("loader", self.os_loader))
if self.os_initrd is not None:
os.append(self._text_node("initrd", self.os_initrd))
if self.os_cmdline is not None:
os.append(self._text_node("cmdline", self.os_cmdline))
if self.os_root is not None:
os.append(self._text_node("root", self.os_root))
if self.os_init_path is not None:
os.append(self._text_node("init", self.os_init_path))
for boot_dev in self.os_boot_dev:
os.append(etree.Element("boot", dev=boot_dev))
if self.os_smbios is not None:
os.append(self.os_smbios.format_dom())
root.append(os)
def _format_features(self, root):
if any((self.acpi, self.apic, self.pae)):
features = etree.Element("features")
if self.acpi:
features.append(etree.Element("acpi"))
if self.apic:
features.append(etree.Element("apic"))
if self.pae:
features.append(etree.Element("pae"))
root.append(features)
def _format_devices(self, root):
if len(self.devices) == 0:
return
devices = etree.Element("devices")
for dev in self.devices:
devices.append(dev.format_dom())
root.append(devices)
def _format_idmaps(self, root):
if len(self.idmaps) == 0:
return
idmaps = etree.Element("idmap")
for idmap in self.idmaps:
idmaps.append(idmap.format_dom())
root.append(idmaps)
def format_dom(self):
root = super(LibvirtConfigGuest, self).format_dom()
root.set("type", self.virt_type)
self._format_basic_props(root)
if self.sysinfo is not None:
root.append(self.sysinfo.format_dom())
self._format_os(root)
self._format_features(root)
if self.cputune is not None:
root.append(self.cputune.format_dom())
if self.clock is not None:
root.append(self.clock.format_dom())
if self.cpu is not None:
root.append(self.cpu.format_dom())
self._format_devices(root)
self._format_idmaps(root)
return root
def parse_dom(self, xmldoc):
        # Note: this parses only the following device/config classes:
        #       LibvirtConfigGuestDisk
        #       LibvirtConfigGuestHostdevPCI
        #       LibvirtConfigGuestUIDMap
        #       LibvirtConfigGuestGIDMap
        #       LibvirtConfigGuestCPU
for c in xmldoc.getchildren():
if c.tag == 'devices':
for d in c.getchildren():
if d.tag == 'disk':
obj = LibvirtConfigGuestDisk()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'pci':
obj = LibvirtConfigGuestHostdevPCI()
obj.parse_dom(d)
self.devices.append(obj)
if c.tag == 'idmap':
for map in c.getchildren():
obj = None
if map.tag == 'uid':
obj = LibvirtConfigGuestUIDMap()
elif map.tag == 'gid':
obj = LibvirtConfigGuestGIDMap()
if obj:
obj.parse_dom(map)
self.idmaps.append(obj)
elif c.tag == 'cpu':
obj = LibvirtConfigGuestCPU()
obj.parse_dom(c)
self.cpu = obj
def add_device(self, dev):
self.devices.append(dev)
def set_clock(self, clk):
self.clock = clk
class LibvirtConfigGuestSnapshot(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshot, self).__init__(
root_name="domainsnapshot",
**kwargs)
self.name = None
self.disks = []
def format_dom(self):
ss = super(LibvirtConfigGuestSnapshot, self).format_dom()
if self.name:
ss.append(self._text_node("name", self.name))
disks = etree.Element('disks')
for disk in self.disks:
disks.append(disk.format_dom())
ss.append(disks)
return ss
def add_disk(self, disk):
self.disks.append(disk)
class LibvirtConfigNodeDevice(LibvirtConfigObject):
"""Libvirt Node Devices parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevice, self).__init__(root_name="device",
**kwargs)
self.name = None
self.parent = None
self.driver = None
self.pci_capability = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "name":
self.name = c.text
elif c.tag == "parent":
self.parent = c.text
elif c.tag == "capability" and c.get("type") == 'pci':
pcicap = LibvirtConfigNodeDevicePciCap()
pcicap.parse_dom(c)
self.pci_capability = pcicap
class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
"""Libvirt Node Devices pci capability parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciCap, self).__init__(
root_name="capability", **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
self.product = None
self.product_id = None
self.vendor = None
self.vendor_id = None
self.fun_capability = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciCap, self).parse_dom(xmldoc)
for c in xmldoc.getchildren():
if c.tag == "domain":
self.domain = int(c.text)
elif c.tag == "slot":
self.slot = int(c.text)
elif c.tag == "bus":
self.bus = int(c.text)
elif c.tag == "function":
self.function = int(c.text)
elif c.tag == "product":
self.product = c.text
self.product_id = c.get('id')
elif c.tag == "vendor":
self.vendor = c.text
self.vendor_id = c.get('id')
elif c.tag == "capability" and c.get('type') in \
('virt_functions', 'phys_function'):
funcap = LibvirtConfigNodeDevicePciSubFunctionCap()
funcap.parse_dom(c)
self.fun_capability.append(funcap)
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.device_addrs = list() # list of tuple (domain,bus,slot,function)
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).parse_dom(xmldoc)
self.type = xmldoc.get("type")
for c in xmldoc.getchildren():
if c.tag == "address":
self.device_addrs.append((c.get('domain'),
c.get('bus'),
c.get('slot'),
c.get('function')))
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestRng, self).__init__(root_name="rng",
**kwargs)
self.model = 'random'
self.backend = None
self.rate_period = None
self.rate_bytes = None
def format_dom(self):
dev = super(LibvirtConfigGuestRng, self).format_dom()
dev.set('model', 'virtio')
backend = etree.Element("backend")
backend.set("model", self.model)
backend.text = self.backend
if self.rate_period and self.rate_bytes:
rate = etree.Element("rate")
rate.set("period", str(self.rate_period))
rate.set("bytes", str(self.rate_bytes))
dev.append(rate)
dev.append(backend)
return dev
class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaInstance,
self).__init__(root_name="instance",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.package = None
self.flavor = None
self.name = None
self.creationTime = None
self.owner = None
self.roottype = None
self.rootid = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom()
pkg = self._new_node("package")
pkg.set("version", self.package)
meta.append(pkg)
if self.name is not None:
meta.append(self._text_node("name", self.name))
if self.creationTime is not None:
timestr = time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(self.creationTime))
meta.append(self._text_node("creationTime", timestr))
if self.flavor is not None:
meta.append(self.flavor.format_dom())
if self.owner is not None:
meta.append(self.owner.format_dom())
if self.roottype is not None and self.rootid is not None:
root = self._new_node("root")
root.set("type", self.roottype)
root.set("uuid", str(self.rootid))
meta.append(root)
return meta
class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaFlavor,
self).__init__(root_name="flavor",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.name = None
self.memory = None
self.disk = None
self.swap = None
self.ephemeral = None
self.vcpus = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom()
meta.set("name", self.name)
if self.memory is not None:
meta.append(self._text_node("memory", str(self.memory)))
if self.disk is not None:
meta.append(self._text_node("disk", str(self.disk)))
if self.swap is not None:
meta.append(self._text_node("swap", str(self.swap)))
if self.ephemeral is not None:
meta.append(self._text_node("ephemeral", str(self.ephemeral)))
if self.vcpus is not None:
meta.append(self._text_node("vcpus", str(self.vcpus)))
return meta
class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaOwner,
self).__init__(root_name="owner",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.userid = None
self.username = None
self.projectid = None
self.projectname = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom()
if self.userid is not None and self.username is not None:
user = self._text_node("user", self.username)
user.set("uuid", self.userid)
meta.append(user)
if self.projectid is not None and self.projectname is not None:
project = self._text_node("project", self.projectname)
project.set("uuid", self.projectid)
meta.append(project)
return meta
import datetime
import decimal
import hashlib
import logging
import re
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper:
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
yield from self.cursor
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close instead of passing through to avoid backend-specific behavior
# (#17671). Catch errors liberally because errors in cleanup code
# aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super().execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries_log.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, params,
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super().executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries_log.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, param_list,
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int((microseconds + '000000')[:6]))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(
int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo
)
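# Worked example: typecast_timestamp("2005-07-29 15:48:00.590358-05") returns
# datetime.datetime(2005, 7, 29, 15, 48, 0, 590358); the "-05" zone offset is
# split off and discarded, and tzinfo is set to UTC only when settings.USE_TZ
# is enabled.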
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""
Shorten a string to a repeatable mangled version with the given length.
If a quote stripped name contains a username, e.g. USERNAME"."TABLE,
truncate the table portion only.
"""
    match = re.match(r'([^"]+)"\."([^"]+)', name)
table_name = match.group(2) if match else name
if length is None or len(table_name) <= length:
return name
hsh = hashlib.md5(force_bytes(table_name)).hexdigest()[:hash_len]
return '%s%s%s' % (match.group(1) + '"."' if match else '', table_name[:length - hash_len], hsh)
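# Worked example: truncate_name('a_fairly_long_table_name', length=10) keeps
# the first six characters and appends the first four hex digits of the
# table name's MD5, giving a stable 10-character identifier; names already
# within the limit are returned unchanged.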
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
if decimal_places is not None:
return "%.*f" % (decimal_places, value)
return "{:f}".format(value)
def strip_quotes(table_name):
"""
Strip quotes off of quoted table names to make them safe for use in index
names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
scheme) becomes 'USER"."TABLE'.
"""
has_quotes = table_name.startswith('"') and table_name.endswith('"')
return table_name[1:-1] if has_quotes else table_name
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
from functools import wraps
import json
import os
import numbers
import time
import base64
import logging
from osbs.kerberos_ccache import kerberos_ccache_init
from osbs.build.build_response import BuildResponse
from osbs.constants import DEFAULT_NAMESPACE, BUILD_FINISHED_STATES, BUILD_RUNNING_STATES
from osbs.constants import WATCH_MODIFIED, WATCH_DELETED, WATCH_ERROR
from osbs.constants import (SERVICEACCOUNT_SECRET, SERVICEACCOUNT_TOKEN,
SERVICEACCOUNT_CACRT)
from osbs.exceptions import (OsbsResponseException, OsbsException,
OsbsWatchBuildNotFound, OsbsAuthException)
from osbs.utils import graceful_chain_get
from requests.exceptions import ConnectionError
from requests.utils import guess_json_utf
from six.moves import http_client
from six.moves.urllib.parse import urljoin, urlencode, urlparse, parse_qs
from .http import HttpSession
logger = logging.getLogger(__name__)
def check_response(response, log_level=logging.ERROR):
if response.status_code not in (http_client.OK, http_client.CREATED):
if hasattr(response, 'content'):
content = response.content
else:
content = b''.join(response.iter_lines())
logger.log(log_level, "[%d] %s", response.status_code, content)
raise OsbsResponseException(message=content, status_code=response.status_code)
def retry_on_conflict(func, sleep_seconds=0.5, max_attempts=10):
@wraps(func)
def retry(*args, **kwargs):
last_exception = None
for attempt in range(max_attempts):
if attempt != 0:
time.sleep(sleep_seconds)
logger.debug("attempt %d to call %s", attempt + 1, func.__name__)
try:
return func(*args, **kwargs)
except OsbsResponseException as ex:
if ex.status_code != http_client.CONFLICT:
raise
last_exception = ex
raise last_exception or RuntimeError("operation not attempted")
return retry
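# Usage sketch: retry_on_conflict is applied as a bare decorator (see
# adjust_attributes_on_object below); the wrapped call is retried up to
# max_attempts times, sleeping sleep_seconds between attempts, for as long as
# the server answers with HTTP 409 Conflict.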
# TODO: error handling: create function which handles errors in response object
class Openshift(object):
def __init__(self, openshift_api_url, openshift_api_version, openshift_oauth_url,
k8s_api_url=None,
verbose=False, username=None, password=None, use_kerberos=False,
kerberos_keytab=None, kerberos_principal=None, kerberos_ccache=None,
client_cert=None, client_key=None, verify_ssl=True, use_auth=None,
token=None, namespace=DEFAULT_NAMESPACE):
self.os_api_url = openshift_api_url
self.k8s_api_url = k8s_api_url
self._os_api_version = openshift_api_version
self._os_oauth_url = openshift_oauth_url
self.namespace = namespace
self.verbose = verbose
self.verify_ssl = verify_ssl
self._con = HttpSession(verbose=self.verbose)
self.retries_enabled = True
# auth stuff
self.use_kerberos = use_kerberos
self.username = username
self.password = password
self.client_cert = client_cert
self.client_key = client_key
self.kerberos_keytab = kerberos_keytab
self.kerberos_principal = kerberos_principal
self.kerberos_ccache = kerberos_ccache
self.token = token
self.ca = None
auth_credentials_provided = bool(use_kerberos or
token or
(username and password))
if use_auth is None:
self.use_auth = auth_credentials_provided
if not self.use_auth:
# Are we running inside a pod? If so, we will have a
# token available which can be used for authentication
self.use_auth = self.can_use_serviceaccount_token()
else:
self.use_auth = use_auth
if not auth_credentials_provided:
# We've been told to use authentication but no
# credentials have been given. See if we're running
# inside a pod, and if so use the provided token.
self.can_use_serviceaccount_token()
def can_use_serviceaccount_token(self):
try:
with open(os.path.join(SERVICEACCOUNT_SECRET,
SERVICEACCOUNT_TOKEN),
mode='rt') as tfp:
self.token = tfp.read().rstrip()
ca = os.path.join(SERVICEACCOUNT_SECRET,
SERVICEACCOUNT_CACRT)
if os.access(ca, os.R_OK):
self.ca = ca
except IOError:
# No token available
return False
else:
# We can authenticate using the supplied token
logger.info("Using service account's auth token")
return True
@property
def os_oauth_url(self):
return self._os_oauth_url
def _build_k8s_url(self, url, _prepend_namespace=True, **query):
if _prepend_namespace:
url = "namespaces/%s/%s" % (self.namespace, url)
if query:
url += ("?" + urlencode(query))
return urljoin(self.k8s_api_url, url)
def _build_url(self, url, _prepend_namespace=True, **query):
if _prepend_namespace:
url = "namespaces/%s/%s" % (self.namespace, url)
if query:
url += ("?" + urlencode(query))
return urljoin(self.os_api_url, url)
def _request_args(self, with_auth=True, **kwargs):
headers = kwargs.pop("headers", {})
if with_auth and self.use_auth:
if self.token is None:
self.get_oauth_token()
if self.token:
headers["Authorization"] = "Bearer %s" % self.token
else:
raise OsbsAuthException("Please check your credentials. "
"Token was not retrieved successfully.")
# Use the client certificate both for the OAuth request and OpenShift
# API requests. Certificate auth can be used as an alternative to
# OAuth, however a scenario where they are used to get OAuth token is
# also possible. Certificate is not sent when server does not request it.
if self.client_cert or self.client_key:
if self.client_cert and self.client_key:
kwargs["client_cert"] = self.client_cert
kwargs["client_key"] = self.client_key
else:
raise OsbsAuthException("You need to provide both client certificate and key.")
# Do we have a ca.crt? If so, use it
if self.verify_ssl and self.ca is not None:
kwargs["ca"] = self.ca
return headers, kwargs
def _post(self, url, with_auth=True, **kwargs):
headers, kwargs = self._request_args(with_auth, **kwargs)
return self._con.post(
url, headers=headers, verify_ssl=self.verify_ssl,
retries_enabled=self.retries_enabled, **kwargs)
def _get(self, url, with_auth=True, **kwargs):
headers, kwargs = self._request_args(with_auth, **kwargs)
return self._con.get(
url, headers=headers, verify_ssl=self.verify_ssl,
retries_enabled=self.retries_enabled, **kwargs)
def _put(self, url, with_auth=True, **kwargs):
headers, kwargs = self._request_args(with_auth, **kwargs)
return self._con.put(
url, headers=headers, verify_ssl=self.verify_ssl,
retries_enabled=self.retries_enabled, **kwargs)
def _delete(self, url, with_auth=True, **kwargs):
headers, kwargs = self._request_args(with_auth, **kwargs)
return self._con.delete(
url, headers=headers, verify_ssl=self.verify_ssl,
retries_enabled=self.retries_enabled, **kwargs)
def get_oauth_token(self):
url = self.os_oauth_url + "?response_type=token&client_id=openshift-challenging-client"
if self.use_auth:
if self.username and self.password:
logger.debug("using basic authentication")
r = self._get(url, with_auth=False, allow_redirects=False,
username=self.username, password=self.password)
elif self.use_kerberos:
logger.debug("using kerberos authentication")
if self.kerberos_keytab:
if not self.kerberos_principal:
raise OsbsAuthException("You need to provide kerberos principal along "
"with the keytab path.")
kerberos_ccache_init(self.kerberos_principal, self.kerberos_keytab,
ccache_file=self.kerberos_ccache)
r = self._get(url, with_auth=False, allow_redirects=False, kerberos_auth=True)
else:
logger.debug("using identity authentication")
r = self._get(url, with_auth=False, allow_redirects=False)
else:
logger.debug("getting token without any authentication (fingers crossed)")
r = self._get(url, with_auth=False, allow_redirects=False)
try:
redir_url = r.headers['location']
except KeyError:
logger.error("[%s] 'Location' header is missing in response, cannot retrieve token",
r.status_code)
return ""
parsed_url = urlparse(redir_url)
fragment = parsed_url.fragment
logger.debug("fragment is '%s'", fragment)
parsed_fragment = parse_qs(fragment)
self.token = parsed_fragment['access_token'][0]
return self.token
def get_user(self, username="~"):
"""
get info about user (if no user specified, use the one initiating request)
:param username: str, name of user to get info about, default="~"
:return: dict
"""
url = self._build_url("users/%s/" % username, _prepend_namespace=False)
response = self._get(url)
check_response(response)
return response
def get_serviceaccount_tokens(self, username="~"):
result = {}
url = self._build_k8s_url("serviceaccounts/%s/" % username, _prepend_namespace=True)
response = self._get(url)
check_response(response)
sa_json = response.json()
if not sa_json:
return {}
if 'secrets' not in sa_json.keys():
logger.debug("No secrets found for service account %s", username)
return {}
secrets = sa_json['secrets']
for secret in secrets:
if 'name' not in secret.keys():
logger.debug("Malformed secret info: missing 'name' key in %r",
secret)
continue
secret_name = secret['name']
if 'token' not in secret_name:
logger.debug("Secret %s is not a token", secret_name)
continue
url = self._build_k8s_url("secrets/%s/" % secret_name, _prepend_namespace=True)
response = self._get(url)
check_response(response)
secret_json = response.json()
if not secret_json:
continue
if 'data' not in secret_json.keys():
logger.debug("Malformed secret info: missing 'data' key in %r",
                             secret_json)
continue
secret_data = secret_json['data']
if 'token' not in secret_data.keys():
logger.debug("Malformed secret data: missing 'token' key in %r",
secret_data)
continue
token = secret_data['token']
# Token needs to be base64-decoded
result[secret_name] = base64.b64decode(token)
return result
def create_build(self, build_json):
"""
:return:
"""
url = self._build_url("builds/")
logger.debug(build_json)
return self._post(url, data=json.dumps(build_json),
headers={"Content-Type": "application/json"})
def cancel_build(self, build_id):
response = self.get_build(build_id)
br = BuildResponse(response.json())
br.cancelled = True
url = self._build_url("builds/%s/" % build_id)
return self._put(url, data=json.dumps(br.json),
headers={"Content-Type": "application/json"})
def list_pods(self, label=None):
kwargs = {}
if label is not None:
kwargs['labelSelector'] = label
url = self._build_k8s_url("pods/", **kwargs)
return self._get(url)
def get_build_config(self, build_config_id):
url = self._build_url("buildconfigs/%s/" % build_config_id)
response = self._get(url)
build_config = response.json()
return build_config
def get_build_config_by_labels(self, label_selectors):
"""
Returns a build config matching the given label
selectors. This method will raise OsbsException
if not exactly one build config is found.
"""
labels = ['%s=%s' % (field, value) for field, value in label_selectors]
labels = ','.join(labels)
url = self._build_url("buildconfigs/", labelSelector=labels)
items = self._get(url).json()['items']
if not items:
raise OsbsException(
"Build config not found for labels: %r" %
(label_selectors, ))
if len(items) > 1:
raise OsbsException(
"More than one build config found for labels: %r" %
(label_selectors, ))
return items[0]
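    # Usage sketch (hypothetical label selectors): calling
    #   get_build_config_by_labels([('git-repo-name', 'app'),
    #                               ('git-branch', 'main')])
    # queries buildconfigs/?labelSelector=git-repo-name=app,git-branch=main
    # and raises OsbsException unless exactly one item is returned.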
def create_build_config(self, build_config_json):
"""
:return:
"""
url = self._build_url("buildconfigs/")
return self._post(url, data=build_config_json,
headers={"Content-Type": "application/json"})
def update_build_config(self, build_config_id, build_config_json):
url = self._build_url("buildconfigs/%s" % build_config_id)
response = self._put(url, data=build_config_json,
headers={"Content-Type": "application/json"})
check_response(response)
return response
def instantiate_build_config(self, build_config_id):
url = self._build_url("buildconfigs/%s/instantiate" % build_config_id)
data = json.dumps({
"kind": "BuildRequest",
"apiVersion": self._os_api_version,
"metadata": {
"name": build_config_id,
},
})
return self._post(url, data=data,
headers={"Content-Type": "application/json"})
def start_build(self, build_config_id):
"""
:return:
"""
return self.instantiate_build_config(build_config_id)
def wait_for_new_build_config_instance(self, build_config_id, prev_version):
logger.info("waiting for build config %s to get instantiated", build_config_id)
for changetype, obj in self.watch_resource("buildconfigs", build_config_id):
if changetype == WATCH_MODIFIED:
version = graceful_chain_get(obj, 'status', 'lastVersion')
if not isinstance(version, numbers.Integral):
logger.error("BuildConfig %s has unexpected lastVersion: %s", build_config_id,
version)
continue
if version > prev_version:
return "%s-%s" % (build_config_id, version)
if changetype == WATCH_DELETED:
logger.error("BuildConfig deleted while waiting for new build instance")
break
raise OsbsResponseException("New BuildConfig instance not found",
http_client.NOT_FOUND)
def stream_logs(self, build_id):
"""
stream logs from build
:param build_id: str
:return: iterator
"""
kwargs = {'follow': 1}
# If connection is closed within this many seconds, give up:
min_idle_timeout = 60
# Stream logs, but be careful of the connection closing
# due to idle timeout. In that case, try again until the
# call returns more quickly than a reasonable timeout
# would be set to.
last_activity = time.time()
while True:
buildlogs_url = self._build_url("builds/%s/log/" % build_id,
**kwargs)
try:
response = self._get(buildlogs_url, stream=1,
headers={'Connection': 'close'})
check_response(response)
for line in response.iter_lines():
last_activity = time.time()
yield line
# NOTE1: If self._get causes ChunkedEncodingError, ConnectionError,
# or IncompleteRead to be raised, they'll be wrapped in
# OsbsNetworkException or OsbsException
# NOTE2: If decode_json or iter_lines causes ChunkedEncodingError, ConnectionError,
# or IncompleteRead to be raised, it'll simply be silenced.
# NOTE3: An exception may be raised from
# check_response(). In this case, exception will be
# wrapped in OsbsException or OsbsNetworkException,
# inspect cause to detect ConnectionError.
except OsbsException as exc:
if not isinstance(exc.cause, ConnectionError):
raise
idle = time.time() - last_activity
logger.debug("connection closed after %ds", idle)
if idle < min_idle_timeout:
# Finish output
return
since = int(idle - 1)
logger.debug("fetching logs starting from %ds ago", since)
kwargs['sinceSeconds'] = since
def logs(self, build_id, follow=False, build_json=None, wait_if_missing=False):
"""
provide logs from build
:param build_id: str
:param follow: bool, fetch logs as they come?
:param build_json: dict, to save one get-build query
:param wait_if_missing: bool, if build doesn't exist, wait
:return: None, str or iterator
"""
# does build exist?
try:
build_json = build_json or self.get_build(build_id).json()
except OsbsResponseException as ex:
if ex.status_code == 404:
if not wait_if_missing:
raise OsbsException("Build '%s' doesn't exist." % build_id)
else:
raise
if follow or wait_if_missing:
build_json = self.wait_for_build_to_get_scheduled(build_id)
br = BuildResponse(build_json)
# When build is in new or pending state, openshift responds with 500
if br.is_pending():
return
if follow:
return self.stream_logs(build_id)
buildlogs_url = self._build_url("builds/%s/log/" % build_id)
response = self._get(buildlogs_url, headers={'Connection': 'close'})
check_response(response)
return response.content
def list_builds(self, build_config_id=None, koji_task_id=None,
field_selector=None, labels=None):
"""
List builds matching criteria
:param build_config_id: str, only list builds created from BuildConfig
:param koji_task_id: str, only list builds for Koji Task ID
:param field_selector: str, field selector for query
:return: HttpResponse
"""
query = {}
selector = '{key}={value}'
label = {}
if labels is not None:
label.update(labels)
if build_config_id is not None:
label['buildconfig'] = build_config_id
if koji_task_id is not None:
label['koji-task-id'] = str(koji_task_id)
if label:
query['labelSelector'] = ','.join([selector.format(key=key,
value=value)
for key, value in label.items()])
if field_selector is not None:
query['fieldSelector'] = field_selector
url = self._build_url("builds/", **query)
return self._get(url)
def get_build(self, build_id):
"""
:return:
"""
url = self._build_url("builds/%s/" % build_id)
response = self._get(url)
check_response(response)
return response
def list_resource_quotas(self):
url = self._build_k8s_url("resourcequotas/")
response = self._get(url)
check_response(response)
return response
def get_resource_quota(self, quota_name):
url = self._build_k8s_url("resourcequotas/%s" % quota_name)
response = self._get(url)
check_response(response)
return response
def create_resource_quota(self, name, quota_json):
"""
Prevent builds being scheduled and wait for running builds to finish.
:return:
"""
url = self._build_k8s_url("resourcequotas/")
response = self._post(url, data=json.dumps(quota_json),
headers={"Content-Type": "application/json"})
if response.status_code == http_client.CONFLICT:
url = self._build_k8s_url("resourcequotas/%s" % name)
response = self._put(url, data=json.dumps(quota_json),
headers={"Content-Type": "application/json"})
check_response(response)
return response
def delete_resource_quota(self, name):
url = self._build_k8s_url("resourcequotas/%s" % name)
response = self._delete(url)
if response.status_code != http_client.NOT_FOUND:
check_response(response)
return response
def watch_resource(self, resource_type, resource_name=None, **request_args):
path = "watch/namespaces/%s/%s/" % (self.namespace, resource_type)
if resource_name is not None:
path += "%s/" % resource_name
url = self._build_url(path, _prepend_namespace=False, **request_args)
while True:
with self._get(url, stream=True, headers={'Connection': 'close'}) as response:
check_response(response)
encoding = None
for line in response.iter_lines():
logger.debug(line)
if not encoding:
encoding = guess_json_utf(line)
try:
j = json.loads(line.decode(encoding))
except ValueError:
logger.error("Cannot decode watch event: %s", line)
continue
if 'object' not in j:
logger.error("Watch event has no 'object': %s", j)
continue
if 'type' not in j:
logger.error("Watch event has no 'type': %s", j)
continue
yield (j['type'].lower(), j['object'])
logger.debug("connection closed, reconnecting in 30s")
time.sleep(30)
def wait(self, build_id, states):
"""
:param build_id: wait for build to finish
:return:
"""
logger.info("watching build '%s'", build_id)
for changetype, obj in self.watch_resource("builds", build_id):
try:
obj_name = obj["metadata"]["name"]
except KeyError:
logger.error("'object' doesn't have any name")
continue
try:
obj_status = obj["status"]["phase"]
except KeyError:
logger.error("'object' doesn't have any status")
continue
else:
obj_status_lower = obj_status.lower()
logger.info("object has changed: '%s', status: '%s', name: '%s'",
changetype, obj_status, obj_name)
if obj_name == build_id:
logger.info("matching build found")
logger.debug("is %s in %s?", repr(obj_status_lower), states)
if obj_status_lower in states:
logger.debug("Yes, build is in the state I'm waiting for.")
return obj
else:
logger.debug("No, build is not in the state I'm "
"waiting for.")
else:
logger.info("The build %r isn't me %r", obj_name, build_id)
# I'm not sure how we can end up here since there are two possible scenarios:
# 1. our object was found and we are returning in the loop
# 2. our object was not found and we keep waiting (in the loop)
# Therefore, let's raise here
logger.error("build '%s' was not found during wait", build_id)
raise OsbsWatchBuildNotFound("build '%s' was not found and response stream ended" %
build_id)
def wait_for_build_to_finish(self, build_id):
for retry in range(1, 10):
try:
build_response = self.wait(build_id, BUILD_FINISHED_STATES)
return build_response
except OsbsWatchBuildNotFound:
                # this is a workaround for https://github.com/openshift/origin/issues/2348
logger.error("I'm going to wait again. Retry #%d.", retry)
continue
raise OsbsException("Failed to wait for a build: %s" % build_id)
def wait_for_build_to_get_scheduled(self, build_id):
build_response = self.wait(build_id, BUILD_FINISHED_STATES + BUILD_RUNNING_STATES)
return build_response
@staticmethod
def _update_metadata_things(metadata, things, values):
metadata.setdefault(things, {})
metadata[things].update(values)
@staticmethod
def _replace_metadata_things(metadata, things, values):
metadata[things] = values
@retry_on_conflict
def adjust_attributes_on_object(self, collection, name, things, values, how):
"""
adjust labels or annotations on object
labels have to match RE: (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? and
have at most 63 chars
:param collection: str, object collection e.g. 'builds'
:param name: str, name of object
:param things: str, 'labels' or 'annotations'
:param values: dict, values to set
:param how: callable, how to adjust the values e.g.
self._replace_metadata_things
:return:
"""
url = self._build_url("%s/%s" % (collection, name))
response = self._get(url)
logger.debug("before modification: %s", response.content)
build_json = response.json()
how(build_json['metadata'], things, values)
response = self._put(url, data=json.dumps(build_json), use_json=True)
check_response(response)
return response
def update_labels_on_build(self, build_id, labels):
return self.adjust_attributes_on_object('builds', build_id,
'labels', labels,
self._update_metadata_things)
def set_labels_on_build(self, build_id, labels):
return self.adjust_attributes_on_object('builds', build_id,
'labels', labels,
self._replace_metadata_things)
def update_labels_on_build_config(self, build_config_id, labels):
return self.adjust_attributes_on_object('buildconfigs', build_config_id,
'labels', labels,
self._update_metadata_things)
def set_labels_on_build_config(self, build_config_id, labels):
return self.adjust_attributes_on_object('buildconfigs', build_config_id,
'labels', labels,
self._replace_metadata_things)
def update_annotations_on_build(self, build_id, annotations):
"""
set annotations on build object
:param build_id: str, id of build
:param annotations: dict, annotations to set
:return:
"""
return self.adjust_attributes_on_object('builds', build_id,
'annotations', annotations,
self._update_metadata_things)
def set_annotations_on_build(self, build_id, annotations):
return self.adjust_attributes_on_object('builds', build_id,
'annotations', annotations,
self._replace_metadata_things)
def get_image_stream_tag(self, tag_id):
url = self._build_url("imagestreamtags/%s" % tag_id)
response = self._get(url)
check_response(response, log_level=logging.DEBUG)
return response
def put_image_stream_tag(self, tag_id, tag):
url = self._build_url("imagestreamtags/%s" % tag_id)
response = self._put(url, data=json.dumps(tag),
headers={"Content-Type": "application/json"})
check_response(response)
return response
def ensure_image_stream_tag(self, stream, tag_name, tag_template,
scheduled=False):
stream_id = stream['metadata']['name']
insecure = (stream['metadata'].get('annotations', {})
.get('openshift.io/image.insecureRepository') == 'true')
repo = stream['spec']['dockerImageRepository']
tag_id = '{0}:{1}'.format(stream_id, tag_name)
changed = False
try:
tag = self.get_image_stream_tag(tag_id).json()
logger.debug('image stream tag found: %s', tag_id)
except OsbsResponseException as exc:
if exc.status_code != 404:
raise
logger.debug('image stream tag NOT found: %s', tag_id)
tag = tag_template
tag['metadata']['name'] = tag_id
tag['tag']['name'] = tag_name
tag['tag']['from']['name'] = '{0}:{1}'.format(repo, tag_name)
changed = True
if insecure != tag['tag']['importPolicy'].get('insecure', False):
tag['tag']['importPolicy']['insecure'] = insecure
logger.debug('setting importPolicy.insecure to: %s', insecure)
changed = True
if scheduled != tag['tag']['importPolicy'].get('scheduled', False):
tag['tag']['importPolicy']['scheduled'] = scheduled
logger.debug('setting importPolicy.scheduled to: %s', scheduled)
changed = True
if changed:
logger.debug('modifying image stream tag: %s', tag_id)
self.put_image_stream_tag(tag_id, tag)
return changed
def get_image_stream(self, stream_id):
url = self._build_url("imagestreams/%s" % stream_id)
response = self._get(url)
check_response(response, log_level=logging.DEBUG)
return response
def create_image_stream(self, stream_json):
url = self._build_url("imagestreams/")
response = self._post(url, data=stream_json,
headers={"Content-Type": "application/json"})
check_response(response)
return response
def import_image(self, name):
"""
Import image tags from a Docker registry into an ImageStream
:return: bool, whether new tags were imported
"""
# Get the JSON for the ImageStream
url = self._build_url("imagestreams/%s" % name)
imagestream_json = self._get(url).json()
logger.debug("imagestream: %r", imagestream_json)
spec = imagestream_json.get('spec', {})
if 'dockerImageRepository' not in spec:
raise OsbsException('No dockerImageRepository for image import')
# Note the tags before import
oldtags = imagestream_json.get('status', {}).get('tags', [])
logger.debug("tags before import: %r", oldtags)
# Mark it as needing import
imagestream_json['metadata'].setdefault('annotations', {})
check_annotation = "openshift.io/image.dockerRepositoryCheck"
imagestream_json['metadata']['annotations'][check_annotation] = ''
response = self._put(url, data=json.dumps(imagestream_json),
use_json=True)
check_response(response)
# Watch for it to be updated
resource_version = imagestream_json['metadata']['resourceVersion']
for changetype, obj in self.watch_resource("imagestreams", name,
resourceVersion=resource_version):
logger.info("Change type: %r", changetype)
if changetype == WATCH_DELETED:
logger.info("Watched ImageStream was deleted")
break
if changetype == WATCH_ERROR:
logger.error("Error watching ImageStream")
break
if changetype == WATCH_MODIFIED:
logger.info("ImageStream modified")
metadata = obj.get('metadata', {})
annotations = metadata.get('annotations', {})
logger.info("ImageStream annotations: %r", annotations)
if annotations.get(check_annotation, False):
logger.info("ImageStream updated")
# Find out if there are new tags
status = obj.get('status', {})
newtags = status.get('tags', [])
logger.debug("tags after import: %r", newtags)
return True
return False
def dump_resource(self, resource_type):
url = self._build_url("%s" % resource_type)
response = self._get(url)
check_response(response)
return response
def restore_resource(self, resource_type, resource):
url = self._build_url("%s" % resource_type)
response = self._post(url, data=json.dumps(resource),
headers={"Content-Type": "application/json"})
check_response(response)
return response
def create_config_map(self, config_data):
url = self._build_k8s_url("configmaps/")
response = self._post(url, data=json.dumps(config_data))
check_response(response)
return response
def get_config_map(self, config_name):
url = self._build_k8s_url("configmaps/%s" % config_name)
response = self._get(url)
check_response(response)
return response
def delete_config_map(self, config_name):
url = self._build_k8s_url("configmaps/%s" % config_name)
response = self._delete(url, data='{}')
check_response(response)
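# Illustrative sketch (not part of the original module): the update_* helpers
# above merge new values into the object's existing metadata, while the set_*
# helpers replace the whole mapping. A hypothetical caller holding an
# Openshift instance might use them like this:
def _example_adjust_build_metadata(openshift, build_id):
    """Hypothetical usage sketch for the label/annotation helpers above."""
    # merge one label into whatever labels the build already carries
    openshift.update_labels_on_build(build_id, {"triage": "done"})
    # replace all annotations on the build with exactly this mapping
    openshift.set_annotations_on_build(build_id, {"note": "rebuilt manually"})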
if __name__ == '__main__':
o = Openshift(openshift_api_url="https://localhost:8443/oapi/v1/",
openshift_api_version="v1",
openshift_oauth_url="https://localhost:8443/oauth/authorize",
verbose=True)
print(o.get_oauth_token())
|
|
import sys
import re
import textwrap
from doctest import OutputChecker, ELLIPSIS
from tests.test_pip import reset_env, run_pip, write_file, get_env, pyversion
from tests.local_repos import local_checkout, local_repo
distribute_re = re.compile('^distribute==[0-9.]+\n', re.MULTILINE)
def _check_output(result, expected):
checker = OutputChecker()
actual = str(result)
## FIXME! The following is a TOTAL hack. For some reason the
## __str__ result for pkg_resources.Requirement gets downcased on
## Windows. Since INITools is the only package we're installing
## in this file with funky case requirements, I'm forcibly
## upcasing it. You can also normalize everything to lowercase,
## but then you have to remember to upcase <BLANKLINE>. The right
## thing to do in the end is probably to find out how to report
## the proper fully-cased package name in our error message.
if sys.platform == 'win32':
actual = actual.replace('initools', 'INITools')
# This allows our existing tests to work when run in a context
# with distribute installed.
actual = distribute_re.sub('', actual)
def banner(msg):
return '\n========== %s ==========\n' % msg
assert checker.check_output(expected, actual, ELLIPSIS), banner('EXPECTED')+expected+banner('ACTUAL')+actual+banner(6*'=')
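# Illustrative sketch (not part of the original test module): OutputChecker
# with the ELLIPSIS flag treats "..." in the expected text as a wildcard, which
# is how the freeze tests below tolerate version- and path-specific output.
# A minimal standalone check looks like this:
def _ellipsis_matches(expected, actual):
    """Return True if ``actual`` matches ``expected`` under ELLIPSIS rules."""
    return OutputChecker().check_output(expected, actual, ELLIPSIS)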
def test_freeze_basic():
"""
Some tests of freeze; first we have to install some stuff. Note that
the test is a little crude at the end because Python 2.5+ adds egg
info to the standard library, so stuff like wsgiref will show up in
the freeze output. (Probably that should be accounted for in pip,
but currently it is not.)
"""
env = reset_env()
write_file('initools-req.txt', textwrap.dedent("""\
INITools==0.2
# and something else to test out:
MarkupSafe<=0.12
"""))
result = run_pip('install', '-r', env.scratch_path/'initools-req.txt')
result = run_pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
Script result: pip freeze
-- stdout: --------------------
INITools==0.2
MarkupSafe==0.12...
<BLANKLINE>""")
_check_output(result, expected)
def test_freeze_svn():
"""Test freezing a svn checkout"""
checkout_path = local_checkout('svn+http://svn.colorstudy.com/INITools/trunk')
# svn internally stores Windows drives as uppercase; we'll match that.
checkout_path = checkout_path.replace('c:', 'C:')
env = reset_env()
result = env.run('svn', 'co', '-r10',
local_repo('svn+http://svn.colorstudy.com/INITools/trunk'),
'initools-trunk')
result = env.run('python', 'setup.py', 'develop',
cwd=env.scratch_path/ 'initools-trunk', expect_stderr=True)
result = run_pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
Script result: ...pip freeze
-- stdout: --------------------
-e %s@10#egg=INITools-0.3.1dev...-dev_r10
...""" % checkout_path)
_check_output(result, expected)
def test_freeze_git_clone():
"""
Test freezing a Git clone.
"""
env = reset_env()
result = env.run('git', 'clone', local_repo('git+http://github.com/pypa/pip-test-package.git'), 'pip-test-package')
result = env.run('git', 'checkout', '7d654e66c8fa7149c165ddeffa5b56bc06619458',
cwd=env.scratch_path / 'pip-test-package', expect_stderr=True)
result = env.run('python', 'setup.py', 'develop',
cwd=env.scratch_path / 'pip-test-package')
result = run_pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
Script result: ...pip freeze
-- stdout: --------------------
...-e %s@...#egg=pip_test_package-...
...""" % local_checkout('git+http://github.com/pypa/pip-test-package.git'))
_check_output(result, expected)
result = run_pip('freeze', '-f',
'%s#egg=pip_test_package' % local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_stderr=True)
expected = textwrap.dedent("""\
Script result: pip freeze -f %(repo)s#egg=pip_test_package
-- stdout: --------------------
-f %(repo)s#egg=pip_test_package...
-e %(repo)s@...#egg=pip_test_package-dev
...""" % {'repo': local_checkout('git+http://github.com/pypa/pip-test-package.git')})
_check_output(result, expected)
def test_freeze_mercurial_clone():
"""
Test freezing a Mercurial clone.
"""
reset_env()
env = get_env()
result = env.run('hg', 'clone',
'-r', '7bc186caa7dc',
local_repo('hg+http://bitbucket.org/jezdez/django-authority'),
'django-authority')
result = env.run('python', 'setup.py', 'develop',
cwd=env.scratch_path/'django-authority', expect_stderr=True)
result = run_pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
Script result: ...pip freeze
-- stdout: --------------------
...-e %s@...#egg=django_authority-...
...""" % local_checkout('hg+http://bitbucket.org/jezdez/django-authority'))
_check_output(result, expected)
result = run_pip('freeze', '-f',
'%s#egg=django_authority' % local_checkout('hg+http://bitbucket.org/jezdez/django-authority'),
expect_stderr=True)
expected = textwrap.dedent("""\
Script result: ...pip freeze -f %(repo)s#egg=django_authority
-- stdout: --------------------
-f %(repo)s#egg=django_authority
...-e %(repo)s@...#egg=django_authority-dev
...""" % {'repo': local_checkout('hg+http://bitbucket.org/jezdez/django-authority')})
_check_output(result, expected)
def test_freeze_bazaar_clone():
"""
Test freezing a Bazaar clone.
"""
checkout_path = local_checkout('bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp/release-0.1')
# bzr internally stores Windows drives as uppercase; we'll match that.
checkout_pathC = checkout_path.replace('c:', 'C:')
reset_env()
env = get_env()
result = env.run('bzr', 'checkout', '-r', '174',
local_repo('bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp/release-0.1'),
'django-wikiapp')
result = env.run('python', 'setup.py', 'develop',
cwd=env.scratch_path/'django-wikiapp')
result = run_pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
Script result: ...pip freeze
-- stdout: --------------------
...-e %s@...#egg=django_wikiapp-...
...""" % checkout_pathC)
_check_output(result, expected)
result = run_pip('freeze', '-f',
'%s/#egg=django-wikiapp' % checkout_path,
expect_stderr=True)
expected = textwrap.dedent("""\
Script result: ...pip freeze -f %(repo)s/#egg=django-wikiapp
-- stdout: --------------------
-f %(repo)s/#egg=django-wikiapp
...-e %(repoC)s@...#egg=django_wikiapp-...
...""" % {'repoC': checkout_pathC, 'repo': checkout_path})
_check_output(result, expected)
def test_freeze_with_local_option():
"""
Test that wsgiref (from global site-packages) is reported normally, but not with --local.
"""
reset_env()
result = run_pip('install', 'initools==0.2')
result = run_pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
Script result: ...pip freeze
-- stdout: --------------------
INITools==0.2
wsgiref==...
<BLANKLINE>""")
# The following check is broken (see
# http://bitbucket.org/ianb/pip/issue/110). For now we are simply
# neutering this test, but if we can't find a way to fix it,
# this whole function should be removed.
# _check_output(result, expected)
result = run_pip('freeze', '--local', expect_stderr=True)
expected = textwrap.dedent("""\
Script result: ...pip freeze --local
-- stdout: --------------------
INITools==0.2
<BLANKLINE>""")
_check_output(result, expected)
def test_freeze_with_requirement_option():
"""
Test that new requirements are created correctly with --requirement hints
"""
reset_env()
ignores = textwrap.dedent("""\
# Unchanged requirements below this line
-r ignore.txt
--requirement ignore.txt
-Z ignore
--always-unzip ignore
-f http://ignore
-i http://ignore
--extra-index-url http://ignore
--find-links http://ignore
--index-url http://ignore
""")
write_file('hint.txt', textwrap.dedent("""\
INITools==0.1
NoExist==4.2
""") + ignores)
result = run_pip('install', 'initools==0.2')
result = run_pip('install', 'MarkupSafe')
result = run_pip('freeze', '--requirement', 'hint.txt', expect_stderr=True)
expected = textwrap.dedent("""\
Script result: pip freeze --requirement hint.txt
-- stderr: --------------------
Requirement file contains NoExist==4.2, but that package is not installed
-- stdout: --------------------
INITools==0.2
""") + ignores + "## The following requirements were added by pip --freeze:..."
_check_output(result, expected)
|
|
# sql/coercions.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
import numbers
import re
from . import operators
from . import roles
from . import visitors
from .base import Options
from .traversals import HasCacheKey
from .visitors import Visitable
from .. import exc
from .. import inspection
from .. import util
from ..util import collections_abc
elements = None
lambdas = None
schema = None
selectable = None
sqltypes = None
traversals = None
def _is_literal(element):
"""Return whether or not the element is a "literal" in the context
of a SQL expression construct.
"""
return (
not isinstance(
element,
(Visitable, schema.SchemaEventTarget),
)
and not hasattr(element, "__clause_element__")
)
def _deep_is_literal(element):
"""Return whether or not the element is a "literal" in the context
of a SQL expression construct.
Does a deeper, more esoteric check than _is_literal; it is used
for lambda elements that have to distinguish values that would
be bound vs. not bound, without any surrounding context.
"""
if isinstance(element, collections_abc.Sequence) and not isinstance(
element, str
):
for elem in element:
if not _deep_is_literal(elem):
return False
else:
return True
return (
not isinstance(
element,
(
Visitable,
schema.SchemaEventTarget,
HasCacheKey,
Options,
util.langhelpers._symbol,
),
)
and not hasattr(element, "__clause_element__")
and (
not isinstance(element, type)
or not issubclass(element, HasCacheKey)
)
)
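# Illustrative note (not part of the original module): _is_literal() only rules
# out Visitable / SchemaEventTarget objects and anything exposing
# __clause_element__(), while _deep_is_literal() additionally rejects
# HasCacheKey, Options, symbols and HasCacheKey subclasses, and recurses into
# non-string sequences, so a lambda-captured value like [1, 2, 3] counts as a
# bound literal but a captured select() does not.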
def _document_text_coercion(paramname, meth_rst, param_rst):
return util.add_parameter_text(
paramname,
(
".. warning:: "
"The %s argument to %s can be passed as a Python string argument, "
"which will be treated "
"as **trusted SQL text** and rendered as given. **DO NOT PASS "
"UNTRUSTED INPUT TO THIS PARAMETER**."
)
% (param_rst, meth_rst),
)
def _expression_collection_was_a_list(attrname, fnname, args):
if args and isinstance(args[0], (list, set, dict)) and len(args) == 1:
if isinstance(args[0], list):
util.warn_deprecated_20(
'The "%s" argument to %s(), when referring to a sequence '
"of items, is now passed as a series of positional "
"elements, rather than as a list. " % (attrname, fnname)
)
return args[0]
else:
return args
def expect(
role,
element,
apply_propagate_attrs=None,
argname=None,
post_inspect=False,
**kw
):
if (
role.allows_lambda
# note callable() will not invoke a __getattr__() method, whereas
# hasattr(obj, "__call__") will. by keeping the callable() check here
# we prevent most needless calls to hasattr() and therefore
# __getattr__(), which is present on ColumnElement.
and callable(element)
and hasattr(element, "__code__")
):
return lambdas.LambdaElement(
element,
role,
lambdas.LambdaOptions(**kw),
apply_propagate_attrs=apply_propagate_attrs,
)
# major case is that we are given a ClauseElement already, skip more
# elaborate logic up front if possible
impl = _impl_lookup[role]
original_element = element
if not isinstance(
element,
(elements.ClauseElement, schema.SchemaItem, schema.FetchedValue),
):
resolved = None
if impl._resolve_literal_only:
resolved = impl._literal_coercion(element, **kw)
else:
original_element = element
is_clause_element = False
# this is a special performance optimization for ORM
# joins used by JoinTargetImpl that we don't go through the
# work of creating __clause_element__() when we only need the
# original QueryableAttribute, as the former will do clause
# adaptation and all that, which is just thrown away here.
if (
impl._skip_clauseelement_for_target_match
and isinstance(element, role)
and hasattr(element, "__clause_element__")
):
is_clause_element = True
else:
while hasattr(element, "__clause_element__"):
is_clause_element = True
if not getattr(element, "is_clause_element", False):
element = element.__clause_element__()
else:
break
if not is_clause_element:
if impl._use_inspection:
insp = inspection.inspect(element, raiseerr=False)
if insp is not None:
if post_inspect:
insp._post_inspect
try:
resolved = insp.__clause_element__()
except AttributeError:
impl._raise_for_expected(original_element, argname)
if resolved is None:
resolved = impl._literal_coercion(
element, argname=argname, **kw
)
else:
resolved = element
else:
resolved = element
if (
apply_propagate_attrs is not None
and not apply_propagate_attrs._propagate_attrs
and resolved._propagate_attrs
):
apply_propagate_attrs._propagate_attrs = resolved._propagate_attrs
if impl._role_class in resolved.__class__.__mro__:
if impl._post_coercion:
resolved = impl._post_coercion(
resolved,
argname=argname,
original_element=original_element,
**kw
)
return resolved
else:
return impl._implicit_coercions(
original_element, resolved, argname=argname, **kw
)
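# Illustrative note (not part of the original module): expect() above is the
# central dispatcher. Roughly, it (1) hands eligible lambdas to
# lambdas.LambdaElement, (2) coerces non-ClauseElement inputs by following
# __clause_element__(), running inspection, or falling back to the impl's
# _literal_coercion(), and (3) either returns the resolved object (optionally
# passed through _post_coercion()) when it already provides the requested
# role, or defers to the impl's _implicit_coercions() hook.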
def expect_as_key(role, element, **kw):
kw["as_key"] = True
return expect(role, element, **kw)
def expect_col_expression_collection(role, expressions):
for expr in expressions:
strname = None
column = None
resolved = expect(role, expr)
if isinstance(resolved, util.string_types):
strname = resolved = expr
else:
cols = []
visitors.traverse(resolved, {}, {"column": cols.append})
if cols:
column = cols[0]
add_element = column if column is not None else strname
yield resolved, column, strname, add_element
class RoleImpl(object):
__slots__ = ("_role_class", "name", "_use_inspection")
def _literal_coercion(self, element, **kw):
raise NotImplementedError()
_post_coercion = None
_resolve_literal_only = False
_skip_clauseelement_for_target_match = False
def __init__(self, role_class):
self._role_class = role_class
self.name = role_class._role_name
self._use_inspection = issubclass(role_class, roles.UsesInspection)
def _implicit_coercions(self, element, resolved, argname=None, **kw):
self._raise_for_expected(element, argname, resolved)
def _raise_for_expected(
self,
element,
argname=None,
resolved=None,
advice=None,
code=None,
err=None,
):
if resolved is not None and resolved is not element:
got = "%r object resolved from %r object" % (resolved, element)
else:
got = repr(element)
if argname:
msg = "%s expected for argument %r; got %s." % (
self.name,
argname,
got,
)
else:
msg = "%s expected, got %s." % (self.name, got)
if advice:
msg += " " + advice
util.raise_(exc.ArgumentError(msg, code=code), replace_context=err)
class _Deannotate(object):
__slots__ = ()
def _post_coercion(self, resolved, **kw):
from .util import _deep_deannotate
return _deep_deannotate(resolved)
class _StringOnly(object):
__slots__ = ()
_resolve_literal_only = True
class _ReturnsStringKey(object):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(original_element, util.string_types):
return original_element
else:
self._raise_for_expected(original_element, argname, resolved)
def _literal_coercion(self, element, **kw):
return element
class _ColumnCoercions(object):
__slots__ = ()
def _warn_for_scalar_subquery_coercion(self):
util.warn(
"implicitly coercing SELECT object to scalar subquery; "
"please use the .scalar_subquery() method to produce a scalar "
"subquery.",
)
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if not getattr(resolved, "is_clause_element", False):
self._raise_for_expected(original_element, argname, resolved)
elif resolved._is_select_statement:
self._warn_for_scalar_subquery_coercion()
return resolved.scalar_subquery()
elif resolved._is_from_clause and isinstance(
resolved, selectable.Subquery
):
self._warn_for_scalar_subquery_coercion()
return resolved.element.scalar_subquery()
elif self._role_class.allows_lambda and resolved._is_lambda_element:
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
def _no_text_coercion(
element, argname=None, exc_cls=exc.ArgumentError, extra=None, err=None
):
util.raise_(
exc_cls(
"%(extra)sTextual SQL expression %(expr)r %(argname)sshould be "
"explicitly declared as text(%(expr)r)"
% {
"expr": util.ellipses_string(element),
"argname": "for argument %s" % (argname,) if argname else "",
"extra": "%s " % extra if extra else "",
}
),
replace_context=err,
)
class _NoTextCoercion(object):
__slots__ = ()
def _literal_coercion(self, element, argname=None, **kw):
if isinstance(element, util.string_types) and issubclass(
elements.TextClause, self._role_class
):
_no_text_coercion(element, argname)
else:
self._raise_for_expected(element, argname)
class _CoerceLiterals(object):
__slots__ = ()
_coerce_consts = False
_coerce_star = False
_coerce_numerics = False
def _text_coercion(self, element, argname=None):
return _no_text_coercion(element, argname)
def _literal_coercion(self, element, argname=None, **kw):
if isinstance(element, util.string_types):
if self._coerce_star and element == "*":
return elements.ColumnClause("*", is_literal=True)
else:
return self._text_coercion(element, argname, **kw)
if self._coerce_consts:
if element is None:
return elements.Null()
elif element is False:
return elements.False_()
elif element is True:
return elements.True_()
if self._coerce_numerics and isinstance(element, numbers.Number):
return elements.ColumnClause(str(element), is_literal=True)
self._raise_for_expected(element, argname)
class LiteralValueImpl(RoleImpl):
_resolve_literal_only = True
def _implicit_coercions(
self, element, resolved, argname, type_=None, **kw
):
if not _is_literal(resolved):
self._raise_for_expected(
element, resolved=resolved, argname=argname, **kw
)
return elements.BindParameter(None, element, type_=type_, unique=True)
def _literal_coercion(self, element, argname=None, type_=None, **kw):
return element
class _SelectIsNotFrom(object):
__slots__ = ()
def _raise_for_expected(self, element, argname=None, resolved=None, **kw):
if isinstance(element, roles.SelectStatementRole) or isinstance(
resolved, roles.SelectStatementRole
):
advice = (
"To create a "
"FROM clause from a %s object, use the .subquery() method."
% (resolved.__class__ if resolved is not None else element,)
)
code = "89ve"
else:
advice = code = None
return super(_SelectIsNotFrom, self)._raise_for_expected(
element,
argname=argname,
resolved=resolved,
advice=advice,
code=code,
**kw
)
class HasCacheKeyImpl(RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(original_element, traversals.HasCacheKey):
return original_element
else:
self._raise_for_expected(original_element, argname, resolved)
def _literal_coercion(self, element, **kw):
return element
class ExpressionElementImpl(_ColumnCoercions, RoleImpl):
__slots__ = ()
def _literal_coercion(
self, element, name=None, type_=None, argname=None, is_crud=False, **kw
):
if (
element is None
and not is_crud
and (type_ is None or not type_.should_evaluate_none)
):
# TODO: there's no test coverage now for the
# "should_evaluate_none" part of this, as outside of "crud" this
# codepath is not normally used except in some special cases
return elements.Null()
else:
try:
return elements.BindParameter(
name, element, type_, unique=True, _is_crud=is_crud
)
except exc.ArgumentError as err:
self._raise_for_expected(element, err=err)
def _raise_for_expected(self, element, argname=None, resolved=None, **kw):
if isinstance(element, roles.AnonymizedFromClauseRole):
advice = (
"To create a "
"column expression from a FROM clause row "
"as a whole, use the .table_valued() method."
)
else:
advice = None
return super(ExpressionElementImpl, self)._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
class BinaryElementImpl(ExpressionElementImpl, RoleImpl):
__slots__ = ()
def _literal_coercion(
self, element, expr, operator, bindparam_type=None, argname=None, **kw
):
try:
return expr._bind_param(operator, element, type_=bindparam_type)
except exc.ArgumentError as err:
self._raise_for_expected(element, err=err)
def _post_coercion(self, resolved, expr, **kw):
if resolved.type._isnull and not expr.type._isnull:
resolved = resolved._with_binary_element_type(expr.type)
return resolved
class InElementImpl(RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if resolved._is_from_clause:
if (
isinstance(resolved, selectable.Alias)
and resolved.element._is_select_statement
):
self._warn_for_implicit_coercion(resolved)
return self._post_coercion(resolved.element, **kw)
else:
self._warn_for_implicit_coercion(resolved)
return self._post_coercion(resolved.select(), **kw)
else:
self._raise_for_expected(original_element, argname, resolved)
def _warn_for_implicit_coercion(self, elem):
util.warn(
"Coercing %s object into a select() for use in IN(); "
"please pass a select() construct explicitly"
% (elem.__class__.__name__)
)
def _literal_coercion(self, element, expr, operator, **kw):
if isinstance(element, collections_abc.Iterable) and not isinstance(
element, util.string_types
):
non_literal_expressions = {}
element = list(element)
for o in element:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
self._raise_for_expected(element, **kw)
else:
non_literal_expressions[o] = o
elif o is None:
non_literal_expressions[o] = elements.Null()
if non_literal_expressions:
return elements.ClauseList(
*[
non_literal_expressions[o]
if o in non_literal_expressions
else expr._bind_param(operator, o)
for o in element
]
)
else:
return expr._bind_param(operator, element, expanding=True)
else:
self._raise_for_expected(element, **kw)
def _post_coercion(self, element, expr, operator, **kw):
if element._is_select_statement:
# for IN, we are doing scalar_subquery() coercion without
# a warning
return element.scalar_subquery()
elif isinstance(element, elements.ClauseList):
assert not len(element.clauses) == 0
return element.self_group(against=operator)
elif isinstance(element, elements.BindParameter):
element = element._clone(maintain_key=True)
element.expanding = True
element.expand_op = operator
return element
else:
return element
class OnClauseImpl(_CoerceLiterals, _ColumnCoercions, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _implicit_coercions(
self, original_element, resolved, argname=None, legacy=False, **kw
):
if legacy and isinstance(resolved, str):
return resolved
else:
return super(OnClauseImpl, self)._implicit_coercions(
original_element,
resolved,
argname=argname,
legacy=legacy,
**kw
)
def _text_coercion(self, element, argname=None, legacy=False):
if legacy and isinstance(element, str):
util.warn_deprecated_20(
"Using strings to indicate relationship names in "
"Query.join() is deprecated and will be removed in "
"SQLAlchemy 2.0. Please use the class-bound attribute "
"directly."
)
return element
return super(OnClauseImpl, self)._text_coercion(element, argname)
def _post_coercion(self, resolved, original_element=None, **kw):
# this is a hack right now as we want to use coercion on an
# ORM InstrumentedAttribute, but we want to return the object
# itself if it is one, not its clause element.
# ORM context _join and _legacy_join() would need to be improved
# to look for annotations in a clause element form.
if isinstance(original_element, roles.JoinTargetRole):
return original_element
return resolved
class WhereHavingImpl(_CoerceLiterals, _ColumnCoercions, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
return _no_text_coercion(element, argname)
class StatementOptionImpl(_CoerceLiterals, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
return elements.TextClause(element)
class ColumnArgumentImpl(_NoTextCoercion, RoleImpl):
__slots__ = ()
class ColumnArgumentOrKeyImpl(_ReturnsStringKey, RoleImpl):
__slots__ = ()
class StrAsPlainColumnImpl(_CoerceLiterals, RoleImpl):
__slots__ = ()
def _text_coercion(self, element, argname=None):
return elements.ColumnClause(element)
class ByOfImpl(_CoerceLiterals, _ColumnCoercions, RoleImpl, roles.ByOfRole):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
return elements._textual_label_reference(element)
class OrderByImpl(ByOfImpl, RoleImpl):
__slots__ = ()
def _post_coercion(self, resolved, **kw):
if (
isinstance(resolved, self._role_class)
and resolved._order_by_label_element is not None
):
return elements._label_reference(resolved)
else:
return resolved
class GroupByImpl(ByOfImpl, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(resolved, roles.StrictFromClauseRole):
return elements.ClauseList(*resolved.c)
else:
return resolved
class DMLColumnImpl(_ReturnsStringKey, RoleImpl):
__slots__ = ()
def _post_coercion(self, element, as_key=False, **kw):
if as_key:
return element.key
else:
return element
class ConstExprImpl(RoleImpl):
__slots__ = ()
def _literal_coercion(self, element, argname=None, **kw):
if element is None:
return elements.Null()
elif element is False:
return elements.False_()
elif element is True:
return elements.True_()
else:
self._raise_for_expected(element, argname)
class TruncatedLabelImpl(_StringOnly, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(original_element, util.string_types):
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
def _literal_coercion(self, element, argname=None, **kw):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(element, elements._truncated_label):
return element
else:
return elements._truncated_label(element)
class DDLExpressionImpl(_Deannotate, _CoerceLiterals, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
# see #5754 for why we can't easily deprecate this coercion.
# essentially expressions like postgresql_where would have to be
# text() as they come back from reflection and we don't want to
# have text() elements wired into the inspection dictionaries.
return elements.TextClause(element)
class DDLConstraintColumnImpl(_Deannotate, _ReturnsStringKey, RoleImpl):
__slots__ = ()
class DDLReferredColumnImpl(DDLConstraintColumnImpl):
__slots__ = ()
class LimitOffsetImpl(RoleImpl):
__slots__ = ()
def _implicit_coercions(self, element, resolved, argname=None, **kw):
if resolved is None:
return None
else:
self._raise_for_expected(element, argname, resolved)
def _literal_coercion(self, element, name, type_, **kw):
if element is None:
return None
else:
value = util.asint(element)
return selectable._OffsetLimitParam(
name, value, type_=type_, unique=True
)
class LabeledColumnExprImpl(ExpressionElementImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(resolved, roles.ExpressionElementRole):
return resolved.label(None)
else:
new = super(LabeledColumnExprImpl, self)._implicit_coercions(
original_element, resolved, argname=argname, **kw
)
if isinstance(new, roles.ExpressionElementRole):
return new.label(None)
else:
self._raise_for_expected(original_element, argname, resolved)
class ColumnsClauseImpl(_SelectIsNotFrom, _CoerceLiterals, RoleImpl):
__slots__ = ()
_coerce_consts = True
_coerce_numerics = True
_coerce_star = True
_guess_straight_column = re.compile(r"^\w\S*$", re.I)
def _text_coercion(self, element, argname=None):
element = str(element)
guess_is_literal = not self._guess_straight_column.match(element)
raise exc.ArgumentError(
"Textual column expression %(column)r %(argname)sshould be "
"explicitly declared with text(%(column)r), "
"or use %(literal_column)s(%(column)r) "
"for more specificity"
% {
"column": util.ellipses_string(element),
"argname": "for argument %s" % (argname,) if argname else "",
"literal_column": "literal_column"
if guess_is_literal
else "column",
}
)
class ReturnsRowsImpl(RoleImpl):
__slots__ = ()
class StatementImpl(_CoerceLiterals, RoleImpl):
__slots__ = ()
def _post_coercion(self, resolved, original_element, argname=None, **kw):
if resolved is not original_element and not isinstance(
original_element, util.string_types
):
# use same method as Connection uses; this will later raise
# ObjectNotExecutableError
try:
original_element._execute_on_connection
except AttributeError:
util.warn_deprecated(
"Object %r should not be used directly in a SQL statement "
"context, such as passing to methods such as "
"session.execute(). This usage will be disallowed in a "
"future release. "
"Please use Core select() / update() / delete() etc. "
"with Session.execute() and other statement execution "
"methods." % original_element,
"1.4",
)
return resolved
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if resolved._is_lambda_element:
return resolved
else:
return super(StatementImpl, self)._implicit_coercions(
original_element, resolved, argname=argname, **kw
)
def _text_coercion(self, element, argname=None):
util.warn_deprecated_20(
"Using plain strings to indicate SQL statements without using "
"the text() construct is "
"deprecated and will be removed in version 2.0. Ensure plain "
"SQL statements are passed using the text() construct."
)
return elements.TextClause(element)
class SelectStatementImpl(_NoTextCoercion, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if resolved._is_text_clause:
return resolved.columns()
else:
self._raise_for_expected(original_element, argname, resolved)
class HasCTEImpl(ReturnsRowsImpl):
__slots__ = ()
class IsCTEImpl(RoleImpl):
__slots__ = ()
class JoinTargetImpl(RoleImpl):
__slots__ = ()
_skip_clauseelement_for_target_match = True
def _literal_coercion(self, element, legacy=False, **kw):
if isinstance(element, str):
return element
def _implicit_coercions(
self, original_element, resolved, argname=None, legacy=False, **kw
):
if isinstance(original_element, roles.JoinTargetRole):
# note that this codepath no longer occurs as of
# #6550, unless JoinTargetImpl._skip_clauseelement_for_target_match
# were set to False.
return original_element
elif legacy and isinstance(resolved, str):
util.warn_deprecated_20(
"Using strings to indicate relationship names in "
"Query.join() is deprecated and will be removed in "
"SQLAlchemy 2.0. Please use the class-bound attribute "
"directly."
)
return resolved
elif legacy and isinstance(resolved, roles.WhereHavingRole):
return resolved
elif legacy and resolved._is_select_statement:
util.warn_deprecated(
"Implicit coercion of SELECT and textual SELECT "
"constructs into FROM clauses is deprecated; please call "
".subquery() on any Core select or ORM Query object in "
"order to produce a subquery object.",
version="1.4",
)
# TODO: doing _implicit_subquery here causes tests to fail,
# how was this working before? probably that ORM
# join logic treated it as a select and subquery would happen
# in _ORMJoin->Join
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
class FromClauseImpl(_SelectIsNotFrom, _NoTextCoercion, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self,
original_element,
resolved,
argname=None,
explicit_subquery=False,
allow_select=True,
**kw
):
if resolved._is_select_statement:
if explicit_subquery:
return resolved.subquery()
elif allow_select:
util.warn_deprecated(
"Implicit coercion of SELECT and textual SELECT "
"constructs into FROM clauses is deprecated; please call "
".subquery() on any Core select or ORM Query object in "
"order to produce a subquery object.",
version="1.4",
)
return resolved._implicit_subquery
elif resolved._is_text_clause:
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
def _post_coercion(self, element, deannotate=False, **kw):
if deannotate:
return element._deannotate()
else:
return element
class StrictFromClauseImpl(FromClauseImpl):
__slots__ = ()
def _implicit_coercions(
self,
original_element,
resolved,
argname=None,
allow_select=False,
**kw
):
if resolved._is_select_statement and allow_select:
util.warn_deprecated(
"Implicit coercion of SELECT and textual SELECT constructs "
"into FROM clauses is deprecated; please call .subquery() "
"on any Core select or ORM Query object in order to produce a "
"subquery object.",
version="1.4",
)
return resolved._implicit_subquery
else:
self._raise_for_expected(original_element, argname, resolved)
class AnonymizedFromClauseImpl(StrictFromClauseImpl):
__slots__ = ()
def _post_coercion(self, element, flat=False, name=None, **kw):
assert name is None
return element._anonymous_fromclause(flat=flat)
class DMLTableImpl(_SelectIsNotFrom, _NoTextCoercion, RoleImpl):
__slots__ = ()
def _post_coercion(self, element, **kw):
if "dml_table" in element._annotations:
return element._annotations["dml_table"]
else:
return element
class DMLSelectImpl(_NoTextCoercion, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if resolved._is_from_clause:
if (
isinstance(resolved, selectable.Alias)
and resolved.element._is_select_statement
):
return resolved.element
else:
return resolved.select()
else:
self._raise_for_expected(original_element, argname, resolved)
class CompoundElementImpl(_NoTextCoercion, RoleImpl):
__slots__ = ()
def _raise_for_expected(self, element, argname=None, resolved=None, **kw):
if isinstance(element, roles.FromClauseRole):
if element._is_subquery:
advice = (
"Use the plain select() object without "
"calling .subquery() or .alias()."
)
else:
advice = (
"To SELECT from any FROM clause, use the .select() method."
)
else:
advice = None
return super(CompoundElementImpl, self)._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
_impl_lookup = {}
for name in dir(roles):
cls = getattr(roles, name)
if name.endswith("Role"):
name = name.replace("Role", "Impl")
if name in globals():
impl = globals()[name](cls)
_impl_lookup[cls] = impl
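# Illustrative note (not part of the original module): the loop above builds
# the role -> impl dispatch table consumed by expect(); e.g. a role class named
# "WhereHavingRole" is paired with the WhereHavingImpl instance defined in this
# module, assuming an Impl class with the matching name exists here.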
|
|
# Author: Mark Wronkiewicz <wronk@uw.edu>
#
# License: BSD (3-clause)
import os.path as op
import warnings
import numpy as np
import sys
import scipy
from numpy.testing import assert_equal, assert_allclose
from nose.tools import assert_true, assert_raises
from nose.plugins.skip import SkipTest
from distutils.version import LooseVersion
from mne import compute_raw_covariance, pick_types
from mne.chpi import read_head_pos, filter_chpi
from mne.forward import _prep_meg_channels
from mne.cov import _estimate_rank_meeg_cov
from mne.datasets import testing
from mne.io import (Raw, proc_history, read_info, read_raw_bti, read_raw_kit,
_BaseRaw)
from mne.preprocessing.maxwell import (
maxwell_filter, _get_n_moments, _sss_basis_basic, _sh_complex_to_real,
_sh_real_to_complex, _sh_negate, _bases_complex_to_real, _trans_sss_basis,
_bases_real_to_complex, _sph_harm, _prep_mf_coils)
from mne.tests.common import assert_meg_snr
from mne.utils import (_TempDir, run_tests_if_main, slow_test, catch_logging,
requires_version, object_diff, buggy_mkl_svd)
from mne.externals.six import PY3
warnings.simplefilter('always') # Always throw warnings
data_path = testing.data_path(download=False)
sss_path = op.join(data_path, 'SSS')
pre = op.join(sss_path, 'test_move_anon_')
raw_fname = pre + 'raw.fif'
sss_std_fname = pre + 'stdOrigin_raw_sss.fif'
sss_nonstd_fname = pre + 'nonStdOrigin_raw_sss.fif'
sss_bad_recon_fname = pre + 'badRecon_raw_sss.fif'
sss_reg_in_fname = pre + 'regIn_raw_sss.fif'
sss_fine_cal_fname = pre + 'fineCal_raw_sss.fif'
sss_ctc_fname = pre + 'crossTalk_raw_sss.fif'
sss_trans_default_fname = pre + 'transDefault_raw_sss.fif'
sss_trans_sample_fname = pre + 'transSample_raw_sss.fif'
sss_st1FineCalCrossTalkRegIn_fname = \
pre + 'st1FineCalCrossTalkRegIn_raw_sss.fif'
sss_st1FineCalCrossTalkRegInTransSample_fname = \
pre + 'st1FineCalCrossTalkRegInTransSample_raw_sss.fif'
sss_movecomp_fname = pre + 'movecomp_raw_sss.fif'
sss_movecomp_reg_in_fname = pre + 'movecomp_regIn_raw_sss.fif'
sss_movecomp_reg_in_st4s_fname = pre + 'movecomp_regIn_st4s_raw_sss.fif'
erm_fname = pre + 'erm_raw.fif'
sss_erm_std_fname = pre + 'erm_devOrigin_raw_sss.fif'
sss_erm_reg_in_fname = pre + 'erm_regIn_raw_sss.fif'
sss_erm_fine_cal_fname = pre + 'erm_fineCal_raw_sss.fif'
sss_erm_ctc_fname = pre + 'erm_crossTalk_raw_sss.fif'
sss_erm_st_fname = pre + 'erm_st1_raw_sss.fif'
sss_erm_st1FineCalCrossTalk_fname = pre + 'erm_st1FineCalCrossTalk_raw_sss.fif'
sss_erm_st1FineCalCrossTalkRegIn_fname = \
pre + 'erm_st1FineCalCrossTalkRegIn_raw_sss.fif'
sample_fname = op.join(data_path, 'MEG', 'sample_audvis_trunc_raw.fif')
sss_samp_reg_in_fname = op.join(data_path, 'SSS',
'sample_audvis_trunc_regIn_raw_sss.fif')
sss_samp_fname = op.join(data_path, 'SSS', 'sample_audvis_trunc_raw_sss.fif')
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
bases_fname = op.join(sss_path, 'sss_data.mat')
fine_cal_fname = op.join(sss_path, 'sss_cal_3053.dat')
fine_cal_fname_3d = op.join(sss_path, 'sss_cal_3053_3d.dat')
ctc_fname = op.join(sss_path, 'ct_sparse.fif')
fine_cal_mgh_fname = op.join(sss_path, 'sss_cal_mgh.dat')
ctc_mgh_fname = op.join(sss_path, 'ct_sparse_mgh.fif')
sample_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
triux_path = op.join(data_path, 'SSS', 'TRIUX')
tri_fname = op.join(triux_path, 'triux_bmlhus_erm_raw.fif')
tri_sss_fname = op.join(triux_path, 'triux_bmlhus_erm_raw_sss.fif')
tri_sss_reg_fname = op.join(triux_path, 'triux_bmlhus_erm_regIn_raw_sss.fif')
tri_sss_st4_fname = op.join(triux_path, 'triux_bmlhus_erm_st4_raw_sss.fif')
tri_sss_ctc_fname = op.join(triux_path, 'triux_bmlhus_erm_ctc_raw_sss.fif')
tri_sss_cal_fname = op.join(triux_path, 'triux_bmlhus_erm_cal_raw_sss.fif')
tri_sss_ctc_cal_fname = op.join(
triux_path, 'triux_bmlhus_erm_ctc_cal_raw_sss.fif')
tri_sss_ctc_cal_reg_in_fname = op.join(
triux_path, 'triux_bmlhus_erm_ctc_cal_regIn_raw_sss.fif')
tri_ctc_fname = op.join(triux_path, 'ct_sparse_BMLHUS.fif')
tri_cal_fname = op.join(triux_path, 'sss_cal_BMLHUS.dat')
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
int_order, ext_order = 8, 3
mf_head_origin = (0., 0., 0.04)
mf_meg_origin = (0., 0.013, -0.006)
# otherwise we can get SVD error
requires_svd_convergence = requires_version('scipy', '0.12')
# 30 random bad MEG channels (20 grad, 10 mag) that were used in generation
bads = ['MEG0912', 'MEG1722', 'MEG2213', 'MEG0132', 'MEG1312', 'MEG0432',
'MEG2433', 'MEG1022', 'MEG0442', 'MEG2332', 'MEG0633', 'MEG1043',
'MEG1713', 'MEG0422', 'MEG0932', 'MEG1622', 'MEG1343', 'MEG0943',
'MEG0643', 'MEG0143', 'MEG2142', 'MEG0813', 'MEG2143', 'MEG1323',
'MEG0522', 'MEG1123', 'MEG0423', 'MEG2122', 'MEG2532', 'MEG0812']
def _assert_n_free(raw_sss, lower, upper=None):
"""Helper to check the DOF"""
upper = lower if upper is None else upper
n_free = raw_sss.info['proc_history'][0]['max_info']['sss_info']['nfree']
assert_true(lower <= n_free <= upper,
'nfree fail: %s <= %s <= %s' % (lower, n_free, upper))
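# Illustrative sketch (not part of the original test module): the number of
# real spherical-harmonic moments for an expansion order L is L**2 + 2*L
# (i.e. the sum of 2*l + 1 over degrees l = 1..L), which is the count that
# _get_n_moments returns per order and that test_basic checks below.
def _n_moments_for_order(order):
    """Number of multipolar moments for degrees 1..order."""
    return order ** 2 + 2 * order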
@slow_test
@testing.requires_testing_data
def test_movement_compensation():
"""Test movement compensation"""
temp_dir = _TempDir()
lims = (0, 4, False)
raw = Raw(raw_fname, allow_maxshield='yes', preload=True).crop(*lims)
head_pos = read_head_pos(pos_fname)
#
# Movement compensation, no regularization, no tSSS
#
raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin,
regularize=None, bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_movecomp_fname).crop(*lims),
4.6, 12.4, chpi_med_tol=58)
# IO
temp_fname = op.join(temp_dir, 'test_raw_sss.fif')
raw_sss.save(temp_fname)
raw_sss = Raw(temp_fname)
assert_meg_snr(raw_sss, Raw(sss_movecomp_fname).crop(*lims),
4.6, 12.4, chpi_med_tol=58)
#
# Movement compensation, regularization, no tSSS
#
raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin)
assert_meg_snr(raw_sss, Raw(sss_movecomp_reg_in_fname).crop(*lims),
0.5, 1.9, chpi_med_tol=121)
#
# Movement compensation, regularization, tSSS at the end
#
raw_nohpi = filter_chpi(raw.copy())
with warnings.catch_warnings(record=True) as w: # untested feature
raw_sss_mv = maxwell_filter(raw_nohpi, head_pos=head_pos,
st_duration=4., origin=mf_head_origin,
st_fixed=False)
assert_equal(len(w), 1)
assert_true('is untested' in str(w[0].message))
# Neither match is particularly good because our algorithm actually differs
assert_meg_snr(raw_sss_mv, Raw(sss_movecomp_reg_in_st4s_fname).crop(*lims),
0.6, 1.3)
tSSS_fname = op.join(sss_path, 'test_move_anon_st4s_raw_sss.fif')
assert_meg_snr(raw_sss_mv, Raw(tSSS_fname).crop(*lims),
0.6, 1.0, chpi_med_tol=None)
assert_meg_snr(Raw(sss_movecomp_reg_in_st4s_fname), Raw(tSSS_fname),
0.8, 1.0, chpi_med_tol=None)
#
# Movement compensation, regularization, tSSS at the beginning
#
raw_sss_mc = maxwell_filter(raw_nohpi, head_pos=head_pos, st_duration=4.,
origin=mf_head_origin)
assert_meg_snr(raw_sss_mc, Raw(tSSS_fname).crop(*lims),
0.6, 1.0, chpi_med_tol=None)
assert_meg_snr(raw_sss_mc, raw_sss_mv, 0.6, 1.4)
# some degenerate cases
raw_erm = Raw(erm_fname, allow_maxshield='yes')
assert_raises(ValueError, maxwell_filter, raw_erm, coord_frame='meg',
head_pos=head_pos) # can't do ERM file
assert_raises(ValueError, maxwell_filter, raw,
head_pos=head_pos[:, :9]) # bad shape
assert_raises(TypeError, maxwell_filter, raw, head_pos='foo') # bad type
assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos[::-1])
head_pos_bad = head_pos.copy()
head_pos_bad[0, 0] = raw.first_samp / raw.info['sfreq'] - 1e-2
assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos_bad)
# make sure numerical error doesn't screw it up, though
head_pos_bad[0, 0] = raw.first_samp / raw.info['sfreq'] - 5e-4
raw_sss_tweak = maxwell_filter(raw, head_pos=head_pos_bad,
origin=mf_head_origin)
assert_meg_snr(raw_sss_tweak, raw_sss, 2., 10., chpi_med_tol=11)
@slow_test
def test_other_systems():
"""Test Maxwell filtering on KIT, BTI, and CTF files"""
# KIT
kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
sqd_path = op.join(kit_dir, 'test.sqd')
mrk_path = op.join(kit_dir, 'test_mrk.sqd')
elp_path = op.join(kit_dir, 'test_elp.txt')
hsp_path = op.join(kit_dir, 'test_hsp.txt')
raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
with warnings.catch_warnings(record=True): # head fit
assert_raises(RuntimeError, maxwell_filter, raw_kit)
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 65, 65)
raw_sss_auto = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, mag_scale='auto')
assert_allclose(raw_sss._data, raw_sss_auto._data)
# XXX this KIT origin fit is terrible! Eventually we should get a
# corrected HSP file with proper coverage
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
assert_raises(RuntimeError, maxwell_filter, raw_kit,
ignore_ref=True, regularize=None) # bad condition
raw_sss = maxwell_filter(raw_kit, origin='auto',
ignore_ref=True, bad_condition='warning',
verbose='warning')
log_file = log_file.getvalue()
assert_true('badly conditioned' in log_file)
assert_true('more than 20 mm from' in log_file)
# fits can differ slightly based on scipy version, so be lenient here
_assert_n_free(raw_sss, 28, 34) # bad origin == brutal reg
# Let's set the origin
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, bad_condition='warning',
regularize=None, verbose='warning')
log_file = log_file.getvalue()
assert_true('badly conditioned' in log_file)
_assert_n_free(raw_sss, 80)
# Now with reg
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, verbose=True)
log_file = log_file.getvalue()
assert_true('badly conditioned' not in log_file)
_assert_n_free(raw_sss, 65)
# BTi
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
bti_pdf = op.join(bti_dir, 'test_pdf_linux')
bti_config = op.join(bti_dir, 'test_config_linux')
bti_hs = op.join(bti_dir, 'test_hs_linux')
with warnings.catch_warnings(record=True):  # weight table
raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
picks = pick_types(raw_bti.info, meg='mag', exclude=())
power = np.sqrt(np.sum(raw_bti[picks][0] ** 2))
raw_sss = maxwell_filter(raw_bti)
_assert_n_free(raw_sss, 70)
_assert_shielding(raw_sss, power, 0.5)
raw_sss_auto = maxwell_filter(raw_bti, mag_scale='auto', verbose=True)
_assert_shielding(raw_sss_auto, power, 0.7)
# CTF
raw_ctf = Raw(fname_ctf_raw)
assert_equal(raw_ctf.compensation_grade, 3)
assert_raises(RuntimeError, maxwell_filter, raw_ctf) # compensated
raw_ctf.apply_gradient_compensation(0)
assert_raises(ValueError, maxwell_filter, raw_ctf) # cannot fit headshape
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
_assert_n_free(raw_sss, 68)
_assert_shielding(raw_sss, raw_ctf, 1.8)
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 70)
_assert_shielding(raw_sss, raw_ctf, 12)
raw_sss_auto = maxwell_filter(raw_ctf, origin=(0., 0., 0.04),
ignore_ref=True, mag_scale='auto')
assert_allclose(raw_sss._data, raw_sss_auto._data)
def test_spherical_harmonics():
"""Test spherical harmonic functions"""
from scipy.special import sph_harm
az, pol = np.meshgrid(np.linspace(0, 2 * np.pi, 30),
np.linspace(0, np.pi, 20))
# As of Oct 16, 2015, Anaconda has a bug in scipy due to old compilers (?):
# https://github.com/ContinuumIO/anaconda-issues/issues/479
if (PY3 and
LooseVersion(scipy.__version__) >= LooseVersion('0.15') and
'Continuum Analytics' in sys.version):
raise SkipTest('scipy sph_harm bad in Py3k on Anaconda')
# Test our basic spherical harmonics
for degree in range(1, int_order):
for order in range(0, degree + 1):
sph = _sph_harm(order, degree, az, pol)
sph_scipy = sph_harm(order, degree, az, pol)
assert_allclose(sph, sph_scipy, atol=1e-7)
def test_spherical_conversions():
"""Test spherical harmonic conversions"""
# Test our real<->complex conversion functions
az, pol = np.meshgrid(np.linspace(0, 2 * np.pi, 30),
np.linspace(0, np.pi, 20))
for degree in range(1, int_order):
for order in range(0, degree + 1):
sph = _sph_harm(order, degree, az, pol)
# ensure that we satisfy the conjugation property
assert_allclose(_sh_negate(sph, order),
_sph_harm(-order, degree, az, pol))
# ensure our conversion functions work
sph_real_pos = _sh_complex_to_real(sph, order)
sph_real_neg = _sh_complex_to_real(sph, -order)
sph_2 = _sh_real_to_complex([sph_real_pos, sph_real_neg], order)
assert_allclose(sph, sph_2, atol=1e-7)
@testing.requires_testing_data
def test_multipolar_bases():
"""Test multipolar moment basis calculation using sensor information"""
from scipy.io import loadmat
# Test our basis calculations
info = read_info(raw_fname)
coils = _prep_meg_channels(info, accurate=True, elekta_defs=True,
do_es=True)[0]
# Check against a known benchmark
sss_data = loadmat(bases_fname)
exp = dict(int_order=int_order, ext_order=ext_order)
for origin in ((0, 0, 0.04), (0, 0.02, 0.02)):
o_str = ''.join('%d' % (1000 * n) for n in origin)
exp.update(origin=origin)
S_tot = _sss_basis_basic(exp, coils, method='alternative')
# Test our real<->complex conversion functions
S_tot_complex = _bases_real_to_complex(S_tot, int_order, ext_order)
S_tot_round = _bases_complex_to_real(S_tot_complex,
int_order, ext_order)
assert_allclose(S_tot, S_tot_round, atol=1e-7)
S_tot_mat = np.concatenate([sss_data['Sin' + o_str],
sss_data['Sout' + o_str]], axis=1)
S_tot_mat_real = _bases_complex_to_real(S_tot_mat,
int_order, ext_order)
S_tot_mat_round = _bases_real_to_complex(S_tot_mat_real,
int_order, ext_order)
assert_allclose(S_tot_mat, S_tot_mat_round, atol=1e-7)
assert_allclose(S_tot_complex, S_tot_mat, rtol=1e-4, atol=1e-8)
assert_allclose(S_tot, S_tot_mat_real, rtol=1e-4, atol=1e-8)
# Now normalize our columns
S_tot /= np.sqrt(np.sum(S_tot * S_tot, axis=0))[np.newaxis]
S_tot_complex /= np.sqrt(np.sum(
(S_tot_complex * S_tot_complex.conj()).real, axis=0))[np.newaxis]
# Check against a known benchmark
S_tot_mat = np.concatenate([sss_data['SNin' + o_str],
sss_data['SNout' + o_str]], axis=1)
# Check this roundtrip
S_tot_mat_real = _bases_complex_to_real(S_tot_mat,
int_order, ext_order)
S_tot_mat_round = _bases_real_to_complex(S_tot_mat_real,
int_order, ext_order)
assert_allclose(S_tot_mat, S_tot_mat_round, atol=1e-7)
assert_allclose(S_tot_complex, S_tot_mat, rtol=1e-4, atol=1e-8)
# Now test our optimized version
S_tot = _sss_basis_basic(exp, coils)
S_tot_fast = _trans_sss_basis(
exp, all_coils=_prep_mf_coils(info), trans=info['dev_head_t'])
# there are some sign differences for columns (order/degrees)
# in here, likely due to Condon-Shortley. Here we use a
# Magnetometer channel to figure out the flips because the
# gradiometer channels have effectively zero values for first three
# external components (i.e., S_tot[grad_picks, 80:83])
flips = (np.sign(S_tot_fast[2]) != np.sign(S_tot[2]))
flips = 1 - 2 * flips
assert_allclose(S_tot, S_tot_fast * flips, atol=1e-16)
@testing.requires_testing_data
def test_basic():
"""Test Maxwell filter basic version"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
raw_err = Raw(raw_fname, proj=True, allow_maxshield='yes')
raw_erm = Raw(erm_fname, allow_maxshield='yes')
assert_raises(RuntimeError, maxwell_filter, raw_err)
assert_raises(TypeError, maxwell_filter, 1.) # not a raw
assert_raises(ValueError, maxwell_filter, raw, int_order=20) # too many
n_int_bases = int_order ** 2 + 2 * int_order
n_ext_bases = ext_order ** 2 + 2 * ext_order
nbases = n_int_bases + n_ext_bases
# Check number of bases computed correctly
assert_equal(_get_n_moments([int_order, ext_order]).sum(), nbases)
# Test SSS computation at the standard head origin
assert_equal(len(raw.info['projs']), 12) # 11 MEG projs + 1 AVG EEG
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_equal(len(raw_sss.info['projs']), 1) # avg EEG
assert_equal(raw_sss.info['projs'][0]['desc'], 'Average EEG reference')
assert_meg_snr(raw_sss, Raw(sss_std_fname), 200., 1000.)
py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
assert_equal(len(py_cal), 0)
py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
assert_equal(len(py_ctc), 0)
py_st = raw_sss.info['proc_history'][0]['max_info']['max_st']
assert_equal(len(py_st), 0)
assert_raises(RuntimeError, maxwell_filter, raw_sss)
# Test SSS computation at non-standard head origin
raw_sss = maxwell_filter(raw, origin=[0., 0.02, 0.02], regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_nonstd_fname), 250., 700.)
# Test SSS computation at device origin
sss_erm_std = Raw(sss_erm_std_fname)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg',
origin=mf_meg_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_erm_std, 100., 900.)
for key in ('job', 'frame'):
vals = [x.info['proc_history'][0]['max_info']['sss_info'][key]
for x in [raw_sss, sss_erm_std]]
assert_equal(vals[0], vals[1])
# Check against SSS functions from proc_history
sss_info = raw_sss.info['proc_history'][0]['max_info']
assert_equal(_get_n_moments(int_order),
proc_history._get_sss_rank(sss_info))
# Degenerate cases
assert_raises(ValueError, maxwell_filter, raw, coord_frame='foo')
assert_raises(ValueError, maxwell_filter, raw, origin='foo')
assert_raises(ValueError, maxwell_filter, raw, origin=[0] * 4)
assert_raises(ValueError, maxwell_filter, raw, mag_scale='foo')
@testing.requires_testing_data
def test_maxwell_filter_additional():
"""Test processing of Maxwell filtered data"""
# TODO: Future tests integrate with mne/io/tests/test_proc_history
# Load testing data (raw, SSS std origin, SSS non-standard origin)
data_path = op.join(testing.data_path(download=False))
file_name = 'test_move_anon'
raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif')
# Use 2.0 seconds of data to get stable cov. estimate
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 2., copy=False)
# Get MEG channels, compute Maxwell filtered data
raw.load_data()
raw.pick_types(meg=True, eeg=False)
int_order = 8
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
# Test io on processed data
tempdir = _TempDir()
test_outname = op.join(tempdir, 'test_raw_sss.fif')
raw_sss.save(test_outname)
raw_sss_loaded = Raw(test_outname, preload=True)
# Some numerical imprecision since save uses 'single' fmt
assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0],
rtol=1e-6, atol=1e-20)
# Test rank of covariance matrices for raw and SSS processed data
cov_raw = compute_raw_covariance(raw)
cov_sss = compute_raw_covariance(raw_sss)
scalings = None
cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings)
cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info,
scalings)
assert_equal(cov_raw_rank, raw.info['nchan'])
assert_equal(cov_sss_rank, _get_n_moments(int_order))
@slow_test
@testing.requires_testing_data
def test_bads_reconstruction():
"""Test Maxwell filter reconstruction of bad channels"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1.)
raw.info['bads'] = bads
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_bad_recon_fname), 300.)
@requires_svd_convergence
@testing.requires_testing_data
def test_spatiotemporal_maxwell():
"""Test Maxwell filter (tSSS) spatiotemporal processing"""
# Load raw testing data
raw = Raw(raw_fname, allow_maxshield='yes')
# Test that window is less than length of data
assert_raises(ValueError, maxwell_filter, raw, st_duration=1000.)
# Check both 4 and 10 seconds because Elekta handles them differently
# This is to ensure that std/non-std tSSS windows are correctly handled
st_durations = [4., 10.]
tols = [325., 200.]
for st_duration, tol in zip(st_durations, tols):
# Load tSSS data depending on st_duration and get data
tSSS_fname = op.join(sss_path,
'test_move_anon_st%0ds_raw_sss.fif' % st_duration)
tsss_bench = Raw(tSSS_fname)
# Because Elekta's tSSS sometimes(!) lumps the tail window of data
# onto the previous buffer if it's shorter than st_duration, we have to
# crop the data here to compensate for Elekta's tSSS behavior.
if st_duration == 10.:
tsss_bench.crop(0, st_duration, copy=False)
# Test sss computation at the standard head origin. Same cropping issue
# as mentioned above.
if st_duration == 10.:
raw_tsss = maxwell_filter(raw.crop(0, st_duration),
origin=mf_head_origin,
st_duration=st_duration, regularize=None,
bad_condition='ignore')
else:
raw_tsss = maxwell_filter(raw, st_duration=st_duration,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose=True)
raw_tsss_2 = maxwell_filter(raw, st_duration=st_duration,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', st_fixed=False,
verbose=True)
assert_meg_snr(raw_tsss, raw_tsss_2, 100., 1000.)
assert_equal(raw_tsss.estimate_rank(), 140)
assert_equal(raw_tsss_2.estimate_rank(), 140)
assert_meg_snr(raw_tsss, tsss_bench, tol)
py_st = raw_tsss.info['proc_history'][0]['max_info']['max_st']
assert_true(len(py_st) > 0)
assert_equal(py_st['buflen'], st_duration)
assert_equal(py_st['subspcorr'], 0.98)
# Degenerate cases
assert_raises(ValueError, maxwell_filter, raw, st_duration=10.,
st_correlation=0.)
@requires_svd_convergence
@testing.requires_testing_data
def test_spatiotemporal_only():
"""Test tSSS-only processing"""
# Load raw testing data
raw = Raw(raw_fname,
allow_maxshield='yes').crop(0, 2, copy=False).load_data()
picks = pick_types(raw.info, meg='mag', exclude=())
power = np.sqrt(np.sum(raw[picks][0] ** 2))
# basics
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 10)
# temporal proj will actually reduce spatial DOF with small windows!
raw_tsss = maxwell_filter(raw, st_duration=0.1, st_only=True)
assert_true(raw_tsss.estimate_rank() < 350)
_assert_shielding(raw_tsss, power, 40)
# with movement
head_pos = read_head_pos(pos_fname)
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 12)
with warnings.catch_warnings(record=True): # st_fixed False
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos, st_fixed=False)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 12)
# should do nothing
raw_tsss = maxwell_filter(raw, st_duration=1., st_correlation=1.,
st_only=True)
assert_allclose(raw[:][0], raw_tsss[:][0])
# degenerate
assert_raises(ValueError, maxwell_filter, raw, st_only=True) # no ST
# two-step process equivalent to single-step process
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True)
raw_tsss = maxwell_filter(raw_tsss)
raw_tsss_2 = maxwell_filter(raw, st_duration=1.)
assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
# now also with head movement, and a bad MEG channel
assert_equal(len(raw.info['bads']), 0)
raw.info['bads'] = ['EEG001', 'MEG2623']
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos)
assert_equal(raw.info['bads'], ['EEG001', 'MEG2623'])
assert_equal(raw_tsss.info['bads'], ['EEG001', 'MEG2623']) # don't reset
raw_tsss = maxwell_filter(raw_tsss, head_pos=head_pos)
assert_equal(raw_tsss.info['bads'], ['EEG001']) # do reset MEG bads
raw_tsss_2 = maxwell_filter(raw, st_duration=1., head_pos=head_pos)
assert_equal(raw_tsss_2.info['bads'], ['EEG001'])
assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
@testing.requires_testing_data
def test_fine_calibration():
"""Test Maxwell filter fine calibration"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
sss_fine_cal = Raw(sss_fine_cal_fname)
# Test 1D SSS fine calibration
raw_sss = maxwell_filter(raw, calibration=fine_cal_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_fine_cal, 82, 611)
py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
assert_true(py_cal is not None)
assert_true(len(py_cal) > 0)
mf_cal = sss_fine_cal.info['proc_history'][0]['max_info']['sss_cal']
# we identify these differently
mf_cal['cal_chans'][mf_cal['cal_chans'][:, 1] == 3022, 1] = 3024
assert_allclose(py_cal['cal_chans'], mf_cal['cal_chans'])
assert_allclose(py_cal['cal_corrs'], mf_cal['cal_corrs'],
rtol=1e-3, atol=1e-3)
# Test 3D SSS fine calibration (no equivalent func in MaxFilter yet!)
# very low SNR as proc differs, eventually we should add a better test
raw_sss_3D = maxwell_filter(raw, calibration=fine_cal_fname_3d,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss_3D, sss_fine_cal, 1.0, 6.)
raw_ctf = Raw(fname_ctf_raw).apply_gradient_compensation(0)
assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
calibration=fine_cal_fname)
@slow_test
@testing.requires_testing_data
def test_regularization():
"""Test Maxwell filter regularization"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
min_tols = (100., 2.6, 1.0)
med_tols = (1000., 21.4, 3.7)
origins = ((0., 0., 0.04), (0.,) * 3, (0., 0.02, 0.02))
coord_frames = ('head', 'meg', 'head')
raw_fnames = (raw_fname, erm_fname, sample_fname)
sss_fnames = (sss_reg_in_fname, sss_erm_reg_in_fname,
sss_samp_reg_in_fname)
comp_tols = [0, 1, 4]
for ii, rf in enumerate(raw_fnames):
raw = Raw(rf, allow_maxshield='yes').crop(0., 1.)
sss_reg_in = Raw(sss_fnames[ii])
# Test "in" regularization
raw_sss = maxwell_filter(raw, coord_frame=coord_frames[ii],
origin=origins[ii])
assert_meg_snr(raw_sss, sss_reg_in, min_tols[ii], med_tols[ii], msg=rf)
# check components match
_check_reg_match(raw_sss, sss_reg_in, comp_tols[ii])
def _check_reg_match(sss_py, sss_mf, comp_tol):
"""Helper to check regularization"""
info_py = sss_py.info['proc_history'][0]['max_info']['sss_info']
assert_true(info_py is not None)
assert_true(len(info_py) > 0)
info_mf = sss_mf.info['proc_history'][0]['max_info']['sss_info']
n_in = None
for inf in (info_py, info_mf):
if n_in is None:
n_in = _get_n_moments(inf['in_order'])
else:
assert_equal(n_in, _get_n_moments(inf['in_order']))
assert_equal(inf['components'][:n_in].sum(), inf['nfree'])
assert_allclose(info_py['nfree'], info_mf['nfree'],
atol=comp_tol, err_msg=sss_py._filenames[0])
@testing.requires_testing_data
def test_cross_talk():
"""Test Maxwell filter cross-talk cancellation"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
raw.info['bads'] = bads
sss_ctc = Raw(sss_ctc_fname)
raw_sss = maxwell_filter(raw, cross_talk=ctc_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_ctc, 275.)
py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
assert_true(len(py_ctc) > 0)
assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw)
assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw_fname)
mf_ctc = sss_ctc.info['proc_history'][0]['max_info']['sss_ctc']
del mf_ctc['block_id'] # we don't write this
assert_equal(object_diff(py_ctc, mf_ctc), '')
raw_ctf = Raw(fname_ctf_raw).apply_gradient_compensation(0)
assert_raises(ValueError, maxwell_filter, raw_ctf) # cannot fit headshape
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
_assert_n_free(raw_sss, 68)
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 70)
raw_missing = raw.copy().crop(0, 0.1).load_data().pick_channels(
[raw.ch_names[pi] for pi in pick_types(raw.info, meg=True,
exclude=())[3:]])
with warnings.catch_warnings(record=True) as w:
maxwell_filter(raw_missing, cross_talk=ctc_fname)
assert_equal(len(w), 1)
assert_true('Not all cross-talk channels in raw' in str(w[0].message))
# MEG channels not in cross-talk
assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
cross_talk=ctc_fname)
@testing.requires_testing_data
def test_head_translation():
"""Test Maxwell filter head translation"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
# First try with an unchanged destination
raw_sss = maxwell_filter(raw, destination=raw_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_std_fname).crop(0., 1.), 200.)
# Now with default
with warnings.catch_warnings(record=True):
with catch_logging() as log:
raw_sss = maxwell_filter(raw, destination=mf_head_origin,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose='warning')
assert_true('over 25 mm' in log.getvalue())
assert_meg_snr(raw_sss, Raw(sss_trans_default_fname), 125.)
destination = np.eye(4)
destination[2, 3] = 0.04
assert_allclose(raw_sss.info['dev_head_t']['trans'], destination)
# Now to sample's head pos
with warnings.catch_warnings(record=True):
with catch_logging() as log:
raw_sss = maxwell_filter(raw, destination=sample_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose='warning')
assert_true('= 25.6 mm' in log.getvalue())
assert_meg_snr(raw_sss, Raw(sss_trans_sample_fname), 350.)
assert_allclose(raw_sss.info['dev_head_t']['trans'],
read_info(sample_fname)['dev_head_t']['trans'])
# Degenerate cases
assert_raises(RuntimeError, maxwell_filter, raw,
destination=mf_head_origin, coord_frame='meg')
assert_raises(ValueError, maxwell_filter, raw, destination=[0.] * 4)
# TODO: Eventually add simulation tests mirroring Taulu's original paper
# that calculates the localization error:
# http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=1495874
def _assert_shielding(raw_sss, erm_power, shielding_factor, meg='mag'):
"""Helper to assert a minimum shielding factor using empty-room power"""
picks = pick_types(raw_sss.info, meg=meg, ref_meg=False)
if isinstance(erm_power, _BaseRaw):
picks_erm = pick_types(raw_sss.info, meg=meg, ref_meg=False)
assert_allclose(picks, picks_erm)
erm_power = np.sqrt((erm_power[picks_erm][0] ** 2).sum())
sss_power = raw_sss[picks][0].ravel()
sss_power = np.sqrt(np.sum(sss_power * sss_power))
factor = erm_power / sss_power
assert_true(factor >= shielding_factor,
'Shielding factor %0.3f < %0.3f' % (factor, shielding_factor))
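# Illustrative sketch (not exercised by the test suite): the shielding factor
# checked by _assert_shielding is the ratio of empty-room magnetometer RMS
# power before and after Maxwell filtering. ``fname`` is a placeholder for any
# empty-room FIF recording; Raw, pick_types, and maxwell_filter are the same
# names used by the tests above.
def _example_shielding_factor(fname):
    """Return the magnetometer shielding factor for an empty-room file."""
    raw = Raw(fname, allow_maxshield='yes', preload=True)
    picks = pick_types(raw.info, meg='mag')
    power_before = np.sqrt(np.sum(raw[picks][0] ** 2))
    raw_sss = maxwell_filter(raw, coord_frame='meg', regularize=None)
    power_after = np.sqrt(np.sum(raw_sss[picks][0] ** 2))
    return power_before / power_after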
@buggy_mkl_svd
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_shielding_factor():
"""Test Maxwell filter shielding factor using empty room"""
raw_erm = Raw(erm_fname, allow_maxshield='yes', preload=True)
picks = pick_types(raw_erm.info, meg='mag')
erm_power = raw_erm[picks][0]
erm_power = np.sqrt(np.sum(erm_power * erm_power))
erm_power_grad = raw_erm[pick_types(raw_erm.info, meg='grad')][0]
    erm_power_grad = np.sqrt(np.sum(erm_power_grad * erm_power_grad))
# Vanilla SSS (second value would be for meg=True instead of meg='mag')
_assert_shielding(Raw(sss_erm_std_fname), erm_power, 10) # 1.5)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None)
_assert_shielding(raw_sss, erm_power, 12) # 1.5)
_assert_shielding(raw_sss, erm_power_grad, 0.45, 'grad') # 1.5)
# Using different mag_scale values
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
mag_scale='auto')
_assert_shielding(raw_sss, erm_power, 12)
_assert_shielding(raw_sss, erm_power_grad, 0.48, 'grad')
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
mag_scale=1.) # not a good choice
_assert_shielding(raw_sss, erm_power, 7.3)
_assert_shielding(raw_sss, erm_power_grad, 0.2, 'grad')
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
mag_scale=1000., bad_condition='ignore')
_assert_shielding(raw_sss, erm_power, 4.0)
_assert_shielding(raw_sss, erm_power_grad, 0.1, 'grad')
# Fine cal
_assert_shielding(Raw(sss_erm_fine_cal_fname), erm_power, 12) # 2.0)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin,
calibration=fine_cal_fname)
_assert_shielding(raw_sss, erm_power, 12) # 2.0)
# Crosstalk
_assert_shielding(Raw(sss_erm_ctc_fname), erm_power, 12) # 2.1)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin,
cross_talk=ctc_fname)
_assert_shielding(raw_sss, erm_power, 12) # 2.1)
# Fine cal + Crosstalk
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname,
origin=mf_meg_origin,
cross_talk=ctc_fname)
_assert_shielding(raw_sss, erm_power, 13) # 2.2)
# tSSS
_assert_shielding(Raw(sss_erm_st_fname), erm_power, 37) # 5.8)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 37) # 5.8)
# Crosstalk + tSSS
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
cross_talk=ctc_fname, origin=mf_meg_origin,
st_duration=1.)
_assert_shielding(raw_sss, erm_power, 38) # 5.91)
# Fine cal + tSSS
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname,
origin=mf_meg_origin, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 38) # 5.98)
# Fine cal + Crosstalk + tSSS
_assert_shielding(Raw(sss_erm_st1FineCalCrossTalk_fname),
erm_power, 39) # 6.07)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname, origin=mf_meg_origin,
cross_talk=ctc_fname, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 39) # 6.05)
# Fine cal + Crosstalk + tSSS + Reg-in
_assert_shielding(Raw(sss_erm_st1FineCalCrossTalkRegIn_fname), erm_power,
57) # 6.97)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
origin=mf_meg_origin,
coord_frame='meg', regularize='in')
_assert_shielding(raw_sss, erm_power, 53) # 6.64)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
_assert_shielding(raw_sss, erm_power, 58) # 7.0)
_assert_shielding(raw_sss, erm_power_grad, 1.6, 'grad')
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in',
mag_scale='auto')
_assert_shielding(raw_sss, erm_power, 51)
_assert_shielding(raw_sss, erm_power_grad, 1.5, 'grad')
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname_3d,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
# Our 3D cal has worse defaults for this ERM than the 1D file
_assert_shielding(raw_sss, erm_power, 54)
# Show it by rewriting the 3D as 1D and testing it
temp_dir = _TempDir()
temp_fname = op.join(temp_dir, 'test_cal.dat')
with open(fine_cal_fname_3d, 'r') as fid:
with open(temp_fname, 'w') as fid_out:
for line in fid:
fid_out.write(' '.join(line.strip().split(' ')[:14]) + '\n')
raw_sss = maxwell_filter(raw_erm, calibration=temp_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
# Our 3D cal has worse defaults for this ERM than the 1D file
_assert_shielding(raw_sss, erm_power, 44)
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_all():
"""Test maxwell filter using all options"""
raw_fnames = (raw_fname, raw_fname, erm_fname, sample_fname)
sss_fnames = (sss_st1FineCalCrossTalkRegIn_fname,
sss_st1FineCalCrossTalkRegInTransSample_fname,
sss_erm_st1FineCalCrossTalkRegIn_fname,
sss_samp_fname)
fine_cals = (fine_cal_fname,
fine_cal_fname,
fine_cal_fname,
fine_cal_mgh_fname)
coord_frames = ('head', 'head', 'meg', 'head')
ctcs = (ctc_fname, ctc_fname, ctc_fname, ctc_mgh_fname)
mins = (3.5, 3.5, 1.2, 0.9)
meds = (10.8, 10.4, 3.2, 6.)
st_durs = (1., 1., 1., None)
destinations = (None, sample_fname, None, None)
origins = (mf_head_origin,
mf_head_origin,
mf_meg_origin,
mf_head_origin)
for ii, rf in enumerate(raw_fnames):
raw = Raw(rf, allow_maxshield='yes').crop(0., 1.)
with warnings.catch_warnings(record=True): # head fit off-center
sss_py = maxwell_filter(
raw, calibration=fine_cals[ii], cross_talk=ctcs[ii],
st_duration=st_durs[ii], coord_frame=coord_frames[ii],
destination=destinations[ii], origin=origins[ii])
sss_mf = Raw(sss_fnames[ii])
assert_meg_snr(sss_py, sss_mf, mins[ii], meds[ii], msg=rf)
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_triux():
"""Test TRIUX system support"""
raw = Raw(tri_fname).crop(0, 0.999)
raw.fix_mag_coil_types()
# standard
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None)
assert_meg_snr(sss_py, Raw(tri_sss_fname), 37, 700)
# cross-talk
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
cross_talk=tri_ctc_fname)
assert_meg_snr(sss_py, Raw(tri_sss_ctc_fname), 35, 700)
# fine cal
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
calibration=tri_cal_fname)
assert_meg_snr(sss_py, Raw(tri_sss_cal_fname), 31, 360)
# ctc+cal
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
calibration=tri_cal_fname,
cross_talk=tri_ctc_fname)
assert_meg_snr(sss_py, Raw(tri_sss_ctc_cal_fname), 31, 350)
# regularization
sss_py = maxwell_filter(raw, coord_frame='meg', regularize='in')
sss_mf = Raw(tri_sss_reg_fname)
assert_meg_snr(sss_py, sss_mf, 0.6, 9)
_check_reg_match(sss_py, sss_mf, 1)
# all three
sss_py = maxwell_filter(raw, coord_frame='meg', regularize='in',
calibration=tri_cal_fname,
cross_talk=tri_ctc_fname)
sss_mf = Raw(tri_sss_ctc_cal_reg_in_fname)
assert_meg_snr(sss_py, sss_mf, 0.6, 9)
_check_reg_match(sss_py, sss_mf, 1)
# tSSS
raw = Raw(tri_fname).fix_mag_coil_types()
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
st_duration=4., verbose=True)
assert_meg_snr(sss_py, Raw(tri_sss_st4_fname), 700., 1600)
run_tests_if_main()
|
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
float_or_none,
js_to_json,
orderedSet,
strip_jsonp,
strip_or_none,
unified_strdate,
url_or_none,
US_RATINGS,
)
class PBSIE(InfoExtractor):
_STATIONS = (
(r'(?:video|www|player)\.pbs\.org', 'PBS: Public Broadcasting Service'), # http://www.pbs.org/
(r'video\.aptv\.org', 'APT - Alabama Public Television (WBIQ)'), # http://aptv.org/
(r'video\.gpb\.org', 'GPB/Georgia Public Broadcasting (WGTV)'), # http://www.gpb.org/
(r'video\.mpbonline\.org', 'Mississippi Public Broadcasting (WMPN)'), # http://www.mpbonline.org
(r'video\.wnpt\.org', 'Nashville Public Television (WNPT)'), # http://www.wnpt.org
(r'video\.wfsu\.org', 'WFSU-TV (WFSU)'), # http://wfsu.org/
(r'video\.wsre\.org', 'WSRE (WSRE)'), # http://www.wsre.org
(r'video\.wtcitv\.org', 'WTCI (WTCI)'), # http://www.wtcitv.org
(r'video\.pba\.org', 'WPBA/Channel 30 (WPBA)'), # http://pba.org/
(r'video\.alaskapublic\.org', 'Alaska Public Media (KAKM)'), # http://alaskapublic.org/kakm
# (r'kuac\.org', 'KUAC (KUAC)'), # http://kuac.org/kuac-tv/
# (r'ktoo\.org', '360 North (KTOO)'), # http://www.ktoo.org/
# (r'azpm\.org', 'KUAT 6 (KUAT)'), # http://www.azpm.org/
(r'video\.azpbs\.org', 'Arizona PBS (KAET)'), # http://www.azpbs.org
(r'portal\.knme\.org', 'KNME-TV/Channel 5 (KNME)'), # http://www.newmexicopbs.org/
(r'video\.vegaspbs\.org', 'Vegas PBS (KLVX)'), # http://vegaspbs.org/
(r'watch\.aetn\.org', 'AETN/ARKANSAS ETV NETWORK (KETS)'), # http://www.aetn.org/
(r'video\.ket\.org', 'KET (WKLE)'), # http://www.ket.org/
(r'video\.wkno\.org', 'WKNO/Channel 10 (WKNO)'), # http://www.wkno.org/
(r'video\.lpb\.org', 'LPB/LOUISIANA PUBLIC BROADCASTING (WLPB)'), # http://www.lpb.org/
(r'videos\.oeta\.tv', 'OETA (KETA)'), # http://www.oeta.tv
(r'video\.optv\.org', 'Ozarks Public Television (KOZK)'), # http://www.optv.org/
(r'watch\.wsiu\.org', 'WSIU Public Broadcasting (WSIU)'), # http://www.wsiu.org/
(r'video\.keet\.org', 'KEET TV (KEET)'), # http://www.keet.org
(r'pbs\.kixe\.org', 'KIXE/Channel 9 (KIXE)'), # http://kixe.org/
(r'video\.kpbs\.org', 'KPBS San Diego (KPBS)'), # http://www.kpbs.org/
(r'video\.kqed\.org', 'KQED (KQED)'), # http://www.kqed.org
(r'vids\.kvie\.org', 'KVIE Public Television (KVIE)'), # http://www.kvie.org
(r'video\.pbssocal\.org', 'PBS SoCal/KOCE (KOCE)'), # http://www.pbssocal.org/
(r'video\.valleypbs\.org', 'ValleyPBS (KVPT)'), # http://www.valleypbs.org/
(r'video\.cptv\.org', 'CONNECTICUT PUBLIC TELEVISION (WEDH)'), # http://cptv.org
(r'watch\.knpb\.org', 'KNPB Channel 5 (KNPB)'), # http://www.knpb.org/
(r'video\.soptv\.org', 'SOPTV (KSYS)'), # http://www.soptv.org
# (r'klcs\.org', 'KLCS/Channel 58 (KLCS)'), # http://www.klcs.org
# (r'krcb\.org', 'KRCB Television & Radio (KRCB)'), # http://www.krcb.org
# (r'kvcr\.org', 'KVCR TV/DT/FM :: Vision for the Future (KVCR)'), # http://kvcr.org
(r'video\.rmpbs\.org', 'Rocky Mountain PBS (KRMA)'), # http://www.rmpbs.org
(r'video\.kenw\.org', 'KENW-TV3 (KENW)'), # http://www.kenw.org
(r'video\.kued\.org', 'KUED Channel 7 (KUED)'), # http://www.kued.org
(r'video\.wyomingpbs\.org', 'Wyoming PBS (KCWC)'), # http://www.wyomingpbs.org
(r'video\.cpt12\.org', 'Colorado Public Television / KBDI 12 (KBDI)'), # http://www.cpt12.org/
(r'video\.kbyueleven\.org', 'KBYU-TV (KBYU)'), # http://www.kbyutv.org/
(r'video\.thirteen\.org', 'Thirteen/WNET New York (WNET)'), # http://www.thirteen.org
(r'video\.wgbh\.org', 'WGBH/Channel 2 (WGBH)'), # http://wgbh.org
(r'video\.wgby\.org', 'WGBY (WGBY)'), # http://www.wgby.org
(r'watch\.njtvonline\.org', 'NJTV Public Media NJ (WNJT)'), # http://www.njtvonline.org/
# (r'ripbs\.org', 'Rhode Island PBS (WSBE)'), # http://www.ripbs.org/home/
(r'watch\.wliw\.org', 'WLIW21 (WLIW)'), # http://www.wliw.org/
(r'video\.mpt\.tv', 'mpt/Maryland Public Television (WMPB)'), # http://www.mpt.org
(r'watch\.weta\.org', 'WETA Television and Radio (WETA)'), # http://www.weta.org
(r'video\.whyy\.org', 'WHYY (WHYY)'), # http://www.whyy.org
(r'video\.wlvt\.org', 'PBS 39 (WLVT)'), # http://www.wlvt.org/
(r'video\.wvpt\.net', 'WVPT - Your Source for PBS and More! (WVPT)'), # http://www.wvpt.net
(r'video\.whut\.org', 'Howard University Television (WHUT)'), # http://www.whut.org
(r'video\.wedu\.org', 'WEDU PBS (WEDU)'), # http://www.wedu.org
(r'video\.wgcu\.org', 'WGCU Public Media (WGCU)'), # http://www.wgcu.org/
# (r'wjct\.org', 'WJCT Public Broadcasting (WJCT)'), # http://www.wjct.org
(r'video\.wpbt2\.org', 'WPBT2 (WPBT)'), # http://www.wpbt2.org
(r'video\.wucftv\.org', 'WUCF TV (WUCF)'), # http://wucftv.org
(r'video\.wuft\.org', 'WUFT/Channel 5 (WUFT)'), # http://www.wuft.org
(r'watch\.wxel\.org', 'WXEL/Channel 42 (WXEL)'), # http://www.wxel.org/home/
(r'video\.wlrn\.org', 'WLRN/Channel 17 (WLRN)'), # http://www.wlrn.org/
(r'video\.wusf\.usf\.edu', 'WUSF Public Broadcasting (WUSF)'), # http://wusf.org/
(r'video\.scetv\.org', 'ETV (WRLK)'), # http://www.scetv.org
(r'video\.unctv\.org', 'UNC-TV (WUNC)'), # http://www.unctv.org/
# (r'pbsguam\.org', 'PBS Guam (KGTF)'), # http://www.pbsguam.org/
(r'video\.pbshawaii\.org', 'PBS Hawaii - Oceanic Cable Channel 10 (KHET)'), # http://www.pbshawaii.org/
(r'video\.idahoptv\.org', 'Idaho Public Television (KAID)'), # http://idahoptv.org
(r'video\.ksps\.org', 'KSPS (KSPS)'), # http://www.ksps.org/home/
(r'watch\.opb\.org', 'OPB (KOPB)'), # http://www.opb.org
(r'watch\.nwptv\.org', 'KWSU/Channel 10 & KTNW/Channel 31 (KWSU)'), # http://www.kwsu.org
(r'video\.will\.illinois\.edu', 'WILL-TV (WILL)'), # http://will.illinois.edu/
(r'video\.networkknowledge\.tv', 'Network Knowledge - WSEC/Springfield (WSEC)'), # http://www.wsec.tv
(r'video\.wttw\.com', 'WTTW11 (WTTW)'), # http://www.wttw.com/
# (r'wtvp\.org', 'WTVP & WTVP.org, Public Media for Central Illinois (WTVP)'), # http://www.wtvp.org/
(r'video\.iptv\.org', 'Iowa Public Television/IPTV (KDIN)'), # http://www.iptv.org/
(r'video\.ninenet\.org', 'Nine Network (KETC)'), # http://www.ninenet.org
(r'video\.wfwa\.org', 'PBS39 Fort Wayne (WFWA)'), # http://wfwa.org/
(r'video\.wfyi\.org', 'WFYI Indianapolis (WFYI)'), # http://www.wfyi.org
(r'video\.mptv\.org', 'Milwaukee Public Television (WMVS)'), # http://www.mptv.org
(r'video\.wnin\.org', 'WNIN (WNIN)'), # http://www.wnin.org/
(r'video\.wnit\.org', 'WNIT Public Television (WNIT)'), # http://www.wnit.org/
(r'video\.wpt\.org', 'WPT (WPNE)'), # http://www.wpt.org/
(r'video\.wvut\.org', 'WVUT/Channel 22 (WVUT)'), # http://wvut.org/
(r'video\.weiu\.net', 'WEIU/Channel 51 (WEIU)'), # http://www.weiu.net
(r'video\.wqpt\.org', 'WQPT-TV (WQPT)'), # http://www.wqpt.org
(r'video\.wycc\.org', 'WYCC PBS Chicago (WYCC)'), # http://www.wycc.org
# (r'lakeshorepublicmedia\.org', 'Lakeshore Public Television (WYIN)'), # http://lakeshorepublicmedia.org/
(r'video\.wipb\.org', 'WIPB-TV (WIPB)'), # http://wipb.org
(r'video\.indianapublicmedia\.org', 'WTIU (WTIU)'), # http://indianapublicmedia.org/tv/
(r'watch\.cetconnect\.org', 'CET (WCET)'), # http://www.cetconnect.org
(r'video\.thinktv\.org', 'ThinkTVNetwork (WPTD)'), # http://www.thinktv.org
(r'video\.wbgu\.org', 'WBGU-TV (WBGU)'), # http://wbgu.org
(r'video\.wgvu\.org', 'WGVU TV (WGVU)'), # http://www.wgvu.org/
(r'video\.netnebraska\.org', 'NET1 (KUON)'), # http://netnebraska.org
(r'video\.pioneer\.org', 'Pioneer Public Television (KWCM)'), # http://www.pioneer.org
(r'watch\.sdpb\.org', 'SDPB Television (KUSD)'), # http://www.sdpb.org
(r'video\.tpt\.org', 'TPT (KTCA)'), # http://www.tpt.org
(r'watch\.ksmq\.org', 'KSMQ (KSMQ)'), # http://www.ksmq.org/
(r'watch\.kpts\.org', 'KPTS/Channel 8 (KPTS)'), # http://www.kpts.org/
(r'watch\.ktwu\.org', 'KTWU/Channel 11 (KTWU)'), # http://ktwu.org
# (r'shptv\.org', 'Smoky Hills Public Television (KOOD)'), # http://www.shptv.org
# (r'kcpt\.org', 'KCPT Kansas City Public Television (KCPT)'), # http://kcpt.org/
# (r'blueridgepbs\.org', 'Blue Ridge PBS (WBRA)'), # http://www.blueridgepbs.org/
(r'watch\.easttennesseepbs\.org', 'East Tennessee PBS (WSJK)'), # http://easttennesseepbs.org
(r'video\.wcte\.tv', 'WCTE-TV (WCTE)'), # http://www.wcte.org
(r'video\.wljt\.org', 'WLJT, Channel 11 (WLJT)'), # http://wljt.org/
(r'video\.wosu\.org', 'WOSU TV (WOSU)'), # http://wosu.org/
(r'video\.woub\.org', 'WOUB/WOUC (WOUB)'), # http://woub.org/tv/index.php?section=5
(r'video\.wvpublic\.org', 'WVPB (WVPB)'), # http://wvpublic.org/
(r'video\.wkyupbs\.org', 'WKYU-PBS (WKYU)'), # http://www.wkyupbs.org
# (r'wyes\.org', 'WYES-TV/New Orleans (WYES)'), # http://www.wyes.org
(r'video\.kera\.org', 'KERA 13 (KERA)'), # http://www.kera.org/
(r'video\.mpbn\.net', 'MPBN (WCBB)'), # http://www.mpbn.net/
(r'video\.mountainlake\.org', 'Mountain Lake PBS (WCFE)'), # http://www.mountainlake.org/
(r'video\.nhptv\.org', 'NHPTV (WENH)'), # http://nhptv.org/
(r'video\.vpt\.org', 'Vermont PBS (WETK)'), # http://www.vpt.org
(r'video\.witf\.org', 'witf (WITF)'), # http://www.witf.org
(r'watch\.wqed\.org', 'WQED Multimedia (WQED)'), # http://www.wqed.org/
(r'video\.wmht\.org', 'WMHT Educational Telecommunications (WMHT)'), # http://www.wmht.org/home/
(r'video\.deltabroadcasting\.org', 'Q-TV (WDCQ)'), # http://www.deltabroadcasting.org
(r'video\.dptv\.org', 'WTVS Detroit Public TV (WTVS)'), # http://www.dptv.org/
(r'video\.wcmu\.org', 'CMU Public Television (WCMU)'), # http://www.wcmu.org
(r'video\.wkar\.org', 'WKAR-TV (WKAR)'), # http://wkar.org/
(r'wnmuvideo\.nmu\.edu', 'WNMU-TV Public TV 13 (WNMU)'), # http://wnmutv.nmu.edu
(r'video\.wdse\.org', 'WDSE - WRPT (WDSE)'), # http://www.wdse.org/
(r'video\.wgte\.org', 'WGTE TV (WGTE)'), # http://www.wgte.org
(r'video\.lptv\.org', 'Lakeland Public Television (KAWE)'), # http://www.lakelandptv.org
# (r'prairiepublic\.org', 'PRAIRIE PUBLIC (KFME)'), # http://www.prairiepublic.org/
(r'video\.kmos\.org', 'KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS)'), # http://www.kmos.org/
(r'watch\.montanapbs\.org', 'MontanaPBS (KUSM)'), # http://montanapbs.org
(r'video\.krwg\.org', 'KRWG/Channel 22 (KRWG)'), # http://www.krwg.org
(r'video\.kacvtv\.org', 'KACV (KACV)'), # http://www.panhandlepbs.org/home/
(r'video\.kcostv\.org', 'KCOS/Channel 13 (KCOS)'), # www.kcostv.org
(r'video\.wcny\.org', 'WCNY/Channel 24 (WCNY)'), # http://www.wcny.org
(r'video\.wned\.org', 'WNED (WNED)'), # http://www.wned.org/
(r'watch\.wpbstv\.org', 'WPBS (WPBS)'), # http://www.wpbstv.org
(r'video\.wskg\.org', 'WSKG Public TV (WSKG)'), # http://wskg.org
(r'video\.wxxi\.org', 'WXXI (WXXI)'), # http://wxxi.org
(r'video\.wpsu\.org', 'WPSU (WPSU)'), # http://www.wpsu.org
# (r'wqln\.org', 'WQLN/Channel 54 (WQLN)'), # http://www.wqln.org
(r'on-demand\.wvia\.org', 'WVIA Public Media Studios (WVIA)'), # http://www.wvia.org/
(r'video\.wtvi\.org', 'WTVI (WTVI)'), # http://www.wtvi.org/
# (r'whro\.org', 'WHRO (WHRO)'), # http://whro.org
(r'video\.westernreservepublicmedia\.org', 'Western Reserve PBS (WNEO)'), # http://www.WesternReservePublicMedia.org/
(r'video\.ideastream\.org', 'WVIZ/PBS ideastream (WVIZ)'), # http://www.wviz.org/
(r'video\.kcts9\.org', 'KCTS 9 (KCTS)'), # http://kcts9.org/
(r'video\.basinpbs\.org', 'Basin PBS (KPBT)'), # http://www.basinpbs.org
(r'video\.houstonpbs\.org', 'KUHT / Channel 8 (KUHT)'), # http://www.houstonpublicmedia.org/
# (r'tamu\.edu', 'KAMU - TV (KAMU)'), # http://KAMU.tamu.edu
# (r'kedt\.org', 'KEDT/Channel 16 (KEDT)'), # http://www.kedt.org
(r'video\.klrn\.org', 'KLRN (KLRN)'), # http://www.klrn.org
(r'video\.klru\.tv', 'KLRU (KLRU)'), # http://www.klru.org
# (r'kmbh\.org', 'KMBH-TV (KMBH)'), # http://www.kmbh.org
# (r'knct\.org', 'KNCT (KNCT)'), # http://www.knct.org
# (r'ktxt\.org', 'KTTZ-TV (KTXT)'), # http://www.ktxt.org
(r'video\.wtjx\.org', 'WTJX Channel 12 (WTJX)'), # http://www.wtjx.org/
(r'video\.ideastations\.org', 'WCVE PBS (WCVE)'), # http://ideastations.org/
(r'video\.kbtc\.org', 'KBTC Public Television (KBTC)'), # http://kbtc.org
)
IE_NAME = 'pbs'
IE_DESC = 'Public Broadcasting Service (PBS) and member stations: %s' % ', '.join(list(zip(*_STATIONS))[1])
_VALID_URL = r'''(?x)https?://
(?:
# Direct video URL
(?:%s)/(?:(?:vir|port)alplayer|video)/(?P<id>[0-9]+)(?:[?/]|$) |
# Article with embedded player (or direct video)
(?:www\.)?pbs\.org/(?:[^/]+/){1,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
# Player
(?:video|player)\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
)
''' % '|'.join(list(zip(*_STATIONS))[0])
_GEO_COUNTRIES = ['US']
_TESTS = [
{
'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/',
'md5': '173dc391afd361fa72eab5d3d918968d',
'info_dict': {
'id': '2365006249',
'ext': 'mp4',
'title': 'Constitution USA with Peter Sagal - A More Perfect Union',
'description': 'md5:31b664af3c65fd07fa460d306b837d00',
'duration': 3190,
},
},
{
'url': 'http://www.pbs.org/wgbh/pages/frontline/losing-iraq/',
'md5': '6f722cb3c3982186d34b0f13374499c7',
'info_dict': {
'id': '2365297690',
'ext': 'mp4',
'title': 'FRONTLINE - Losing Iraq',
'description': 'md5:5979a4d069b157f622d02bff62fbe654',
'duration': 5050,
},
},
{
'url': 'http://www.pbs.org/newshour/bb/education-jan-june12-cyberschools_02-23/',
'md5': 'b19856d7f5351b17a5ab1dc6a64be633',
'info_dict': {
'id': '2201174722',
'ext': 'mp4',
'title': 'PBS NewsHour - Cyber Schools Gain Popularity, but Quality Questions Persist',
'description': 'md5:86ab9a3d04458b876147b355788b8781',
'duration': 801,
},
},
{
'url': 'http://www.pbs.org/wnet/gperf/dudamel-conducts-verdi-requiem-hollywood-bowl-full-episode/3374/',
'md5': 'c62859342be2a0358d6c9eb306595978',
'info_dict': {
'id': '2365297708',
'ext': 'mp4',
'title': 'Great Performances - Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
'description': 'md5:657897370e09e2bc6bf0f8d2cd313c6b',
'duration': 6559,
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html',
'md5': '908f3e5473a693b266b84e25e1cf9703',
'info_dict': {
'id': '2365160389',
'display_id': 'killer-typhoon',
'ext': 'mp4',
'description': 'md5:c741d14e979fc53228c575894094f157',
'title': 'NOVA - Killer Typhoon',
'duration': 3172,
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20140122',
'age_limit': 10,
},
},
{
'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/',
'info_dict': {
'id': 'united-states-of-secrets',
},
'playlist_count': 2,
},
{
'url': 'http://www.pbs.org/wgbh/americanexperience/films/great-war/',
'info_dict': {
'id': 'great-war',
},
'playlist_count': 3,
},
{
'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/',
'info_dict': {
'id': '2276541483',
'display_id': 'player',
'ext': 'mp4',
'title': 'American Experience - Death and the Civil War, Chapter 1',
'description': 'md5:67fa89a9402e2ee7d08f53b920674c18',
'duration': 682,
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://www.pbs.org/video/2365245528/',
'md5': '115223d41bd55cda8ae5cd5ed4e11497',
'info_dict': {
'id': '2365245528',
'display_id': '2365245528',
'ext': 'mp4',
'title': 'FRONTLINE - United States of Secrets (Part One)',
'description': 'md5:55756bd5c551519cc4b7703e373e217e',
'duration': 6851,
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
# Video embedded in iframe containing angle brackets as attribute's value (e.g.
# "<iframe style='position: absolute;<br />\ntop: 0; left: 0;' ...", see
# https://github.com/ytdl-org/youtube-dl/issues/7059)
'url': 'http://www.pbs.org/food/features/a-chefs-life-season-3-episode-5-prickly-business/',
'md5': '59b0ef5009f9ac8a319cc5efebcd865e',
'info_dict': {
'id': '2365546844',
'display_id': 'a-chefs-life-season-3-episode-5-prickly-business',
'ext': 'mp4',
'title': "A Chef's Life - Season 3, Ep. 5: Prickly Business",
'description': 'md5:c0ff7475a4b70261c7e58f493c2792a5',
'duration': 1480,
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
# Frontline video embedded via flp2012.js
'url': 'http://www.pbs.org/wgbh/pages/frontline/the-atomic-artists',
'info_dict': {
'id': '2070868960',
'display_id': 'the-atomic-artists',
'ext': 'mp4',
'title': 'FRONTLINE - The Atomic Artists',
'description': 'md5:f677e4520cfacb4a5ce1471e31b57800',
'duration': 723,
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
        # Serves HD only via widget/partnerplayer page
'url': 'http://www.pbs.org/video/2365641075/',
'md5': 'fdf907851eab57211dd589cf12006666',
'info_dict': {
'id': '2365641075',
'ext': 'mp4',
'title': 'FRONTLINE - Netanyahu at War',
'duration': 6852,
'thumbnail': r're:^https?://.*\.jpg$',
'formats': 'mincount:8',
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/13801
'url': 'https://www.pbs.org/video/pbs-newshour-full-episode-july-31-2017-1501539057/',
'info_dict': {
'id': '3003333873',
'ext': 'mp4',
'title': 'PBS NewsHour - full episode July 31, 2017',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 3265,
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.pbs.org/wgbh/roadshow/watch/episode/2105-indianapolis-hour-2/',
'info_dict': {
'id': '2365936247',
'ext': 'mp4',
'title': 'Antiques Roadshow - Indianapolis, Hour 2',
'description': 'md5:524b32249db55663e7231b6b8d1671a2',
'duration': 3180,
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['HTTP Error 403: Forbidden'],
},
{
'url': 'https://www.pbs.org/wgbh/masterpiece/episodes/victoria-s2-e1/',
'info_dict': {
'id': '3007193718',
'ext': 'mp4',
'title': "Victoria - A Soldier's Daughter / The Green-Eyed Monster",
'description': 'md5:37efbac85e0c09b009586523ec143652',
'duration': 6292,
'thumbnail': r're:^https?://.*\.(?:jpg|JPG)$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['HTTP Error 403: Forbidden'],
},
{
'url': 'https://player.pbs.org/partnerplayer/tOz9tM5ljOXQqIIWke53UA==/',
'info_dict': {
'id': '3011407934',
'ext': 'mp4',
'title': 'Stories from the Stage - Road Trip',
'duration': 1619,
'thumbnail': r're:^https?://.*\.(?:jpg|JPG)$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['HTTP Error 403: Forbidden'],
},
{
'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true',
'only_matching': True,
},
{
'url': 'http://watch.knpb.org/video/2365616055/',
'only_matching': True,
},
{
'url': 'https://player.pbs.org/portalplayer/3004638221/?uid=',
'only_matching': True,
}
]
_ERRORS = {
101: 'We\'re sorry, but this video is not yet available.',
        403: 'We\'re sorry, but this video is not available in your region due to rights restrictions.',
404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.',
410: 'This video has expired and is no longer available for online streaming.',
}
def _real_initialize(self):
cookie = (self._download_json(
'http://localization.services.pbs.org/localize/auto/cookie/',
None, headers=self.geo_verification_headers(), fatal=False) or {}).get('cookie')
if cookie:
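            # The localization cookie is expected to contain a fragment like
            # s=["KQED", ...] (the call sign shown is illustrative); the quoted
            # value is stored below as the pbsol.station cookie for .pbs.org.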
station = self._search_regex(r'#?s=\["([^"]+)"', cookie, 'station')
if station:
self._set_cookie('.pbs.org', 'pbsol.station', station)
def _extract_webpage(self, url):
mobj = re.match(self._VALID_URL, url)
description = None
presumptive_id = mobj.group('presumptive_id')
display_id = presumptive_id
if presumptive_id:
webpage = self._download_webpage(url, display_id)
description = strip_or_none(self._og_search_description(
webpage, default=None) or self._html_search_meta(
'description', webpage, default=None))
upload_date = unified_strdate(self._search_regex(
r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"',
webpage, 'upload date', default=None))
# tabbed frontline videos
MULTI_PART_REGEXES = (
r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"',
r'<a[^>]+href=["\']#(?:video-|part)\d+["\'][^>]+data-cove[Ii]d=["\'](\d+)',
)
for p in MULTI_PART_REGEXES:
tabbed_videos = orderedSet(re.findall(p, webpage))
if tabbed_videos:
return tabbed_videos, presumptive_id, upload_date, description
MEDIA_ID_REGEXES = [
r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
r'class="coveplayerid">([^<]+)<', # coveplayer
r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/
r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
r"(?s)window\.PBS\.playerConfig\s*=\s*{.*?id\s*:\s*'([0-9]+)',",
r'<div[^>]+\bdata-cove-id=["\'](\d+)"', # http://www.pbs.org/wgbh/roadshow/watch/episode/2105-indianapolis-hour-2/
r'<iframe[^>]+\bsrc=["\'](?:https?:)?//video\.pbs\.org/widget/partnerplayer/(\d+)', # https://www.pbs.org/wgbh/masterpiece/episodes/victoria-s2-e1/
]
media_id = self._search_regex(
MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None)
if media_id:
return media_id, presumptive_id, upload_date, description
        # Frontline video embedded via flp
video_id = self._search_regex(
r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None)
if video_id:
# pkg_id calculation is reverse engineered from
# http://www.pbs.org/wgbh/pages/frontline/js/flp2012.js
prg_id = self._search_regex(
r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid')[7:]
if 'q' in prg_id:
prg_id = prg_id.split('q')[1]
prg_id = int(prg_id, 16)
getdir = self._download_json(
'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id,
presumptive_id, 'Downloading getdir JSON',
transform_source=strip_jsonp)
return getdir['mid'], presumptive_id, upload_date, description
for iframe in re.findall(r'(?s)<iframe(.+?)></iframe>', webpage):
url = self._search_regex(
r'src=(["\'])(?P<url>.+?partnerplayer.+?)\1', iframe,
'player URL', default=None, group='url')
if url:
break
if not url:
url = self._og_search_url(webpage)
mobj = re.match(
self._VALID_URL, self._proto_relative_url(url.strip()))
player_id = mobj.group('player_id')
if not display_id:
display_id = player_id
if player_id:
player_page = self._download_webpage(
url, display_id, note='Downloading player page',
errnote='Could not download player page')
video_id = self._search_regex(
r'<div\s+id=["\']video_(\d+)', player_page, 'video ID',
default=None)
if not video_id:
video_info = self._extract_video_data(
player_page, 'video data', display_id)
video_id = compat_str(
video_info.get('id') or video_info['contentID'])
else:
video_id = mobj.group('id')
display_id = video_id
return video_id, display_id, None, description
def _extract_video_data(self, string, name, video_id, fatal=True):
return self._parse_json(
self._search_regex(
[r'(?s)PBS\.videoData\s*=\s*({.+?});\n',
r'window\.videoBridge\s*=\s*({.+?});'],
string, name, default='{}'),
video_id, transform_source=js_to_json, fatal=fatal)
def _real_extract(self, url):
video_id, display_id, upload_date, description = self._extract_webpage(url)
if isinstance(video_id, list):
entries = [self.url_result(
'http://video.pbs.org/video/%s' % vid_id, 'PBS', vid_id)
for vid_id in video_id]
return self.playlist_result(entries, display_id)
info = None
redirects = []
redirect_urls = set()
def extract_redirect_urls(info):
for encoding_name in ('recommended_encoding', 'alternate_encoding'):
redirect = info.get(encoding_name)
if not redirect:
continue
redirect_url = redirect.get('url')
if redirect_url and redirect_url not in redirect_urls:
redirects.append(redirect)
redirect_urls.add(redirect_url)
encodings = info.get('encodings')
if isinstance(encodings, list):
for encoding in encodings:
encoding_url = url_or_none(encoding)
if encoding_url and encoding_url not in redirect_urls:
redirects.append({'url': encoding_url})
redirect_urls.add(encoding_url)
chapters = []
# Player pages may also serve different qualities
for page in ('widget/partnerplayer', 'portalplayer'):
player = self._download_webpage(
'http://player.pbs.org/%s/%s' % (page, video_id),
display_id, 'Downloading %s page' % page, fatal=False)
if player:
video_info = self._extract_video_data(
player, '%s video data' % page, display_id, fatal=False)
if video_info:
extract_redirect_urls(video_info)
if not info:
info = video_info
if not chapters:
raw_chapters = video_info.get('chapters') or []
if not raw_chapters:
for chapter_data in re.findall(r'(?s)chapters\.push\(({.*?})\)', player):
chapter = self._parse_json(chapter_data, video_id, js_to_json, fatal=False)
if not chapter:
continue
raw_chapters.append(chapter)
for chapter in raw_chapters:
start_time = float_or_none(chapter.get('start_time'), 1000)
duration = float_or_none(chapter.get('duration'), 1000)
if start_time is None or duration is None:
continue
chapters.append({
'start_time': start_time,
'end_time': start_time + duration,
'title': chapter.get('title'),
})
formats = []
http_url = None
for num, redirect in enumerate(redirects):
redirect_id = redirect.get('eeid')
redirect_info = self._download_json(
'%s?format=json' % redirect['url'], display_id,
'Downloading %s video url info' % (redirect_id or num),
headers=self.geo_verification_headers())
if redirect_info['status'] == 'error':
message = self._ERRORS.get(
redirect_info['http_code'], redirect_info['message'])
if redirect_info['http_code'] == 403:
self.raise_geo_restricted(
msg=message, countries=self._GEO_COUNTRIES)
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, message), expected=True)
format_url = redirect_info.get('url')
if not format_url:
continue
if determine_ext(format_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, display_id, 'mp4', m3u8_id='hls', fatal=False))
else:
formats.append({
'url': format_url,
'format_id': redirect_id,
})
if re.search(r'^https?://.*(?:\d+k|baseline)', format_url):
http_url = format_url
self._remove_duplicate_formats(formats)
m3u8_formats = list(filter(
lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none',
formats))
if http_url:
for m3u8_format in m3u8_formats:
bitrate = self._search_regex(r'(\d+)k', m3u8_format['url'], 'bitrate', default=None)
# Lower qualities (150k and 192k) are not available as HTTP formats (see [1]),
# we won't try extracting them.
# Since summer 2016 higher quality formats (4500k and 6500k) are also available
# albeit they are not documented in [2].
# 1. https://github.com/ytdl-org/youtube-dl/commit/cbc032c8b70a038a69259378c92b4ba97b42d491#commitcomment-17313656
# 2. https://projects.pbs.org/confluence/display/coveapi/COVE+Video+Specifications
if not bitrate or int(bitrate) < 400:
continue
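                # Illustrative only: if the known HTTP URL contains '800k' and
                # this HLS variant's bitrate is 4500, the guessed URL swaps in
                # '4500k' (placeholder numbers, not a guaranteed PBS variant).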
f_url = re.sub(r'\d+k|baseline', bitrate + 'k', http_url)
# This may produce invalid links sometimes (e.g.
# http://www.pbs.org/wgbh/frontline/film/suicide-plan)
if not self._is_valid_url(f_url, display_id, 'http-%sk video' % bitrate):
continue
f = m3u8_format.copy()
f.update({
'url': f_url,
'format_id': m3u8_format['format_id'].replace('hls', 'http'),
'protocol': 'http',
})
formats.append(f)
self._sort_formats(formats)
rating_str = info.get('rating')
if rating_str is not None:
rating_str = rating_str.rpartition('-')[2]
age_limit = US_RATINGS.get(rating_str)
subtitles = {}
closed_captions_url = info.get('closed_captions_url')
if closed_captions_url:
subtitles['en'] = [{
'ext': 'ttml',
'url': closed_captions_url,
}]
mobj = re.search(r'/(\d+)_Encoded\.dfxp', closed_captions_url)
if mobj:
ttml_caption_suffix, ttml_caption_id = mobj.group(0, 1)
ttml_caption_id = int(ttml_caption_id)
subtitles['en'].extend([{
'url': closed_captions_url.replace(
ttml_caption_suffix, '/%d_Encoded.srt' % (ttml_caption_id + 1)),
'ext': 'srt',
}, {
'url': closed_captions_url.replace(
ttml_caption_suffix, '/%d_Encoded.vtt' % (ttml_caption_id + 2)),
'ext': 'vtt',
}])
# info['title'] is often incomplete (e.g. 'Full Episode', 'Episode 5', etc)
# Try turning it to 'program - title' naming scheme if possible
alt_title = info.get('program', {}).get('title')
if alt_title:
info['title'] = alt_title + ' - ' + re.sub(r'^' + alt_title + r'[\s\-:]+', '', info['title'])
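            # e.g. a program title 'FRONTLINE' and a video title of
            # 'FRONTLINE: Losing Iraq' yield 'FRONTLINE - Losing Iraq'
            # (illustrative; titles taken from the test cases above).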
description = info.get('description') or info.get(
'program', {}).get('description') or description
return {
'id': video_id,
'display_id': display_id,
'title': info['title'],
'description': description,
'thumbnail': info.get('image_url'),
'duration': int_or_none(info.get('duration')),
'age_limit': age_limit,
'upload_date': upload_date,
'formats': formats,
'subtitles': subtitles,
'chapters': chapters,
}
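# Quick manual check of this extractor (illustrative; any URL from _TESTS above
# behaves the same way):
#
#     youtube-dl "http://www.pbs.org/video/2365245528/"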
|
|
'''Utility functions.
Created on Sep 13, 2010
@author: jnaous
'''
import os
import re
from django.conf import settings
from expedient.common.federation.geni.util.urn_util import URN
from expedient.common.federation.geni.util.cert_util import create_cert
from expedient.common.federation.sfa.trust.gid import GID
from django.core.urlresolvers import reverse
from expedient.common.federation.geni.util import cred_util
import uuid
def get_user_cert_fname(user):
"""Get the filename of the user's GCF x509 certificate.
@param user: The user whose certificate filename we want.
@type user: L{django.contrib.auth.models.User}
@return: The certificate absolute path
@rtype: C{str}
"""
return os.path.join(
settings.GCF_X509_USER_CERT_DIR,
settings.GCF_X509_USER_CERT_FNAME_PREFIX + user.username + ".crt"
)
def get_user_key_fname(user):
"""Get the filename of the user's GCF x509 certificate key.
@param user: The user whose certificate key filename we want.
@type user: L{django.contrib.auth.models.User}
@return: The certificate key's absolute path
@rtype: C{str}
"""
return os.path.join(
settings.GCF_X509_KEY_DIR,
settings.GCF_X509_USER_CERT_FNAME_PREFIX + user.username + ".key"
)
def get_user_urn(username):
"""Get a user's URN
@param username: The username of the user whose URN we want.
@type username: C{str}
@return: The URN
@rtype: C{str}
"""
return URN(
str(settings.GCF_BASE_NAME), str("user"), str(username)
).urn_string()
def get_ch_urn():
"""Get the URN for Expedient as a clearinghouse.
@return: The URN
@rtype: C{str}
"""
return URN(
settings.GCF_BASE_NAME, "authority", "sa",
).urn_string()
def get_slice_urn(name):
"""Get the URN for a slice with name C{name}.
@param name: Name of the slice. Must be unique.
@type name: C{str}
@return: a slice URN
@rtype: C{str}
"""
return URN(
settings.GCF_BASE_NAME, "slice", name,
).urn_string()
def create_slice_urn():
    """Create a URN for a new slice using a random name."""
    return get_slice_urn(str(uuid.uuid4())[4:12])
def create_x509_cert(urn, cert_fname=None, key_fname=None, is_self_signed=False):
"""Create a GCF certificate and store it in a file.
@param urn: The urn to use in the cert.
@type urn: C{str}
@keyword cert_fname: The filename to store the cert in.
If None (default), then don't store.
@type cert_fname: C{str}
@keyword key_fname: The filename to store the certificate key in.
If None (default), then don't store.
@type key_fname: C{str}
@keyword is_self_signed: should the certificate be self-signed? Otherwise
it will be signed by Expedient's CH certificate. Default False.
@type is_self_signed: C{bool}
@return: tuple (cert, keys)
@rtype: (C{sfa.trust.gid.GID}, C{sfa.trust.certificate.Keypair})
"""
if is_self_signed:
cert, keys = create_cert(
urn,
)
else:
cert, keys = create_cert(
urn,
issuer_key=settings.GCF_X509_CH_KEY,
issuer_cert=settings.GCF_X509_CH_CERT
)
cert.decode()
if cert_fname:
cert.save_to_file(cert_fname)
if key_fname:
keys.save_to_file(key_fname)
return cert, keys
def read_cert_from_file(cert_fname):
"""Read a GCF certificate from a file.
Read the certificate from a file and put it into a C{sfa.trust.gid.GID}
object. The returned certificate is already decoded.
@param cert_fname: The filename to read the cert from
@type cert_fname: C{str}
@return: The certificate stored in the file at C{cert_fname}
@rtype: C{sfa.trust.gid.GID}
"""
cert = GID(filename=cert_fname)
cert.decode()
return cert
def read_cert_from_string(cert_str):
"""Read a GCF certificate from a string.
Read the certificate from a string and put it into a C{sfa.trust.gid.GID}
object. The returned certificate is already decoded.
@param cert_str: The string to read the cert from
@type cert_str: C{str}
@return: The certificate stored in the string at C{cert_str}
@rtype: C{sfa.trust.gid.GID}
"""
cert = GID(string=cert_str)
cert.decode()
return cert
def describe_ui_plugin(slice):
"""Describes the UI plugin according to L{expedient.clearinghouse.defaultsettings.expedient.UI_PLUGINS}."""
return ("GCF RSpec Plugin",
"Allows the user to modify the slice by uploading"
" RSpecs or to download an RSpec of the slice.",
reverse("gcf_rspec_ui", slice.id))
def urn_to_username(urn):
"""Create a valid username from a URN.
This creates the username by taking the authority part of
the URN, and the name part of the URN and joining them with "@".
Any characters other than letters, digits, '@', '-', '_', '+', and '.'
    are replaced with '_'.
e.g. "urn:publicid:IDN+stanford:expedient%26+user+jnaous" becomes
"jnaous@expedient_26.stanford"
    The authority part of the URN is truncated to 150 characters, and the
    name part is truncated to 100 characters.
@param urn: a urn to turn into a username
@type urn: C{str}
@return: a valid username
@rtype: C{str}
"""
invalid_chars_re = re.compile(r"[^\w@+.-]")
urn = URN(urn=str(urn))
auth = urn.getAuthority()
auth = auth.split("//")
auth.reverse()
auth = ".".join(auth)
if len(auth) > 150:
auth = auth[:150]
name = urn.getName()
if len(name) > 100:
        name = name[:100]
username = name + "@" + auth
# replace all invalid chars with _
username = invalid_chars_re.sub("_", username)
assert(len(username) <= 255)
return username
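# Example (from the docstring above; illustrative only):
#   urn_to_username("urn:publicid:IDN+stanford:expedient%26+user+jnaous")
#   returns "jnaous@expedient_26.stanford"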
def get_trusted_cert_filenames():
"""Return list of paths to files containing trusted certs."""
filenames = os.listdir(settings.GCF_X509_TRUSTED_CERT_DIR)
    filenames = [os.path.join(settings.GCF_X509_TRUSTED_CERT_DIR, f)
                 for f in filenames]
trusted_certs = []
for f in filenames:
if f.endswith(".crt") and os.path.isfile(f):
trusted_certs.append(f)
return trusted_certs
def create_slice_credential(user_gid, slice_gid):
'''Create a Slice credential object for this user_gid (object) on given slice gid (object)
@param user_gid: The user's cert
@type user_gid: C{sfa.trust.gid.GID}
@param slice_gid: The slice's gid
@type slice_gid: C{sfa.trust.gid.GID}
@return: The credential
@rtype: C{sfa.trust.credential.Credential}
'''
return cred_util.create_credential(
user_gid, slice_gid, settings.GCF_SLICE_CRED_LIFE,
'slice',
settings.GCF_X509_CH_KEY, settings.GCF_X509_CH_CERT,
get_trusted_cert_filenames(),
)
def create_user_credential(user_gid):
'''Create a user credential object for this user_gid
@param user_gid: The user's cert
@type user_gid: C{sfa.trust.gid.GID}
@return: The credential
@rtype: C{sfa.trust.credential.Credential}
'''
return cred_util.create_credential(
user_gid, user_gid, settings.GCF_USER_CRED_LIFE,
'user',
settings.GCF_X509_CH_KEY, settings.GCF_X509_CH_CERT,
get_trusted_cert_filenames(),
)
def get_or_create_user_cert(user):
"""Get the user's cert, creating it if it doesn't exist."""
cert_fname = get_user_cert_fname(user)
key_fname = get_user_key_fname(user)
urn = get_user_urn(user.username)
if not os.access(cert_fname, os.R_OK):
cert, _ = create_x509_cert(urn, cert_fname, key_fname)
else:
cert = read_cert_from_file(cert_fname)
return cert
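# Typical usage sketch (illustrative; assumes a Django ``User`` instance named
# ``user`` and fully configured GCF_* settings):
#
#     user_cert = get_or_create_user_cert(user)
#     user_cred = create_user_credential(user_cert)
#     slice_urn = create_slice_urn()
#     slice_cert, slice_keys = create_x509_cert(slice_urn)
#     slice_cred = create_slice_credential(user_cert, slice_cert)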
|
|
# -*- coding: utf-8 -*-
""" Command line configuration parser """
import sys
import os.path
import ConfigParser
import re
import ast
from copy import deepcopy
try:
from collections import OrderedDict as ordereddict
except ImportError:
from ordereddict import OrderedDict as ordereddict
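# Each entry in TABLE_CONFIG_OPTIONS below maps a configuration-file option
# name ('option') to an internal key ('key') and declares how its value should
# be parsed ('type'). A hypothetical config line such as
#
#     min-provisioned-reads = 50
#
# would thus end up as {'min_provisioned_reads': 50}; the surrounding section
# syntax is handled elsewhere in this parser and is not shown here.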
TABLE_CONFIG_OPTIONS = [
{ 'key': 'enable_weighted_read_autoscaling',
'option': 'enable-weighted-read-autoscaling',
'required': False,
'type': 'bool'
},
{ 'key': 'enable_weighted_write_autoscaling',
'option': 'enable-weighted-write-autoscaling',
'required': False,
'type': 'bool'
},
{
'key': 'max_scale_down_ops_per_day',
'option': 'max-scale-down-ops-per-day',
'required': False,
'type': 'int'
},
{
'key': 'enable_reads_autoscaling',
'option': 'enable-reads-autoscaling',
'required': False,
'type': 'bool'
},
{
'key': 'enable_writes_autoscaling',
'option': 'enable-writes-autoscaling',
'required': False,
'type': 'bool'
},
{
'key': 'enable_reads_up_scaling',
'option': 'enable-reads-up-scaling',
'required': False,
'type': 'bool'
},
{
'key': 'enable_reads_down_scaling',
'option': 'enable-reads-down-scaling',
'required': False,
'type': 'bool'
},
{
'key': 'enable_writes_up_scaling',
'option': 'enable-writes-up-scaling',
'required': False,
'type': 'bool'
},
{
'key': 'enable_writes_down_scaling',
'option': 'enable-writes-down-scaling',
'required': False,
'type': 'bool'
},
{
'key': 'reads_lower_threshold',
'option': 'reads-lower-threshold',
'required': False,
'type': 'int'
},
{
'key': 'reads_upper_threshold',
'option': 'reads-upper-threshold',
'required': False,
'type': 'float'
},
{
'key': 'throttled_reads_upper_threshold',
'option': 'throttled-reads-upper-threshold',
'required': False,
'type': 'int'
},
{
'key': 'increase_reads_with',
'option': 'increase-reads-with',
'required': False,
'type': 'int'
},
{
'key': 'decrease_reads_with',
'option': 'decrease-reads-with',
'required': False,
'type': 'int'
},
{
'key': 'increase_reads_unit',
'option': 'increase-reads-unit',
'required': True,
'type': 'str'
},
{
'key': 'decrease_reads_unit',
'option': 'decrease-reads-unit',
'required': True,
'type': 'str'
},
{
'key': 'writes_lower_threshold',
'option': 'writes-lower-threshold',
'required': False,
'type': 'int'
},
{
'key': 'writes_upper_threshold',
'option': 'writes-upper-threshold',
'required': False,
'type': 'float'
},
{
'key': 'throttled_writes_upper_threshold',
'option': 'throttled-writes-upper-threshold',
'required': False,
'type': 'int'
},
{
'key': 'increase_writes_with',
'option': 'increase-writes-with',
'required': False,
'type': 'int'
},
{
'key': 'decrease_writes_with',
'option': 'decrease-writes-with',
'required': False,
'type': 'int'
},
{
'key': 'increase_writes_unit',
'option': 'increase-writes-unit',
'required': True,
'type': 'str'
},
{
'key': 'decrease_writes_unit',
'option': 'decrease-writes-unit',
'required': True,
'type': 'str'
},
{
'key': 'min_provisioned_reads',
'option': 'min-provisioned-reads',
'required': False,
'type': 'int'
},
{
'key': 'max_provisioned_reads',
'option': 'max-provisioned-reads',
'required': False,
'type': 'int'
},
{
'key': 'min_provisioned_writes',
'option': 'min-provisioned-writes',
'required': False,
'type': 'int'
},
{
'key': 'max_provisioned_writes',
'option': 'max-provisioned-writes',
'required': False,
'type': 'int'
},
{
'key': 'maintenance_windows',
'option': 'maintenance-windows',
'required': False,
'type': 'str'
},
{
'key': 'allow_scaling_down_reads_on_0_percent',
'option': 'allow-scaling-down-reads-on-0-percent',
'required': False,
'type': 'bool'
},
{
'key': 'allow_scaling_down_writes_on_0_percent',
'option': 'allow-scaling-down-writes-on-0-percent',
'required': False,
'type': 'bool'
},
{
'key': 'always_decrease_rw_together',
'option': 'always-decrease-rw-together',
'required': False,
'type': 'bool'
},
{
'key': 'sns_topic_arn',
'option': 'sns-topic-arn',
'required': False,
'type': 'str'
},
{
'key': 'sns_message_types',
'option': 'sns-message-types',
'required': False,
'type': 'str'
},
{
'key': 'num_read_checks_before_scale_down',
'option': 'num-read-checks-before-scale-down',
'required': False,
'type': 'int'
},
{
'key': 'num_write_checks_before_scale_down',
'option': 'num-write-checks-before-scale-down',
'required': False,
'type': 'int'
},
{
'key': 'num_write_checks_reset_percent',
'option': 'num-write-checks-reset-percent',
'required': False,
'type': 'int'
},
{
'key': 'num_read_checks_reset_percent',
'option': 'num-read-checks-reset-percent',
'required': False,
'type': 'int'
},
{
'key': 'reads-upper-alarm-threshold',
'option': 'reads-upper-alarm-threshold',
'required': False,
'type': 'int'
},
{
'key': 'reads-lower-alarm-threshold',
'option': 'reads-lower-alarm-threshold',
'required': False,
'type': 'int'
},
{
'key': 'writes-upper-alarm-threshold',
'option': 'writes-upper-alarm-threshold',
'required': False,
'type': 'int'
},
{
'key': 'writes-lower-alarm-threshold',
'option': 'writes-lower-alarm-threshold',
'required': False,
'type': 'int'
},
{
'key': 'lookback_window_start',
'option': 'lookback-window-start',
'required': False,
'type': 'int'
},
{
'key': 'lookback_period',
'option': 'lookback-period',
'required': False,
'type': 'int'
},
{
'key': 'increase_throttled_by_provisioned_reads_unit',
'option': 'increase-throttled-by-provisioned-reads-unit',
'required': False,
'type': 'str'
},
{
'key': 'increase_throttled_by_provisioned_reads_scale',
'option': 'increase-throttled-by-provisioned-reads-scale',
'required': False,
'type': 'dict'
},
{
'key': 'increase_throttled_by_provisioned_writes_unit',
'option': 'increase-throttled-by-provisioned-writes-unit',
'required': False,
'type': 'str'
},
{
'key': 'increase_throttled_by_provisioned_writes_scale',
'option': 'increase-throttled-by-provisioned-writes-scale',
'required': False,
'type': 'dict'
},
{
'key': 'increase_throttled_by_consumed_reads_unit',
'option': 'increase-throttled-by-consumed-reads-unit',
'required': False,
'type': 'str'
},
{
'key': 'increase_throttled_by_consumed_reads_scale',
'option': 'increase-throttled-by-consumed-reads-scale',
'required': False,
'type': 'dict'
},
{
'key': 'increase_throttled_by_consumed_writes_unit',
'option': 'increase-throttled-by-consumed-writes-unit',
'required': False,
'type': 'str'
},
{
'key': 'increase_throttled_by_consumed_writes_scale',
'option': 'increase-throttled-by-consumed-writes-scale',
'required': False,
'type': 'dict'
},
{
'key': 'increase_consumed_reads_unit',
'option': 'increase-consumed-reads-unit',
'required': False,
'type': 'str'
},
{
'key': 'increase_consumed_reads_with',
'option': 'increase-consumed-reads-with',
'required': False,
'type': 'int'
},
{
'key': 'increase_consumed_reads_scale',
'option': 'increase-consumed-reads-scale',
'required': False,
'type': 'dict'
},
{
'key': 'increase_consumed_writes_unit',
'option': 'increase-consumed-writes-unit',
'required': False,
'type': 'str'
},
{
'key': 'increase_consumed_writes_with',
'option': 'increase-consumed-writes-with',
'required': False,
'type': 'int'
},
{
'key': 'increase_consumed_writes_scale',
'option': 'increase-consumed-writes-scale',
'required': False,
'type': 'dict'
},
{
'key': 'decrease_consumed_reads_unit',
'option': 'decrease-consumed-reads-unit',
'required': False,
'type': 'str'
},
{
'key': 'decrease_consumed_reads_with',
'option': 'decrease-consumed-reads-with',
'required': False,
'type': 'int'
},
{
'key': 'decrease_consumed_reads_scale',
'option': 'decrease-consumed-reads-scale',
'required': False,
'type': 'dict'
},
{
'key': 'decrease_consumed_writes_unit',
'option': 'decrease-consumed-writes-unit',
'required': False,
'type': 'str'
},
{
'key': 'decrease_consumed_writes_with',
'option': 'decrease-consumed-writes-with',
'required': False,
'type': 'int'
},
{
'key': 'decrease_consumed_writes_scale',
'option': 'decrease-consumed-writes-scale',
'required': False,
'type': 'dict'
},
{
'key': 'circuit_breaker_url',
'option': 'circuit-breaker-url',
'required': False,
'type': 'str'
},
{
'key': 'circuit_breaker_timeout',
'option': 'circuit-breaker-timeout',
'required': False,
'type': 'float'
},
]
def __parse_options(config_file, section, options):
""" Parse the section options
:type config_file: ConfigParser object
:param config_file: The config file object to use
:type section: str
:param section: Which section to read in the configuration file
:type options: list of dicts
:param options:
A list of options to parse. Example list::
[{
'key': 'aws_access_key_id',
'option': 'aws-access-key-id',
'required': False,
            'type': 'str'
}]
:returns: dict
"""
configuration = {}
for option in options:
try:
if option.get('type') == 'str':
configuration[option.get('key')] = \
config_file.get(section, option.get('option'))
elif option.get('type') == 'int':
try:
configuration[option.get('key')] = \
config_file.getint(section, option.get('option'))
except ValueError:
print('Error: Expected an integer value for {0}'.format(
option.get('option')))
sys.exit(1)
elif option.get('type') == 'float':
try:
configuration[option.get('key')] = \
config_file.getfloat(section, option.get('option'))
except ValueError:
                    print('Error: Expected a float value for {0}'.format(
option.get('option')))
sys.exit(1)
elif option.get('type') == 'bool':
try:
configuration[option.get('key')] = \
config_file.getboolean(section, option.get('option'))
except ValueError:
                    print('Error: Expected a boolean value for {0}'.format(
option.get('option')))
sys.exit(1)
elif option.get('type') == 'dict':
configuration[option.get('key')] = \
ast.literal_eval(
config_file.get(section, option.get('option')))
else:
configuration[option.get('key')] = \
config_file.get(section, option.get('option'))
except ConfigParser.NoOptionError:
if option.get('required'):
print('Missing [{0}] option "{1}" in configuration'.format(
section, option.get('option')))
sys.exit(1)
return configuration
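# Illustrative sketch (not in the original module): __parse_options maps an INI
# section onto a plain dict keyed by each option's 'key'; options missing from
# the section are omitted unless marked required, in which case we exit. The
# option names below are the [global] options handled by parse() further down.
def _example_parse_options():
    import StringIO
    cfg = ConfigParser.RawConfigParser()
    cfg.readfp(StringIO.StringIO("[global]\nregion: us-east-1\ncheck-interval: 300\n"))
    return __parse_options(cfg, 'global', [
        {'key': 'region', 'option': 'region', 'required': False, 'type': 'str'},
        {'key': 'check_interval', 'option': 'check-interval', 'required': False, 'type': 'int'}
    ])
    # -> {'region': 'us-east-1', 'check_interval': 300}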
def parse(config_path):
""" Parse the configuration file
:type config_path: str
:param config_path: Path to the configuration file
"""
config_path = os.path.expanduser(config_path)
# Read the configuration file
config_file = ConfigParser.RawConfigParser()
    # Tolerate extra whitespace around section names and keep option names
    # case sensitive (RawConfigParser lowercases option names by default)
    config_file.SECTCRE = re.compile(r"\[ *(?P<header>.*) *\]")
    config_file.optionxform = lambda option: option
config_file.read(config_path)
#
# Handle [global]
#
if 'global' in config_file.sections():
global_config = __parse_options(
config_file,
'global',
[
{
'key': 'aws_access_key_id',
'option': 'aws-access-key-id',
'required': False,
'type': 'str'
},
{
'key': 'aws_secret_access_key',
'option': 'aws-secret-access-key-id',
'required': False,
'type': 'str'
},
{
'key': 'region',
'option': 'region',
'required': False,
'type': 'str'
},
{
'key': 'check_interval',
'option': 'check-interval',
'required': False,
'type': 'int'
},
{
'key': 'circuit_breaker_url',
'option': 'circuit-breaker-url',
'required': False,
'type': 'str'
},
{
'key': 'circuit_breaker_timeout',
'option': 'circuit-breaker-timeout',
'required': False,
'type': 'float'
},
])
#
# Handle [logging]
#
if 'logging' in config_file.sections():
logging_config = __parse_options(
config_file,
'logging',
[
{
'key': 'log_level',
'option': 'log-level',
'required': False,
'type': 'str'
},
{
'key': 'log_file',
'option': 'log-file',
'required': False,
'type': 'str'
},
{
'key': 'log_config_file',
'option': 'log-config-file',
'required': False,
'type': 'str'
}
])
if 'default_options' in config_file.sections():
# nothing is required in defaults, so we set required to False
default_config_options = deepcopy(TABLE_CONFIG_OPTIONS)
for item in default_config_options:
item['required'] = False
default_options = __parse_options(
config_file, 'default_options', default_config_options)
        # If a default value exists, mark the option as not required when parsing tables
for item in TABLE_CONFIG_OPTIONS:
if item['key'] in default_options:
item['required'] = False
else:
default_options = {}
#
# Handle [table: ]
#
table_config = {'tables': ordereddict()}
    # Parse every [table: <table_name>] section, tracking whether we found at least one
found_table = False
for current_section in config_file.sections():
if current_section.rsplit(':', 1)[0] != 'table':
continue
found_table = True
current_table_name = current_section.rsplit(':', 1)[1].strip()
table_config['tables'][current_table_name] = \
dict(default_options.items() + __parse_options(
config_file, current_section, TABLE_CONFIG_OPTIONS).items())
if not found_table:
print('Could not find a [table: <table_name>] section in {0}'.format(
config_path))
sys.exit(1)
    # Find GSI definitions. This allows GSIs to be defined before the table
    # definitions; we don't worry about parsing everything twice here.
for current_section in config_file.sections():
try:
header1, gsi_key, header2, table_key = current_section.split(' ')
except ValueError:
continue
if header1 != 'gsi:':
continue
if table_key not in table_config['tables']:
print('No table configuration matching {0} found.'.format(
table_key))
sys.exit(1)
if 'gsis' not in table_config['tables'][table_key]:
table_config['tables'][table_key]['gsis'] = {}
table_config['tables'][table_key]['gsis'][gsi_key] = \
ordereddict(default_options.items() + __parse_options(
config_file, current_section, TABLE_CONFIG_OPTIONS).items())
return ordereddict(
global_config.items() +
logging_config.items() +
table_config.items())
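# Example configuration layout (sketch) that parse() understands. Section and
# option names come from the handlers above; the table and GSI names and the
# values are illustrative only:
#
#   [global]
#   region: us-east-1
#   check-interval: 300
#
#   [logging]
#   log-level: info
#
#   [default_options]
#   enable-reads-autoscaling: true
#
#   [table: my_table]
#   reads-upper-threshold: 90
#   increase-reads-with: 50
#   increase-reads-unit: percent
#   decrease-reads-unit: percent
#   increase-writes-unit: percent
#   decrease-writes-unit: percent
#
#   [gsi: my_gsi table: my_table]
#   increase-reads-unit: percent
#   decrease-reads-unit: percent
#   increase-writes-unit: percent
#   decrease-writes-unit: percent
#
# parse('config.conf') then returns an OrderedDict combining the global and
# logging options with a 'tables' mapping of per-table (and per-GSI) options,
# with any [default_options] values merged in underneath.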
|
|
# encoding: utf8
from __future__ import unicode_literals
from optparse import make_option
from collections import OrderedDict
from importlib import import_module
import itertools
import traceback
from django.apps import apps
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_migrate_signal, emit_pre_migrate_signal
from django.db import connections, router, transaction, DEFAULT_DB_ALIAS
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import MigrationLoader, AmbiguityError
from django.db.migrations.state import ProjectState
from django.db.migrations.autodetector import MigrationAutodetector
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
help='Tells Django not to load any initial data after database synchronization.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
make_option('--fake', action='store_true', dest='fake', default=False,
help='Mark migrations as run without actually running them'),
make_option('--list', '-l', action='store_true', dest='list', default=False,
help='Show a list of all known migrations and which are applied'),
)
help = "Updates database schema. Manages both apps with migrations and those without."
def handle(self, *args, **options):
self.verbosity = int(options.get('verbosity'))
self.interactive = options.get('interactive')
self.show_traceback = options.get('traceback')
self.load_initial_data = options.get('load_initial_data')
self.test_database = options.get('test_database', False)
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options.get('database')
connection = connections[db]
# If they asked for a migration listing, quit main execution flow and show it
if options.get("list", False):
return self.show_migration_list(connection, args)
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError("Conflicting migrations detected (%s).\nTo fix them run 'python manage.py makemigrations --merge'" % name_str)
# If they supplied command line arguments, work out what they mean.
run_syncdb = False
target_app_labels_only = True
if len(args) > 2:
raise CommandError("Too many command-line arguments (expecting 'app_label' or 'app_label migrationname')")
elif len(args) == 2:
app_label, migration_name = args
if app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations (you cannot selectively sync unmigrated apps)" % app_label)
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % (app_label, migration_name))
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (app_label, migration_name))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif len(args) == 1:
app_label = args[0]
if app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations (you cannot selectively sync unmigrated apps)" % app_label)
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
run_syncdb = True
plan = executor.migration_plan(targets)
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
self.stdout.write(self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") + (", ".join(executor.loader.unmigrated_apps) or "(none)"))
if target_app_labels_only:
self.stdout.write(self.style.MIGRATE_LABEL(" Apply all migrations: ") + (", ".join(set(a for a, n in targets)) or "(none)"))
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(" Unapply all migrations: ") + "%s" % (targets[0][0], ))
else:
self.stdout.write(self.style.MIGRATE_LABEL(" Target specific migration: ") + "%s, from %s" % (targets[0][1], targets[0][0]))
# Run the syncdb phase.
# If you ever manage to get rid of this, I owe you many, many drinks.
# Note that pre_migrate is called from inside here, as it needs
# the list of models about to be installed.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
created_models = self.sync_apps(connection, executor.loader.unmigrated_apps)
else:
created_models = []
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations needed.")
            # If there are changes that aren't in migrations yet, tell them how to fix it.
autodetector = MigrationAutodetector(
executor.loader.graph.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(" Your models have changes that are not yet reflected in a migration, and so won't be applied."))
self.stdout.write(self.style.NOTICE(" Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them."))
else:
executor.migrate(targets, plan, fake=options.get("fake", False))
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(created_models, self.verbosity, self.interactive, connection.alias)
def migration_progress_callback(self, action, migration, fake=False):
if self.verbosity >= 1:
if action == "apply_start":
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED"))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
elif action == "unapply_start":
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED"))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
def sync_apps(self, connection, app_labels):
"Runs the old syncdb-style operation on a list of app_labels."
cursor = connection.cursor()
try:
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names(cursor)
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=True))
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
# Note that if a model is unmanaged we short-circuit and never try to install it
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = OrderedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
create_models = set(itertools.chain(*manifest.values()))
emit_pre_migrate_signal(create_models, self.verbosity, self.interactive, connection.alias)
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...\n")
with transaction.atomic(using=connection.alias, savepoint=False):
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if self.verbosity >= 3:
self.stdout.write(" Processing %s.%s model\n" % (app_name, model._meta.object_name))
sql, references = connection.creation.sql_create_model(model, no_style(), seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, no_style(), pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, no_style(), pending_references))
if self.verbosity >= 1 and sql:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
# We force a commit here, as that was the previous behavior.
# If you can prove we don't need this, remove it.
transaction.set_dirty(using=connection.alias)
finally:
cursor.close()
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
try:
# Install custom SQL for the app (but only if this
# is a model we've just created)
if self.verbosity >= 1:
self.stdout.write(" Installing custom SQL...\n")
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, no_style(), connection)
if custom_sql:
if self.verbosity >= 2:
self.stdout.write(" Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=connection.alias):
for sql in custom_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write(" Failed to install custom SQL for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
if self.show_traceback:
traceback.print_exc()
else:
if self.verbosity >= 3:
self.stdout.write(" No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
if self.verbosity >= 1:
self.stdout.write(" Installing indexes...\n")
# Install SQL indices for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, no_style())
if index_sql:
if self.verbosity >= 2:
self.stdout.write(" Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=connection.alias):
for sql in index_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write(" Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
finally:
cursor.close()
# Load initial_data fixtures (unless that has been disabled)
if self.load_initial_data:
for app_label in app_labels:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=connection.alias, skip_validation=True, app_label=app_label, hide_empty=True)
return created_models
def show_migration_list(self, connection, app_names=None):
"""
Shows a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
invalid_apps = []
for app_name in app_names:
if app_name not in loader.migrated_apps:
invalid_apps.append(app_name)
if invalid_apps:
raise CommandError("No migrations present for: %s" % (", ".join(invalid_apps)))
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
self.stdout.write(app_name, self.style.MIGRATE_LABEL)
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
# Mark it as applied/unapplied
if plan_node in loader.applied_migrations:
self.stdout.write(" [X] %s" % title)
else:
self.stdout.write(" [ ] %s" % title)
shown.add(plan_node)
            # If we didn't print anything, print a small message
if not shown:
self.stdout.write(" (no migrations)", self.style.MIGRATE_FAILURE)
|
|
###################################
### ###
### Joshua G. Mausolf ###
### Department of Sociology ###
### Computation Institute ###
### University of Chicago ###
### ###
###################################
import re
import pandas as pd
import numpy as np
import glob
import os
def group_text(text, group_size):
    """
    Group a text into overlapping word groups of group_size words (n-grams).
    Returns a list of space-joined strings.
    """
word_list = text.split()
group_list = []
for k in range(len(word_list)):
start = k
end = k + group_size
group_slice = word_list[start: end]
# append only groups of proper length/size
if len(group_slice) == group_size:
group_list.append(" ".join(group_slice))
return group_list
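# Quick sketch (not used by the pipeline below): group_text slides a window of
# group_size words across the text, effectively producing word n-grams.
def _example_group_text():
    assert group_text("pay their fair share today", 3) == [
        'pay their fair', 'their fair share', 'fair share today']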
def remove_non_ascii_2(text):
    # Replace any run of non-ASCII characters with an apostrophe
    #return re.sub(r'[^\xe2]+', "'", text)
    return re.sub(r'[^\x00-\x7F]+', "'", text)
def read_speech(speechfile):
speech = str(speechfile)
f = open(speech, 'rU')
raw = f.read().decode('utf8')
raw1 = raw.replace('.', ' ')
sent = remove_non_ascii_2(raw1)
return sent
def get_url(speechfile):
speech = str(speechfile)
f = open(speech, 'rU')
raw = f.read().decode('utf8')
sent = remove_non_ascii_2(raw)
url = sent.split('\n')[1]
return url
def get_group_set(group_size, text):
group_list = group_text(text, group_size)
group_set = set(group_list)
return group_set
def ngram(n, data):
ngram = get_group_set(n, data)
return ngram
wall_street = ["lobby", "lobbying", "lobbies", "special interest", "special interests", "revolving door", "campaign donor", "campaign donation", "campaign donations", "bidder", "highest bidder", "campaign contributions", "loophole", "loopholes", "tax shelter", "tax evasion", "write their own rules", "own rules", "Wall Street", "bailout", "bailouts"]
corporate_greed = ["cheat", "cheating", "stacked against", "stacked up against", " stacked against", "good benefits", "decent salary", "stack the deck", "exploit", "exploiting", "protect workers", "protecting workers", "protect laborers", "protecting laborers", "protect Americans", "protecting Americans", "protect employee", "protect employees", "protecting employees", "work safe", "working safely", "safe at work", "work conditions", "innocent", "minimum wage", "pollute", "polluting", "regulate", "regulating", "federal oversight", "financial reform", "gambling", "derivative", "derivatives", "sub-prime", "risky investment", "risky investments", "bust unions", "labor unions", "dirtiest air", "cheapest labor", "wages", "workplace safety", "Consumer Finance Protection Bureau", "consumer protection", "unions", "union label", "union workers", "CEO", "CEO's", "corporation", "corporations"]
inequality = ["wealth", "wealthy", "income equality", "income inequality", "privileged", "rich", "1%", "1 percent", "one percent", "fair", "unfair", "fairness", "unfairness", "middle-class", "middle class", "working class", "poor", "99%", "ninety-nine percent", "ninety nine percent", "equity", "inequity", "egalitarian", "disparity", "unequal", "average American", "average Americans", "Wall Street", "Main Street", "main street", "50 million", " Warren Buffet", "Warren Buffett's secretary", "class warfare", "class warefare", "warrior for the middle class", "Giving everybody a shot", "giving everybody a shot", "giving everybody a fair shot", "an America that is fair and just", "everybody is included", " folks at the top", "folks at the bottom", "fair shake"]
fair_share = ["pay their fair share", "our fair share", "fair share"]
other_terms = ["We", "gets a fair shake", "jobs", "fair", "fair share", "economy", "unemployment", "99", "99 percent", "1 percent", "1", "wealthy", "loophole", "main street", "Warren Buffett", "Warren Buffett's secretary", "secretary", "income", "wealth", "occupy", "occupying", "tax rate", "middle class", "upper class", "working class", "lobby", "corporate", "fair shot", "special interests", "lower class", "poor", "poverty", "rich", "inequality", "class", "America", "Wall Street", "Wall Street billionaires"]
terms = wall_street+corporate_greed+inequality+fair_share+other_terms
def speech_phrase_counter(ngram1, ngram2, ngram3, ngram4, terms):
    #Counts term matches against the module-level `sent` speech text
    #print "FUNCTION TEST"
for term in terms:
for gram in ngram4:
if term == gram:
count = sent.count(gram)
print "Count: ", count, "| ", gram
for gram in ngram3:
if term == gram:
count = sent.count(gram)
print "Count: ", count, "| ", gram
for gram in ngram2:
if term == gram:
count = sent.count(gram)
print "Count: ", count, "| ", gram
for gram in ngram1:
if term == gram:
count = sent.count(gram)
print "Count: ", count, "| ", gram
#speech_phrase_counter(ngram1, ngram2, ngram3, ngram4, terms)
def find_time(text):
    #Extract the raw time string from the speech text
    try:
        try:
            time = re.findall(r'\d{1,2}:\d{1,2}\s[A-Z].[A-Z].+', text)
            #time = time0[0].replace('P M ', 'PM').replace('A M ', 'AM')
            #df.ix[n, "TIME"] = time
            return time[0]
        except:
            try:
                time = re.findall(r'\d{1,2}:\d{1,2}\s[A-Z].[A-Z].+', text)
                #df.ix[n, "TIME"] = time[0]
                return time[0]
            except:
                time = re.findall(r'\d{1,2}(?:(?:AM|PM)|(?::\d{1,2})(?:AM|PM)?)', text)
                #df.ix[n, "TIME"] = time[0]
                return time[0]
    except:
        pass
def return_time(text):
    #Extract and normalize the time string from the speech text
    try:
        try:
            time0 = re.findall(r'\d{1,2}:\d{1,2}\s[A-Z].[A-Z].+', text)
            time = time0[0].replace('P M ', 'PM').replace('A M ', 'AM')
            #df.ix[n, "TIME"] = time
            return time
        except:
            try:
                time = re.findall(r'\d{1,2}:\d{1,2}\s[A-Z].[A-Z].+', text)
                #df.ix[n, "TIME"] = time[0]
                return time[0]
            except:
                time = re.findall(r'\d{1,2}(?:(?:AM|PM)|(?::\d{1,2})(?:AM|PM)?)', text)
                #df.ix[n, "TIME"] = time[0]
                return time[0]
    except:
        pass
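# Illustrative behaviour (the transcript layout is an assumption): the first
# pattern grabs times written like "1:05 P.M. EST" and normalizes the spaced
# "P M "/"A M " spellings, while the fallback pattern catches compact forms
# such as "11AM" or "1:05PM".
#
#   return_time("THE WHITE HOUSE ... 1:05 P.M. EST")  # -> "1:05 P.M. EST"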
def speech_phrase_counter2(ngram1, ngram2, ngram3, ngram4, terms, df, n):
    #Counts term matches in the module-level `sent` text and writes them to row n of df
    #print "FUNCTION TEST"
for term in terms:
for gram in ngram4:
if term == gram:
count = sent.count(gram)
df.ix[n, term] = count
for gram in ngram3:
if term == gram:
count = sent.count(gram)
df.ix[n, term] = count
for gram in ngram2:
if term == gram:
count = sent.count(gram)
df.ix[n, term] = count
for gram in ngram1:
if term == gram:
count = sent.count(gram)
df.ix[n, term] = count
#Setup Initial Data Frame
header = ["DATE", "TIME", "LOCATION", "URL"]+terms
index = np.arange(0)
df = pd.DataFrame(columns=header, index = index)
#Test Files
#speech_files = ["2014-01-15_ID1.txt", "2014-01-16_ID1.txt", "2014-01-17_ID1.txt", "2011-12-06_ID1.txt"]
#Get Files in Folder
os.chdir("2014_Speech_President")
speech_files = glob.glob("*.txt")
for speech in speech_files:
print "Analyzing speech file ", speech, "..."
date = speech.split('_')[0]
n = len(df.index)
#Add Row to Data Frame
df.loc[n] = 0
df.ix[n, "DATE"] = date
sent = read_speech(speech)
#Add Time to Data Frame
time = return_time(sent)
df.ix[n, "TIME"] = time
#Add Location
try:
time_ = find_time(sent)
location0 = sent
location1 = location0.replace(time_, '|').split('|', 1)[0]
location2 = location1.replace('\n\n', '|').replace('|\n', '|').replace('| ', '').split('|')
X = len(location2)-2
location3 = location2[X]
location = location3.replace('\n', ', ').replace('\t', '')
except:
location = ''
pass
df.ix[n, "LOCATION"] = location
#Add Citation/URL
url = get_url(speech)
df.ix[n, "URL"] = url
ngram1 = get_group_set(1, sent)
ngram2 = get_group_set(2, sent)
ngram3 = get_group_set(3, sent)
ngram4 = get_group_set(4, sent)
speech_phrase_counter2(ngram1, ngram2, ngram3, ngram4, terms, df, n)
print df
df.to_csv("Presidential_Speech_Data.csv", encoding='utf-8')
|
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
import copy
import common
from pyalgotrade import observer
from pyalgotrade import dispatcher
class NonRealtimeFeed(observer.Subject):
def __init__(self, datetimes, priority=None):
self.__datetimes = datetimes
self.__event = observer.Event()
self.__priority = priority
def getEvent(self):
return self.__event
def start(self):
pass
def stop(self):
pass
def join(self):
pass
def eof(self):
return len(self.__datetimes) == 0
def dispatch(self):
ret = True
self.__event.emit(self.__datetimes.pop(0))
return ret
def peekDateTime(self):
return self.__datetimes[0]
def getDispatchPriority(self):
return self.__priority
class RealtimeFeed(observer.Subject):
def __init__(self, datetimes, priority=None):
self.__datetimes = datetimes
self.__event = observer.Event()
self.__priority = priority
def getEvent(self):
return self.__event
def start(self):
pass
def stop(self):
pass
def join(self):
pass
def eof(self):
return len(self.__datetimes) == 0
def dispatch(self):
ret = True
self.__event.emit(self.__datetimes.pop(0))
return ret
def peekDateTime(self):
return None
def getDispatchPriority(self):
return self.__priority
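# Note on the two fakes above, as exercised by the tests below: NonRealtimeFeed
# exposes its next datetime through peekDateTime(), so the dispatcher can order
# it against other subjects, while RealtimeFeed returns None and is treated as
# "live" data. getDispatchPriority() lets testPriority pin an explicit ordering;
# None means no preference.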
class DispatcherTestCase(common.TestCase):
def test1NrtFeed(self):
values = []
now = datetime.datetime.now()
datetimes = [now + datetime.timedelta(seconds=i) for i in xrange(10)]
nrtFeed = NonRealtimeFeed(copy.copy(datetimes))
nrtFeed.getEvent().subscribe(lambda x: values.append(x))
disp = dispatcher.Dispatcher()
disp.addSubject(nrtFeed)
disp.run()
self.assertEqual(values, datetimes)
def test2NrtFeeds(self):
values = []
now = datetime.datetime.now()
datetimes1 = [now + datetime.timedelta(seconds=i) for i in xrange(10)]
datetimes2 = [now + datetime.timedelta(seconds=i+len(datetimes1)) for i in xrange(10)]
nrtFeed1 = NonRealtimeFeed(copy.copy(datetimes1))
nrtFeed1.getEvent().subscribe(lambda x: values.append(x))
nrtFeed2 = NonRealtimeFeed(copy.copy(datetimes2))
nrtFeed2.getEvent().subscribe(lambda x: values.append(x))
disp = dispatcher.Dispatcher()
disp.addSubject(nrtFeed1)
disp.addSubject(nrtFeed2)
disp.run()
self.assertEqual(len(values), len(datetimes1) + len(datetimes2))
self.assertEqual(values[:len(datetimes1)], datetimes1)
self.assertEqual(values[len(datetimes1):], datetimes2)
def test1RtFeed(self):
values = []
now = datetime.datetime.now()
datetimes = [now + datetime.timedelta(seconds=i) for i in xrange(10)]
nrtFeed = RealtimeFeed(copy.copy(datetimes))
nrtFeed.getEvent().subscribe(lambda x: values.append(x))
disp = dispatcher.Dispatcher()
disp.addSubject(nrtFeed)
disp.run()
self.assertEqual(values, datetimes)
def test2RtFeeds(self):
values = []
now = datetime.datetime.now()
datetimes1 = [now + datetime.timedelta(seconds=i) for i in xrange(10)]
datetimes2 = [now + datetime.timedelta(seconds=i+len(datetimes1)) for i in xrange(10)]
nrtFeed1 = RealtimeFeed(copy.copy(datetimes1))
nrtFeed1.getEvent().subscribe(lambda x: values.append(x))
nrtFeed2 = RealtimeFeed(copy.copy(datetimes2))
nrtFeed2.getEvent().subscribe(lambda x: values.append(x))
disp = dispatcher.Dispatcher()
disp.addSubject(nrtFeed1)
disp.addSubject(nrtFeed2)
disp.run()
self.assertEqual(len(values), len(datetimes1) + len(datetimes2))
for i in xrange(len(datetimes1)):
self.assertEqual(values[i*2], datetimes1[i])
self.assertEqual(values[i*2+1], datetimes2[i])
def test2Combined(self):
values = []
now = datetime.datetime.now()
datetimes1 = [now + datetime.timedelta(seconds=i) for i in xrange(10)]
datetimes2 = [now + datetime.timedelta(seconds=i+len(datetimes1)) for i in xrange(10)]
nrtFeed1 = RealtimeFeed(copy.copy(datetimes1))
nrtFeed1.getEvent().subscribe(lambda x: values.append(x))
nrtFeed2 = NonRealtimeFeed(copy.copy(datetimes2))
nrtFeed2.getEvent().subscribe(lambda x: values.append(x))
disp = dispatcher.Dispatcher()
disp.addSubject(nrtFeed1)
disp.addSubject(nrtFeed2)
disp.run()
self.assertEqual(len(values), len(datetimes1) + len(datetimes2))
for i in xrange(len(datetimes1)):
self.assertEqual(values[i*2], datetimes1[i])
self.assertEqual(values[i*2+1], datetimes2[i])
def testPriority(self):
feed4 = RealtimeFeed([], None)
feed3 = RealtimeFeed([], None)
feed2 = RealtimeFeed([], 3)
feed1 = RealtimeFeed([], 0)
disp = dispatcher.Dispatcher()
disp.addSubject(feed3)
disp.addSubject(feed2)
disp.addSubject(feed1)
self.assertEqual(disp.getSubjects(), [feed1, feed2, feed3])
disp = dispatcher.Dispatcher()
disp.addSubject(feed1)
disp.addSubject(feed2)
disp.addSubject(feed3)
self.assertEqual(disp.getSubjects(), [feed1, feed2, feed3])
disp = dispatcher.Dispatcher()
disp.addSubject(feed3)
disp.addSubject(feed4)
disp.addSubject(feed2)
disp.addSubject(feed1)
self.assertEqual(disp.getSubjects(), [feed1, feed2, feed3, feed4])
def testDispatchOrder(self):
values = []
now = datetime.datetime.now()
feed1 = NonRealtimeFeed([now], 0)
feed2 = RealtimeFeed([now + datetime.timedelta(seconds=1)], None)
feed1.getEvent().subscribe(lambda x: values.append(x))
feed2.getEvent().subscribe(lambda x: values.append(x))
disp = dispatcher.Dispatcher()
disp.addSubject(feed2)
disp.addSubject(feed1)
self.assertEqual(disp.getSubjects(), [feed1, feed2])
disp.run()
# Check that although feed2 is realtime, feed1 was dispatched before.
self.assertTrue(values[0] < values[1])
class EventTestCase(common.TestCase):
def testEmitOrder(self):
handlersData = []
def handler3():
handlersData.append(3)
def handler1():
handlersData.append(1)
def handler2():
handlersData.append(2)
event = observer.Event()
event.subscribe(handler1)
event.subscribe(handler2)
event.subscribe(handler3)
event.emit()
self.assertTrue(handlersData == [1, 2, 3])
handlersData = []
event = observer.Event()
event.subscribe(handler3)
event.subscribe(handler2)
event.subscribe(handler1)
event.emit()
self.assertTrue(handlersData == [3, 2, 1])
def testDuplicateHandlers(self):
def handler1():
handlersData.append(1)
handlersData = []
event = observer.Event()
event.subscribe(handler1)
event.subscribe(handler1)
event.emit()
self.assertTrue(handlersData == [1])
def testReentrancy(self):
handlersData = []
event = observer.Event()
def handler2():
handlersData.append(2)
def handler1():
handlersData.append(1)
event.subscribe(handler2)
event.subscribe(handler1)
event.subscribe(handler1)
event.emit()
self.assertTrue(handlersData == [1])
event.emit()
self.assertTrue(handlersData == [1, 1, 2])
event.unsubscribe(handler1)
event.emit()
self.assertTrue(handlersData == [1, 1, 2, 2])
event.unsubscribe(handler2)
event.emit()
self.assertTrue(handlersData == [1, 1, 2, 2])
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_clusters_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_access_profile_request, build_get_request, build_get_upgrade_profile_request, build_list_by_resource_group_request, build_list_cluster_admin_credentials_request, build_list_cluster_monitoring_user_credentials_request, build_list_cluster_user_credentials_request, build_list_request, build_reset_aad_profile_request_initial, build_reset_service_principal_profile_request_initial, build_rotate_cluster_certificates_request_initial, build_update_tags_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations:
"""ManagedClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Gets a list of managed clusters in the specified subscription.
Gets a list of managed clusters in the specified subscription. The operation returns properties
of each managed cluster.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2020_01_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
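    # Usage sketch (not part of the generated client): consuming the pager that
    # list() returns. The client construction is illustrative; any credential
    # from azure-identity and a ContainerServiceClient for this API version
    # should work.
    #
    #   async with ContainerServiceClient(credential, subscription_id) as client:
    #       async for managed_cluster in client.managed_clusters.list():
    #           print(managed_cluster.name)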
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Lists managed clusters in the specified subscription and resource group.
Lists managed clusters in the specified subscription and resource group. The operation returns
properties of each managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2020_01_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
@distributed_trace_async
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedClusterUpgradeProfile":
"""Gets upgrade profile for a managed cluster.
Gets the details of the upgrade profile for a managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.ManagedClusterUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
@distributed_trace_async
async def get_access_profile(
self,
resource_group_name: str,
resource_name: str,
role_name: str,
**kwargs: Any
) -> "_models.ManagedClusterAccessProfile":
"""Gets an access profile of a managed cluster.
Gets the accessProfile for the specified role name of the managed cluster with a specified
resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param role_name: The name of the role for managed cluster accessProfile resource.
:type role_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterAccessProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.ManagedClusterAccessProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_access_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
role_name=role_name,
template_url=self.get_access_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_admin_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster admin credential of a managed cluster.
Gets cluster admin credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_admin_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_admin_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster user credential of a managed cluster.
Gets cluster user credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_user_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_user_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_monitoring_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster monitoring user credential of a managed cluster.
Gets cluster monitoring user credential of the managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_monitoring_user_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_monitoring_user_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_monitoring_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedCluster":
"""Gets a managed cluster.
Gets the details of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_01_01.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedCluster')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Creates or updates a managed cluster.
Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
:type parameters: ~azure.mgmt.containerservice.v2020_01_01.models.ManagedCluster
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2020_01_01.models.ManagedCluster]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._update_tags_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Updates tags on a managed cluster.
Updates a managed cluster with the specified tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
:type parameters: ~azure.mgmt.containerservice.v2020_01_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2020_01_01.models.ManagedCluster]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a managed cluster.
Deletes the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _reset_service_principal_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
request = build_reset_service_principal_profile_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._reset_service_principal_profile_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
@distributed_trace_async
async def begin_reset_service_principal_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset Service Principal Profile of a managed cluster.
Update the service principal Profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
Managed Cluster.
:type parameters:
~azure.mgmt.containerservice.v2020_01_01.models.ManagedClusterServicePrincipalProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_service_principal_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
async def _reset_aad_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedClusterAADProfile')
request = build_reset_aad_profile_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._reset_aad_profile_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
@distributed_trace_async
async def begin_reset_aad_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset AAD Profile of a managed cluster.
Update the AAD Profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
Cluster.
:type parameters: ~azure.mgmt.containerservice.v2020_01_01.models.ManagedClusterAADProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_aad_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
async def _rotate_cluster_certificates_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_rotate_cluster_certificates_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._rotate_cluster_certificates_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
@distributed_trace_async
async def begin_rotate_cluster_certificates(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Rotate certificates of a managed cluster.
Rotate certificates of a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._rotate_cluster_certificates_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
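# Usage sketch (editor's addition, not part of the generated client): one way a
# caller might drive the async operations above. The import paths and client
# construction follow the usual azure-identity / azure-mgmt-containerservice
# conventions but should be treated as assumptions; the resource names passed
# in are placeholders.
async def _managed_clusters_example(subscription_id, resource_group, cluster_name):
    # Lazy imports so this sketch does not affect module import time.
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.containerservice.aio import ContainerServiceClient

    async with DefaultAzureCredential() as credential:
        async with ContainerServiceClient(credential, subscription_id) as client:
            # Plain GET: a single request, no polling involved.
            cluster = await client.managed_clusters.get(resource_group, cluster_name)
            print(cluster.name, cluster.kubernetes_version)

            # Long-running operation: awaiting begin_* yields an AsyncLROPoller,
            # and awaiting poller.result() polls until the operation completes.
            poller = await client.managed_clusters.begin_rotate_cluster_certificates(
                resource_group, cluster_name)
            await poller.result()

# To run (hypothetical values):
#     import asyncio
#     asyncio.run(_managed_clusters_example('<subscription-id>', '<rg>', '<cluster>'))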
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import getLogger
from unittest import TestCase
from octodns.record import Create, Delete, Record, Update
from octodns.provider.base import BaseProvider
from octodns.provider.plan import Plan, UnsafePlan
from octodns.zone import Zone
class HelperProvider(BaseProvider):
log = getLogger('HelperProvider')
SUPPORTS = set(('A',))
id = 'test'
def __init__(self, extra_changes, apply_disabled=False,
include_change_callback=None):
self.__extra_changes = extra_changes
self.apply_disabled = apply_disabled
self.include_change_callback = include_change_callback
self.update_pcent_threshold = Plan.MAX_SAFE_UPDATE_PCENT
self.delete_pcent_threshold = Plan.MAX_SAFE_DELETE_PCENT
def populate(self, zone, target=False, lenient=False):
pass
def _include_change(self, change):
return not self.include_change_callback or \
self.include_change_callback(change)
def _extra_changes(self, existing, changes):
return self.__extra_changes
def _apply(self, plan):
pass
class TestBaseProvider(TestCase):
def test_base_provider(self):
with self.assertRaises(NotImplementedError) as ctx:
BaseProvider('base')
self.assertEquals('Abstract base class, log property missing',
ctx.exception.message)
class HasLog(BaseProvider):
log = getLogger('HasLog')
with self.assertRaises(NotImplementedError) as ctx:
HasLog('haslog')
self.assertEquals('Abstract base class, SUPPORTS_GEO property missing',
ctx.exception.message)
class HasSupportsGeo(HasLog):
SUPPORTS_GEO = False
zone = Zone('unit.tests.', [])
with self.assertRaises(NotImplementedError) as ctx:
HasSupportsGeo('hassupportsgeo').populate(zone)
self.assertEquals('Abstract base class, SUPPORTS property missing',
ctx.exception.message)
class HasSupports(HasSupportsGeo):
SUPPORTS = set(('A',))
with self.assertRaises(NotImplementedError) as ctx:
HasSupports('hassupports').populate(zone)
self.assertEquals('Abstract base class, populate method missing',
ctx.exception.message)
class HasPopulate(HasSupports):
def populate(self, zone, target=False, lenient=False):
zone.add_record(Record.new(zone, '', {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
zone.add_record(Record.new(zone, 'going', {
'ttl': 60,
'type': 'A',
'value': '3.4.5.6'
}))
zone.add_record(Record.new(zone, '', {
'ttl': 60,
'type': 'A',
'value': '1.2.3.4'
}))
self.assertTrue(HasSupports('hassupportsgeo')
.supports(list(zone.records)[0]))
plan = HasPopulate('haspopulate').plan(zone)
self.assertEquals(2, len(plan.changes))
with self.assertRaises(NotImplementedError) as ctx:
HasPopulate('haspopulate').apply(plan)
self.assertEquals('Abstract base class, _apply method missing',
ctx.exception.message)
def test_plan(self):
ignored = Zone('unit.tests.', [])
# No change, thus no plan
provider = HelperProvider([])
self.assertEquals(None, provider.plan(ignored))
record = Record.new(ignored, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
provider = HelperProvider([Create(record)])
plan = provider.plan(ignored)
self.assertTrue(plan)
self.assertEquals(1, len(plan.changes))
def test_apply(self):
ignored = Zone('unit.tests.', [])
record = Record.new(ignored, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
provider = HelperProvider([Create(record)], apply_disabled=True)
plan = provider.plan(ignored)
provider.apply(plan)
provider.apply_disabled = False
self.assertEquals(1, provider.apply(plan))
def test_include_change(self):
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
zone.add_record(record)
provider = HelperProvider([], include_change_callback=lambda c: False)
plan = provider.plan(zone)
# We filtered out the only change
self.assertFalse(plan)
def test_safe_none(self):
# No changes is safe
Plan(None, None, [], True).raise_if_unsafe()
def test_safe_creates(self):
# Creates are safe when the number of existing records is under MIN_EXISTING_RECORDS
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
Plan(zone, zone, [Create(record) for i in range(10)], True) \
.raise_if_unsafe()
def test_safe_min_existing_creates(self):
# Creates are safe when the number of existing records is over MIN_EXISTING_RECORDS
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
zone.add_record(Record.new(zone, unicode(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
Plan(zone, zone, [Create(record) for i in range(10)], True) \
.raise_if_unsafe()
def test_safe_no_existing(self):
# Changes are safe when there are fewer existing records than MIN_EXISTING_RECORDS
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
updates = [Update(record, record), Update(record, record)]
Plan(zone, zone, updates, True).raise_if_unsafe()
def test_safe_updates_min_existing(self):
# MAX_SAFE_UPDATE_PCENT+1 fails when more
# than MIN_EXISTING_RECORDS exist
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
zone.add_record(Record.new(zone, unicode(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
changes = [Update(record, record)
for i in range(int(Plan.MIN_EXISTING_RECORDS *
Plan.MAX_SAFE_UPDATE_PCENT) + 1)]
with self.assertRaises(UnsafePlan) as ctx:
Plan(zone, zone, changes, True).raise_if_unsafe()
self.assertTrue('Too many updates' in ctx.exception.message)
def test_safe_updates_min_existing_pcent(self):
# MAX_SAFE_UPDATE_PCENT is safe when more
# than MIN_EXISTING_RECORDS exist
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
zone.add_record(Record.new(zone, unicode(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
changes = [Update(record, record)
for i in range(int(Plan.MIN_EXISTING_RECORDS *
Plan.MAX_SAFE_UPDATE_PCENT))]
Plan(zone, zone, changes, True).raise_if_unsafe()
def test_safe_deletes_min_existing(self):
# MAX_SAFE_DELETE_PCENT+1 fails when more
# than MIN_EXISTING_RECORDS exist
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
zone.add_record(Record.new(zone, unicode(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
changes = [Delete(record)
for i in range(int(Plan.MIN_EXISTING_RECORDS *
Plan.MAX_SAFE_DELETE_PCENT) + 1)]
with self.assertRaises(UnsafePlan) as ctx:
Plan(zone, zone, changes, True).raise_if_unsafe()
self.assertTrue('Too many deletes' in ctx.exception.message)
def test_safe_deletes_min_existing_pcent(self):
# MAX_SAFE_DELETE_PCENT is safe when more
# than MIN_EXISTING_RECORDS exist
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
zone.add_record(Record.new(zone, unicode(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
changes = [Delete(record)
for i in range(int(Plan.MIN_EXISTING_RECORDS *
Plan.MAX_SAFE_DELETE_PCENT))]
Plan(zone, zone, changes, True).raise_if_unsafe()
def test_safe_updates_min_existing_override(self):
safe_pcent = .4
# 40% + 1 fails when more
# than MIN_EXISTING_RECORDS exist
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
zone.add_record(Record.new(zone, unicode(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
changes = [Update(record, record)
for i in range(int(Plan.MIN_EXISTING_RECORDS *
safe_pcent) + 1)]
with self.assertRaises(UnsafePlan) as ctx:
Plan(zone, zone, changes, True,
update_pcent_threshold=safe_pcent).raise_if_unsafe()
self.assertTrue('Too many updates' in ctx.exception.message)
def test_safe_deletes_min_existing_override(self):
safe_pcent = .4
# 40% + 1 fails when more
# than MIN_EXISTING_RECORDS exist
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
for i in range(int(Plan.MIN_EXISTING_RECORDS)):
zone.add_record(Record.new(zone, unicode(i), {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
changes = [Delete(record)
for i in range(int(Plan.MIN_EXISTING_RECORDS *
safe_pcent) + 1)]
with self.assertRaises(UnsafePlan) as ctx:
Plan(zone, zone, changes, True,
delete_pcent_threshold=safe_pcent).raise_if_unsafe()
self.assertTrue('Too many deletes' in ctx.exception.message)
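# Editor's sketch (not part of the octodns test suite): the threshold arithmetic
# the safety tests above rely on, isolated as a pure function. The constant
# values below are illustrative stand-ins; the real values live on
# octodns.provider.plan.Plan.
MIN_EXISTING_RECORDS = 10      # assumed value, for illustration only
MAX_SAFE_UPDATE_PCENT = 0.3    # assumed value, for illustration only


def max_safe_updates(existing_record_count,
                     threshold=MAX_SAFE_UPDATE_PCENT,
                     min_existing=MIN_EXISTING_RECORDS):
    """How many updates a plan may contain before raise_if_unsafe() trips.

    Below min_existing records the percentage check is skipped entirely; at or
    above it, the plan is unsafe once the update count exceeds
    threshold * existing_record_count.
    """
    if existing_record_count < min_existing:
        return None  # the percentage check does not apply
    return int(existing_record_count * threshold)


# With the assumed values above, 10 existing records allow int(10 * 0.3) == 3
# updates; the tests add one more change to push a plan over the edge.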
from __future__ import absolute_import, unicode_literals
import logging
import random
from mopidy import exceptions
from mopidy.core import listener
from mopidy.internal import deprecation, validation
from mopidy.models import TlTrack, Track
logger = logging.getLogger(__name__)
class TracklistController(object):
pykka_traversable = True
def __init__(self, core):
self.core = core
self._next_tlid = 1
self._tl_tracks = []
self._version = 0
self._shuffled = []
# Properties
def get_tl_tracks(self):
"""Get tracklist as list of :class:`mopidy.models.TlTrack`."""
return self._tl_tracks[:]
tl_tracks = deprecation.deprecated_property(get_tl_tracks)
"""
.. deprecated:: 1.0
Use :meth:`get_tl_tracks` instead.
"""
def get_tracks(self):
"""Get tracklist as list of :class:`mopidy.models.Track`."""
return [tl_track.track for tl_track in self._tl_tracks]
tracks = deprecation.deprecated_property(get_tracks)
"""
.. deprecated:: 1.0
Use :meth:`get_tracks` instead.
"""
def get_length(self):
"""Get length of the tracklist."""
return len(self._tl_tracks)
length = deprecation.deprecated_property(get_length)
"""
.. deprecated:: 1.0
Use :meth:`get_length` instead.
"""
def get_version(self):
"""
Get the tracklist version.
Integer which is increased every time the tracklist is changed. Is not
reset before Mopidy is restarted.
"""
return self._version
def _increase_version(self):
self._version += 1
self.core.playback._on_tracklist_change()
self._trigger_tracklist_changed()
version = deprecation.deprecated_property(get_version)
"""
.. deprecated:: 1.0
Use :meth:`get_version` instead.
"""
def get_consume(self):
"""Get consume mode.
:class:`True`
Tracks are removed from the tracklist when they have been played.
:class:`False`
Tracks are not removed from the tracklist.
"""
return getattr(self, '_consume', False)
def set_consume(self, value):
"""Set consume mode.
:class:`True`
Tracks are removed from the tracklist when they have been played.
:class:`False`
Tracks are not removed from the tracklist.
"""
validation.check_boolean(value)
if self.get_consume() != value:
self._trigger_options_changed()
return setattr(self, '_consume', value)
consume = deprecation.deprecated_property(get_consume, set_consume)
"""
.. deprecated:: 1.0
Use :meth:`get_consume` and :meth:`set_consume` instead.
"""
def get_random(self):
"""Get random mode.
:class:`True`
Tracks are selected at random from the tracklist.
:class:`False`
Tracks are played in the order of the tracklist.
"""
return getattr(self, '_random', False)
def set_random(self, value):
"""Set random mode.
:class:`True`
Tracks are selected at random from the tracklist.
:class:`False`
Tracks are played in the order of the tracklist.
"""
validation.check_boolean(value)
if self.get_random() != value:
self._trigger_options_changed()
if value:
self._shuffled = self.get_tl_tracks()
random.shuffle(self._shuffled)
return setattr(self, '_random', value)
random = deprecation.deprecated_property(get_random, set_random)
"""
.. deprecated:: 1.0
Use :meth:`get_random` and :meth:`set_random` instead.
"""
def get_repeat(self):
"""
Get repeat mode.
:class:`True`
The tracklist is played repeatedly.
:class:`False`
The tracklist is played once.
"""
return getattr(self, '_repeat', False)
def set_repeat(self, value):
"""
Set repeat mode.
To repeat a single track, set both ``repeat`` and ``single``.
:class:`True`
The tracklist is played repeatedly.
:class:`False`
The tracklist is played once.
"""
validation.check_boolean(value)
if self.get_repeat() != value:
self._trigger_options_changed()
return setattr(self, '_repeat', value)
repeat = deprecation.deprecated_property(get_repeat, set_repeat)
"""
.. deprecated:: 1.0
Use :meth:`get_repeat` and :meth:`set_repeat` instead.
"""
def get_single(self):
"""
Get single mode.
:class:`True`
Playback is stopped after current song, unless in ``repeat`` mode.
:class:`False`
Playback continues after current song.
"""
return getattr(self, '_single', False)
def set_single(self, value):
"""
Set single mode.
:class:`True`
Playback is stopped after current song, unless in ``repeat`` mode.
:class:`False`
Playback continues after current song.
"""
validation.check_boolean(value)
if self.get_single() != value:
self._trigger_options_changed()
return setattr(self, '_single', value)
single = deprecation.deprecated_property(get_single, set_single)
"""
.. deprecated:: 1.0
Use :meth:`get_single` and :meth:`set_single` instead.
"""
# Methods
def index(self, tl_track=None, tlid=None):
"""
The position of the given track in the tracklist.
If neither *tl_track* nor *tlid* is given we return the index of
the currently playing track.
:param tl_track: the track to find the index of
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:param tlid: TLID of the track to find the index of
:type tlid: :class:`int` or :class:`None`
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
The *tlid* parameter
"""
tl_track is None or validation.check_instance(tl_track, TlTrack)
tlid is None or validation.check_integer(tlid, min=1)
if tl_track is None and tlid is None:
tl_track = self.core.playback.get_current_tl_track()
if tl_track is not None:
try:
return self._tl_tracks.index(tl_track)
except ValueError:
pass
elif tlid is not None:
for i, tl_track in enumerate(self._tl_tracks):
if tl_track.tlid == tlid:
return i
return None
def get_eot_tlid(self):
"""
The TLID of the track that will be played after the current track.
Not necessarily the same TLID as returned by :meth:`get_next_tlid`.
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
"""
current_tl_track = self.core.playback.get_current_tl_track()
return getattr(self.eot_track(current_tl_track), 'tlid', None)
def eot_track(self, tl_track):
"""
The track that will be played after the given track.
Not necessarily the same track as :meth:`next_track`.
:param tl_track: the reference track
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:rtype: :class:`mopidy.models.TlTrack` or :class:`None`
"""
deprecation.warn('core.tracklist.eot_track', pending=True)
tl_track is None or validation.check_instance(tl_track, TlTrack)
if self.get_single() and self.get_repeat():
return tl_track
elif self.get_single():
return None
# The current difference between next and EOT handling is that EOT needs to
# handle "single"; with that out of the way the rest of the logic is
# shared.
return self.next_track(tl_track)
def get_next_tlid(self):
"""
The tlid of the track that will be played if calling
:meth:`mopidy.core.PlaybackController.next()`.
For normal playback this is the next track in the tracklist. If repeat
is enabled the next track can loop around the tracklist. When random is
enabled this should be a random track; all tracks should be played once
before the tracklist repeats.
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
"""
current_tl_track = self.core.playback.get_current_tl_track()
return getattr(self.next_track(current_tl_track), 'tlid', None)
def next_track(self, tl_track):
"""
The track that will be played if calling
:meth:`mopidy.core.PlaybackController.next()`.
For normal playback this is the next track in the tracklist. If repeat
is enabled the next track can loop around the tracklist. When random is
enabled this should be a random track; all tracks should be played once
before the tracklist repeats.
:param tl_track: the reference track
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:rtype: :class:`mopidy.models.TlTrack` or :class:`None`
"""
deprecation.warn('core.tracklist.next_track', pending=True)
tl_track is None or validation.check_instance(tl_track, TlTrack)
if not self._tl_tracks:
return None
if self.get_random() and not self._shuffled:
if self.get_repeat() or not tl_track:
logger.debug('Shuffling tracks')
self._shuffled = self._tl_tracks[:]
random.shuffle(self._shuffled)
if self.get_random():
if self._shuffled:
return self._shuffled[0]
return None
next_index = self.index(tl_track)
if next_index is None:
next_index = 0
else:
next_index += 1
if self.get_repeat():
next_index %= len(self._tl_tracks)
elif next_index >= len(self._tl_tracks):
return None
return self._tl_tracks[next_index]
def get_previous_tlid(self):
"""
Returns the TLID of the track that will be played if calling
:meth:`mopidy.core.PlaybackController.previous()`.
For normal playback this is the previous track in the tracklist. If
random and/or consume is enabled it should return the current track
instead.
:rtype: :class:`int` or :class:`None`
.. versionadded:: 1.1
"""
current_tl_track = self.core.playback.get_current_tl_track()
return getattr(self.previous_track(current_tl_track), 'tlid', None)
def previous_track(self, tl_track):
"""
Returns the track that will be played if calling
:meth:`mopidy.core.PlaybackController.previous()`.
For normal playback this is the previous track in the tracklist. If
random and/or consume is enabled it should return the current track
instead.
:param tl_track: the reference track
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:rtype: :class:`mopidy.models.TlTrack` or :class:`None`
"""
deprecation.warn('core.tracklist.previous_track', pending=True)
tl_track is None or validation.check_instance(tl_track, TlTrack)
if self.get_repeat() or self.get_consume() or self.get_random():
return tl_track
position = self.index(tl_track)
if position in (None, 0):
return None
# Since we know we are not at zero we have to be somewhere in the range
# 1 - len(tracks). Thus 'position - 1' will always be within the list.
return self._tl_tracks[position - 1]
def add(self, tracks=None, at_position=None, uri=None, uris=None):
"""
Add tracks to the tracklist.
If ``uri`` is given instead of ``tracks``, the URI is looked up in the
library and the resulting tracks are added to the tracklist.
If ``uris`` is given instead of ``uri`` or ``tracks``, the URIs are
looked up in the library and the resulting tracks are added to the
tracklist.
If ``at_position`` is given, the tracks are inserted at the given
position in the tracklist. If ``at_position`` is not given, the tracks
are appended to the end of the tracklist.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param tracks: tracks to add
:type tracks: list of :class:`mopidy.models.Track` or :class:`None`
:param at_position: position in tracklist to add tracks
:type at_position: int or :class:`None`
:param uri: URI for tracks to add
:type uri: string or :class:`None`
:param uris: list of URIs for tracks to add
:type uris: list of string or :class:`None`
:rtype: list of :class:`mopidy.models.TlTrack`
.. versionadded:: 1.0
The ``uris`` argument.
.. deprecated:: 1.0
The ``tracks`` and ``uri`` arguments. Use ``uris``.
"""
if sum(o is not None for o in [tracks, uri, uris]) != 1:
raise ValueError(
'Exactly one of "tracks", "uri" or "uris" must be set')
tracks is None or validation.check_instances(tracks, Track)
uri is None or validation.check_uri(uri)
uris is None or validation.check_uris(uris)
validation.check_integer(at_position or 0)
if tracks:
deprecation.warn('core.tracklist.add:tracks_arg')
if uri:
deprecation.warn('core.tracklist.add:uri_arg')
if tracks is None:
if uri is not None:
uris = [uri]
tracks = []
track_map = self.core.library.lookup(uris=uris)
for uri in uris:
tracks.extend(track_map[uri])
tl_tracks = []
max_length = self.core._config['core']['max_tracklist_length']
for track in tracks:
if self.get_length() >= max_length:
raise exceptions.TracklistFull(
'Tracklist may contain at most %d tracks.' % max_length)
tl_track = TlTrack(self._next_tlid, track)
self._next_tlid += 1
if at_position is not None:
self._tl_tracks.insert(at_position, tl_track)
at_position += 1
else:
self._tl_tracks.append(tl_track)
tl_tracks.append(tl_track)
if tl_tracks:
self._increase_version()
return tl_tracks
def clear(self):
"""
Clear the tracklist.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
"""
self._tl_tracks = []
self._increase_version()
def filter(self, criteria=None, **kwargs):
"""
Filter the tracklist by the given criteria.
Each criterion consists of a model field to check and a list of values to
compare it against. If the model field matches one of the values, the
track may be returned.
Only tracks that match all the given criteria are returned.
Examples::
# Returns tracks with TLIDs 1, 2, 3, or 4 (tracklist ID)
filter({'tlid': [1, 2, 3, 4]})
# Returns tracks with URI 'xyz' or 'abc'
filter({'uri': ['xyz', 'abc']})
# Returns tracks with a matching TLID (1, 3 or 6) and a
# matching URI ('xyz' or 'abc')
filter({'tlid': [1, 3, 6], 'uri': ['xyz', 'abc']})
:param criteria: one or more criteria to match by
:type criteria: dict of (string, list) pairs
:rtype: list of :class:`mopidy.models.TlTrack`
.. deprecated:: 1.1
Providing the criteria via ``kwargs``.
"""
if kwargs:
deprecation.warn('core.tracklist.filter:kwargs_criteria')
criteria = criteria or kwargs
tlids = criteria.pop('tlid', [])
validation.check_query(criteria, validation.TRACKLIST_FIELDS)
validation.check_instances(tlids, int)
matches = self._tl_tracks
for (key, values) in criteria.items():
matches = [
ct for ct in matches if getattr(ct.track, key) in values]
if tlids:
matches = [ct for ct in matches if ct.tlid in tlids]
return matches
def move(self, start, end, to_position):
"""
Move the tracks in the slice ``[start:end]`` to ``to_position``.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param start: position of first track to move
:type start: int
:param end: position after last track to move
:type end: int
:param to_position: new position for the tracks
:type to_position: int
"""
if start == end:
end += 1
tl_tracks = self._tl_tracks
# TODO: use validation helpers?
assert start < end, 'start must be smaller than end'
assert start >= 0, 'start must be at least zero'
assert end <= len(tl_tracks), \
'end can not be larger than tracklist length'
assert to_position >= 0, 'to_position must be at least zero'
assert to_position <= len(tl_tracks), \
'to_position can not be larger than tracklist length'
new_tl_tracks = tl_tracks[:start] + tl_tracks[end:]
for tl_track in tl_tracks[start:end]:
new_tl_tracks.insert(to_position, tl_track)
to_position += 1
self._tl_tracks = new_tl_tracks
self._increase_version()
def remove(self, criteria=None, **kwargs):
"""
Remove the matching tracks from the tracklist.
Uses :meth:`filter()` to lookup the tracks to remove.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param criteria: one or more criteria to match by
:type criteria: dict
:rtype: list of :class:`mopidy.models.TlTrack` that was removed
.. deprecated:: 1.1
Providing the criteria via ``kwargs``.
"""
if kwargs:
deprecation.warn('core.tracklist.remove:kwargs_criteria')
tl_tracks = self.filter(criteria or kwargs)
for tl_track in tl_tracks:
position = self._tl_tracks.index(tl_track)
del self._tl_tracks[position]
self._increase_version()
return tl_tracks
def shuffle(self, start=None, end=None):
"""
Shuffles the entire tracklist. If ``start`` and ``end`` are given, only
the slice ``[start:end]`` is shuffled.
Triggers the :meth:`mopidy.core.CoreListener.tracklist_changed` event.
:param start: position of first track to shuffle
:type start: int or :class:`None`
:param end: position after last track to shuffle
:type end: int or :class:`None`
"""
tl_tracks = self._tl_tracks
# TODO: use validation helpers?
if start is not None and end is not None:
assert start < end, 'start must be smaller than end'
if start is not None:
assert start >= 0, 'start must be at least zero'
if end is not None:
assert end <= len(tl_tracks), 'end can not be larger than ' + \
'tracklist length'
before = tl_tracks[:start or 0]
shuffled = tl_tracks[start:end]
after = tl_tracks[end or len(tl_tracks):]
random.shuffle(shuffled)
self._tl_tracks = before + shuffled + after
self._increase_version()
def slice(self, start, end):
"""
Returns a slice of the tracklist, limited by the given start and end
positions.
:param start: position of first track to include in slice
:type start: int
:param end: position after last track to include in slice
:type end: int
:rtype: list of :class:`mopidy.models.TlTrack`
"""
# TODO: validate slice?
return self._tl_tracks[start:end]
def _mark_playing(self, tl_track):
"""Internal method for :class:`mopidy.core.PlaybackController`."""
if self.get_random() and tl_track in self._shuffled:
self._shuffled.remove(tl_track)
def _mark_unplayable(self, tl_track):
"""Internal method for :class:`mopidy.core.PlaybackController`."""
logger.warning('Track is not playable: %s', tl_track.track.uri)
if self.get_random() and tl_track in self._shuffled:
self._shuffled.remove(tl_track)
def _mark_played(self, tl_track):
"""Internal method for :class:`mopidy.core.PlaybackController`."""
if self.consume and tl_track is not None:
self.remove({'tlid': [tl_track.tlid]})
return True
return False
def _trigger_tracklist_changed(self):
if self.get_random():
self._shuffled = self._tl_tracks[:]
random.shuffle(self._shuffled)
else:
self._shuffled = []
logger.debug('Triggering event: tracklist_changed()')
listener.CoreListener.send('tracklist_changed')
def _trigger_options_changed(self):
logger.debug('Triggering options changed event')
listener.CoreListener.send('options_changed')
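# Editor's sketch (not part of Mopidy): the index arithmetic used by
# next_track() above for non-random playback, isolated as a pure function so
# the repeat wrap-around is easy to see. Names here are illustrative only.
def _next_index(current_index, length, repeat):
    """Return the next tracklist index, or None when playback should stop."""
    if length == 0:
        return None
    next_index = 0 if current_index is None else current_index + 1
    if repeat:
        return next_index % length  # loop around the tracklist
    if next_index >= length:
        return None  # reached the end of the tracklist
    return next_index


# Mirroring next_track() for non-random playback:
#     _next_index(2, 3, repeat=False) -> None  (last track, stop)
#     _next_index(2, 3, repeat=True)  -> 0     (wrap to the first track)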
# -*- coding: utf-8 -*-
import json
import re
from django.shortcuts import render_to_response
from django import forms
from django.contrib import admin
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.template.defaultfilters import force_escape
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible, smart_str
from django.utils.translation import ugettext, ugettext_lazy as _
from cms.constants import PLUGIN_MOVE_ACTION, PLUGIN_COPY_ACTION
from cms.exceptions import SubClassNeededError
from cms.models import CMSPlugin
from cms.utils import get_cms_setting
class CMSPluginBaseMetaclass(forms.MediaDefiningClass):
"""
Ensure the CMSPlugin subclasses have sane values and set some defaults if
they're not given.
"""
def __new__(cls, name, bases, attrs):
super_new = super(CMSPluginBaseMetaclass, cls).__new__
parents = [base for base in bases if isinstance(base, CMSPluginBaseMetaclass)]
if not parents:
# If this is CMSPluginBase itself, and not a subclass, don't do anything
return super_new(cls, name, bases, attrs)
new_plugin = super_new(cls, name, bases, attrs)
# validate model is actually a CMSPlugin subclass.
if not issubclass(new_plugin.model, CMSPlugin):
raise SubClassNeededError(
"The 'model' attribute on CMSPluginBase subclasses must be "
"either CMSPlugin or a subclass of CMSPlugin. %r on %r is not."
% (new_plugin.model, new_plugin)
)
# validate the template:
if (not hasattr(new_plugin, 'render_template') and
not hasattr(new_plugin, 'get_render_template')):
raise ImproperlyConfigured(
"CMSPluginBase subclasses must have a render_template attribute"
" or get_render_template method"
)
# Set the default form
if not new_plugin.form:
form_meta_attrs = {
'model': new_plugin.model,
'exclude': ('position', 'placeholder', 'language', 'plugin_type', 'path', 'depth')
}
form_attrs = {
'Meta': type('Meta', (object,), form_meta_attrs)
}
new_plugin.form = type('%sForm' % name, (forms.ModelForm,), form_attrs)
# Set the default fieldsets
if not new_plugin.fieldsets:
basic_fields = []
advanced_fields = []
for f in new_plugin.model._meta.fields:
if not f.auto_created and f.editable:
if hasattr(f, 'advanced'):
advanced_fields.append(f.name)
else:
basic_fields.append(f.name)
if advanced_fields:
new_plugin.fieldsets = [
(
None,
{
'fields': basic_fields
}
),
(
_('Advanced options'),
{
'fields': advanced_fields,
'classes': ('collapse',)
}
)
]
# Set default name
if not new_plugin.name:
new_plugin.name = re.sub("([a-z])([A-Z])", "\g<1> \g<2>", name)
return new_plugin
@python_2_unicode_compatible
class CMSPluginBase(six.with_metaclass(CMSPluginBaseMetaclass, admin.ModelAdmin)):
name = ""
module = _("Generic") # To be overridden in child classes
form = None
change_form_template = "admin/cms/page/plugin/change_form.html"
# Should the plugin be rendered in the admin?
admin_preview = False
render_template = None
# Should the plugin be rendered at all, or doesn't it have any output?
render_plugin = True
model = CMSPlugin
text_enabled = False
page_only = False
allow_children = False
child_classes = None
require_parent = False
parent_classes = None
disable_child_plugins = False
cache = get_cms_setting('PLUGIN_CACHE')
system = False
opts = {}
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
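# A typical subclass (editor's illustration, hypothetical names) looks roughly
# like this; it only needs a model, a template and, usually, a render() that
# adds whatever the template expects to the context:
#
#     class HelloPlugin(CMSPluginBase):
#         model = CMSPlugin                     # or a CMSPlugin subclass
#         name = 'Hello'
#         render_template = 'hello/plugin.html'
#
#         def render(self, context, instance, placeholder):
#             context = super(HelloPlugin, self).render(
#                 context, instance, placeholder)
#             context['greeting'] = 'Hello'
#             return context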
def __init__(self, model=None, admin_site=None):
if admin_site:
super(CMSPluginBase, self).__init__(self.model, admin_site)
self.object_successfully_changed = False
self.placeholder = None
self.page = None
self.cms_plugin_instance = None
# The _cms_initial_attributes acts as a hook to set
# certain values when the form is saved.
# Currently this only happens on plugin creation.
self._cms_initial_attributes = {}
def _get_render_template(self, context, instance, placeholder):
if hasattr(self, 'get_render_template'):
template = self.get_render_template(context, instance, placeholder)
elif getattr(self, 'render_template', False):
template = getattr(self, 'render_template', False)
else:
template = None
if not template:
raise ValidationError("plugin has no render_template: %s" % self.__class__)
return template
@classmethod
def get_render_queryset(cls):
return cls.model._default_manager.all()
def render(self, context, instance, placeholder):
context['instance'] = instance
context['placeholder'] = placeholder
return context
@classmethod
def requires_parent_plugin(cls, slot, page):
if cls.get_require_parent(slot, page):
return True
allowed_parents = cls().get_parent_classes(slot, page) or []
return bool(allowed_parents)
@classmethod
def get_require_parent(cls, slot, page):
from cms.utils.placeholder import get_placeholder_conf
template = page and page.get_template() or None
# config overrides..
require_parent = get_placeholder_conf('require_parent', slot, template, default=cls.require_parent)
return require_parent
def get_cache_expiration(self, request, instance, placeholder):
"""
Provides hints to the placeholder, and in turn to the page for
determining the appropriate Cache-Control headers to add to the
HTTPResponse object.
Must return one of:
- None: This means the placeholder and the page will not even
consider this plugin when calculating the page expiration;
- A TZ-aware `datetime` of a specific date and time in the future
when this plugin's content expires;
- A `datetime.timedelta` instance indicating how long, relative to
the response timestamp that the content can be cached;
- An integer number of seconds that this plugin's content can be
cached.
There are constants defined in `cms.constants` that may be helpful:
- `EXPIRE_NOW`
- `MAX_EXPIRATION_TTL`
An integer value of 0 (zero) or `EXPIRE_NOW` effectively means "do not
cache". Negative values will be treated as `EXPIRE_NOW`. Values
exceeding the value `MAX_EXPIRATION_TTL` will be set to that value.
Negative `timedelta` values or those greater than `MAX_EXPIRATION_TTL`
will also be ranged in the same manner.
Similarly, `datetime` values earlier than now will be treated as
`EXPIRE_NOW`. Values greater than `MAX_EXPIRATION_TTL` seconds in the
future will be treated as `MAX_EXPIRATION_TTL` seconds in the future.
"""
return None
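# A minimal illustrative sketch (not part of this class): a subclass could
# return a fixed TTL for its rendered output. "NewsPlugin" is a hypothetical
# name used only for illustration.
#
#   from datetime import timedelta
#
#   class NewsPlugin(CMSPluginBase):
#       def get_cache_expiration(self, request, instance, placeholder):
#           # cache this plugin's output for five minutes
#           return timedelta(minutes=5)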
def get_vary_cache_on(self, request, instance, placeholder):
"""
Provides hints to the placeholder, and in turn to the page for
determining VARY headers for the response.
Must return one of:
- None (default),
- String of a case-sensitive header name, or
- iterable of case-sensitive header names.
NOTE: This only makes sense to use with caching. If this plugin has
``cache = False`` or plugin.get_cache_expiration(...) returns 0,
get_vary_cache_on() will have no effect.
"""
return None
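# Illustrative sketch only: a plugin whose output depends on the requesting
# user agent could return that header name, e.g.
#
#   def get_vary_cache_on(self, request, instance, placeholder):
#       return 'User-Agent'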
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
"""
We just need the popup interface here
"""
context.update({
'preview': "no_preview" not in request.GET,
'is_popup': True,
'plugin': obj,
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
})
return super(CMSPluginBase, self).render_change_form(request, context, add, change, form_url, obj)
def render_close_frame(self, obj, extra_context=None):
context = {
'plugin': obj,
'is_popup': True,
'name': force_text(obj),
"type": obj.get_plugin_name(),
'plugin_id': obj.pk,
'icon': force_escape(obj.get_instance_icon_src()),
'alt': force_escape(obj.get_instance_icon_alt()),
}
if extra_context:
context.update(extra_context)
return render_to_response(
'admin/cms/page/plugin/confirm_form.html', context
)
def save_model(self, request, obj, form, change):
"""
Override the original method and add some attributes to obj.
This has to be done because a newly created object must know
where it lives.
"""
# remember the saved object
self.saved_object = obj
return super(CMSPluginBase, self).save_model(request, obj, form, change)
def save_form(self, request, form, change):
obj = super(CMSPluginBase, self).save_form(request, form, change)
for field, value in self._cms_initial_attributes.items():
# Set the initial attribute hooks (if any)
setattr(obj, field, value)
return obj
def response_add(self, request, obj, **kwargs):
self.object_successfully_changed = True
# Normally we would add a user message saying the object was
# added successfully, but it appears the CMS has never supported
# this, and doing so can lead to issues with plugins like ckeditor.
return self.render_close_frame(obj)
def response_change(self, request, obj):
self.object_successfully_changed = True
opts = self.model._meta
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.render_close_frame(obj)
def log_addition(self, request, obj, bypass=None):
pass
def log_change(self, request, obj, message, bypass=None):
pass
def log_deletion(self, request, obj, object_repr, bypass=None):
pass
def icon_src(self, instance):
"""
Overwrite this if text_enabled = True
Return the URL for an image to be used for an icon for this
plugin instance in a text editor.
"""
return ""
def icon_alt(self, instance):
"""
Overwrite this if necessary when text_enabled = True
Return the 'alt' text to be used for an icon representing
the plugin object in a text editor.
"""
return "%s - %s" % (force_text(self.name), force_text(instance))
def get_fieldsets(self, request, obj=None):
"""
Same as the base class, except that an info message is shown if there are no fields.
"""
fieldsets = super(CMSPluginBase, self).get_fieldsets(request, obj)
for name, data in fieldsets:
if data.get('fields'): # if fieldset with non-empty fields is found, return fieldsets
return fieldsets
if self.inlines:
return []  # if the plugin has inlines but no fields of its own, return empty fieldsets to remove the empty white fieldset
try:  # if all fieldsets are empty (there is presumably only one fieldset), add a description
fieldsets[0][1]['description'] = self.get_empty_change_form_text(obj=obj)
except KeyError:
pass
return fieldsets
@classmethod
def get_empty_change_form_text(cls, obj=None):
"""
Returns the text displayed to the user when editing a plugin
that requires no configuration.
"""
return ugettext('There are no further settings for this plugin. Please press save.')
@classmethod
def get_child_class_overrides(cls, slot, page):
"""
Returns a list of plugin types that are allowed
as children of this plugin.
"""
from cms.utils.placeholder import get_placeholder_conf
template = page and page.get_template() or None
# config overrides..
ph_conf = get_placeholder_conf('child_classes', slot, template, default={})
return ph_conf.get(cls.__name__, cls.child_classes)
@classmethod
def get_child_plugin_candidates(cls, slot, page):
"""
Returns a list of all plugin classes
that will be considered when fetching
all available child classes for this plugin.
"""
# By adding this as a separate method,
# we allow other plugins to affect
# the list of child plugin candidates.
# Useful in cases like djangocms-text-ckeditor
# where only text-enabled plugins are allowed.
from cms.plugin_pool import plugin_pool
return plugin_pool.get_all_plugins()
@classmethod
def get_child_classes(cls, slot, page):
"""
Returns a list of plugin types that can be added
as children to this plugin.
"""
# Placeholder overrides are highest in priority
child_classes = cls.get_child_class_overrides(slot, page)
if child_classes:
return child_classes
# Get all child plugin candidates
installed_plugins = cls.get_child_plugin_candidates(slot, page)
child_classes = []
plugin_type = cls.__name__
# The following goes through each
# child plugin candidate and checks
# whether it has configured parent class restrictions.
# If there are restrictions, the plugin
# is only a valid child class if the current plugin
# matches one of the parent restrictions.
# If there are no restrictions, the plugin
# is a valid child class.
for plugin_class in installed_plugins:
allowed_parents = plugin_class().get_parent_classes(slot, page) or []
if not allowed_parents or plugin_type in allowed_parents:
# Plugin has no parent restrictions or
# Current plugin (self) is a configured parent
child_classes.append(plugin_class.__name__)
else:
continue
return child_classes
@classmethod
def get_parent_classes(cls, slot, page):
from cms.utils.placeholder import get_placeholder_conf
template = page and page.get_template() or None
# config overrides..
ph_conf = get_placeholder_conf('parent_classes', slot, template, default={})
parent_classes = ph_conf.get(cls.__name__, cls.parent_classes)
return parent_classes
def get_action_options(self):
return self.action_options
def requires_reload(self, action):
actions = self.get_action_options()
reload_required = False
if action in actions:
options = actions[action]
reload_required = options.get('requires_reload', False)
return reload_required
def get_plugin_urls(self):
"""
Return URL patterns for which the plugin wants to register
views.
"""
return []
def plugin_urls(self):
return self.get_plugin_urls()
plugin_urls = property(plugin_urls)
def get_extra_placeholder_menu_items(self, request, placeholder):
pass
def get_extra_global_plugin_menu_items(self, request, plugin):
pass
def get_extra_local_plugin_menu_items(self, request, plugin):
pass
def __repr__(self):
return smart_str(self.name)
def __str__(self):
return self.name
class PluginMenuItem(object):
def __init__(self, name, url, data, question=None, action='ajax', attributes=None):
"""
Creates an item in the plugin / placeholder menu
:param name: Item name (label)
:param url: URL the item points to. This URL will be called using POST
:param data: Data to be POSTed to the above URL
:param question: Confirmation text to be shown to the user prior to calling the given URL (optional)
:param action: Custom action to be called on click; currently supported: 'ajax', 'ajax_add'
:param attributes: Dictionary whose content will be added as data-attributes to the menu item
"""
if not attributes:
attributes = {}
self.name = name
self.url = url
self.data = json.dumps(data)
self.question = question
self.action = action
self.attributes = attributes
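# A hedged end-to-end sketch (not part of this module) of how CMSPluginBase is
# typically subclassed and registered; "HelloPlugin" and the template name are
# hypothetical and used only for illustration:
#
#   from cms.plugin_base import CMSPluginBase
#   from cms.plugin_pool import plugin_pool
#   from cms.models.pluginmodel import CMSPlugin
#
#   @plugin_pool.register_plugin
#   class HelloPlugin(CMSPluginBase):
#       model = CMSPlugin
#       name = "Hello"
#       render_template = "hello_plugin.html"
#
#       def render(self, context, instance, placeholder):
#           context = super(HelloPlugin, self).render(context, instance, placeholder)
#           context['greeting'] = 'Hello'
#           return context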
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uploads or downloads files between Google Cloud Storage and the filesystem.
The file is transferred in CHUNKSIZE pieces, and the process can resume in
case of failure.
Usage examples:
$ python chunked_transfer.py gs://bucket/object ~/Desktop/filename
$ python chunked_transfer.py ~/Desktop/filename gs://bucket/object
"""
import httplib2
import os
import random
import sys
import time
from apiclient.discovery import build as discovery_build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from apiclient.http import MediaIoBaseDownload
from json import dumps as json_dumps
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage as CredentialStorage
from oauth2client.tools import run_flow as run_oauth2
# CLIENT_SECRETS_FILE, name of a file containing the OAuth 2.0 information for
# this application, including client_id and client_secret. You can acquire an
# ID/secret pair from the API Access tab on the Google APIs Console
# <http://code.google.com/apis/console#access>
# For more information about using OAuth2 to access Google APIs, please visit:
# <https://developers.google.com/accounts/docs/OAuth2>
CLIENT_SECRETS_FILE = 'client_secrets.json'
# File where we will store authentication credentials after acquiring them.
CREDENTIALS_FILE = 'credentials.json'
# Message describing how to use the script.
USAGE = """
Usage examples:
$ python chunked_transfer.py gs://bucket/object ~/Desktop/filename
$ python chunked_transfer.py ~/Desktop/filename gs://bucket/object
"""
RW_SCOPE = 'https://www.googleapis.com/auth/devstorage.read_write'
RO_SCOPE = 'https://www.googleapis.com/auth/devstorage.read_only'
# Helpful message to display if the CLIENT_SECRETS_FILE is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console
<https://code.google.com/apis/console#access>.
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
# Retry transport and file IO errors.
RETRYABLE_ERRORS = (httplib2.HttpLib2Error, IOError)
# Number of times to retry failed downloads.
NUM_RETRIES = 5
# Number of bytes to send/receive in each request.
CHUNKSIZE = 2 * 1024 * 1024
# Mimetype to use if one can't be guessed from the file extension.
DEFAULT_MIMETYPE = 'application/octet-stream'
def get_authenticated_service(scope):
print 'Authenticating...'
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=scope,
message=MISSING_CLIENT_SECRETS_MESSAGE)
credential_storage = CredentialStorage(CREDENTIALS_FILE)
credentials = credential_storage.get()
if credentials is None or credentials.invalid:
credentials = run_oauth2(flow, credential_storage)
print 'Constructing Google Cloud Storage service...'
http = credentials.authorize(httplib2.Http())
return discovery_build('storage', 'v1', http=http)
def handle_progressless_iter(error, progressless_iters):
if progressless_iters > NUM_RETRIES:
print 'Failed to make progress for too many consecutive iterations.'
raise error
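# Truncated exponential backoff with random jitter: after the n-th
# consecutive unproductive iteration, sleep a random amount of time
# bounded by 2**n seconds before retrying.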
sleeptime = random.random() * (2**progressless_iters)
print ('Caught exception (%s). Sleeping for %s seconds before retry #%d.'
% (str(error), sleeptime, progressless_iters))
time.sleep(sleeptime)
def print_with_carriage_return(s):
sys.stdout.write('\r' + s)
sys.stdout.flush()
def upload(argv):
filename = argv[1]
bucket_name, object_name = argv[2][5:].split('/', 1)
assert bucket_name and object_name
service = get_authenticated_service(RW_SCOPE)
print 'Building upload request...'
media = MediaFileUpload(filename, chunksize=CHUNKSIZE, resumable=True)
if not media.mimetype():
media = MediaFileUpload(filename, DEFAULT_MIMETYPE, resumable=True)
request = service.objects().insert(bucket=bucket_name, name=object_name,
media_body=media)
print 'Uploading file: %s to bucket: %s object: %s ' % (filename, bucket_name,
object_name)
progressless_iters = 0
response = None
while response is None:
error = None
try:
progress, response = request.next_chunk()
if progress:
print_with_carriage_return('Upload %d%%' % (100 * progress.progress()))
except HttpError, err:
error = err
if err.resp.status < 500:
raise
except RETRYABLE_ERRORS, err:
error = err
if error:
progressless_iters += 1
handle_progressless_iter(error, progressless_iters)
else:
progressless_iters = 0
print '\nUpload complete!'
print 'Uploaded Object:'
print json_dumps(response, indent=2)
def download(argv):
bucket_name, object_name = argv[1][5:].split('/', 1)
filename = argv[2]
assert bucket_name and object_name
service = get_authenticated_service(RO_SCOPE)
print 'Building download request...'
f = file(filename, 'wb')
request = service.objects().get_media(bucket=bucket_name,
object=object_name)
media = MediaIoBaseDownload(f, request, chunksize=CHUNKSIZE)
print 'Downloading bucket: %s object: %s to file: %s' % (bucket_name,
object_name,
filename)
progressless_iters = 0
done = False
while not done:
error = None
try:
progress, done = media.next_chunk()
if progress:
print_with_carriage_return(
'Download %d%%.' % int(progress.progress() * 100))
except HttpError, err:
error = err
if err.resp.status < 500:
raise
except RETRYABLE_ERRORS, err:
error = err
if error:
progressless_iters += 1
handle_progressless_iter(error, progressless_iters)
else:
progressless_iters = 0
print '\nDownload complete!'
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'Too few arguments.'
print USAGE
sys.exit(1)
if sys.argv[2].startswith('gs://'):
upload(sys.argv)
elif sys.argv[1].startswith('gs://'):
download(sys.argv)
else:
print USAGE
|
|
#!/usr/bin/env python
"""Creates the pin file for the Teensy."""
from __future__ import print_function
import argparse
import sys
import csv
SUPPORTED_FN = {
'FTM' : ['CH0', 'CH1', 'CH2', 'CH3', 'CH4', 'CH5', 'CH6', 'CH7',
'QD_PHA', 'QD_PHB'],
'I2C' : ['SDA', 'SCL'],
'UART' : ['RX', 'TX', 'CTS', 'RTS'],
'SPI' : ['NSS', 'SCK', 'MISO', 'MOSI']
}
def parse_port_pin(name_str):
"""Parses a string and returns a (port-num, pin-num) tuple."""
if len(name_str) < 4:
raise ValueError("Expecting pin name to be at least 4 charcters.")
if name_str[0:2] != 'PT':
raise ValueError("Expecting pin name to start with PT")
if name_str[2] not in ('A', 'B', 'C', 'D', 'E', 'Z'):
raise ValueError("Expecting pin port to be between A and E or Z")
port = ord(name_str[2]) - ord('A')
pin_str = name_str[3:].split('/')[0]
if not pin_str.isdigit():
raise ValueError("Expecting numeric pin number.")
return (port, int(pin_str))
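# Illustrative examples (assuming the CSV uses pin names like these):
#   parse_port_pin('PTC5')     -> (2, 5)
#   parse_port_pin('PTB17/XYZ') -> (1, 17)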
def split_name_num(name_num):
num = None
for num_idx in range(len(name_num) - 1, -1, -1):
if not name_num[num_idx].isdigit():
name = name_num[0:num_idx + 1]
num_str = name_num[num_idx + 1:]
if len(num_str) > 0:
num = int(num_str)
break
return name, num
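# For example, split_name_num('UART4') returns ('UART', 4) and
# split_name_num('CTS') returns ('CTS', None).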
class AlternateFunction(object):
"""Holds the information associated with a pins alternate function."""
def __init__(self, idx, af_str):
self.idx = idx
self.af_str = af_str
self.func = ''
self.fn_num = None
self.pin_type = ''
self.supported = False
af_words = af_str.split('_', 1)
self.func, self.fn_num = split_name_num(af_words[0])
if len(af_words) > 1:
self.pin_type = af_words[1]
if self.func in SUPPORTED_FN:
pin_types = SUPPORTED_FN[self.func]
if self.pin_type in pin_types:
self.supported = True
def is_supported(self):
return self.supported
def ptr(self):
"""Returns the numbered function (i.e. USART6) for this AF."""
if self.fn_num is None:
return self.func
return '{:s}{:d}'.format(self.func, self.fn_num)
def mux_name(self):
return 'AF{:d}_{:s}'.format(self.idx, self.ptr())
def print(self):
"""Prints the C representation of this AF."""
if self.supported:
print(' AF', end='')
else:
print(' //', end='')
fn_num = self.fn_num
if fn_num is None:
fn_num = 0
print('({:2d}, {:8s}, {:2d}, {:10s}, {:8s}), // {:s}'.format(self.idx,
self.func, fn_num, self.pin_type, self.ptr(), self.af_str))
def qstr_list(self):
return [self.mux_name()]
class Pin(object):
"""Holds the information associated with a pin."""
def __init__(self, port, pin):
self.port = port
self.pin = pin
self.alt_fn = []
self.alt_fn_count = 0
self.adc_num = 0
self.adc_channel = 0
self.board_pin = False
def port_letter(self):
return chr(self.port + ord('A'))
def cpu_pin_name(self):
return '{:s}{:d}'.format(self.port_letter(), self.pin)
def is_board_pin(self):
return self.board_pin
def set_is_board_pin(self):
self.board_pin = True
def parse_adc(self, adc_str):
if (adc_str[:3] != 'ADC'):
return
(adc,channel) = adc_str.split('_')
for idx in range(3, len(adc)):
adc_num = int(adc[idx]) # 1, 2, or 3
self.adc_num |= (1 << (adc_num - 1))
self.adc_channel = int(channel[2:])
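# Illustrative example (assuming ADC entries look like 'ADC1_SE14'):
# parse_adc('ADC1_SE14') sets bit 0 of adc_num and adc_channel to 14.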
def parse_af(self, af_idx, af_strs_in):
if len(af_strs_in) == 0:
return
# If there is a slash, then the slash separates 2 aliases for the
# same alternate function.
af_strs = af_strs_in.split('/')
for af_str in af_strs:
alt_fn = AlternateFunction(af_idx, af_str)
self.alt_fn.append(alt_fn)
if alt_fn.is_supported():
self.alt_fn_count += 1
def alt_fn_name(self, null_if_0=False):
if null_if_0 and self.alt_fn_count == 0:
return 'NULL'
return 'pin_{:s}_af'.format(self.cpu_pin_name())
def adc_num_str(self):
result = ''
for adc_num in range(1, 4):
if self.adc_num & (1 << (adc_num - 1)):
if len(result) > 0:
result += ' | '
result += 'PIN_ADC'
result += chr(ord('0') + adc_num)
if len(result) == 0:
result = '0'
return result
def print(self):
if self.alt_fn_count == 0:
print("// ", end='')
print('const pin_af_obj_t {:s}[] = {{'.format(self.alt_fn_name()))
for alt_fn in self.alt_fn:
alt_fn.print()
if self.alt_fn_count == 0:
print("// ", end='')
print('};')
print('')
print('const pin_obj_t pin_{:s} = PIN({:s}, {:d}, {:d}, {:s}, {:s}, {:d});'.format(
self.cpu_pin_name(), self.port_letter(), self.pin,
self.alt_fn_count, self.alt_fn_name(null_if_0=True),
self.adc_num_str(), self.adc_channel))
print('')
def print_header(self, hdr_file):
hdr_file.write('extern const pin_obj_t pin_{:s};\n'.
format(self.cpu_pin_name()))
if self.alt_fn_count > 0:
hdr_file.write('extern const pin_af_obj_t pin_{:s}_af[];\n'.
format(self.cpu_pin_name()))
def qstr_list(self):
result = []
for alt_fn in self.alt_fn:
if alt_fn.is_supported():
result += alt_fn.qstr_list()
return result
class NamedPin(object):
def __init__(self, name, pin):
self._name = name
self._pin = pin
def pin(self):
return self._pin
def name(self):
return self._name
class Pins(object):
def __init__(self):
self.cpu_pins = [] # list of NamedPin objects
self.board_pins = [] # list of NamedPin objects
def find_pin(self, port_num, pin_num):
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.port == port_num and pin.pin == pin_num:
return pin
def parse_af_file(self, filename, pinname_col, af_col):
with open(filename, 'r') as csvfile:
rows = csv.reader(csvfile)
for row in rows:
try:
(port_num, pin_num) = parse_port_pin(row[pinname_col])
except:
continue
pin = Pin(port_num, pin_num)
for af_idx in range(af_col, len(row)):
pin.parse_af(af_idx - af_col, row[af_idx])
self.cpu_pins.append(NamedPin(pin.cpu_pin_name(), pin))
def parse_board_file(self, filename):
with open(filename, 'r') as csvfile:
rows = csv.reader(csvfile)
for row in rows:
try:
(port_num, pin_num) = parse_port_pin(row[1])
except:
continue
pin = self.find_pin(port_num, pin_num)
if pin:
pin.set_is_board_pin()
self.board_pins.append(NamedPin(row[0], pin))
def print_named(self, label, named_pins):
print('STATIC const mp_map_elem_t pin_{:s}_pins_locals_dict_table[] = {{'.format(label))
for named_pin in named_pins:
pin = named_pin.pin()
if pin.is_board_pin():
print(' {{ MP_OBJ_NEW_QSTR(MP_QSTR_{:s}), (mp_obj_t)&pin_{:s} }},'.format(named_pin.name(), pin.cpu_pin_name()))
print('};')
print('MP_DEFINE_CONST_DICT(pin_{:s}_pins_locals_dict, pin_{:s}_pins_locals_dict_table);'.format(label, label))
def print(self):
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
pin.print()
self.print_named('cpu', self.cpu_pins)
print('')
self.print_named('board', self.board_pins)
def print_adc(self, adc_num):
print('')
print('const pin_obj_t * const pin_adc{:d}[] = {{'.format(adc_num))
for channel in range(16):
adc_found = False
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if (pin.is_board_pin() and
(pin.adc_num & (1 << (adc_num - 1))) and (pin.adc_channel == channel)):
print(' &pin_{:s}, // {:d}'.format(pin.cpu_pin_name(), channel))
adc_found = True
break
if not adc_found:
print(' NULL, // {:d}'.format(channel))
print('};')
def print_header(self, hdr_filename):
with open(hdr_filename, 'wt') as hdr_file:
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
pin.print_header(hdr_file)
hdr_file.write('extern const pin_obj_t * const pin_adc1[];\n')
hdr_file.write('extern const pin_obj_t * const pin_adc2[];\n')
hdr_file.write('extern const pin_obj_t * const pin_adc3[];\n')
def print_qstr(self, qstr_filename):
with open(qstr_filename, 'wt') as qstr_file:
qstr_set = set([])
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
qstr_set |= set(pin.qstr_list())
qstr_set |= set([named_pin.name()])
for named_pin in self.board_pins:
qstr_set |= set([named_pin.name()])
for qstr in sorted(qstr_set):
print('Q({})'.format(qstr), file=qstr_file)
def print_af_hdr(self, af_const_filename):
with open(af_const_filename, 'wt') as af_const_file:
af_hdr_set = set([])
mux_name_width = 0
for named_pin in self.cpu_pins:
pin = named_pin.pin()
if pin.is_board_pin():
for af in pin.alt_fn:
if af.is_supported():
mux_name = af.mux_name()
af_hdr_set |= set([mux_name])
if len(mux_name) > mux_name_width:
mux_name_width = len(mux_name)
for mux_name in sorted(af_hdr_set):
key = 'MP_OBJ_NEW_QSTR(MP_QSTR_{}),'.format(mux_name)
val = 'MP_OBJ_NEW_SMALL_INT(GPIO_{})'.format(mux_name)
print(' { %-*s %s },' % (mux_name_width + 26, key, val),
file=af_const_file)
def print_af_py(self, af_py_filename):
with open(af_py_filename, 'wt') as af_py_file:
print('PINS_AF = (', file=af_py_file)
for named_pin in self.board_pins:
print(" ('%s', " % named_pin.name(), end='', file=af_py_file)
for af in named_pin.pin().alt_fn:
if af.is_supported():
print("(%d, '%s'), " % (af.idx, af.af_str), end='', file=af_py_file)
print('),', file=af_py_file)
print(')', file=af_py_file)
def main():
parser = argparse.ArgumentParser(
prog="make-pins.py",
usage="%(prog)s [options] [command]",
description="Generate board specific pin file"
)
parser.add_argument(
"-a", "--af",
dest="af_filename",
help="Specifies the alternate function file for the chip",
default="mk20dx256_af.csv"
)
parser.add_argument(
"--af-const",
dest="af_const_filename",
help="Specifies header file for alternate function constants.",
default="build/pins_af_const.h"
)
parser.add_argument(
"--af-py",
dest="af_py_filename",
help="Specifies the filename for the python alternate function mappings.",
default="build/pins_af.py"
)
parser.add_argument(
"-b", "--board",
dest="board_filename",
help="Specifies the board file",
)
parser.add_argument(
"-p", "--prefix",
dest="prefix_filename",
help="Specifies beginning portion of generated pins file",
default="mk20dx256_prefix.c"
)
parser.add_argument(
"-q", "--qstr",
dest="qstr_filename",
help="Specifies name of generated qstr header file",
default="build/pins_qstr.h"
)
parser.add_argument(
"-r", "--hdr",
dest="hdr_filename",
help="Specifies name of generated pin header file",
default="build/pins.h"
)
args = parser.parse_args(sys.argv[1:])
pins = Pins()
print('// This file was automatically generated by make-pins.py')
print('//')
if args.af_filename:
print('// --af {:s}'.format(args.af_filename))
pins.parse_af_file(args.af_filename, 4, 3)
if args.board_filename:
print('// --board {:s}'.format(args.board_filename))
pins.parse_board_file(args.board_filename)
if args.prefix_filename:
print('// --prefix {:s}'.format(args.prefix_filename))
print('')
with open(args.prefix_filename, 'r') as prefix_file:
print(prefix_file.read())
pins.print()
pins.print_adc(1)
pins.print_adc(2)
pins.print_adc(3)
pins.print_header(args.hdr_filename)
pins.print_qstr(args.qstr_filename)
pins.print_af_hdr(args.af_const_filename)
pins.print_af_py(args.af_py_filename)
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2019 Nordic Semiconductor ASA
# SPDX-License-Identifier: BSD-3-Clause
import contextlib
import io
from logging import WARNING
import os
from pathlib import Path
import pytest
from devicetree import edtlib
# Test suite for edtlib.py.
#
# Run it using pytest (https://docs.pytest.org/en/stable/usage.html):
#
# $ pytest testedtlib.py
#
# See the comment near the top of testdtlib.py for additional pytest advice.
#
# test.dts is the main test file. test-bindings/ and test-bindings-2/ have
# bindings. The tests mostly use string comparisons via the various __repr__()
# methods.
HERE = os.path.dirname(__file__)
@contextlib.contextmanager
def from_here():
# Convenience hack to minimize diff from zephyr.
cwd = os.getcwd()
try:
os.chdir(HERE)
yield
finally:
os.chdir(cwd)
def hpath(filename):
'''Convert 'filename' to the host path syntax.'''
return os.fspath(Path(filename))
def test_warnings(caplog):
'''Tests for situations that should cause warnings.'''
with from_here(): edtlib.EDT("test.dts", ["test-bindings"])
enums_hpath = hpath('test-bindings/enums.yaml')
expected_warnings = [
f"'oldprop' is marked as deprecated in 'properties:' in {hpath('test-bindings/deprecated.yaml')} for node /test-deprecated.",
"unit address and first address in 'reg' (0x1) don't match for /reg-zero-size-cells/node",
"unit address and first address in 'reg' (0x5) don't match for /reg-ranges/parent/node",
"unit address and first address in 'reg' (0x30000000200000001) don't match for /reg-nested-ranges/grandparent/parent/node",
f"compatible 'enums' in binding '{enums_hpath}' has non-tokenizable enum for property 'string-enum': 'foo bar', 'foo_bar'",
f"compatible 'enums' in binding '{enums_hpath}' has enum for property 'tokenizable-lower-enum' that is only tokenizable in lowercase: 'bar', 'BAR'",
]
assert caplog.record_tuples == [('devicetree.edtlib', WARNING, warning_message)
for warning_message in expected_warnings]
def test_interrupts():
'''Tests for the interrupts property.'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
filenames = {i: hpath(f'test-bindings/interrupt-{i}-cell.yaml')
for i in range(1, 4)}
assert str(edt.get_node("/interrupt-parent-test/node").interrupts) == \
f"[<ControllerAndData, name: foo, controller: <Node /interrupt-parent-test/controller in 'test.dts', binding {filenames[3]}>, data: OrderedDict([('one', 1), ('two', 2), ('three', 3)])>, <ControllerAndData, name: bar, controller: <Node /interrupt-parent-test/controller in 'test.dts', binding {filenames[3]}>, data: OrderedDict([('one', 4), ('two', 5), ('three', 6)])>]"
assert str(edt.get_node("/interrupts-extended-test/node").interrupts) == \
f"[<ControllerAndData, controller: <Node /interrupts-extended-test/controller-0 in 'test.dts', binding {filenames[1]}>, data: OrderedDict([('one', 1)])>, <ControllerAndData, controller: <Node /interrupts-extended-test/controller-1 in 'test.dts', binding {filenames[2]}>, data: OrderedDict([('one', 2), ('two', 3)])>, <ControllerAndData, controller: <Node /interrupts-extended-test/controller-2 in 'test.dts', binding {filenames[3]}>, data: OrderedDict([('one', 4), ('two', 5), ('three', 6)])>]"
assert str(edt.get_node("/interrupt-map-test/node@0").interrupts) == \
f"[<ControllerAndData, controller: <Node /interrupt-map-test/controller-0 in 'test.dts', binding {filenames[1]}>, data: OrderedDict([('one', 0)])>, <ControllerAndData, controller: <Node /interrupt-map-test/controller-1 in 'test.dts', binding {filenames[2]}>, data: OrderedDict([('one', 0), ('two', 1)])>, <ControllerAndData, controller: <Node /interrupt-map-test/controller-2 in 'test.dts', binding {filenames[3]}>, data: OrderedDict([('one', 0), ('two', 0), ('three', 2)])>]"
assert str(edt.get_node("/interrupt-map-test/node@1").interrupts) == \
f"[<ControllerAndData, controller: <Node /interrupt-map-test/controller-0 in 'test.dts', binding {filenames[1]}>, data: OrderedDict([('one', 3)])>, <ControllerAndData, controller: <Node /interrupt-map-test/controller-1 in 'test.dts', binding {filenames[2]}>, data: OrderedDict([('one', 0), ('two', 4)])>, <ControllerAndData, controller: <Node /interrupt-map-test/controller-2 in 'test.dts', binding {filenames[3]}>, data: OrderedDict([('one', 0), ('two', 0), ('three', 5)])>]"
assert str(edt.get_node("/interrupt-map-bitops-test/node@70000000E").interrupts) == \
f"[<ControllerAndData, controller: <Node /interrupt-map-bitops-test/controller in 'test.dts', binding {filenames[2]}>, data: OrderedDict([('one', 3), ('two', 2)])>]"
def test_reg():
'''Tests for the regs property'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
assert str(edt.get_node("/reg-zero-address-cells/node").regs) == \
"[<Register, size: 0x1>, <Register, size: 0x2>]"
assert str(edt.get_node("/reg-zero-size-cells/node").regs) == \
"[<Register, addr: 0x1>, <Register, addr: 0x2>]"
assert str(edt.get_node("/reg-ranges/parent/node").regs) == \
"[<Register, addr: 0x5, size: 0x1>, <Register, addr: 0xe0000000f, size: 0x1>, <Register, addr: 0xc0000000e, size: 0x1>, <Register, addr: 0xc0000000d, size: 0x1>, <Register, addr: 0xa0000000b, size: 0x1>, <Register, addr: 0x0, size: 0x1>]"
assert str(edt.get_node("/reg-nested-ranges/grandparent/parent/node").regs) == \
"[<Register, addr: 0x30000000200000001, size: 0x1>]"
def test_pinctrl():
'''Test 'pinctrl-<index>'.'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
assert str(edt.get_node("/pinctrl/dev").pinctrls) == \
"[<PinCtrl, name: zero, configuration nodes: []>, <PinCtrl, name: one, configuration nodes: [<Node /pinctrl/pincontroller/state-1 in 'test.dts', no binding>]>, <PinCtrl, name: two, configuration nodes: [<Node /pinctrl/pincontroller/state-1 in 'test.dts', no binding>, <Node /pinctrl/pincontroller/state-2 in 'test.dts', no binding>]>]"
def test_hierarchy():
'''Test Node.parent and Node.children'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
assert edt.get_node("/").parent is None
assert str(edt.get_node("/parent/child-1").parent) == \
"<Node /parent in 'test.dts', no binding>"
assert str(edt.get_node("/parent/child-2/grandchild").parent) == \
"<Node /parent/child-2 in 'test.dts', no binding>"
assert str(edt.get_node("/parent").children) == \
"OrderedDict([('child-1', <Node /parent/child-1 in 'test.dts', no binding>), ('child-2', <Node /parent/child-2 in 'test.dts', no binding>)])"
assert edt.get_node("/parent/child-1").children == {}
def test_include():
'''Test 'include:' and the legacy 'inherits: !include ...' in bindings'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
assert str(edt.get_node("/binding-include").description) == \
"Parent binding"
assert str(edt.get_node("/binding-include").props) == \
"OrderedDict([('foo', <Property, name: foo, type: int, value: 0>), ('bar', <Property, name: bar, type: int, value: 1>), ('baz', <Property, name: baz, type: int, value: 2>), ('qaz', <Property, name: qaz, type: int, value: 3>)])"
def test_include_filters():
'''Test property-allowlist and property-blocklist in an include.'''
fname2path = {'include.yaml': 'test-bindings-include/include.yaml',
'include-2.yaml': 'test-bindings-include/include-2.yaml'}
with pytest.raises(edtlib.EDTError) as e:
with from_here():
edtlib.Binding("test-bindings-include/allow-and-blocklist.yaml", fname2path)
assert ("should not specify both 'property-allowlist:' and 'property-blocklist:'"
in str(e.value))
with pytest.raises(edtlib.EDTError) as e:
with from_here():
edtlib.Binding("test-bindings-include/allow-and-blocklist-child.yaml", fname2path)
assert ("should not specify both 'property-allowlist:' and 'property-blocklist:'"
in str(e.value))
with pytest.raises(edtlib.EDTError) as e:
with from_here():
edtlib.Binding("test-bindings-include/allow-not-list.yaml", fname2path)
value_str = str(e.value)
assert value_str.startswith("'property-allowlist' value")
assert value_str.endswith("should be a list")
with pytest.raises(edtlib.EDTError) as e:
with from_here():
edtlib.Binding("test-bindings-include/block-not-list.yaml", fname2path)
value_str = str(e.value)
assert value_str.startswith("'property-blocklist' value")
assert value_str.endswith("should be a list")
with pytest.raises(edtlib.EDTError) as e:
with from_here():
binding = edtlib.Binding("test-bindings-include/include-invalid-keys.yaml", fname2path)
value_str = str(e.value)
assert value_str.startswith(
"'include:' in test-bindings-include/include-invalid-keys.yaml should not have these "
"unexpected contents: ")
assert 'bad-key-1' in value_str
assert 'bad-key-2' in value_str
with pytest.raises(edtlib.EDTError) as e:
with from_here():
binding = edtlib.Binding("test-bindings-include/include-invalid-type.yaml", fname2path)
value_str = str(e.value)
assert value_str.startswith(
"'include:' in test-bindings-include/include-invalid-type.yaml "
"should be a string or list, but has type ")
with pytest.raises(edtlib.EDTError) as e:
with from_here():
binding = edtlib.Binding("test-bindings-include/include-no-name.yaml", fname2path)
value_str = str(e.value)
assert value_str.startswith("'include:' element")
assert value_str.endswith(
"in test-bindings-include/include-no-name.yaml should have a 'name' key")
with from_here():
binding = edtlib.Binding("test-bindings-include/allowlist.yaml", fname2path)
assert set(binding.prop2specs.keys()) == {'x'} # 'x' is allowed
binding = edtlib.Binding("test-bindings-include/empty-allowlist.yaml", fname2path)
assert set(binding.prop2specs.keys()) == set() # nothing is allowed
binding = edtlib.Binding("test-bindings-include/blocklist.yaml", fname2path)
assert set(binding.prop2specs.keys()) == {'y', 'z'} # 'x' is blocked
binding = edtlib.Binding("test-bindings-include/empty-blocklist.yaml", fname2path)
assert set(binding.prop2specs.keys()) == {'x', 'y', 'z'} # nothing is blocked
binding = edtlib.Binding("test-bindings-include/intermixed.yaml", fname2path)
assert set(binding.prop2specs.keys()) == {'x', 'a'}
binding = edtlib.Binding("test-bindings-include/include-no-list.yaml", fname2path)
assert set(binding.prop2specs.keys()) == {'x', 'y', 'z'}
binding = edtlib.Binding("test-bindings-include/filter-child-bindings.yaml", fname2path)
child = binding.child_binding
grandchild = child.child_binding
assert set(binding.prop2specs.keys()) == {'x'}
assert set(child.prop2specs.keys()) == {'child-prop-2'}
assert set(grandchild.prop2specs.keys()) == {'grandchild-prop-1'}
def test_bus():
'''Test 'bus:' and 'on-bus:' in bindings'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
assert edt.get_node("/buses/foo-bus").bus == "foo"
# foo-bus does not itself appear on a bus
assert edt.get_node("/buses/foo-bus").on_bus is None
assert edt.get_node("/buses/foo-bus").bus_node is None
# foo-bus/node1 is not a bus node...
assert edt.get_node("/buses/foo-bus/node1").bus is None
# ...but is on a bus
assert edt.get_node("/buses/foo-bus/node1").on_bus == "foo"
assert edt.get_node("/buses/foo-bus/node1").bus_node.path == \
"/buses/foo-bus"
# foo-bus/node2 is not a bus node...
assert edt.get_node("/buses/foo-bus/node2").bus is None
# ...but is on a bus
assert edt.get_node("/buses/foo-bus/node2").on_bus == "foo"
# no-bus-node is not a bus node...
assert edt.get_node("/buses/no-bus-node").bus is None
# ... and is not on a bus
assert edt.get_node("/buses/no-bus-node").on_bus is None
# Same compatible string, but different bindings from being on different
# buses
assert str(edt.get_node("/buses/foo-bus/node1").binding_path) == \
hpath("test-bindings/device-on-foo-bus.yaml")
assert str(edt.get_node("/buses/foo-bus/node2").binding_path) == \
hpath("test-bindings/device-on-any-bus.yaml")
assert str(edt.get_node("/buses/bar-bus/node").binding_path) == \
hpath("test-bindings/device-on-bar-bus.yaml")
assert str(edt.get_node("/buses/no-bus-node").binding_path) == \
hpath("test-bindings/device-on-any-bus.yaml")
# foo-bus/node/nested also appears on the foo-bus bus
assert edt.get_node("/buses/foo-bus/node1/nested").on_bus == "foo"
assert str(edt.get_node("/buses/foo-bus/node1/nested").binding_path) == \
hpath("test-bindings/device-on-foo-bus.yaml")
def test_child_binding():
'''Test 'child-binding:' in bindings'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
child1 = edt.get_node("/child-binding/child-1")
child2 = edt.get_node("/child-binding/child-2")
grandchild = edt.get_node("/child-binding/child-1/grandchild")
assert str(child1.binding_path) == hpath("test-bindings/child-binding.yaml")
assert str(child1.description) == "child node"
assert str(child1.props) == "OrderedDict([('child-prop', <Property, name: child-prop, type: int, value: 1>)])"
assert str(child2.binding_path) == hpath("test-bindings/child-binding.yaml")
assert str(child2.description) == "child node"
assert str(child2.props) == "OrderedDict([('child-prop', <Property, name: child-prop, type: int, value: 3>)])"
assert str(grandchild.binding_path) == hpath("test-bindings/child-binding.yaml")
assert str(grandchild.description) == "grandchild node"
assert str(grandchild.props) == "OrderedDict([('grandchild-prop', <Property, name: grandchild-prop, type: int, value: 2>)])"
with from_here():
binding_file = Path("test-bindings/child-binding.yaml").resolve()
top = edtlib.Binding(binding_file, {})
child = top.child_binding
assert Path(top.path) == binding_file
assert Path(child.path) == binding_file
assert top.compatible == 'top-binding'
assert child.compatible is None
with from_here():
binding_file = Path("test-bindings/child-binding-with-compat.yaml").resolve()
top = edtlib.Binding(binding_file, {})
child = top.child_binding
assert Path(top.path) == binding_file
assert Path(child.path) == binding_file
assert top.compatible == 'top-binding-with-compat'
assert child.compatible == 'child-compat'
def test_props():
'''Test Node.props (derived from DT and 'properties:' in the binding)'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
filenames = {i: hpath(f'test-bindings/phandle-array-controller-{i}.yaml')
for i in range(0, 4)}
assert str(edt.get_node("/props").props["int"]) == \
"<Property, name: int, type: int, value: 1>"
assert str(edt.get_node("/props").props["existent-boolean"]) == \
"<Property, name: existent-boolean, type: boolean, value: True>"
assert str(edt.get_node("/props").props["nonexistent-boolean"]) == \
"<Property, name: nonexistent-boolean, type: boolean, value: False>"
assert str(edt.get_node("/props").props["array"]) == \
"<Property, name: array, type: array, value: [1, 2, 3]>"
assert str(edt.get_node("/props").props["uint8-array"]) == \
r"<Property, name: uint8-array, type: uint8-array, value: b'\x124'>"
assert str(edt.get_node("/props").props["string"]) == \
"<Property, name: string, type: string, value: 'foo'>"
assert str(edt.get_node("/props").props["string-array"]) == \
"<Property, name: string-array, type: string-array, value: ['foo', 'bar', 'baz']>"
assert str(edt.get_node("/props").props["phandle-ref"]) == \
f"<Property, name: phandle-ref, type: phandle, value: <Node /ctrl-1 in 'test.dts', binding {filenames[1]}>>"
assert str(edt.get_node("/props").props["phandle-refs"]) == \
f"<Property, name: phandle-refs, type: phandles, value: [<Node /ctrl-1 in 'test.dts', binding {filenames[1]}>, <Node /ctrl-2 in 'test.dts', binding {filenames[2]}>]>"
assert str(edt.get_node("/props").props["phandle-array-foos"]) == \
f"<Property, name: phandle-array-foos, type: phandle-array, value: [<ControllerAndData, controller: <Node /ctrl-1 in 'test.dts', binding {filenames[1]}>, data: OrderedDict([('one', 1)])>, <ControllerAndData, controller: <Node /ctrl-2 in 'test.dts', binding {filenames[2]}>, data: OrderedDict([('one', 2), ('two', 3)])>]>"
assert str(edt.get_node("/props-2").props["phandle-array-foos"]) == \
("<Property, name: phandle-array-foos, type: phandle-array, value: ["
f"<ControllerAndData, name: a, controller: <Node /ctrl-0-1 in 'test.dts', binding {filenames[0]}>, data: OrderedDict()>, "
"None, "
f"<ControllerAndData, name: b, controller: <Node /ctrl-0-2 in 'test.dts', binding {filenames[0]}>, data: OrderedDict()>]>")
assert str(edt.get_node("/props").props["foo-gpios"]) == \
f"<Property, name: foo-gpios, type: phandle-array, value: [<ControllerAndData, controller: <Node /ctrl-1 in 'test.dts', binding {filenames[1]}>, data: OrderedDict([('gpio-one', 1)])>]>"
assert str(edt.get_node("/props").props["path"]) == \
f"<Property, name: path, type: path, value: <Node /ctrl-1 in 'test.dts', binding {filenames[1]}>>"
def test_nexus():
'''Test <prefix>-map via gpio-map (the most common case).'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
filename = hpath('test-bindings/gpio-dst.yaml')
assert str(edt.get_node("/gpio-map/source").props["foo-gpios"]) == \
f"<Property, name: foo-gpios, type: phandle-array, value: [<ControllerAndData, controller: <Node /gpio-map/destination in 'test.dts', binding {filename}>, data: OrderedDict([('val', 6)])>, <ControllerAndData, controller: <Node /gpio-map/destination in 'test.dts', binding {filename}>, data: OrderedDict([('val', 5)])>]>"
def test_prop_defaults():
'''Test property default values given in bindings'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
assert str(edt.get_node("/defaults").props) == \
r"OrderedDict([('int', <Property, name: int, type: int, value: 123>), ('array', <Property, name: array, type: array, value: [1, 2, 3]>), ('uint8-array', <Property, name: uint8-array, type: uint8-array, value: b'\x89\xab\xcd'>), ('string', <Property, name: string, type: string, value: 'hello'>), ('string-array', <Property, name: string-array, type: string-array, value: ['hello', 'there']>), ('default-not-used', <Property, name: default-not-used, type: int, value: 234>)])"
def test_prop_enums():
'''test properties with enum: in the binding'''
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"])
props = edt.get_node('/enums').props
int_enum = props['int-enum']
string_enum = props['string-enum']
tokenizable_enum = props['tokenizable-enum']
tokenizable_lower_enum = props['tokenizable-lower-enum']
no_enum = props['no-enum']
assert int_enum.val == 1
assert int_enum.enum_index == 0
assert not int_enum.spec.enum_tokenizable
assert not int_enum.spec.enum_upper_tokenizable
assert string_enum.val == 'foo_bar'
assert string_enum.enum_index == 1
assert not string_enum.spec.enum_tokenizable
assert not string_enum.spec.enum_upper_tokenizable
assert tokenizable_enum.val == '123 is ok'
assert tokenizable_enum.val_as_token == '123_is_ok'
assert tokenizable_enum.enum_index == 2
assert tokenizable_enum.spec.enum_tokenizable
assert tokenizable_enum.spec.enum_upper_tokenizable
assert tokenizable_lower_enum.val == 'bar'
assert tokenizable_lower_enum.val_as_token == 'bar'
assert tokenizable_lower_enum.enum_index == 0
assert tokenizable_lower_enum.spec.enum_tokenizable
assert not tokenizable_lower_enum.spec.enum_upper_tokenizable
assert no_enum.enum_index is None
assert not no_enum.spec.enum_tokenizable
assert not no_enum.spec.enum_upper_tokenizable
def test_binding_inference():
'''Test inferred bindings for special zephyr-specific nodes.'''
warnings = io.StringIO()
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"], warnings)
assert str(edt.get_node("/zephyr,user").props) == r"OrderedDict()"
with from_here():
edt = edtlib.EDT("test.dts", ["test-bindings"], warnings,
infer_binding_for_paths=["/zephyr,user"])
filenames = {i: hpath(f'test-bindings/phandle-array-controller-{i}.yaml')
for i in range(1, 3)}
assert str(edt.get_node("/zephyr,user").props) == \
rf"OrderedDict([('boolean', <Property, name: boolean, type: boolean, value: True>), ('bytes', <Property, name: bytes, type: uint8-array, value: b'\x81\x82\x83'>), ('number', <Property, name: number, type: int, value: 23>), ('numbers', <Property, name: numbers, type: array, value: [1, 2, 3]>), ('string', <Property, name: string, type: string, value: 'text'>), ('strings', <Property, name: strings, type: string-array, value: ['a', 'b', 'c']>), ('handle', <Property, name: handle, type: phandle, value: <Node /ctrl-1 in 'test.dts', binding {filenames[1]}>>), ('phandles', <Property, name: phandles, type: phandles, value: [<Node /ctrl-1 in 'test.dts', binding {filenames[1]}>, <Node /ctrl-2 in 'test.dts', binding {filenames[2]}>]>), ('phandle-array-foos', <Property, name: phandle-array-foos, type: phandle-array, value: [<ControllerAndData, controller: <Node /ctrl-2 in 'test.dts', binding {filenames[2]}>, data: OrderedDict([('one', 1), ('two', 2)])>]>)])"
def test_multi_bindings():
'''Test having multiple directories with bindings'''
with from_here():
edt = edtlib.EDT("test-multidir.dts", ["test-bindings", "test-bindings-2"])
assert str(edt.get_node("/in-dir-1").binding_path) == \
hpath("test-bindings/multidir.yaml")
assert str(edt.get_node("/in-dir-2").binding_path) == \
hpath("test-bindings-2/multidir.yaml")
def test_dependencies():
'''Test dependency relations'''
with from_here():
edt = edtlib.EDT("test-multidir.dts", ["test-bindings", "test-bindings-2"])
assert edt.get_node("/").dep_ordinal == 0
assert edt.get_node("/in-dir-1").dep_ordinal == 1
assert edt.get_node("/") in edt.get_node("/in-dir-1").depends_on
assert edt.get_node("/in-dir-1") in edt.get_node("/").required_by
def test_slice_errs(tmp_path):
'''Test error messages from the internal _slice() helper'''
dts_file = tmp_path / "error.dts"
verify_error("""
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <2>;
sub {
reg = <3>;
};
};
""",
dts_file,
f"'reg' property in <Node /sub in '{dts_file}'> has length 4, which is not evenly divisible by 12 (= 4*(<#address-cells> (= 1) + <#size-cells> (= 2))). Note that #*-cells properties come either from the parent node or from the controller (in the case of 'interrupts').")
verify_error("""
/dts-v1/;
/ {
sub {
interrupts = <1>;
interrupt-parent = < &{/controller} >;
};
controller {
interrupt-controller;
#interrupt-cells = <2>;
};
};
""",
dts_file,
f"'interrupts' property in <Node /sub in '{dts_file}'> has length 4, which is not evenly divisible by 8 (= 4*<#interrupt-cells>). Note that #*-cells properties come either from the parent node or from the controller (in the case of 'interrupts').")
verify_error("""
/dts-v1/;
/ {
#address-cells = <1>;
sub-1 {
#address-cells = <2>;
#size-cells = <3>;
ranges = <4 5>;
sub-2 {
reg = <1 2 3 4 5>;
};
};
};
""",
dts_file,
f"'ranges' property in <Node /sub-1 in '{dts_file}'> has length 8, which is not evenly divisible by 24 (= 4*(<#address-cells> (= 2) + <#address-cells for parent> (= 1) + <#size-cells> (= 3))). Note that #*-cells properties come either from the parent node or from the controller (in the case of 'interrupts').")
def verify_error(dts, dts_file, expected_err):
# Verifies that parsing a file 'dts_file' with the contents 'dts'
# (a string) raises an EDTError with the message 'expected_err'.
#
# The path 'dts_file' is written with the string 'dts' before the
# test is run.
with open(dts_file, "w", encoding="utf-8") as f:
f.write(dts)
f.flush() # Can't have unbuffered text IO, so flush() instead
with pytest.raises(edtlib.EDTError) as e:
edtlib.EDT(dts_file, [])
assert str(e.value) == expected_err
|
|
# Copyright 2013 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
from trove.common import cfg
from trove.common import exception
from trove.guestagent import volume
from trove.guestagent.datastore.experimental.cassandra import service
from trove.openstack.common import periodic_task
from trove.openstack.common import log as logging
from trove.common.i18n import _
from trove.guestagent import dbaas
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
def __init__(self):
self.appStatus = service.CassandraAppStatus()
self.app = service.CassandraApp(self.appStatus)
@periodic_task.periodic_task(ticks_between_runs=3)
def update_status(self, context):
"""Update the status of the Cassandra service."""
self.appStatus.update()
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
def restart(self, context):
self.app.restart()
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
mount_point = CONF.get(
'mysql' if not MANAGER else MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def start_db_with_conf_changes(self, context, config_contents):
self.app.start_db_with_conf_changes(config_contents)
def stop_db(self, context, do_not_start_on_reboot=False):
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def reset_configuration(self, context, configuration):
self.app.reset_configuration(configuration)
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
LOG.info(_("Setting status of instance to BUILDING."))
self.appStatus.begin_install()
LOG.debug("Installing cassandra.")
self.app.install_if_needed(packages)
self.app.init_storage_structure(mount_point)
if config_contents or device_path:
# Stop the db while we configure
# FIXME(amrith) Once the cassandra bug
# https://issues.apache.org/jira/browse/CASSANDRA-2356
# is fixed, this code may have to be revisited.
LOG.debug("Stopping database prior to changes.")
self.app.stop_db()
if config_contents:
LOG.debug("Processing configuration.")
self.app.write_config(config_contents)
self.app.make_host_reachable()
if device_path:
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
if os.path.exists(mount_point):
# rsync existing data
device.migrate_data(mount_point)
# mount the volume
device.mount(mount_point)
LOG.debug("Mounting new volume.")
LOG.debug("Restarting database after changes.")
self.app.start_db()
self.appStatus.end_install_or_restart()
LOG.info(_("Completed setup of Cassandra database instance."))
def change_passwords(self, context, users):
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
def update_attributes(self, context, username, hostname, user_attrs):
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
def create_database(self, context, databases):
raise exception.DatastoreOperationNotSupported(
operation='create_database', datastore=MANAGER)
def create_user(self, context, users):
raise exception.DatastoreOperationNotSupported(
operation='create_user', datastore=MANAGER)
def delete_database(self, context, database):
raise exception.DatastoreOperationNotSupported(
operation='delete_database', datastore=MANAGER)
def delete_user(self, context, user):
raise exception.DatastoreOperationNotSupported(
operation='delete_user', datastore=MANAGER)
def get_user(self, context, username, hostname):
raise exception.DatastoreOperationNotSupported(
operation='get_user', datastore=MANAGER)
def grant_access(self, context, username, hostname, databases):
raise exception.DatastoreOperationNotSupported(
operation='grant_access', datastore=MANAGER)
def revoke_access(self, context, username, hostname, database):
raise exception.DatastoreOperationNotSupported(
operation='revoke_access', datastore=MANAGER)
def list_access(self, context, username, hostname):
raise exception.DatastoreOperationNotSupported(
operation='list_access', datastore=MANAGER)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
raise exception.DatastoreOperationNotSupported(
operation='list_databases', datastore=MANAGER)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
raise exception.DatastoreOperationNotSupported(
operation='list_users', datastore=MANAGER)
def enable_root(self, context):
raise exception.DatastoreOperationNotSupported(
operation='enable_root', datastore=MANAGER)
def is_root_enabled(self, context):
raise exception.DatastoreOperationNotSupported(
operation='is_root_enabled', datastore=MANAGER)
def _perform_restore(self, backup_info, context, restore_location, app):
raise exception.DatastoreOperationNotSupported(
operation='_perform_restore', datastore=MANAGER)
def create_backup(self, context, backup_info):
raise exception.DatastoreOperationNotSupported(
operation='create_backup', datastore=MANAGER)
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug("Mounted the device %s at the mount point %s." %
(device_path, mount_point))
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug("Unmounted the device %s from the mount point %s." %
(device_path, mount_point))
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug("Resized the filesystem at %s." % mount_point)
def update_overrides(self, context, overrides, remove=False):
LOG.debug("Updating overrides.")
raise exception.DatastoreOperationNotSupported(
operation='update_overrides', datastore=MANAGER)
def apply_overrides(self, context, overrides):
LOG.debug("Applying overrides.")
raise exception.DatastoreOperationNotSupported(
operation='apply_overrides', datastore=MANAGER)
def get_replication_snapshot(self, context, snapshot_info,
replica_source_config=None):
raise exception.DatastoreOperationNotSupported(
operation='get_replication_snapshot', datastore=MANAGER)
def attach_replication_slave(self, context, snapshot, slave_config):
LOG.debug("Attaching replication slave.")
raise exception.DatastoreOperationNotSupported(
operation='attach_replication_slave', datastore=MANAGER)
def detach_replica(self, context, for_failover=False):
raise exception.DatastoreOperationNotSupported(
operation='detach_replica', datastore=MANAGER)
def get_replica_context(self, context):
raise exception.DatastoreOperationNotSupported(
operation='get_replica_context', datastore=MANAGER)
def make_read_only(self, context, read_only):
raise exception.DatastoreOperationNotSupported(
operation='make_read_only', datastore=MANAGER)
def enable_as_master(self, context, replica_source_config):
raise exception.DatastoreOperationNotSupported(
operation='enable_as_master', datastore=MANAGER)
def get_txn_count(self):
raise exception.DatastoreOperationNotSupported(
operation='get_txn_count', datastore=MANAGER)
def get_latest_txn_id(self):
raise exception.DatastoreOperationNotSupported(
operation='get_latest_txn_id', datastore=MANAGER)
def wait_for_txn(self, txn):
raise exception.DatastoreOperationNotSupported(
operation='wait_for_txn', datastore=MANAGER)
def demote_replication_master(self, context):
LOG.debug("Demoting replication master.")
raise exception.DatastoreOperationNotSupported(
operation='demote_replication_master', datastore=MANAGER)
|
|
"""Tests for the SmartThings component init module."""
from http import HTTPStatus
from unittest.mock import Mock, patch
from uuid import uuid4
from aiohttp import ClientConnectionError, ClientResponseError
from pysmartthings import InstalledAppStatus, OAuthToken
import pytest
from homeassistant import config_entries
from homeassistant.components import cloud, smartthings
from homeassistant.components.smartthings.const import (
CONF_CLOUDHOOK_URL,
CONF_INSTALLED_APP_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DOMAIN,
EVENT_BUTTON,
PLATFORMS,
SIGNAL_SMARTTHINGS_UPDATE,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from tests.common import MockConfigEntry
async def test_migration_creates_new_flow(hass, smartthings_mock, config_entry):
"""Test migration deletes app and creates new flow."""
config_entry.version = 1
config_entry.add_to_hass(hass)
await smartthings.async_migrate_entry(hass, config_entry)
await hass.async_block_till_done()
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
assert not hass.config_entries.async_entries(DOMAIN)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]["handler"] == "smartthings"
assert flows[0]["context"] == {"source": config_entries.SOURCE_IMPORT}
async def test_unrecoverable_api_errors_create_new_flow(
hass, config_entry, smartthings_mock
):
"""
Test a new config flow is initiated when there are API errors.
401 (unauthorized): Occurs when the access token is no longer valid.
403 (forbidden/not found): Occurs when the app or installed app could
not be retrieved/found (likely deleted?)
"""
config_entry.add_to_hass(hass)
request_info = Mock(real_url="http://example.com")
smartthings_mock.app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTPStatus.UNAUTHORIZED
)
# Assert setup returns false
result = await smartthings.async_setup_entry(hass, config_entry)
assert not result
# Assert entry was removed and new flow created
await hass.async_block_till_done()
assert not hass.config_entries.async_entries(DOMAIN)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]["handler"] == "smartthings"
assert flows[0]["context"] == {"source": config_entries.SOURCE_IMPORT}
hass.config_entries.flow.async_abort(flows[0]["flow_id"])
async def test_recoverable_api_errors_raise_not_ready(
hass, config_entry, smartthings_mock
):
"""Test config entry not ready raised for recoverable API errors."""
config_entry.add_to_hass(hass)
request_info = Mock(real_url="http://example.com")
smartthings_mock.app.side_effect = ClientResponseError(
request_info=request_info,
history=None,
status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_api_errors_raise_not_ready(
hass, config_entry, app, installed_app, smartthings_mock
):
"""Test if scenes are unauthorized we continue to load platforms."""
config_entry.add_to_hass(hass)
request_info = Mock(real_url="http://example.com")
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.scenes.side_effect = ClientResponseError(
request_info=request_info,
history=None,
status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_connection_errors_raise_not_ready(hass, config_entry, smartthings_mock):
"""Test config entry not ready raised for connection errors."""
config_entry.add_to_hass(hass)
smartthings_mock.app.side_effect = ClientConnectionError()
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_base_url_no_longer_https_does_not_load(
hass, config_entry, app, smartthings_mock
):
"""Test base_url no longer valid creates a new flow."""
await async_process_ha_core_config(
hass,
{"external_url": "http://example.local:8123"},
)
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
# Assert setup returns false
result = await smartthings.async_setup_entry(hass, config_entry)
assert not result
async def test_unauthorized_installed_app_raises_not_ready(
hass, config_entry, app, installed_app, smartthings_mock
):
"""Test config entry not ready raised when the app isn't authorized."""
config_entry.add_to_hass(hass)
installed_app.installed_app_status = InstalledAppStatus.PENDING
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_unauthorized_loads_platforms(
hass,
config_entry,
app,
installed_app,
device,
smartthings_mock,
subscription_factory,
):
"""Test if scenes are unauthorized we continue to load platforms."""
config_entry.add_to_hass(hass)
request_info = Mock(real_url="http://example.com")
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTPStatus.FORBIDDEN
)
mock_token = Mock()
mock_token.access_token = str(uuid4())
mock_token.refresh_token = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [
subscription_factory(capability) for capability in device.capabilities
]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(PLATFORMS)
async def test_config_entry_loads_platforms(
hass,
config_entry,
app,
installed_app,
device,
smartthings_mock,
subscription_factory,
scene,
):
"""Test config entry loads properly and proxies to platforms."""
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.return_value = [scene]
mock_token = Mock()
mock_token.access_token = str(uuid4())
mock_token.refresh_token = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [
subscription_factory(capability) for capability in device.capabilities
]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(PLATFORMS)
async def test_config_entry_loads_unconnected_cloud(
hass,
config_entry,
app,
installed_app,
device,
smartthings_mock,
subscription_factory,
scene,
):
"""Test entry loads during startup when cloud isn't connected."""
config_entry.add_to_hass(hass)
hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.return_value = [scene]
mock_token = Mock()
mock_token.access_token = str(uuid4())
mock_token.refresh_token = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [
subscription_factory(capability) for capability in device.capabilities
]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
await hass.async_block_till_done()
assert forward_mock.call_count == len(PLATFORMS)
async def test_unload_entry(hass, config_entry):
"""Test entries are unloaded correctly."""
connect_disconnect = Mock()
smart_app = Mock()
smart_app.connect_event.return_value = connect_disconnect
broker = smartthings.DeviceBroker(hass, config_entry, Mock(), smart_app, [], [])
broker.connect()
hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] = broker
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as forward_mock:
assert await smartthings.async_unload_entry(hass, config_entry)
assert connect_disconnect.call_count == 1
assert config_entry.entry_id not in hass.data[DOMAIN][DATA_BROKERS]
# Assert platforms unloaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(PLATFORMS)
async def test_remove_entry(hass, config_entry, smartthings_mock):
"""Test that the installed app and app are removed up."""
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_cloudhook(hass, config_entry, smartthings_mock):
"""Test that the installed app, app, and cloudhook are removed up."""
hass.config.components.add("cloud")
# Arrange
config_entry.add_to_hass(hass)
hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
# Act
with patch.object(
cloud, "async_is_logged_in", return_value=True
) as mock_async_is_logged_in, patch.object(
cloud, "async_delete_cloudhook"
) as mock_async_delete_cloudhook:
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
assert mock_async_is_logged_in.call_count == 1
assert mock_async_delete_cloudhook.call_count == 1
async def test_remove_entry_app_in_use(hass, config_entry, smartthings_mock):
"""Test app is not removed if in use by another config entry."""
# Arrange
config_entry.add_to_hass(hass)
data = config_entry.data.copy()
data[CONF_INSTALLED_APP_ID] = str(uuid4())
entry2 = MockConfigEntry(version=2, domain=DOMAIN, data=data)
entry2.add_to_hass(hass)
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_already_deleted(hass, config_entry, smartthings_mock):
"""Test handles when the apps have already been removed."""
request_info = Mock(real_url="http://example.com")
# Arrange
smartthings_mock.delete_installed_app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTPStatus.FORBIDDEN
)
smartthings_mock.delete_app.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTPStatus.FORBIDDEN
)
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_installedapp_api_error(
hass, config_entry, smartthings_mock
):
"""Test raises exceptions removing the installed app."""
request_info = Mock(real_url="http://example.com")
# Arrange
smartthings_mock.delete_installed_app.side_effect = ClientResponseError(
request_info=request_info,
history=None,
status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
# Act
with pytest.raises(ClientResponseError):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_installedapp_unknown_error(
hass, config_entry, smartthings_mock
):
"""Test raises exceptions removing the installed app."""
# Arrange
smartthings_mock.delete_installed_app.side_effect = Exception
# Act
with pytest.raises(Exception):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_app_api_error(hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the app."""
# Arrange
request_info = Mock(real_url="http://example.com")
smartthings_mock.delete_app.side_effect = ClientResponseError(
request_info=request_info,
history=None,
status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
# Act
with pytest.raises(ClientResponseError):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_app_unknown_error(hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the app."""
# Arrange
smartthings_mock.delete_app.side_effect = Exception
# Act
with pytest.raises(Exception):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_broker_regenerates_token(hass, config_entry):
"""Test the device broker regenerates the refresh token."""
token = Mock(OAuthToken)
token.refresh_token = str(uuid4())
stored_action = None
def async_track_time_interval(hass, action, interval):
nonlocal stored_action
stored_action = action
with patch(
"homeassistant.components.smartthings.async_track_time_interval",
new=async_track_time_interval,
):
broker = smartthings.DeviceBroker(hass, config_entry, token, Mock(), [], [])
broker.connect()
assert stored_action
await stored_action(None) # pylint:disable=not-callable
assert token.refresh.call_count == 1
assert config_entry.data[CONF_REFRESH_TOKEN] == token.refresh_token
async def test_event_handler_dispatches_updated_devices(
hass, config_entry, device_factory, event_request_factory, event_factory
):
"""Test the event handler dispatches updated devices."""
devices = [
device_factory("Bedroom 1 Switch", ["switch"]),
device_factory("Bathroom 1", ["switch"]),
device_factory("Sensor", ["motionSensor"]),
device_factory("Lock", ["lock"]),
]
device_ids = [
devices[0].device_id,
devices[1].device_id,
devices[2].device_id,
devices[3].device_id,
]
event = event_factory(
devices[3].device_id,
capability="lock",
attribute="lock",
value="locked",
data={"codeId": "1"},
)
request = event_request_factory(device_ids=device_ids, events=[event])
config_entry.data = {
**config_entry.data,
CONF_INSTALLED_APP_ID: request.installed_app_id,
}
called = False
def signal(ids):
nonlocal called
called = True
assert device_ids == ids
async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), devices, [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert called
for device in devices:
assert device.status.values["Updated"] == "Value"
assert devices[3].status.attributes["lock"].value == "locked"
assert devices[3].status.attributes["lock"].data == {"codeId": "1"}
async def test_event_handler_ignores_other_installed_app(
hass, config_entry, device_factory, event_request_factory
):
"""Test the event handler dispatches updated devices."""
device = device_factory("Bedroom 1 Switch", ["switch"])
request = event_request_factory([device.device_id])
called = False
def signal(ids):
nonlocal called
called = True
async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), [device], [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert not called
async def test_event_handler_fires_button_events(
hass, config_entry, device_factory, event_factory, event_request_factory
):
"""Test the event handler fires button events."""
device = device_factory("Button 1", ["button"])
event = event_factory(
device.device_id, capability="button", attribute="button", value="pushed"
)
request = event_request_factory(events=[event])
config_entry.data = {
**config_entry.data,
CONF_INSTALLED_APP_ID: request.installed_app_id,
}
called = False
def handler(evt):
nonlocal called
called = True
assert evt.data == {
"component_id": "main",
"device_id": device.device_id,
"location_id": event.location_id,
"value": "pushed",
"name": device.label,
"data": None,
}
hass.bus.async_listen(EVENT_BUTTON, handler)
broker = smartthings.DeviceBroker(hass, config_entry, Mock(), Mock(), [device], [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert called
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import time
import math
import logging
import copy
import netaddr
import boto3
import namesgenerator
import paramiko
from scp import SCPClient
import requests
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--key_name', type=str, default="", help="required, key pair name")
parser.add_argument(
'--security_group_id',
type=str,
default="",
help="required, the security group id associated with your VPC")
parser.add_argument(
'--vpc_id',
type=str,
default="",
help="The VPC in which you wish to run test")
parser.add_argument(
'--subnet_id',
type=str,
default="",
help="The Subnet_id in which you wish to run test")
parser.add_argument(
'--pserver_instance_type',
type=str,
default="c5.2xlarge",
help="your pserver instance type, c5.2xlarge by default")
parser.add_argument(
'--trainer_instance_type',
type=str,
default="p2.8xlarge",
help="your trainer instance type, p2.8xlarge by default")
parser.add_argument(
'--task_name',
type=str,
default="",
help="the name you want to identify your job")
parser.add_argument(
'--pserver_image_id',
type=str,
default="ami-da2c1cbf",
help="ami id for system image, default one has nvidia-docker ready, \
use ami-1ae93962 for us-east-2")
parser.add_argument(
'--pserver_command',
type=str,
default="",
help="pserver start command, format example: python,vgg.py,batch_size:128,is_local:yes"
)
parser.add_argument(
'--trainer_image_id',
type=str,
default="ami-da2c1cbf",
help="ami id for system image, default one has nvidia-docker ready, \
use ami-1ae93962 for us-west-2")
parser.add_argument(
'--trainer_command',
type=str,
default="",
help="trainer start command, format example: python,vgg.py,batch_size:128,is_local:yes"
)
parser.add_argument(
'--availability_zone',
type=str,
default="us-east-2a",
help="aws zone id to place ec2 instances")
parser.add_argument(
'--trainer_count', type=int, default=1, help="Trainer count")
parser.add_argument(
'--pserver_count', type=int, default=1, help="Pserver count")
parser.add_argument(
'--action', type=str, default="create", help="create|cleanup|status")
parser.add_argument('--pem_path', type=str, help="private key file")
parser.add_argument(
'--pserver_port', type=str, default="5436", help="pserver port")
parser.add_argument(
'--docker_image', type=str, default="busybox", help="training docker image")
parser.add_argument(
'--master_server_port', type=int, default=5436, help="master server port")
parser.add_argument(
'--master_server_public_ip', type=str, help="master server public ip")
parser.add_argument(
'--master_docker_image',
type=str,
default="putcn/paddle_aws_master:latest",
help="master docker image id")
parser.add_argument(
'--no_clean_up',
type=str2bool,
default=False,
help="whether to clean up after training")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
ec2client = boto3.client('ec2')
def print_arguments():
print('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).iteritems()):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def create_subnet():
# if no vpc id provided, list vpcs
logging.info("start creating subnet")
if not args.vpc_id:
logging.info("no vpc provided, trying to find the default one")
vpcs_desc = ec2client.describe_vpcs(
Filters=[{
"Name": "isDefault",
"Values": ["true", ]
}], )
if len(vpcs_desc["Vpcs"]) == 0:
raise ValueError('No default VPC')
args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"]
vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
logging.info("default vpc fount with id %s and CidrBlock %s" %
(args.vpc_id, vpc_cidrBlock))
if not vpc_cidrBlock:
logging.info("trying to find cidrblock for vpc")
vpcs_desc = ec2client.describe_vpcs(
Filters=[{
"Name": "vpc-id",
"Values": [args.vpc_id, ],
}], )
if len(vpcs_desc["Vpcs"]) == 0:
raise ValueError('No VPC found')
vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
logging.info("cidrblock for vpc is %s" % vpc_cidrBlock)
# list subnets in vpc in order to create a new one
logging.info("trying to find ip blocks for new subnet")
subnets_desc = ec2client.describe_subnets(
Filters=[{
"Name": "vpc-id",
"Values": [args.vpc_id, ],
}], )
ips_taken = []
for subnet_dec in subnets_desc["Subnets"]:
ips_taken.append(subnet_dec["CidrBlock"])
ip_blocks_avaliable = netaddr.IPSet(
[vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken)
# adding 10 addresses as buffer
cidr_prefix = 32 - math.ceil(
math.log(args.pserver_count + args.trainer_count + 10, 2))
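# Illustrative arithmetic for the line above (values assumed, not from the
# original): with the defaults of 1 pserver + 1 trainer + 10 buffer = 12
# addresses, ceil(log2(12)) = 4, so cidr_prefix = 32 - 4 = 28 and the new
# subnet gets a /28 block (16 addresses).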
if cidr_prefix <= 16:
raise ValueError('Too many nodes to fit in current VPC')
subnet_cidr = None
for ipnetwork in ip_blocks_avaliable.iter_cidrs():
try:
subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next()
logging.info("subnet ip block found %s" % (subnet_cidr))
break
except Exception:
pass
if not subnet_cidr:
raise ValueError(
'No available subnet to fit required nodes in current VPC')
logging.info("trying to create subnet")
subnet_desc = ec2client.create_subnet(
CidrBlock=str(subnet_cidr),
VpcId=args.vpc_id,
AvailabilityZone=args.availability_zone)
subnet_id = subnet_desc["Subnet"]["SubnetId"]
subnet_waiter = ec2client.get_waiter('subnet_available')
# sleep for 1s before checking its state
time.sleep(1)
subnet_waiter.wait(SubnetIds=[subnet_id, ])
logging.info("subnet created")
logging.info("adding tags to newly created subnet")
ec2client.create_tags(
Resources=[subnet_id, ],
Tags=[{
"Key": "Task_name",
'Value': args.task_name
}])
return subnet_id
def run_instances(image_id, instance_type, count=1, role="MASTER", cmd=""):
response = ec2client.run_instances(
ImageId=image_id,
InstanceType=instance_type,
MaxCount=count,
MinCount=count,
UserData=cmd,
DryRun=False,
InstanceInitiatedShutdownBehavior="stop",
KeyName=args.key_name,
Placement={'AvailabilityZone': args.availability_zone},
NetworkInterfaces=[{
'DeviceIndex': 0,
'SubnetId': args.subnet_id,
"AssociatePublicIpAddress": True,
'Groups': args.security_group_ids
}],
TagSpecifications=[{
'ResourceType': "instance",
'Tags': [{
"Key": 'Task_name',
"Value": args.task_name + "_master"
}, {
"Key": 'Role',
"Value": role
}]
}])
instance_ids = []
for instance in response["Instances"]:
instance_ids.append(instance["InstanceId"])
if len(instance_ids) > 0:
logging.info(str(len(instance_ids)) + " instance(s) created")
else:
logging.info("no instance created")
#create waiter to make sure it's running
logging.info("waiting for instance to become accessible")
waiter = ec2client.get_waiter('instance_status_ok')
waiter.wait(
Filters=[{
"Name": "instance-status.status",
"Values": ["ok"]
}, {
"Name": "instance-status.reachability",
"Values": ["passed"]
}, {
"Name": "instance-state-name",
"Values": ["running"]
}],
InstanceIds=instance_ids)
instances_response = ec2client.describe_instances(InstanceIds=instance_ids)
return instances_response["Reservations"][0]["Instances"]
def generate_task_name():
return namesgenerator.get_random_name()
def init_args():
if not args.task_name:
args.task_name = generate_task_name()
logging.info("task name generated %s" % (args.task_name))
if not args.pem_path:
args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem"
if args.security_group_id:
args.security_group_ids = (args.security_group_id, )
def create():
init_args()
# create subnet
if not args.subnet_id:
args.subnet_id = create_subnet()
# create master node
master_instance_response = run_instances(
image_id="ami-7a05351f", instance_type="t2.nano")
logging.info("master server started")
args.master_server_public_ip = master_instance_response[0][
"PublicIpAddress"]
args.master_server_ip = master_instance_response[0]["PrivateIpAddress"]
logging.info("master server started, master_ip=%s, task_name=%s" %
(args.master_server_public_ip, args.task_name))
# cp config file and pems to master node
ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path)
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(
hostname=args.master_server_public_ip, username="ubuntu", pkey=ssh_key)
with SCPClient(ssh_client.get_transport()) as scp:
scp.put(os.path.expanduser("~") + "/" + ".aws",
recursive=True,
remote_path='/home/ubuntu/')
scp.put(args.pem_path,
remote_path='/home/ubuntu/' + args.key_name + ".pem")
logging.info("credentials and pem copied to master")
# set arguments and start docker
kick_off_cmd = "docker run -d -v /home/ubuntu/.aws:/root/.aws/"
kick_off_cmd += " -v /home/ubuntu/" + args.key_name + ".pem:/root/" + args.key_name + ".pem"
kick_off_cmd += " -v /home/ubuntu/logs/:/root/logs/"
kick_off_cmd += " -p " + str(args.master_server_port) + ":" + str(
args.master_server_port)
kick_off_cmd += " " + args.master_docker_image
args_to_pass = copy.copy(args)
args_to_pass.action = "serve"
del args_to_pass.pem_path
del args_to_pass.security_group_ids
del args_to_pass.master_docker_image
del args_to_pass.master_server_public_ip
for arg, value in sorted(vars(args_to_pass).iteritems()):
if value:
kick_off_cmd += ' --%s %s' % (arg, value)
logging.info(kick_off_cmd)
stdin, stdout, stderr = ssh_client.exec_command(command=kick_off_cmd)
return_code = stdout.channel.recv_exit_status()
logging.info(return_code)
if return_code != 0:
raise Exception("Error while kicking off master")
logging.info(
"master server finished init process, visit %s to check master log" %
(get_master_web_url("/status")))
def cleanup():
print requests.post(get_master_web_url("/cleanup")).text
def status():
print requests.post(get_master_web_url("/status")).text
def get_master_web_url(path):
return "http://" + args.master_server_public_ip + ":" + str(
args.master_server_port) + path
if __name__ == "__main__":
print_arguments()
if args.action == "create":
if not args.key_name or not args.security_group_id:
raise ValueError("key_name and security_group_id are required")
create()
elif args.action == "cleanup":
if not args.master_server_public_ip:
raise ValueError("master_server_public_ip is required")
cleanup()
elif args.action == "status":
if not args.master_server_public_ip:
raise ValueError("master_server_public_ip is required")
status()
|
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import struct
from .. import util
def parse_variable_set(data, kind):
'''Reads a set of Parsable objects prefixed with a VarInteger.
Any object can be used that supports parse(data), which returns
a tuple of (bytes_consumed, value).'''
(offset, count) = FormatTypeVarInteger.parse(data)
result = [ ]
index = 0
while index < count:
(item_length, item_obj) = kind.parse(data[offset:])
result.append(item_obj)
index += 1
offset += item_length
return (offset, result)
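# A hypothetical usage sketch (values are illustrative): if `data` starts with
# a one-byte count of 2 followed by two serialized InventoryVector records
# (36 bytes each: 4-byte object_type + 32-byte hash), then
# parse_variable_set(data, InventoryVector) returns (1 + 2 * 36, [vec1, vec2]).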
class ParameterException(Exception):
def __init__(self, name, value, kind = None):
if kind is None: kind = type(value)
Exception.__init__(self, "Bad Parameter: %s = %r (%s)" % (name, value, kind))
self._name = name
self._value = value
self._kind = kind
name = property(lambda s: s._name)
value = property(lambda s: s._value)
kind = property(lambda s: s._kind)
# This metaclass will convert all the (name, kind) pairs in properties into
# class properties and if the base class has a register(cls) method, call it.
class _AutoPopulateAndRegister(type):
def __init__(cls, name, bases, dct):
super(_AutoPopulateAndRegister, cls).__init__(name, bases, dct)
for (key, vt) in cls.properties:
def get_parameter(k):
return property(lambda s: s._properties[k])
setattr(cls, key, get_parameter(key))
cls._name = name
for base in bases:
if hasattr(base, 'register'):
if hasattr(base, 'do_not_register') and not base.do_not_register:
base.register(cls)
break
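# Behaviour sketch of the metaclass above (descriptive only): a subclass
# declaring, say, properties = [('version', FormatTypeNumber('I'))] gains a
# read-only `version` property backed by self._properties['version'], and the
# new class is handed to the first base that defines register() and whose
# do_not_register attribute is present and false.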
#import time
#profile = dict(count = 0)
class CompoundType(object):
properties = []
def __init__(self, *args, **kw):
keys = [k for (k, t) in self.properties]
# convert the positional arguments into keywords
params = dict(zip(keys, args))
# did we specify a parameter both positionally and as a keyword?
for k in kw:
if k in params:
raise TypeError('got multiple values for keyword argument %r' % k)
# do we have any unknown keywords?
keys = set(keys)
for k in kw:
if k not in keys:
raise TypeError('got an unexpected keyword argument %r' % k)
# add in the keyword arguments
params.update(kw)
# check for the correct number of properties
if len(params) != len(keys):
suffix = ''
if len(keys) != 1: suffix = 's'
raise TypeError("takes exactly %d argument%s (%d given)" % (len(keys), suffix, len(params)))
# verify all properties and convert to immutable types.
for (key, vt) in self.properties:
value = vt.validate(params[key])
if value is None:
raise ParameterException(key, params[key])
params[key] = value
self._properties = params
__metaclass__ = _AutoPopulateAndRegister
def binary(self):
'Returns the binary representation of the message.'
return "".join(vt.binary(self._properties[key]) for (key, vt) in self.properties)
@classmethod
def parse(cls, data):
#t0 = time.time()
kw = dict()
offset = 0
for (key, vt) in cls.properties:
try:
(length, kw[key]) = vt.parse(data[offset:])
offset += length
except Exception, e:
raise ParameterException(key, data[offset:], vt)
#dt = time.time() - t0
#if cls not in profile: profile[cls] = [0.0, 0]
#profile[cls][0] += dt
#profile[cls][1] += 1
#profile['count'] += 1
#if profile['count'] % 100000 == 0:
# print "PROFILE"
# for key in profile:
# if key == 'count': continue
# (t, c) = profile[key]
# print ' %s: %f ms/call (%d calls, %f total time)' % (key.__name__, 1000 * t / c, c, t)
# create without __init__ (would unnecessarily verify the parameters)
self = cls.__new__(cls)
self._properties = kw
return (offset, self)
def __str__(self):
output = [self._name]
for (key, vt) in self.properties:
output.append('%s=%s' % (key, vt.str(self._properties[key])))
return '<%s>' % " ".join(output)
class FormatType(object):
def validate(self, obj):
'''Returns the object to store if obj is valid for this type, otherwise
None. The type returned should be immutable.'''
raise NotImplementedError()
def binary(self, obj):
'Returns the binary form for this type.'
raise NotImplementedError()
def parse(self, data):
'''Returns a (length, value) tuple where length is the amount of
data that was consumed.'''
raise NotImplementedError()
def str(self, obj):
return str(obj)
def __str__(self):
cls = str(self.__class__).split('.')[-1].strip(">'")
return '<%s>' % cls
class FormatTypeCompoundType(object):
expected_type = None
@classmethod
def validate(cls, obj):
if isinstance(obj, cls.expected_type):
return obj
return None
@staticmethod
def binary(obj):
return obj.binary()
@classmethod
def parse(cls, data):
return cls.expected_type.parse(data)
@classmethod
def str(cls, obj):
return str(obj)
class FormatTypeOptional(FormatType):
def __init__(self, child, default):
self._child = child
self._default = default
def validate(self, obj):
try:
value = self._child.validate(obj)
if value is not None:
return value
except Exception, e:
print e
return self._default
def binary(self, obj):
return self._child.binary(obj)
def parse(self, data):
try:
return self._child.parse(data)
except Exception, e:
pass
return (0, self._default)
def __str__(self):
return '<FormatTypeOptional child=%s default=%s>' % (self._child, self._default)
def str(self, obj):
return self._child.str(obj)
# Simple formats (don't use any CompoundTypes nor FormatTypes)
class FormatTypeNumber(FormatType):
'''Number format.
Allows the object type to be the expected_type (default: int) using
the endian and format to pack the value (default: little endian, signed
4-byte integer).
A tuple can be passed in for expected_type to accept multiple types.
Possible Formats:
b, B - signed, unsigned 1-byte char
h, H - signed, unsigned 2-byte short
i, I - signed, unsigned 4-byte integer
q, Q - signed, unsigned 8-byte integer'''
def __init__(self, format = 'i', big_endian = False, allow_float = False):
if format not in self._ranges:
raise ValueError('invalid format type: %s' % format)
self._format = {True: '>', False: '<'}[big_endian] + format
self._allow_float = allow_float
_ranges = dict(
b = (-128, 128),
B = (0, 256),
h = (-32768, 32768),
H = (0, 65536),
i = (-2147483648, 2147483648),
I = (0, 4294967296),
q = (-9223372036854775808L, 9223372036854775808L),
Q = (0, 18446744073709551616L)
)
def validate(self, obj):
# check type
if not (self._allow_float and isinstance(obj, float)):
if self._format[1] in 'qQ':
if not isinstance(obj, (int, long)):
return None
elif not isinstance(obj, int):
return None
# check valid range
(min_value, max_value) = self._ranges[self._format[1]]
if min_value <= obj < max_value:
return obj
return None
def binary(self, obj):
return struct.pack(self._format, int(obj))
def parse(self, data):
length = dict(b = 1, h = 2, i = 4, q = 8)[self._format.lower()[-1]]
return (length, struct.unpack(self._format, data[:length])[0])
def __str__(self):
return '<FormatTypeNumber format=%s>' % self._format
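# Illustrative example (not from the original): FormatTypeNumber('H',
# big_endian=True), the port format used by NetworkAddress below, packs
# 8333 as '\x20\x8d' via binary(), and parse('\x20\x8d') returns (2, 8333).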
class FormatTypeVarInteger(FormatType):
@staticmethod
def validate(obj):
if isinstance(obj, int):
return obj
return None
@staticmethod
def binary(obj):
if obj < 0xfd:
return struct.pack('<B', obj)
elif obj <= 0xffff:
return chr(0xfd) + struct.pack('<H', obj)
elif obj <= 0xffffffff:
return chr(0xfe) + struct.pack('<I', obj)
return chr(0xff) + struct.pack('<Q', obj)
@staticmethod
def parse(data):
value = ord(data[0])
if value == 0xfd:
return (3, struct.unpack('<H', data[1:3])[0])
elif value == 0xfe:
return (5, struct.unpack('<I', data[1:5])[0])
elif value == 0xff:
return (9, struct.unpack('<Q', data[1:9])[0])
return (1, value)
def str(self, obj):
return str(obj)
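# Sketch of the variable-length integer layout implemented above (standard
# Bitcoin varint rules, assumed):
#   0x00..0xfc          -> 1 byte: the value itself
#   0xfd..0xffff        -> '\xfd' + 2-byte little-endian value
#   0x10000..0xffffffff -> '\xfe' + 4-byte little-endian value
#   larger              -> '\xff' + 8-byte little-endian value
# e.g. binary(515) == '\xfd\x03\x02' and parse('\xfd\x03\x02') == (3, 515).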
# @TODO: test ipv6...
class FormatTypeIPAddress(FormatType):
@staticmethod
def _ipv4_groups(obj):
# convert each group to its value
try:
groups = map(int, obj.split('.'))
except ValueError, e:
return None
# too many or not enough groups
if len(groups) != 4:
return None
# is each group in the correct range?
for group in groups:
if not (0x00 <= group <= 0xff):
return None
return groups
@staticmethod
def _ipv6_groups(obj):
# multiple double-colons or more than 8 groups; bad address
objs = obj.split(':')
if objs.count('') > 1 or len(objs) > 8:
return None
# calculate each group's value
groups = [ ]
for group in objs:
if group == '':
groups.extend([ 0 ] * (8 - len(objs)))
else:
groups.append(int(group, 16))
# is each group in the correct range?
for group in groups:
if not (0x0000 <= group <= 0xffff):
return None
return groups
@staticmethod
def validate(obj):
if not isinstance(obj, str):
return None
if FormatTypeIPAddress._ipv4_groups(obj) is not None:
return obj
if FormatTypeIPAddress._ipv6_groups(obj) is not None:
return obj
return None
@staticmethod
def parse(data):
if data[0:10] == (chr(0) * 10) and data[10:12] == (chr(255) * 2):
return (16, '.'.join(str(i) for i in struct.unpack('>BBBB', data[12:16])))
return (16, ':'.join(("%x" % i) for i in struct.unpack('>HHHHHHHH', data[:16])))
def binary(self, obj):
groups = self._ipv4_groups(obj)
if groups is not None:
return (chr(0) * 10) + (chr(255) * 2) + struct.pack('>BBBB', * groups)
groups = self._ipv6_groups(obj)
if groups is not None:
return struct.pack('>HHHHHHHH', *groups)
raise Exception('should not be able to reach here')
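# Wire-format sketch for the class above (IPv4 addresses use the standard
# IPv4-mapped IPv6 layout): binary('127.0.0.1') is ten zero bytes, two 0xff
# bytes, then '\x7f\x00\x00\x01' (16 bytes total); parse() of those 16 bytes
# returns (16, '127.0.0.1'), and any other 16-byte value is rendered as eight
# colon-separated hex groups.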
class FormatTypeBytes(FormatType):
'''String format.
Allows the object to be a fixed length string.'''
def __init__(self, length):
self._length = length
def validate(self, obj):
if isinstance(obj, str) and len(obj) == self._length:
return obj
return None
def binary(self, obj):
return obj
def parse(self, data):
return (self._length, data[:self._length])
def str(self, obj):
return '0x' + obj.encode('hex')
def __str__(self):
return '<FormatTypeBytes length=%d>' % self._length
class FormatTypeVarString(FormatType):
'''VarString format.
The parameter must be a string, but may have variable length.'''
@staticmethod
def validate(obj):
if isinstance(obj, str):
return obj
return None
@staticmethod
def binary(obj):
return FormatTypeVarInteger.binary(len(obj)) + obj
@staticmethod
def parse(data):
(vl, length) = FormatTypeVarInteger.parse(data)
obj = data[vl:vl + length]
return (vl + len(obj), obj)
def str(self, obj):
return repr(obj)
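# Illustrative example (value assumed): FormatTypeVarString.binary('/Satoshi:0.7.2/')
# is a varint length prefix ('\x0f' for 15 bytes) followed by the raw string;
# parse() of that buffer returns (16, '/Satoshi:0.7.2/').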
class FormatTypeArray(FormatType):
'''Array format.
The properties must be an array of objects, each of child_type. If
min_length is specified, the array must contain at least that many
children.
A tuple is returned to ensure the structure is immutable.'''
def __init__(self, child_type, min_length = None, max_length = None):
self._child_type = child_type
self._min_length = min_length
self._max_length = max_length
def validate(self, obj):
if not isinstance(obj, (list, tuple)):
return None
if self._min_length and len(obj) < self._min_length:
return None
if self._max_length and len(obj) > self._max_length:
return None
obj = [self._child_type.validate(o) for o in obj]
if None in obj:
return None
return tuple(obj)
def binary(self, obj):
return (FormatTypeVarInteger.binary(len(obj)) +
"".join(self._child_type.binary(o) for o in obj))
def parse(self, data):
return parse_variable_set(data, self._child_type)
def str(self, obj):
return "[%s]" % ", ".join(self._child_type.str(o) for o in obj)
def __str__(self):
return '<FormatTypeArray child=%s length=[%s, %s]>' % (self._child_type, self._min_length, self._max_length)
#class FormatTypeRemaining(FormatType):
# def validate(self, obj):
# if isinstance(obj, str):
# return obj
# return None
#
# def binary(self, obj):
# return obj
#
# def parse(self, data):
# return (len(data), data)
#
# def str(self, obj):
# return '0x' + obj.encode('hex')
# Network Address types and format
class NetworkAddress(CompoundType):
properties = [
('timestamp', FormatTypeNumber('I', allow_float = True)),
('services', FormatTypeNumber('Q')),
('address', FormatTypeIPAddress()),
('port', FormatTypeNumber('H', big_endian = True)),
]
class FormatTypeNetworkAddress(FormatTypeCompoundType):
'''NetworkAddress format.
The properties must be a NetworkAddress.'''
expected_type = NetworkAddress
class FormatTypeNetworkAddressWithoutTimestamp(FormatTypeNetworkAddress):
'''NetworkAddress format.
The properties must be a NetworkAddress. The timestamp will be zero
when deserialized and will be omitted when serialized'''
@classmethod
def parse(cls, data):
(vl, obj) = FormatTypeNetworkAddress.parse((chr(0) * 4) + data)
return (vl - 4, obj)
def binary(self, obj):
return FormatTypeNetworkAddress.binary(obj)[4:]
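# Size note (derived from the property list above): a full NetworkAddress is
# 4 + 8 + 16 + 2 = 30 bytes; the timestamp-less form used here is 26 bytes,
# which is why parse() prepends four zero bytes before delegating and binary()
# slices the first four bytes off.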
# Inventory Vectors type and format
class InventoryVector(CompoundType):
properties = [
('object_type', FormatTypeNumber('I')),
('hash', FormatTypeBytes(32)),
]
class FormatTypeInventoryVector(FormatTypeCompoundType):
'''InventoryVector format.
The properties must be an InventoryVector.'''
expected_type = InventoryVector
# Txn types and formats
class OutPoint(CompoundType):
properties = [
('hash', FormatTypeBytes(32)),
('index', FormatTypeNumber('I')),
]
def __hash__(self):
return hash((self.hash, self.index))
def __eq__(self, other):
if not isinstance(other, OutPoint):
return False
return (self.hash == other.hash) and (self.index == other.index)
class FormatTypeOutPoint(FormatTypeInventoryVector):
expected_type = OutPoint
class TxnIn(CompoundType):
properties = [
('previous_output', FormatTypeOutPoint()),
('signature_script', FormatTypeVarString()),
('sequence', FormatTypeNumber('I')),
]
class FormatTypeTxnIn(FormatTypeCompoundType):
'''TxnIn format.
The properties must be a TxnIn.'''
expected_type = TxnIn
class TxnOut(CompoundType):
properties = [
('value', FormatTypeNumber('q')),
('pk_script', FormatTypeVarString()),
]
class FormatTypeTxnOut(FormatTypeCompoundType):
'''TxnOut format.
The properties must be a TxnOut.'''
expected_type = TxnOut
class Txn(CompoundType):
properties = [
('version', FormatTypeNumber('I')),
('tx_in', FormatTypeArray(FormatTypeTxnIn, 1)),
('tx_out', FormatTypeArray(FormatTypeTxnOut, 1)),
('lock_time', FormatTypeNumber('I')),
]
@property
def hash(self):
if '__hash' not in self._properties:
self._properties['__hash'] = util.sha256d(self.binary())
return self._properties['__hash']
class FormatTypeTxn(FormatTypeInventoryVector):
'''Txn format.
The properties must be a Txn.'''
expected_type = Txn
# Block Header type and format
class BlockHeader(CompoundType):
properties = [
('version', FormatTypeNumber('I')),
('prev_block', FormatTypeBytes(32)),
('merkle_root', FormatTypeBytes(32)),
('timestamp', FormatTypeNumber('I', allow_float = True)),
('bits', FormatTypeNumber('I')),
('nonce', FormatTypeNumber('I')),
('txn_count', FormatTypeVarInteger()),
]
@staticmethod
def from_block(block):
return BlockHeader(block.version, block.previous_hash,
block.merkle_root, block.timestamp,
block.bits, block.nonce,
len(block.transactions))
@property
def hash(self):
if '__hash' not in self._properties:
self._properties['__hash'] = util.sha256d(self.binary()[:80])
return self._properties['__hash']
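# Note on the [:80] slice above: the serialized fields version(4) +
# prev_block(32) + merkle_root(32) + timestamp(4) + bits(4) + nonce(4) total
# 80 bytes, so the trailing txn_count varint is excluded from the hash, as in
# the standard block-header hashing scheme.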
class FormatTypeBlockHeader(FormatTypeInventoryVector):
'''BlockHeader format.
The properties must be a BlockHeader.'''
expected_type = BlockHeader
|
|
import ptypes
from ptypes import *
from . import portable
from .headers import *
import logging
class Signature(portable.IMAGE_FILE_HEADER):
_fields_ = [
(Machine, 'Machine'),
(uint16, 'NumberOfSections'),
]
def isImportSignature(self):
return all((self['Machine'].li.int() == self['Machine'].byname('UNKNOWN'), self['NumberOfSections'].li.int() == 0xffff))
class ObjectHeader(portable.IMAGE_FILE_HEADER):
_fields_ = portable.IMAGE_FILE_HEADER._fields_[2:]
class ImportHeader(pstruct.type):
_fields_ = [
(uint16, 'Version'),
(Machine, 'Machine'),
(TimeDateStamp, 'TimeDateStamp'),
(uint32, 'SizeOfData'),
]
class IMPORT_TYPE(pbinary.enum):
length, _values_ = 2, [
('CODE', 0),
('DATA', 1),
('CONST', 2),
]
class IMPORT_(pbinary.enum):
length, _values_ = 3, [
('ORDINAL', 0),
('NAME', 1),
('NAME_NOPREFIX', 2),
('NAME_UNDECORATE', 3),
]
class ImportData(pstruct.type):
class Type(pbinary.struct):
_fields_ = [
(11, 'Reserved'),
(IMPORT_, 'Name'),
(IMPORT_TYPE, 'Type'),
]
_fields_ = [
(portable.imports.word, 'Ordinal/Hint'),
(Type, 'Type'),
(pstr.szstring, 'Symbol'),
(pstr.szstring, 'Library'),
]
class FileSegmentEntry(pstruct.type):
def __Data(self):
section = self.Section
return dyn.block(section['SizeOfRawData'].li.int())
def __Relocations(self):
section = self.Section
return dyn.clone(portable.relocations.RelocationTable, length=section['NumberOfRelocations'].int())
_fields_ = [
(__Data, 'Data'),
(__Relocations, 'Relocations'),
]
def properties(self):
res = super(FileSegmentEntry, self).properties()
if hasattr(self, 'Section'):
res['SectionName'] = self.Section['Name'].str()
return res
class SegmentTableArray(parray.type):
def _object_(self):
p = self.getparent(Header)
sections = p['Sections'].li
section = sections[len(self.value)]
return dynamic.clone(FileSegmentEntry, Section=section)
class File(pstruct.type, Header, ptype.boundary):
"""Coff Object File"""
def __Sections(self):
sig = self['Signature'].li
count = 0 if sig.isImportSignature() else sig['NumberOfSections'].int()
return dynamic.clone(portable.SectionTableArray, length=count)
def __Segments(self):
sig = self['Signature'].li
if sig.isImportSignature():
return ImportData
return dynamic.clone(SegmentTableArray, length=sig['NumberOfSections'].int())
_fields_ = [
(Signature, 'Signature'),
(lambda s: ImportHeader if s['Signature'].li.isImportSignature() else ObjectHeader, 'Header'),
(__Sections, 'Sections'),
# FIXME: we're actually assuming that these fields are packed and
# aligned, so there's a large chance that that empty space
# could exist in between each item, or the segments could
# be in a completely different order.
(__Segments, 'Segments'),
(portable.symbols.SymbolTableAndStringTable, 'SymbolTable'),
]
def FileHeader(self):
'''Return the Header which contains a number of sizes used by the file.'''
return self['Header']
def Machine(self):
sig = self['Signature']
return self['Header']['Machine'] if sig.isImportSignature() else sig['Machine']
def isImportLibrary(self):
sig = self['Signature']
return sig.isImportSignature()
if __name__ == '__main__':
## parse the file
import sys, pecoff, ptypes
from ptypes import provider
import logging
print('-'*20 + 'loading file..')
coff = pecoff.Object.File(source=provider.file(sys.argv[1]))
coff.load()
__name__ = 'ImportLibrary' if coff.isImportLibrary() else 'CoffObject'
if __name__ == 'ImportLibrary':
print(coff['Signature'])
print(coff['Header'])
print(coff['Segments'])
if __name__ == 'CoffObject':
print(coff['Signature'])
print(coff['Header'])
print(coff['Sections'])
### check everything from the symbol table's perspective
sst = coff['Header']['PointerToSymbolTable'].d
print(sst)
sst.load()
symboltable = sst['Symbols']
print('-'*20 + 'printing external symbols')
## build list of external symbols
sym_external = {}
for name in sst.names():
v = sst.Symbol(name)
if v['StorageClass'].int() == v['StorageClass'].byname('EXTERNAL'):
sym_external[name] = v
continue
print('\n'.join(map(repr, sym_external.values())))
print('-'*20 + 'printing statically defined symbols')
## build list of static symbols
sym_static = {}
for name in sst.names():
sym = sst.Symbol(name)
if sym['StorageClass'].int() == sym['StorageClass'].byname('STATIC') and sym['Value'].int() == 0:
idx = sym.SectionIndex()
sym_static[idx] = (sym, sst.AuxiliarySymbols(name))
continue
for x in sym_static.keys():
sym,aux = sym_static[x]
print(sym)
if aux:
print('\n'.join(map(repr,aux)))
print('-'*20 + 'check that the number of relocations in the symboltable matches the section\'s')
## build list of static symbols
## sanity check that the number of relocations are correct
sections = coff['Sections']
for index, (sym, aux) in sym_static.items():
section = sections[index]
sectioncount = section['NumberOfRelocations'].int()
if len(aux) > 0:
symbolcount = aux[0]['NumberOfRelocations'].int()
if sectioncount != symbolcount:
logging.warning("number of relocations ({:d}) for section {:s} differs from section definition ({:d})".format(symbolcount, sym['Name'].str(), sectioncount))
logging.warning(aux[0])
print('failed with relocated section {!r}'.format(section))
continue
print('successfully relocated section {!r}'.format(section))
print('-'*20 + 'adding some symbols')
## reassign some symbols
sy = sst.assign('_TlsAlloc@0', 0xcccccccc)
print('added symbol', sy)
sy = sst.assign('.text', 0x4010000)
print('added symbol', sy)
print('-'*20 + 'printing all symbol information')
print('\n'.join(map(repr, symboltable)))
def formatrelocation(relo, symboltable):
symbol = symboltable[ relo['SymbolTableIndex'].int() ]
return '\n'.join([repr(symbol), repr(relo)]) + '\n'
### everything from the section's perpsective
print('-'*20 + 'printing all relocations')
sections = []
for section in coff['Sections']:
relocations, data = section['PointerToRelocations'].d, section.data().l
sections.append((data.serialize(), relocations)) # save for later
## do relocations for every section
for section, sdr in zip(coff['Sections'], sections):
data, relocations = sdr
for r in relocations.load():
print(r)
data = r.relocate(data, symboltable)
continue
## print out results
print('-'*20 + 'printing relocated sections')
for section in coff['Sections']:
print(section['Name'].str())
print(ptypes.utils.indent('\n'.join(map(lambda x: formatrelocation(x, symboltable), section['PointerToRelocations'].d.l))))
print(ptypes.utils.hexdump(section.data()))
if False:
print('-'*20 + 'dumping relocated sections')
for index in range( len(sections) ):
section = sections[index]
name = ptypes.utils.strdup(section['Name'].serialize(), terminator=b'\0')
sys.stdout.write(name)
if index in sym_static.keys():
sym,aux = sym_static[index]
print(sym['Name'].str(), sym['SectionNumber'].int(), int(sym['Value']))
data = section.getrelocateddata(symboltable)
else:
data = section.data().serialize()
print()
# print(ptypes.utils.hexdump( section.getdata().serialize() ))
print(ptypes.utils.hexdump( data ))
x = file("{:s}.section".format(name[1:]), 'wb')
x.write(data)
x.close()
|
|
#!/usr/bin/env python
import setpath
import unittest
from rename import rename
from bike.transformer.save import save
from bike.testutils import *
import compiler
class RenameClassTests:
def testRenamesClassDcl(self):
srcBefore=trimLines("""
class TheClass:
def theMethod():
pass
""")
srcAfter=trimLines("""
class NewName:
def theMethod():
pass
""")
src = self.rename(srcBefore, 1,6,"NewName")
self.assertEqual(srcAfter,src)
# i.e. a = TheClass()
def testRenamesClassReference(self):
srcBefore=trimLines("""
class TheClass:
pass
a = TheClass()
""")
srcAfter=trimLines("""
class NewName:
pass
a = NewName()
""")
src = self.rename(srcBefore, 1,6,"NewName")
self.assertEqual(srcAfter,src)
# i.e. a = TheClass.TheClass()
def testRenamesClassReferenceWhenScopeIsSameNameAsClass(self):
srcBefore = trimLines("""
class TheClass:
class TheClass:
pass
a = TheClass.TheClass()
""")
srcAfter=trimLines("""
class TheClass:
class NewName:
pass
a = TheClass.NewName()
""")
src = self.rename(srcBefore, 2,10, "NewName")
self.assertEqual(srcAfter,src)
# i.e. a = TheClass.TheClass()
def testRenamesClassReferenceWhenChildIsSameNameAsClass(self):
srcBefore = trimLines("""
class TheClass:
class TheClass:
pass
a = TheClass.TheClass()
""")
srcAfter=trimLines("""
class NewName:
class TheClass:
pass
a = NewName.TheClass()
""")
src = self.rename(srcBefore, 1,6,"NewName")
self.assertEqual(srcAfter,src)
# a = TheClass() + TheClass()
def testRenamesClassReferenceWhenTwoRefsInTheSameLine(self):
srcBefore=trimLines("""
class TheClass:
pass
a = TheClass() + TheClass()
""")
srcAfter=trimLines("""
class NewName:
pass
a = NewName() + NewName()
""")
src = self.rename(srcBefore,1,6, "NewName")
self.assertEqual(srcAfter,src)
def testRenamesClassReferenceInInstanceCreation(self):
srcBefore=trimLines("""
class TheClass:
def theMethod(self): pass
TheClass().theMethod()
""")
srcAfter=trimLines("""
class NewName:
def theMethod(self): pass
NewName().theMethod()
""")
src = self.rename(srcBefore,1,6,"NewName")
self.assertEqual(srcAfter,src)
# i.e. if renaming TheClass, shouldn't rename a.b.c.TheClass
def testDoesntRenameBogusClassReferenceOnEndOfGetattrNest(self):
srcBefore=trimLines("""
class TheClass:
pass
a.b.c.TheClass # Shouldn't be renamed
""")
srcAfter=trimLines("""
class NewName:
pass
a.b.c.TheClass # Shouldn't be renamed
""")
src = self.rename(srcBefore,1,6,"NewName")
self.assertEqual(srcAfter,src)
def testRenamesClassRefUsedInExceptionRaise(self):
srcBefore=trimLines("""
class TheClass:
pass
raise TheClass, \"hello mum\"
""")
srcAfter=trimLines("""
class NewName:
pass
raise NewName, \"hello mum\"
""")
src = self.rename(srcBefore, 1,6, "NewName")
self.assertEqual(srcAfter,src)
def testRenamesClassReferenceNameInInheritenceSpec(self):
srcBefore=trimLines("""
class TheClass:
pass
class DerivedClass(TheClass):
pass
""")
srcAfter=trimLines("""
class NewName:
pass
class DerivedClass(NewName):
pass
""")
src = self.rename(srcBefore, 1,6, "NewName")
self.assertEqual(srcAfter,src)
class RenameClassTests_importsClass:
def testRenamesClassReferenceInInstanceCreationWithFQN(self):
srcBefore=trimLines("""
import b.bah
def foo():
a = b.bah.TheClass()
""")
srcAfter=trimLines("""
import b.bah
def foo():
a = b.bah.NewName()
""")
src = self.renameClass(srcBefore,"NewName")
self.assertEqual(srcAfter,src)
def testRenamesClassReferencesInInheritenceSpecs(self):
srcBefore=trimLines("""
import b
class DerivedClass(b.bah.TheClass):
pass
""")
srcAfter=trimLines("""
import b
class DerivedClass(b.bah.NewName):
pass
""")
src = self.renameClass(srcBefore,"NewName")
self.assertEqual(srcAfter,src)
def testRenamesFromImportReferenceWhenInBodyOfClass(self):
srcBefore=trimLines("""
class AnotherClass:
from b.bah import TheClass
TheClass.baz = 0
""")
srcAfter=trimLines("""
class AnotherClass:
from b.bah import NewName
NewName.baz = 0
""")
src = self.renameClass(srcBefore,"NewName")
self.assertEqual(srcAfter,src)
def testRenamesReferenceToClassImportedInSameClassScope(self):
srcBefore=trimLines("""
class AnotherClass:
from b.bah import TheClass
TheClass.baz = 0
""")
srcAfter=trimLines("""
class AnotherClass:
from b.bah import NewName
NewName.baz = 0
""")
src = self.renameClass(srcBefore,"NewName")
self.assertEqual(srcAfter,src)
def testRenamesReferenceToClassImportedWithFromImportStar(self):
srcBefore=trimLines("""
from a.b.bah import *
a = TheClass()
""")
srcAfter=trimLines("""
from a.b.bah import *
a = NewName()
""")
src = self.renameClass(srcBefore,"NewName")
self.assertEqual(srcAfter,src)
class TestRenameClass(BRMTestCase, RenameClassTests):
def rename(self, src, line, col, newname):
createPackageStructure(src,"pass")
rename(pkgstructureFile1,line,col, newname)
save()
return file(pkgstructureFile1).read()
class TestRenameClassReferenceWithDirectoryStructure(BRMTestCase,
RenameClassTests_importsClass):
def renameClass(self, src, newname):
createPackageStructure(src,TheClassTestdata)
rename(pkgstructureFile2,1,6, newname)
save()
return file(pkgstructureFile1).read()
TheClassTestdata = trimLines("""
class TheClass:
def theMethod(self):
pass
def differentMethod(self):
pass
class DifferentClass:
def theMethod(self):
pass
""")
if __name__ == "__main__":
unittest.main()
|
|
'''local, adjusted version from scipy.linalg.basic.py
changes:
The only changes are that additional results are returned
'''
from __future__ import print_function
from statsmodels.compat.python import lmap, range
import numpy as np
from scipy.linalg import svd as decomp_svd
from scipy.linalg.lapack import get_lapack_funcs
from numpy import asarray, zeros, sum, conjugate, dot, transpose
import numpy
from numpy import asarray_chkfinite, single
from numpy.linalg import LinAlgError
### Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=0, overwrite_b=0):
"""Compute least-squares solution to equation :m:`a x = b`
Compute a vector x such that the 2-norm :m:`|b - a x|` is minimised.
Parameters
----------
a : array, shape (M, N)
b : array, shape (M,) or (M, K)
cond : float
Cutoff for 'small' singular values; used to determine effective
rank of a. Singular values smaller than rcond*largest_singular_value
are considered zero.
overwrite_a : boolean
Discard data in a (may enhance performance)
overwrite_b : boolean
Discard data in b (may enhance performance)
Returns
-------
x : array, shape (N,) or (N, K) depending on shape of b
Least-squares solution
residues : array, shape () or (1,) or (K,)
Sums of residues, squared 2-norm for each column in :m:`b - a x`
If rank of matrix a is < N or > M this is an empty array.
If b was 1-d, this is an (1,) shape array, otherwise the shape is (K,)
rank : integer
Effective rank of matrix a
s : array, shape (min(M,N),)
Singular values of a. The condition number of a is abs(s[0]/s[-1]).
Raises LinAlgError if computation does not converge
"""
a1, b1 = lmap(asarray_chkfinite, (a, b))
if a1.ndim != 2:
raise ValueError('expected matrix')
m, n = a1.shape
if b1.ndim == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
if m != b1.shape[0]:
raise ValueError('incompatible dimensions')
gelss, = get_lapack_funcs(('gelss',), (a1, b1))
if n > m:
# need to extend b matrix as it will be filled with
# a larger solution matrix
b2 = zeros((n, nrhs), dtype=gelss.dtype)
if b1.ndim == 2:
b2[:m, :] = b1
else:
b2[:m, 0] = b1
b1 = b2
overwrite_a = overwrite_a or (a1 is not a and not hasattr(a, '__array__'))
overwrite_b = overwrite_b or (b1 is not b and not hasattr(b, '__array__'))
if gelss.module_name[:7] == 'flapack':
# get optimal work array
work = gelss(a1, b1, lwork=-1)[4]
lwork = int(work[0].real)
v, x, s, rank, work, info = gelss(
a1, b1, cond=cond, lwork=lwork, overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
else:
raise NotImplementedError('calling gelss from %s' %
gelss.module_name)
if info > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
if info < 0:
raise ValueError('illegal value in %d-th argument of '
'internal gelss' % -info)
resids = asarray([], dtype=x.dtype)
if n < m:
x1 = x[:n]
if rank == n:
resids = sum(x[n:]**2, axis=0)
x = x1
return x, resids, rank, s
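# A minimal usage sketch (added for illustration; it only assumes numpy, which
# is already imported as np above). It shows how the four return values of this
# lstsq variant are read: coefficients, squared residuals, effective rank and
# the singular values of the design matrix.
def _lstsq_usage_example():
    rng = np.random.RandomState(0)
    A = rng.randn(20, 3)
    y = A.dot(np.array([1.0, -2.0, 0.5])) + 0.01 * rng.randn(20)
    coef, resids, rank, sv = lstsq(A, y)
    # rank should be 3 here; resids is the squared 2-norm of y - A.dot(coef),
    # populated only because rank == N and M > N; sv are the singular values.
    return coef, resids, rank, sv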
def pinv(a, cond=None, rcond=None):
"""Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using a least-squares
solver.
Parameters
----------
a : array, shape (M, N)
Matrix to be pseudo-inverted
cond, rcond : float
Cutoff for 'small' singular values in the least-squares solver.
Singular values smaller than rcond*largest_singular_value are
considered zero.
Returns
-------
B : array, shape (N, M)
Raises LinAlgError if computation does not converge
Examples
--------
>>> from numpy import *
>>> a = random.randn(9, 6)
>>> B = linalg.pinv(a)
>>> allclose(a, dot(a, dot(B, a)))
True
>>> allclose(B, dot(B, dot(a, B)))
True
"""
a = asarray_chkfinite(a)
b = numpy.identity(a.shape[0], dtype=a.dtype)
if rcond is not None:
cond = rcond
return lstsq(a, b, cond=cond)[0]
eps = numpy.finfo(float).eps
feps = numpy.finfo(single).eps
_array_precision = {'f': 0, 'd': 1, 'F': 0, 'D': 1}
def pinv2(a, cond=None, rcond=None):
"""Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using its
singular-value decomposition and including all 'large' singular
values.
Parameters
----------
a : array, shape (M, N)
Matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' singular values.
Singular values smaller than rcond*largest_singular_value are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
B : array, shape (N, M)
Raises LinAlgError if SVD computation does not converge
Examples
--------
>>> from numpy import *
>>> a = random.randn(9, 6)
>>> B = linalg.pinv2(a)
>>> allclose(a, dot(a, dot(B, a)))
True
>>> allclose(B, dot(B, dot(a, B)))
True
"""
a = asarray_chkfinite(a)
u, s, vh = decomp_svd(a)
t = u.dtype.char
if rcond is not None:
cond = rcond
if cond in [None, -1]:
cond = {0: feps*1e3, 1: eps*1e6}[_array_precision[t]]
m, n = a.shape
cutoff = cond*numpy.maximum.reduce(s)
psigma = zeros((m, n), t)
for i in range(len(s)):
if s[i] > cutoff:
psigma[i, i] = 1.0/conjugate(s[i])
# XXX: use lapack/blas routines for dot
return transpose(conjugate(dot(dot(u, psigma), vh)))
def logdet_symm(m, check_symm=False):
"""
Return log(det(m)) asserting positive definiteness of m.
Parameters
----------
m : array-like
2d array that is positive-definite (and symmetric)
Returns
-------
logdet : float
The log-determinant of m.
"""
from scipy import linalg
if check_symm:
if not np.all(m == m.T): # would be nice to short-circuit check
raise ValueError("m is not symmetric.")
c, _ = linalg.cho_factor(m, lower=True)
return 2*np.sum(np.log(c.diagonal()))
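# Illustrative cross-check (an addition, not part of the original module): for a
# symmetric positive-definite matrix, logdet_symm should agree with numpy's
# slogdet up to floating-point error.
def _logdet_symm_example():
    rng = np.random.RandomState(1)
    x = rng.randn(50, 4)
    m = x.T.dot(x) + 4 * np.eye(4)  # positive definite by construction
    sign, logdet_np = np.linalg.slogdet(m)  # sign is 1.0 for such a matrix
    return logdet_symm(m), logdet_np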
def stationary_solve(r, b):
"""
Solve a linear system for a Toeplitz correlation matrix.
A Toeplitz correlation matrix represents the covariance of a
stationary series with unit variance.
Parameters
----------
r : array-like
A vector describing the coefficient matrix. r[0] is the first
band next to the diagonal, r[1] is the second band, etc.
b : array-like
The right-hand side for which we are solving, i.e. we solve
Tx = b and return b, where T is the Toeplitz coefficient matrix.
Returns
-------
The solution to the linear system.
"""
db = r[0:1]
dim = b.ndim
if b.ndim == 1:
b = b[:, None]
x = b[0:1,:]
for j in range(1, len(b)):
rf = r[0:j][::-1]
a = (b[j,:] - np.dot(rf, x)) / (1 - np.dot(rf, db[::-1]))
z = x - np.outer(db[::-1], a)
x = np.concatenate((z, a[None, :]), axis=0)
if j == len(b) - 1:
break
rn = r[j]
a = (rn - np.dot(rf, db)) / (1 - np.dot(rf, db[::-1]))
z = db - a*db[::-1]
db = np.concatenate((z, np.r_[a]))
if dim == 1:
x = x[:, 0]
return x
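# Small consistency sketch (illustrative addition): build the dense Toeplitz
# correlation matrix implied by r and compare a direct solve against the
# recursion in stationary_solve.
def _stationary_solve_example():
    from scipy.linalg import toeplitz
    r = np.array([0.5, 0.25, 0.125])
    b = np.arange(1.0, 5.0)
    T = toeplitz(np.r_[1.0, r])  # first column is [1, r[0], r[1], r[2]]
    x_direct = np.linalg.solve(T, b)
    x_recursive = stationary_solve(r, b)
    return np.max(np.abs(x_direct - x_recursive))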
if __name__ == '__main__':
# for checking only
# Note on Windows32:
# linalg doesn't always produce the same results in each call
import scipy.linalg  # needed for the comparison calls below
a0 = np.random.randn(100,10)
b0 = a0.sum(1)[:, None] + np.random.randn(100,3)
lstsq(a0,b0)
pinv(a0)
pinv2(a0)
x = pinv(a0)
x2=scipy.linalg.pinv(a0)
print(np.max(np.abs(x-x2)))
x = pinv2(a0)
x2 = scipy.linalg.pinv2(a0)
print(np.max(np.abs(x-x2)))
|
|
import os
from flask import Blueprint, render_template, abort, jsonify
from util.security import api_security
from database import SessionLoader, DummySession, dSIPMultiDomainMapping
from shared import showApiError,debugEndpoint,StatusCodes, getRequestData
from enum import Enum
from werkzeug import exceptions as http_exceptions
import importlib.util
import settings, globals
class config():
hostname=''
port=''
username=''
password=''
dbname=''
type=''
auth_type=''
plugin_type=''
plugin=None
def __init__(self,config_id):
db = DummySession()
db = SessionLoader()
try:
print("The config_id is {}".format(config_id))
domainMapping = db.query(dSIPMultiDomainMapping).filter(dSIPMultiDomainMapping.pbx_id == config_id).first()
if domainMapping is None:
raise Exception("Configuration doesn't exist")
else:
print("***In domain mapping***")
self.hostname=domainMapping.db_host
#self.port=domainMapping.port if "port" in domainMapping else "5432"
self.port="5432"
self.username=domainMapping.db_username
self.password=domainMapping.db_password
self.dbname="fusionpbx"
#response_payload['msg'] = 'Domain Configuration Exists'
if domainMapping.type == int(dSIPMultiDomainMapping.FLAGS.TYPE_FUSIONPBX.value):
self.plugin_type = FLAGS.FUSIONPBX_PLUGIN
elif domainMapping.type == dSIPMultiDomainMapping.FLAGS.TYPE_FUSIONPBX_CLUSTER.value:
self.plugin_type = FLAGS.FUSIONPBX_PLUGIN
elif domainMapping.type == dSIPMultiDomainMapping.FLAGS.TYPE_FREEPBX.value:
self.plugin_type = FLAGS.FREEPBX_PLUGIN
raise Exception("FreePBX Plugin is not supported yet")
else:
raise Exception("PBX plugin for config #{} can not be found".format(config_id))
# Import plugin
# Returns the Base directory of this file
base_dir = os.path.dirname(__file__)
# Use the Base Dir to specify the location of the plugin required for this domain
spec = importlib.util.spec_from_file_location("plugin.{}".format(self.plugin_type), "{}/plugin/{}/interface.py".format(base_dir,self.plugin_type))
self.plugin = importlib.util.module_from_spec(spec)
# exec_module() returns None, so execute it and then report success
spec.loader.exec_module(self.plugin)
print("***Plugin was loaded***")
return
except Exception as ex:
raise ex
finally:
db.close()
def getPlugin(self):
if self.plugin:
return self.plugin
else:
raise Exception("The plugin could not be loaded")
class FLAGS():
FUSIONPBX_PLUGIN = "fusion"
FREEPBX_PLUGIN = "freepbx"
mediaserver = Blueprint('mediaserver', __name__)
@mediaserver.route('/api/v1/mediaserver/domain/',methods=['GET'])
@mediaserver.route('/api/v1/mediaserver/domain/<string:config_id>',methods=['GET'])
@mediaserver.route('/api/v1/mediaserver/domain/<string:config_id>/<string:domain_id>',methods=['GET'])
@api_security
def getDomains(config_id=None,domain_id=None):
"""
List all of the domains on a PBX\n
If the PBX only contains a single domain then it will return the hostname or ip address of the system.
If the PBX is multi-tenant then a list of all domains will be returned
===============
Request Payload
===============
.. code-block:: json
{}
================
Response Payload
================
.. code-block:: json
{
error: <string>,
msg: <string>,
kamreload: <bool>,
data: [
domains: [
{
domain_id: <int>,
name: <string>,
enabled: <string>,
description: <string>
}
]
]
}
"""
# Determine which plug-in to use
# Currently we only support FusionPBX
# Check if Configuration ID exists
response_payload = {'error': '', 'msg': '', 'kamreload': globals.reload_required, 'data': []}
try:
if settings.DEBUG:
debugEndpoint()
if config_id != None:
config_info = config(config_id)
plugin = config_info.getPlugin()
# Create instance of Media Server Class
if plugin:
mediaserver = plugin.mediaserver(config_info)
if mediaserver:
domains = plugin.domains(mediaserver)
# Use plugin to get list of domains by calling plugin.<pbxtype>.getDomain()
domain_list = domains.read(domain_id)
response_payload['msg'] = '{} domains were found'.format(len(domain_list))
response_payload['data'].append(domain_list)
else:
raise Exception("The configuration id must be provided")
return jsonify(response_payload), StatusCodes.HTTP_OK
except Exception as ex:
return showApiError(ex)
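# Example client call for the GET endpoint above (a sketch, not part of the API
# itself; the auth header name, host and port are assumptions to adapt to your
# deployment):
#   curl -H "Authorization: Bearer <token>" \
#        http://dsiprouter.example.com:5000/api/v1/mediaserver/domain/1
# On success the JSON payload carries the list of domains under the "data" key.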
@mediaserver.route('/api/v1/mediaserver/domain',methods=['POST'])
@api_security
def postDomains():
# use a whitelist to avoid possible buffer overflow vulns or crashes
VALID_REQUEST_DATA_ARGS = {"name": str, "enabled": bool, "description": str, "config_id": int, "cos": str, "settings": dict}
# ensure required args are provided
REQUIRED_ARGS = {'name','config_id'}
# defaults.. keep data returned separate from returned metadata
response_payload = {'error': '', 'msg': '', 'kamreload': globals.reload_required, 'data': []}
try:
if (settings.DEBUG):
debugEndpoint()
# get request data
data = getRequestData()
# sanity checks
for k, v in data.items():
if k not in VALID_REQUEST_DATA_ARGS.keys():
raise http_exceptions.BadRequest("Request data argument '{}' not recognized".format(k))
if not type(v) == VALID_REQUEST_DATA_ARGS[k]:
raise http_exceptions.BadRequest("Request data argument '{}' not valid".format(k))
for k in REQUIRED_ARGS:
if k not in data:
raise http_exceptions.BadRequest("Request argument '{}' is required".format(k))
config_id = data['config_id']
cos = data['cos'] if 'cos' in data else None
domain_settings = data['settings'] if 'settings' in data else None
# Create instance of Media Server Class
if config_id != None:
config_info = config(config_id)
plugin = config_info.getPlugin()
# Create instance of Media Server Class
if plugin:
mediaserver = plugin.mediaserver(config_info)
if mediaserver:
domains = plugin.domains(mediaserver)
domain = domains.create(data)
# Generate Class of Service (CoS)
if cos:
cos_object = plugin.cos(domain,data)
cos_object.create(cos)
if domain_settings:
cos_object.create("domain_settings")
response_payload['data'] = {"domain_id": domain.domain_id}
return jsonify(response_payload), StatusCodes.HTTP_OK
except Exception as ex:
return showApiError(ex)
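# Example request body for the POST endpoint above (field names follow
# VALID_REQUEST_DATA_ARGS; the values are placeholders, not real data):
#   {
#       "name": "tenant1.example.com",
#       "config_id": 1,
#       "enabled": true,
#       "description": "demo tenant",
#       "cos": "default",
#       "settings": {}
#   }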
@mediaserver.route('/api/v1/mediaserver/domain',methods=['PUT'])
@api_security
def putDomains():
# use a whitelist to avoid possible buffer overflow vulns or crashes
VALID_REQUEST_DATA_ARGS = {"name": str, "enabled": bool, "description": str, "config_id": int, "domain_id": str}
# ensure required args are provided
REQUIRED_ARGS = {'domain_id','config_id'}
# defaults.. keep data returned separate from returned metadata
response_payload = {'error': '', 'msg': '', 'kamreload': globals.reload_required, 'data': []}
try:
if (settings.DEBUG):
debugEndpoint()
# get request data
data = getRequestData()
# sanity checks
for k, v in data.items():
if k not in VALID_REQUEST_DATA_ARGS.keys():
raise http_exceptions.BadRequest("Request data argument '{}' not recognized".format(k))
if not type(v) == VALID_REQUEST_DATA_ARGS[k]:
raise http_exceptions.BadRequest("Request data argument '{}' not valid".format(k))
for k in REQUIRED_ARGS:
if k not in data:
raise http_exceptions.BadRequest("Request argument '{}' is required".format(k))
config_id = data['config_id']
domain_id = data['domain_id']
# Create instance of Media Server Class
if config_id != None and domain_id != None:
config_info = config(config_id)
plugin = config_info.getPlugin()
# Create instance of Media Server Class
if plugin:
mediaserver = plugin.mediaserver(config_info)
if mediaserver:
domains = plugin.domains(mediaserver)
domain_id = domains.update(data)
response_payload['msg'] = "Success"
return jsonify(response_payload), StatusCodes.HTTP_OK
except Exception as ex:
return showApiError(ex)
@mediaserver.route('/api/v1/mediaserver/domain',methods=['DELETE'])
@api_security
def deleteDomains():
# use a whitelist to avoid possible buffer overflow vulns or crashes
VALID_REQUEST_DATA_ARGS = {"domain_id": str, "config_id": int}
# ensure required args are provided
REQUIRED_ARGS = {'domain_id','config_id'}
# defaults.. keep data returned separate from returned metadata
response_payload = {'error': '', 'msg': '', 'kamreload': globals.reload_required, 'data': []}
try:
if (settings.DEBUG):
debugEndpoint()
# get request data
data = getRequestData()
# sanity checks
for k, v in data.items():
if k not in VALID_REQUEST_DATA_ARGS.keys():
raise http_exceptions.BadRequest("Request data argument '{}' not recognized".format(k))
if not type(v) == VALID_REQUEST_DATA_ARGS[k]:
raise http_exceptions.BadRequest("Request data argument '{}' not valid".format(k))
for k in REQUIRED_ARGS:
if k not in data:
raise http_exceptions.BadRequest("Request argument '{}' is required".format(k))
config_id = data['config_id']
domain_id = data['domain_id']
# Create instance of Media Server Class
if config_id != None:
config_info = config(config_id)
plugin = config_info.getPlugin()
# Create instance of Media Server Class
if plugin:
mediaserver = plugin.mediaserver(config_info)
if mediaserver:
domains = plugin.domains(mediaserver)
# Delete the domain
if domains.delete(domain_id):
response_payload['msg'] = "Success"
return jsonify(response_payload), StatusCodes.HTTP_OK
except Exception as ex:
return showApiError(ex)
@mediaserver.route('/api/v1/mediaserver/extension',methods=['POST'])
@api_security
def postExtensions():
# use a whitelist to avoid possible buffer overflow vulns or crashes
VALID_REQUEST_DATA_ARGS = {"domain_id": str, "account_code": str, "extension": str, "password": str, \
"outbound_caller_number": str, "outbound_caller_name": str, "vm_enabled": bool, \
"vm_password": int, "vm_notify_email": str, "enabled": bool, "call_timeout": int, \
"config_id": int}
# ensure required args are provided
REQUIRED_ARGS = {'domain_id','extension','password', 'enabled','config_id'}
# defaults.. keep data returned separate from returned metadata
response_payload = {'error': '', 'msg': '', 'kamreload': globals.reload_required, 'data': []}
try:
if (settings.DEBUG):
debugEndpoint()
# get request data
data = getRequestData()
print(data)
# sanity checks
for k, v in data.items():
if k not in VALID_REQUEST_DATA_ARGS.keys():
raise http_exceptions.BadRequest("Request data argument '{}' not recognized".format(k))
if not type(v) == VALID_REQUEST_DATA_ARGS[k]:
raise http_exceptions.BadRequest("Request data argument '{}' not valid".format(k))
for k in REQUIRED_ARGS:
if k not in data:
raise http_exceptions.BadRequest("Request argument '{}' is required".format(k))
# Create instance of Media Server Class
config_id = data['config_id']
domain_id = data['domain_id']
if config_id != None and domain_id != None:
config_info = config(config_id)
plugin = config_info.getPlugin()
# Create instance of Media Server Class
if plugin:
mediaserver = plugin.mediaserver(config_info)
if mediaserver:
domains = plugin.domains(mediaserver)
domain = domains.read(domain_id)
print(domain_id)
print(domain[0]['domain_id'])
extensions = plugin.extensions(mediaserver,domain[0])
ext = plugin.extension()
ext.domain_id=data['domain_id']
ext.extension=data['extension']
ext.password=data['password']
ext.enabled=data['enabled']
ext.config_id=data['config_id']
ext.outbound_caller_number=data['outbound_caller_number']
ext.outbound_caller_name=data['outbound_caller_name']
ext.vm_enabled=data['vm_enabled']
ext.vm_password=data['vm_password']
ext.vm_notify_email=data['vm_notify_email']
ext.account_code=data['account_code']
ext.call_timeout=data['call_timeout']
extensions.create(ext)
return jsonify(response_payload), StatusCodes.HTTP_OK
except Exception as ex:
return showApiError(ex)
@mediaserver.route('/api/v1/mediaserver/extension',methods=['PUT'])
@api_security
def putExtensions():
# use a whitelist to avoid possible buffer overflow vulns or crashes
VALID_REQUEST_DATA_ARGS = {"domain_id": str, "account_code": str, "extension": str, "password": str, \
"outbound_caller_number": str, "outbound_caller_name": str, "vm_enabled": bool, \
"vm_password": int, "vm_notify_email": str, "enabled": bool, "call_timeout": int, \
"config_id": int}
# ensure required args are provided
REQUIRED_ARGS = {'domain_id','extension','password', 'enabled','config_id'}
# defaults.. keep data returned separate from returned metadata
response_payload = {'error': '', 'msg': '', 'kamreload': globals.reload_required, 'data': []}
try:
if (settings.DEBUG):
debugEndpoint()
# get request data
data = getRequestData()
print(data)
# sanity checks
for k, v in data.items():
if k not in VALID_REQUEST_DATA_ARGS.keys():
raise http_exceptions.BadRequest("Request data argument '{}' not recognized".format(k))
if not type(v) == VALID_REQUEST_DATA_ARGS[k]:
raise http_exceptions.BadRequest("Request data argument '{}' not valid".format(k))
for k in REQUIRED_ARGS:
if k not in data:
raise http_exceptions.BadRequest("Request argument '{}' is required".format(k))
# Create instance of Media Server Class
config_id = data['config_id']
domain_id = data['domain_id']
if config_id != None and domain_id != None:
config_info = config(config_id)
plugin = config_info.getPlugin()
# Create instance of Media Server Class
if plugin:
mediaserver = plugin.mediaserver(config_info)
if mediaserver:
domains = plugin.domains(mediaserver)
domain = domains.read(domain_id)
print(domain_id)
print(domain[0]['domain_id'])
extensions = plugin.extensions(mediaserver,domain[0])
extensions.update(data)
return jsonify(response_payload), StatusCodes.HTTP_OK
except Exception as ex:
return showApiError(ex)
@mediaserver.route('/api/v1/mediaserver/extension/<string:config_id>/<string:domain_id>',methods=['GET'])
@mediaserver.route('/api/v1/mediaserver/extension/<string:config_id>/<string:domain_id>/<string:extension_id>',methods=['GET'])
@api_security
def getExtensions(config_id=None,domain_id=None,extension_id=None):
"""
List the extensions for a domain on a PBX\n
A specific extension can be requested by passing an extension_id;
otherwise all extensions for the given domain are returned.
===============
Request Payload
===============
.. code-block:: json
{}
================
Response Payload
================
.. code-block:: json
{
"data": [
[
{
"call_timeout": null,
"domain_uuid": "51f66016-c2d5-4bd8-8117-29c8fc8ffa17",
"enabled": "true",
"extensions_id": "ae3cb4b8-f467-4a13-9bb8-9296226c1887",
"number": "504",
"user_context": "restaurant.detroitpbx.com"
}
]
]
}
"""
# Determine which plug-in to use
# Currently we only support FusionPBX
# Check if Configuration ID exists
response_payload = {'error': '', 'msg': '', 'kamreload': globals.reload_required, 'data': []}
try:
if settings.DEBUG:
debugEndpoint()
if config_id != None and domain_id != None:
config_info = config(config_id)
plugin = config_info.getPlugin()
# Create instance of Media Server Class
if plugin:
mediaserver = plugin.mediaserver(config_info)
if mediaserver:
domains = plugin.domains(mediaserver)
domain = domains.read(domain_id)
extensions = plugin.extensions(mediaserver,domain[0])
extension_list = extensions.read(extension_id)
response_payload['msg'] = '{} extensions were found'.format(len(extension_list))
response_payload['data'].append(extension_list)
else:
raise Exception("The configuration id and the domain_id must be provided")
return jsonify(response_payload), StatusCodes.HTTP_OK
except Exception as ex:
return showApiError(ex)
@mediaserver.route('/api/v1/mediaserver/extension',methods=['DELETE'])
@api_security
def deleteExtensions():
# use a whitelist to avoid possible buffer overflow vulns or crashes
VALID_REQUEST_DATA_ARGS = {"domain_id": str, "config_id": int, "extension": str}
# ensure required args are provided
REQUIRED_ARGS = set(VALID_REQUEST_DATA_ARGS.keys())
# defaults.. keep data returned separate from returned metadata
response_payload = {'error': '', 'msg': '', 'kamreload': globals.reload_required, 'data': []}
try:
if (settings.DEBUG):
debugEndpoint()
# get request data
data = getRequestData()
# sanity checks
for k, v in data.items():
if k not in VALID_REQUEST_DATA_ARGS.keys():
raise http_exceptions.BadRequest("Request data argument '{}' not recognized".format(k))
if not type(v) == VALID_REQUEST_DATA_ARGS[k]:
raise http_exceptions.BadRequest("Request data argument '{}' not valid".format(k))
for k in REQUIRED_ARGS:
if k not in data:
raise http_exceptions.BadRequest("Request argument '{}' is required".format(k))
config_id = data['config_id']
domain_id = data['domain_id']
extension = data['extension']
# Create instance of Media Server Class
if config_id != None:
config_info = config(config_id)
plugin = config_info.getPlugin()
# Create instance of Media Server Class
if plugin:
mediaserver = plugin.mediaserver(config_info)
if mediaserver:
domains = plugin.domains(mediaserver)
domain = domains.read(domain_id)
if domain:
extensions = plugin.extensions(mediaserver,domain[0])
if extensions.delete(extension):
response_payload['msg'] = "Success"
return jsonify(response_payload), StatusCodes.HTTP_OK
except Exception as ex:
return showApiError(ex)
|
|
# Natural Language Toolkit: Earley Parser Demo
#
# Copyright (C) 2003 University of Pennsylvania
# Author: Robert Berwick <berwick@ai.mit.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: earley.py,v 1.1.1.1 2004/02/25 17:16:48 adastra Exp $
import Tkinter
import math
from nltk.parser.earley import *
from nltk.parser.chart import *
from nltk.cfg import *
from nltk.tokenizer import WSTokenizer
from nltk.token import Token, Location
from nltk.tree import TreeToken
from nltk.draw import ShowText
from nltk.draw.chart import *
############################ EARLEY EDGE RULES ##############################
# Edge rules: let you select an edge and apply a rule to it
class EdgeRule(ChartRuleI):
def __init__(self, edge): self._edge = edge
class EarleyEdgeScanner(EdgeRule):
def apply(self, chart, grammar):
nextcat = self._edge.next()
if grammar.isPartOfSpeech(nextcat) and not self._edge.complete():
word = chart.wordAt(self._edge.loc().end())
if nextcat in grammar.getPartsOfSpeech(word):
prod = cfg.CFGProduction(nextcat, word)
loc = self._edge.loc().end_loc()
return [chartmod.self_loop_edge(prod, loc)]
return []
def __str__(self): return 'Earley Scanner'
class EarleyEdgePredictor(EdgeRule):
def apply(self, chart, grammar):
nextcat = self._edge.next()
if not grammar.isPartOfSpeech(nextcat) and not self._edge.complete():
for prod in grammar.getRules(nextcat):
loc = self._edge.loc().end_loc()
return [chartmod.self_loop_edge(prod, loc)]
return []
def __str__(self): return 'Earley Predictor'
class EarleyEdgeCompleter(EdgeRule):
def apply(self, chart, grammar):
edges = []
if not self._edge.complete():
for edge2 in chart.complete_edges():
if (self._edge.next() == edge2.lhs() and
self._edge.end() == edge2.start()):
edges.append(chartmod.fr_edge(self._edge, edge2))
return edges
def __str__(self): return 'Earley Completer Rule'
############################## CHART VIEWER ##################################
class FullChartViewer:
RULENAME = {'FundamentalRule': 'Fundamental Rule',
'FundamentalEdgeRule': 'Fundamental Rule',
'TopDownInitRule': 'Top-down Initialization',
'TopDownRule': 'Top-down Rule',
'TopDownEdgeRule': 'Top-down Rule',
'BottomUpRule': 'Bottom-up Rule',
'BottomUpEdgeRule': 'Bottom-up Rule',
}
def __init__(self, grammar, text, title):
self.root = None
try:
# Create the root window.
self._root = Tkinter.Tk()
self._root.title(title)
self._root.bind('q', self.destroy)
buttons3 = Tkinter.Frame(self._root)
buttons3.pack(side='bottom', fill='none')
buttons2 = Tkinter.Frame(self._root)
buttons2.pack(side='bottom', fill='none')
buttons1 = Tkinter.Frame(self._root)
buttons1.pack(side='bottom', fill='x')
self._lexiconview = self._grammarview = None
self._grammar = grammar
self._tok_sent = text
self._cp = SteppingEarleyParser(self._grammar)
self._cp.initialize(self._tok_sent)
# Keep track of whether we're stepping & animating.
self._step = Tkinter.IntVar(self._root)
self._step.set(1)
self._animate = Tkinter.IntVar(self._root)
self._animate.set(2)
self._chart = self._cp.chart()
self._cv = ChartView(self._chart, self._tok_sent,
self._root, draw_tree=1, draw_source=1)
ruletxt = 'Last edge generated by:'
Tkinter.Label(buttons1,text=ruletxt).pack(side='left')
self._rulelabel = Tkinter.Label(buttons1, width=30,
font=('helvetica', 30, 'bold'),
relief='groove', anchor='w')
self._rulelabel.pack(side='left')
step = Tkinter.Checkbutton(buttons1, variable=self._step,
text='Step')
step.pack(side='right')
## Set up buttons for rules & strategies
Tkinter.Button(buttons2, text='Top Down\nStrategy',
background='#a0c0c0', foreground='black',
command=self.top_down_strategy).pack(side='left')
Tkinter.Button(buttons2, text='Bottom Up\nStrategy',
background='#a0c0c0', foreground='black',
command=self.bottom_up_strategy).pack(side='left')
Tkinter.Button(buttons2, text='Top Down Init\nRule',
background='#a0d0a0', foreground='black',
command=self.top_down_init).pack(side='left')
Tkinter.Button(buttons2, text='Top down\nRule',
background='#a0d0a0', foreground='black',
command=self.top_down).pack(side='left')
Tkinter.Button(buttons2, text='Bottom Up\nInit Rule',
background='#a0d0a0', foreground='black',
command=self.bottom_up_init).pack(side='left')
Tkinter.Button(buttons2, text='Fundamental\nRule',
background='#a0d0a0', foreground='black',
command=self.fundamental).pack(side='left')
Tkinter.Label(buttons3,text="Earley:").pack(side='left')
Tkinter.Button(buttons3, text='Earley\nStrategy',
background='#a0c0c0', foreground='black',
command=self.earley_strategy).pack(side='left')
Tkinter.Button(buttons3, text='Earley\nInit Rule',
background='#a0d0a0', foreground='black',
command=self.earley_init).pack(side='left')
Tkinter.Button(buttons3, text='Earley\nScanner',
background='#a0d0a0', foreground='black',
command=self.earley_scanner).pack(side='left')
Tkinter.Button(buttons3, text='Earley\nPredictor',
background='#a0d0a0', foreground='black',
command=self.earley_predictor).pack(side='left')
Tkinter.Button(buttons3, text='Earley\nCompleter',
background='#a0d0a0', foreground='black',
command=self.earley_completer).pack(side='left')
# For animations..
self._animating = 0 # are we animating right now?
# Initialize the rule-label font.
size = self._cv.get_font_size()
self._rulelabel.configure(font=('helvetica', -size, 'bold'))
# Set up a menu bar.
self._init_menubar()
# Set up keyboard bindings.
self._init_bindings()
# Enter mainloop.
Tkinter.mainloop()
except:
print 'Error creating Tree View'
self.destroy()
raise
def _init_bindings(self):
self._root.bind('<Up>', self._cv.scroll_up)
self._root.bind('<Down>', self._cv.scroll_down)
self._root.bind('q', self.destroy)
self._root.bind('x', self.destroy)
self._root.bind('<F1>', self.help)
self._root.bind('<Control-s>', self.save)
self._root.bind('<Control-o>', self.load)
self._root.bind('<Control-r>', self.reset)
self._root.bind('t', self.top_down_strategy)
self._root.bind('b', self.bottom_up_strategy)
self._root.bind('e', self.earley_strategy)
self._root.bind('f', self.fundamental)
# Animation speed control
self._root.bind('-', lambda e,a=self._animate:a.set(1))
self._root.bind('=', lambda e,a=self._animate:a.set(2))
self._root.bind('+', lambda e,a=self._animate:a.set(3))
# Step control
self._root.bind('s', lambda e,s=self._step:s.set(not s.get()))
def _init_menubar(self):
menubar = Tkinter.Menu(self._root)
filemenu = Tkinter.Menu(menubar, tearoff=0)
filemenu.add_command(label='Save Chart', underline=0,
command=self.save, accelerator='Ctrl-s')
filemenu.add_command(label='Open Chart', underline=0,
command=self.load, accelerator='Ctrl-o')
filemenu.add_command(label='Reset Chart', underline=0,
command=self.reset, accelerator='Ctrl-r')
filemenu.add_command(label='Exit', underline=1,
command=self.destroy,
accelerator='x')
menubar.add_cascade(label='File', underline=0,
menu=filemenu)
rulemenu = Tkinter.Menu(menubar, tearoff=0)
rulemenu.add_command(label='Top Down Strategy', underline=0,
command=self.top_down_strategy,
accelerator='t')
rulemenu.add_command(label='Bottom Up Strategy', underline=0,
command=self.bottom_up_strategy,
accelerator='b')
rulemenu.add_command(label='Earley Strategy', underline=0,
command=self.earley_strategy,
accelerator='e')
rulemenu.add_separator()
rulemenu.add_command(label='Bottom Up Init Rule',
command=self.bottom_up_init)
rulemenu.add_command(label='Top Down Init Rule',
command=self.top_down_init)
rulemenu.add_command(label='Top Down Rule',
command=self.top_down_init)
rulemenu.add_command(label='Fundamental Rule', underline=0,
command=self.fundamental, accelerator='f')
rulemenu.add_command(label='Earley Init Rule',
command=self.earley_init)
rulemenu.add_command(label='Earley Scanner',
command=self.earley_scanner)
rulemenu.add_command(label='Earley Predictor',
command=self.earley_predictor)
rulemenu.add_command(label='Earley Completer',
command=self.earley_completer)
menubar.add_cascade(label='Apply', underline=0,
menu=rulemenu)
animatemenu = Tkinter.Menu(menubar, tearoff=0)
animatemenu.add_checkbutton(label="Step", underline=0,
variable=self._step,
accelerator='s')
animatemenu.add_separator()
animatemenu.add_radiobutton(label="No Animation", underline=0,
variable=self._animate, value=0)
animatemenu.add_radiobutton(label="Slow Animation", underline=0,
variable=self._animate, value=1,
accelerator='-')
animatemenu.add_radiobutton(label="Normal Animation", underline=0,
variable=self._animate, value=2,
accelerator='=')
animatemenu.add_radiobutton(label="Fast Animation", underline=0,
variable=self._animate, value=3,
accelerator='+')
menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)
zoommenu = Tkinter.Menu(menubar, tearoff=0)
self._size = Tkinter.IntVar(self._root)
self._size.set(self._cv.get_font_size())
zoommenu.add_radiobutton(label='Smallest', variable=self._size,
underline=1, value=10, command=self.resize)
zoommenu.add_radiobutton(label='Small', variable=self._size,
underline=0, value=12, command=self.resize)
zoommenu.add_radiobutton(label='Medium', variable=self._size,
underline=0, value=14, command=self.resize)
zoommenu.add_radiobutton(label='Large', variable=self._size,
underline=0, value=18, command=self.resize)
zoommenu.add_radiobutton(label='Biggest', variable=self._size,
underline=0, value=24, command=self.resize)
menubar.add_cascade(label='Zoom', underline=0,
menu=zoommenu)
helpmenu = Tkinter.Menu(menubar, tearoff=0)
helpmenu.add_command(label='Instructions', underline=0,
command=self.help, accelerator='F1')
menubar.add_cascade(label='Help', underline=0, menu=helpmenu)
self._root.config(menu=menubar)
def help(self, *e):
self._animating = 0
# The default font's not very legible; try using 'fixed' instead.
try:
ShowText(self._root, 'Help: Chart Parser Demo',
(__doc__).strip(), width=75, font='fixed')
except:
ShowText(self._root, 'Help: Chart Parser Demo',
(__doc__).strip(), width=75)
def load(self, *e):
"Load a chart from a pickle file"
import pickle
from tkFileDialog import askopenfilename
ftypes = [('Pickle file', '.pickle'),
('All files', '*')]
filename = askopenfilename(filetypes=ftypes,
defaultextension='.pickle')
if not filename: return
chart = pickle.load(open(filename, 'r'))
self._chart = chart
self._cv.update(chart)
self._cp.set_chart(chart)
def save(self, *e):
"Save a chart to a pickle file"
import pickle
from tkFileDialog import asksaveasfilename
ftypes = [('Pickle file', '.pickle'),
('All files', '*')]
filename = asksaveasfilename(filetypes=ftypes,
defaultextension='.pickle')
if not filename: return
pickle.dump(self._chart, open(filename, 'w'))
def resize(self):
self._animating = 0
size = self._size.get()
self._cv.set_font_size(size)
self._rulelabel['font'] = ('helvetica', -size, 'bold')
def view_grammar(self, *e):
self._animating = 0
self._grammarview = ProductionView(self._grammar.productions(),
'Grammar')
def reset(self, *e):
self._animating = 0
self._cp = SteppingEarleyParser(self._grammar)
self._cp.initialize(self._tok_sent)
self._chart = self._cp.chart()
self._cv.update(self._chart)
def display_rule(self, rule):
if rule == None:
self._rulelabel['text'] = ''
else:
name = FullChartViewer.RULENAME.get(rule.__class__.__name__,
rule.__class__.__name__)
self._rulelabel['text'] = name
size = self._cv.get_font_size()
self._rulelabel['font'] = ('helvetica', -size, 'bold')
def apply_strategy(self, strategy, edge_strategy=None):
if self._animating:
self._animating = 0
return
self.display_rule(None)
self._cv.unmark()
if self._step.get():
edge = self._cv.selected_edge()
if (edge is not None) and (edge_strategy is not None):
if self._apply_strategy([edge_strategy(edge)]) is None:
# Unselect it (select_edge toggles selection)
self._cv.select_edge(edge)
else:
self._apply_strategy(strategy)
else:
if self._animate.get():
self._animating = 1
self._animate_strategy(strategy)
else:
while self._cp.step(strategy=strategy):
self._cv.update()
def _animate_strategy(self, strategy, speed=1):
if self._animating == 0: return
new_edge = self._cp.step(strategy=strategy)
self._cv.unmark()
if new_edge is not None:
self.display_rule(self._cp.current_chartrule())
self._cv.update()
self._cv.view_edge(new_edge)
if self._animate.get() == 0 or self._step.get() == 1:
return
if self._animate.get() == 1:
self._cv.select_edge(new_edge)
self._root.after(3000, self._animate_strategy, strategy)
elif self._animate.get() == 2:
self._cv.select_edge(new_edge)
self._root.after(1000, self._animate_strategy, strategy)
else:
self._cv.mark_edge(new_edge)
self._root.after(20, self._animate_strategy, strategy)
def _apply_strategy(self, strategy):
new_edge = self._cp.step(strategy=strategy)
if new_edge is not None:
self.display_rule(self._cp.current_chartrule())
self._cv.update()
self._cv.mark_edge(new_edge)
self._cv.view_edge(new_edge)
return new_edge
def top_down_init(self, *e):
self.apply_strategy([TopDownInitRule()], None)
def top_down_strategy(self, *e):
self.apply_strategy([TopDownInitRule(), TopDownRule(),
FundamentalRule()], TopDownEdgeRule)
def top_down(self, *e):
self.apply_strategy([TopDownRule()], TopDownEdgeRule)
def fundamental(self, *e):
self.apply_strategy([FundamentalRule()], FundamentalEdgeRule)
def bottom_up_init(self, *e):
self.apply_strategy([BottomUpRule()], BottomUpEdgeRule)
def bottom_up_strategy(self, *e):
self.apply_strategy([BottomUpRule(), FundamentalRule()],
BottomUpEdgeRule)
def earley_strategy(self, *e):
self.apply_strategy([EarleyInitRule(), EarleyPredictor(),
EarleyScanner(), EarleyCompleter()],
EarleyEdgeScanner)
def earley_init(self, *e):
self.apply_strategy([EarleyInitRule()], None)
def earley_scanner(self, *e):
self.apply_strategy([EarleyScanner()], EarleyEdgeScanner)
def earley_predictor(self, *e):
self.apply_strategy([EarleyPredictor()], EarleyEdgePredictor)
def earley_completer(self, *e):
self.apply_strategy([EarleyCompleter()], EarleyEdgeCompleter)
def destroy(self, *args):
if self._lexiconview: self._lexiconview.destroy()
if self._grammarview: self._grammarview.destroy()
if self._root is None: return
self._root.destroy()
self._root = None
############################# GRAMMAR INPUT ##################################
from ScrolledText import ScrolledText
class GrammarGUI:
def __init__(self, grammar, text, title='Input Grammar'):
self.root = None
try:
self._root = Tkinter.Tk()
self._root.title(title)
level1 = Tkinter.Frame(self._root)
level1.pack(side='top', fill='none')
Tkinter.Frame(self._root).pack(side='top', fill='none')
level2 = Tkinter.Frame(self._root)
level2.pack(side='top', fill='x')
buttons = Tkinter.Frame(self._root)
buttons.pack(side='top', fill='none')
self.sentence = Tkinter.StringVar()
Tkinter.Label(level2, text="Sentence:").pack(side='left')
Tkinter.Entry(level2, background='white', foreground='black',
width=60, textvariable=self.sentence).pack(
side='left')
lexiconFrame = Tkinter.Frame(level1)
Tkinter.Label(lexiconFrame, text="Lexicon:").pack(side='top',
fill='x')
Tkinter.Label(lexiconFrame, text=" ex. 'dog N':").pack(
side='top', fill='x')
self.lexicon = ScrolledText(lexiconFrame, background='white',
foreground='black', width=30)
self.lexicon.pack(side='top')
grammarFrame = Tkinter.Frame(level1)
Tkinter.Label(grammarFrame, text="Grammar:").pack(side='top',
fill='x')
Tkinter.Label(grammarFrame,
text=" ex. 'S -> NP VP':").pack(side='top',fill='x')
self.grammarRules = ScrolledText(grammarFrame, background='white',
foreground='black', width=30)
self.grammarRules.pack(side='top')
lexiconFrame.pack(side='left')
grammarFrame.pack(side='left')
Tkinter.Button(buttons, text='Clear',
background='#a0c0c0', foreground='black',
command=self.clear).pack(side='left')
Tkinter.Button(buttons, text='Parse',
background='#a0c0c0', foreground='black',
command=self.parse).pack(side='left')
self.init_menubar()
# Enter mainloop.
Tkinter.mainloop()
except:
print 'Error creating Tree View'
self.destroy()
raise
def init_menubar(self):
menubar = Tkinter.Menu(self._root)
filemenu = Tkinter.Menu(menubar, tearoff=0)
filemenu.add_command(label='Save Rules', underline=0,
command=self.save, accelerator='Ctrl-s')
self._root.bind('<Control-s>', self.save)
filemenu.add_command(label='Load Rules', underline=0,
command=self.load, accelerator='Ctrl-o')
self._root.bind('<Control-o>', self.load)
filemenu.add_command(label='Clear Rules', underline=0,
command=self.clear, accelerator='Ctrl-r')
self._root.bind('<Control-r>', self.clear)
filemenu.add_command(label='Exit', underline=1,
command=self.destroy, accelerator='Ctrl-q')
self._root.bind('<Control-q>', self.destroy)
menubar.add_cascade(label='File', underline=0,
menu=filemenu)
self._root.config(menu=menubar)
def getRules(self, makegrammar=True):
"""
Takes the currently typed in rules and turns them from text into
a list of either string rules or Earley CFGs
"""
text = self.grammarRules.get(1.0, Tkinter.END)
rules = []
for item in text.split("\n"):
moreitems = item.split(",")
for furtheritem in moreitems:
furtheritem = furtheritem.strip()
if not furtheritem:
continue
tokens = furtheritem.split()
if not (len(tokens)>=3 and tokens[1] == "->"):
print "Invalid rule: %s"%furtheritem
else:
if makegrammar:
rules.append(Rule(cfg.Nonterminal(tokens[0]),
*map(lambda x: cfg.Nonterminal(x),
tokens[2:])))
else:
rules.append(furtheritem.strip())
return rules
def getLexicon(self, makegrammar=True):
"""
Takes the currently typed in lexicon and turns them from text into
a list of either string lexical definitions or Earley CFGs
"""
text = self.lexicon.get(1.0, Tkinter.END)
lex = []
for item in text.split("\n"):
moreitems = item.split(",")
for furtheritem in moreitems:
furtheritem = furtheritem.strip()
if not furtheritem:
continue
tokens = furtheritem.split()
if not len(tokens)>=2:
print "Invalid lexical mapping: %s"%furtheritem
else:
if makegrammar:
word = tokens[0]
for pos in tokens[1:]:
lex.append(Rule(cfg.Nonterminal(pos), word))
else:
lex.append(furtheritem.strip())
return lex
def parse(self, *args):
"""
Calls the FullChartViewer with the given grammar and lexicon to parse
the given sentence
"""
grammar = EarleyCFG(cfg.Nonterminal('S'),
self.getRules(), self.getLexicon())
sent = self.sentence.get().strip()
tok_sent = WSTokenizer().tokenize(sent)
print "Starting chart parsing viewer"
FullChartViewer(grammar, tok_sent, 'Parsing "%s"'%sent)
def save(self, *args):
"Save a rule/lexicon set to a text file"
from tkFileDialog import asksaveasfilename
ftypes = [('Text file', '.txt'),
('All files', '*')]
filename = asksaveasfilename(filetypes=ftypes,
defaultextension='.txt')
if not filename: return
f = open(filename, 'w')
f.write('---- Rules -----\n%s\n' % '\n'.join(self.getRules(False)))
f.write('---- Lexicon -----\n%s\n' % '\n'.join(self.getLexicon(False)))
f.close()
def load(self, *args):
"Load rule/lexicon set from a text file"
from tkFileDialog import askopenfilename
ftypes = [('Text file', '.txt'),
('All files', '*')]
filename = askopenfilename(filetypes=ftypes,
defaultextension='.txt')
if not filename: return
f = open(filename, 'r')
lines = f.readlines()
f.close()
rules = []
lexicon = []
state = 'rules'
for line in lines:
line = line.strip()
if not line:
continue
elif line.startswith("-"):
if line.find("Rules")>0: state = 'rules'
else: state = 'lexicon'
else:
toks = line.split()
if state == 'rules' and len(toks)>=3 and toks[1]=='->':
rules.append(line)
elif state == 'lexicon' and len(toks)>=2:
lexicon.append(line)
self.clear()
self.grammarRules.insert(1.0, '\n'.join(rules))
self.lexicon.insert(1.0, '\n'.join(lexicon))
def clear(self, *args):
"Clears the grammar and lexical and sentence inputs"
self.grammarRules.delete(1.0, Tkinter.END)
self.lexicon.delete(1.0, Tkinter.END)
self.sentence.set('')
def destroy(self, *args):
if self._root is None: return
self._root.destroy()
self._root = None
print self.sentence.get()
def testChartViewer():
grammar = EarleyCFG(cfg.Nonterminal('S'),
map(lambda x:parseRule(x),
["S -> NP VP", "NP -> N", "NP -> Det N",
"VP -> V", "VP -> V NP"]),
map(lambda x:parseLexicon(x),
["Poirot N", "sent V", "the Det", "solutions N"]))
sent = 'Poirot sent the solutions'
tok_sent = WSTokenizer().tokenize(sent)
print 'grammar= ('
for rule in grammar.productions():
print ' ', repr(rule)+','
print ')'
print 'sentence = %r' % sent
print 'Calling "FullChartViewer(grammar, tok_sent)"...'
FullChartViewer(grammar, tok_sent, "Earley Parser")
if __name__ == '__main__':
GrammarGUI(None, None)
# testChartViewer()
|
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name
# Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name
# Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
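# Minimal illustration of the parsing convention above (the sample text is made
# up; the values are whatever free-form text the wrapped script prints):
def _parse_output_example():
    sample = "starting\nEXPECTED_OUTPUT_PORT=8080\ndone"
    result = parse_output(sample)
    # result == {'last_output': 'done', 'outputs': {'PORT': '8080'}}
    return result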
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
def download(child_rel_path, child_abs_path, download_dir):
artifact_downloaded_path = ctx.download_resource(child_abs_path)
new_file = os.path.join(download_dir, child_rel_path)
new_file_dir = os.path.dirname(new_file)
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
os.rename(artifact_downloaded_path, new_file)
ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
return new_file
def download_artifacts(artifacts, download_dir):
downloaded_artifacts = {}
os.makedirs(download_dir)
for artifact_name, artifact_ref in artifacts.items():
ctx.logger.info('Download artifact ' + artifact_name)
if isinstance(artifact_ref, basestring):
downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
else:
child_download_dir = os.path.join(download_dir, artifact_name)
for child_path in artifact_ref:
download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
downloaded_artifacts[artifact_name] = child_download_dir
return downloaded_artifacts
env_map = {}
env_map['TARGET_NODE'] = ctx.target.node.id
env_map['TARGET_INSTANCE'] = ctx.target.instance.id
env_map['TARGET_INSTANCES'] = get_instance_list(ctx.target.node.id)
env_map['SOURCE_NODE'] = ctx.source.node.id
env_map['SOURCE_INSTANCE'] = ctx.source.instance.id
env_map['SOURCE_INSTANCES'] = get_instance_list(ctx.source.node.id)
new_script_process = {'env': env_map}
node_artifacts = {
"war_file": "artifacts/tomcat-war-types/warFiles/helloWorld.war"
}
relationship_artifacts = {
"properties_file": "artifacts/artifact-test-types/conf/settings.properties"
}
artifacts = node_artifacts.copy()
artifacts.update(relationship_artifacts)
download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads')
new_script_process['env'].update(download_artifacts(artifacts, download_dir))
ctx.logger.info('Operation is executed with inputs {0}'.format(inputs))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
new_script_process['env'].update(inputs['process']['env'])
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('artifacts/artifact-test-types/scripts/configureProperties.sh'), new_script_process, operationOutputNames)
for k,v in parsed_output['outputs'].items():
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.source.instance.runtime_properties['_a4c_OO:tosca.interfaces.relationship.Configure:post_configure_target:{0}'.format(k)] = v
ctx.source.instance.runtime_properties['local_application_url'] = r'http://' + get_attribute(ctx.source, 'ip_address') + r':' + r'80' + r'/' + r'helloworld'
ctx.source.instance.runtime_properties['application_url'] = r'http://' + get_attribute(ctx.source, 'public_ip_address') + r':' + r'80' + r'/' + r'helloworld'
ctx.source.instance.update()
ctx.target.instance.runtime_properties['server_url'] = r'http://' + get_attribute(ctx.target, 'public_ip_address') + r':' + r'80'
ctx.target.instance.update()
|
|
import sublime
import sublime_plugin
import re
import imp
import json
import sys
import os.path
import traceback
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
PACKAGES_PATH = sublime.packages_path() or os.path.dirname(BASE_PATH)
# EMMET_GRAMMAR = os.path.join(BASE_PATH, 'Emmet.tmLanguage')
EMMET_GRAMMAR = 'Packages/%s/Emmet.tmLanguage' % os.path.basename(BASE_PATH)
sys.path += [BASE_PATH] + [os.path.join(BASE_PATH, f) for f in ['emmet_completions', 'emmet']]
# Make sure all dependencies are reloaded on upgrade
if 'emmet.reloader' in sys.modules:
imp.reload(sys.modules['emmet.reloader'])
import emmet.reloader
# import completions as cmpl
import emmet.pyv8loader as pyv8loader
import emmet_completions as cmpl
from emmet_completions.meta import HTML_ELEMENTS_ATTRIBUTES, HTML_ATTRIBUTES_VALUES
from emmet.context import Context
from emmet.context import js_file_reader as _js_file_reader
from emmet.pyv8loader import LoaderDelegate
__version__ = '1.1'
__core_version__ = '1.0'
__authors__ = ['"Sergey Chikuyonok" <serge.che@gmail.com>'
'"Nicholas Dudfield" <ndudfield@gmail.com>']
is_python3 = sys.version_info[0] > 2
# JS context
ctx = None
# Emmet Settings
settings = None
# Default ST settings
user_settings = None
def is_st3():
return sublime.version()[0] == '3'
def js_file_reader(file_path, use_unicode=True):
if hasattr(sublime, 'load_resource'):
rel_path = file_path
for prefix in [sublime.packages_path(), sublime.installed_packages_path()]:
if rel_path.startswith(prefix):
rel_path = os.path.join('Packages', rel_path[len(prefix) + 1:])
break
rel_path = rel_path.replace('.sublime-package', '')
# for Windows we have to replace slashes
rel_path = rel_path.replace('\\', '/')
return sublime.load_resource(rel_path)
return _js_file_reader(file_path, use_unicode)
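# Illustrative behaviour of js_file_reader: on ST3 an absolute path such as
# '<packages_path>/Emmet/emmet/editor.js' is rewritten to the packaged form
# 'Packages/Emmet/emmet/editor.js' and read via sublime.load_resource();
# otherwise the original on-disk reader is used.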
def init():
"Init Emmet plugin"
# load settings
globals()['user_settings'] = sublime.load_settings('Preferences.sublime-settings')
globals()['settings'] = sublime.load_settings('Emmet.sublime-settings')
settings.add_on_change('extensions_path', update_settings)
# setup environment for PyV8 loading
pyv8_paths = [
os.path.join(PACKAGES_PATH, 'PyV8'),
os.path.join(PACKAGES_PATH, 'PyV8', pyv8loader.get_arch()),
os.path.join(PACKAGES_PATH, 'PyV8', 'pyv8-%s' % pyv8loader.get_arch())
]
sys.path += pyv8_paths
# unpack recently loaded binary, if it exists
for p in pyv8_paths:
pyv8loader.unpack_pyv8(p)
# provide some contributions to JS
contrib = {
'sublime': sublime,
'sublimeReplaceSubstring': replace_substring,
'sublimeGetOption': settings.get
}
# create JS environment
delegate = SublimeLoaderDelegate()
globals()['ctx'] = Context(
files=['../editor.js'],
ext_path=settings.get('extensions_path', None),
contrib=contrib,
logger=delegate.log,
reader=js_file_reader
)
update_settings()
pyv8loader.load(pyv8_paths[1], delegate)
if settings.get('remove_html_completions', False):
sublime.set_timeout(cmpl.remove_html_completions, 2000)
class SublimeLoaderDelegate(LoaderDelegate):
def __init__(self, settings=None):
if settings is None:
settings = {}
for k in ['http_proxy', 'https_proxy', 'timeout']:
if user_settings.has(k):
settings[k] = user_settings.get(k, None)
LoaderDelegate.__init__(self, settings)
self.state = None
self.message = 'Loading PyV8 binary, please wait'
self.i = 0
self.addend = 1
self.size = 8
def on_start(self, *args, **kwargs):
self.state = 'loading'
def on_progress(self, *args, **kwargs):
if kwargs['progress'].is_background:
return
before = self.i % self.size
after = (self.size - 1) - before
msg = '%s [%s=%s]' % (self.message, ' ' * before, ' ' * after)
if not after:
self.addend = -1
if not before:
self.addend = 1
self.i += self.addend
sublime.set_timeout(lambda: sublime.status_message(msg), 0)
def on_complete(self, *args, **kwargs):
self.state = 'complete'
if kwargs['progress'].is_background:
return
sublime.set_timeout(lambda: sublime.status_message('PyV8 binary successfully loaded'), 0)
def on_error(self, exit_code=-1, thread=None):
self.state = 'error'
sublime.set_timeout(lambda: show_pyv8_error(exit_code), 0)
def setting(self, name, default=None):
"Returns specified setting name"
return self.settings.get(name, default)
def log(self, message):
print('Emmet: %s' % message)
def show_pyv8_error(exit_code):
if 'PyV8' not in sys.modules:
sublime.error_message('Error while loading PyV8 binary: exit code %s \nTry to manually install PyV8 from\nhttps://github.com/emmetio/pyv8-binaries' % exit_code)
def active_view():
return sublime.active_window().active_view()
def check_context(verbose=False):
"Checks if JS context is completely available"
if not ctx.js():
if verbose:
sublime.message_dialog('Please wait a bit while PyV8 binary is being downloaded')
return False
return True
def replace_substring(start, end, value, no_indent=False):
view = active_view()
view.sel().clear()
view.sel().add(sublime.Region(start, end or start))
if not is_python3:
value = value.decode('utf-8')
# XXX a bit naive indentation control. It handles most common
# `no_indent` usages like replacing CSS rule content, but may not
# produce expected result in all possible situations
if no_indent:
line = view.substr(view.line(view.sel()[0]))
value = unindent_text(value, get_line_padding(line))
view.run_command('insert_snippet', {'contents': value})
def unindent_text(text, pad):
"""
Removes padding at the beginning of each text's line
@type text: str
@type pad: str
"""
lines = text.splitlines()
for i,line in enumerate(lines):
if line.startswith(pad):
lines[i] = line[len(pad):]
return '\n'.join(lines)
def get_line_padding(line):
"""
Returns padding of current editor's line
@return str
"""
m = re.match(r'^(\s+)', line)
return m and m.group(0) or ''
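# Illustrative examples of the two helpers above:
#   get_line_padding('    .foo { color: red; }') -> '    '
#   unindent_text('    line1\n    line2', '    ') -> 'line1\nline2'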
def update_settings():
ctx.set_ext_path(settings.get('extensions_path', None))
keys = ['snippets', 'preferences', 'syntaxProfiles', 'profiles']
payload = {}
for k in keys:
data = settings.get(k, None)
if data:
payload[k] = data
ctx.reset()
ctx.load_user_data(json.dumps(payload))
ctx.js()
def get_scope(view, pt=-1):
if pt == -1:
# use current caret position
pt = view.sel()[0].begin()
if hasattr(view, 'scope_name'):
return view.scope_name(pt)
return view.syntax_name(pt)
def should_perform_action(name, view=None):
if not view:
view = active_view()
# fallback to old check
if not view.settings().get('enable_emmet_keymap', True):
return False
disabled_actions = settings.get('disabled_keymap_actions', '')
if not disabled_actions: # no disabled actions
return True
if disabled_actions == 'all': # disable all actions
return False
return name not in re.split(r'\s*,\s*', disabled_actions.strip())
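# Example (illustrative setting values): with
#   "disabled_keymap_actions": "expand_abbreviation, balance_outward"
# in Emmet.sublime-settings, should_perform_action('expand_abbreviation')
# returns False while unrelated actions stay enabled; the special value
# "all" disables every action.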
def should_handle_tab_key(syntax=None):
view = active_view()
scopes = settings.get('disabled_single_snippet_for_scopes', None)
cur_scope = get_scope(view)
if sublime.score_selector(cur_scope, 'source.css'):
return True
if not scopes or not sublime.score_selector(cur_scope, scopes):
return True
abbr = ctx.js().locals.pyExtractAbbreviation()
disabled_snippets = settings.get('disabled_single_snippets', '').split()
if disabled_snippets and abbr in disabled_snippets:
return False
if not re.match(r'^[\w\:%]+$', abbr):
# it's a complex expression
return True
if re.match(r'^(lorem|lipsum)([a-z]{2})?\d*$', abbr):
# hardcoded Lorem Ipsum generator
return True
# detect inline CSS
if syntax is None:
syntax = ctx.js().locals.pyGetSyntax()
if syntax == 'css':
return True
known_tags = settings.get('known_html_tags', '').split()
if abbr in known_tags or ctx.js().locals.pyHasSnippet(abbr):
return True
return False
def log(message):
if settings.get('debug', False):
print('Emmet: %s' % message)
class RunEmmetAction(sublime_plugin.TextCommand):
def run(self, edit, action=None, **kw):
run_action(lambda i, sel: ctx.js().locals.pyRunAction(action))
# ctx.js().locals.pyRunAction(action)
class ActionContextHandler(sublime_plugin.EventListener):
def on_query_context(self, view, key, op, operand, match_all):
if not key.startswith('emmet_action_enabled.'):
return None
prefix, name = key.split('.')
return should_perform_action(name, view)
def get_edit(view, edit_token=None):
edit = None
try:
edit = view.begin_edit()
except:
pass
if not edit and edit_token:
try:
edit = view.begin_edit(edit_token, 'Emmet')
except Exception as e:
pass
return edit
def run_action(action, view=None):
"Runs Emmet action in multiselection mode"
if not check_context(True):
return
if not view:
view = active_view()
region_key = '__emmet__'
sels = list(view.sel())
r = ctx.js().locals.pyRunAction
result = False
# edit = get_edit(view, edit_token)
max_sel_ix = len(sels) - 1
try:
for i, sel in enumerate(reversed(sels)):
view.sel().clear()
view.sel().add(sel)
# run action
# result = r(name) or result
result = action(max_sel_ix - i, sel) or result
# remember resulting selections
view.add_regions(region_key,
(view.get_regions(region_key) + list(view.sel())) , '')
except Exception as e:
view.erase_regions(region_key)
print(traceback.format_exc())
return
# output all saved regions as selection
view.sel().clear()
for sel in view.get_regions(region_key):
view.sel().add(sel)
view.erase_regions(region_key)
# if edit:
# view.end_edit(edit)
return result
class TabAndCompletionsHandler():
def correct_syntax(self, view, syntax='html'):
return syntax == 'html' and view.match_selector( view.sel()[0].b, cmpl.EMMET_SCOPE )
def completion_handler(self, view):
"Returns completions handler fo current caret position"
black_list = settings.get('completions_blacklist', [])
# A mapping of scopes, sub scopes and handlers, first matching of which
# is used.
COMPLETIONS = (
(cmpl.HTML_INSIDE_TAG, self.html_elements_attributes),
(cmpl.HTML_INSIDE_TAG_ATTRIBUTE, self.html_attributes_values)
)
pos = view.sel()[0].b
# Try to find some more specific contextual abbreviation
for sub_selector, handler in COMPLETIONS:
h_name = handler.__name__
if h_name in black_list: continue
if (view.match_selector(pos, sub_selector) or
view.match_selector(pos - 1, sub_selector)):
return handler
return None
def html_elements_attributes(self, view, prefix, pos):
tag = cmpl.find_tag_name(view, pos)
values = HTML_ELEMENTS_ATTRIBUTES.get(tag, [])
return [(v, '%s\t@%s' % (v,v), '%s="$1"' % v) for v in values]
def html_attributes_values(self, view, prefix, pos):
attr = cmpl.find_attribute_name(view, pos)
values = HTML_ATTRIBUTES_VALUES.get(attr, [])
return [(v, '%s\t@=%s' % (v,v), v) for v in values]
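# Both helpers above return Sublime completion tuples of the form
# (trigger, display_text, snippet); e.g. html_elements_attributes() may yield
# ('href', 'href\t@href', 'href="$1"') for an <a> tag (illustrative values;
# the real data comes from HTML_ELEMENTS_ATTRIBUTES / HTML_ATTRIBUTES_VALUES).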
def expand_by_tab(self, view):
if not check_context():
return False
syntax = ctx.js().locals.pyGetSyntax()
if not should_handle_tab_key(syntax):
return False
# we need to filter out attribute completions if
# 'disable_completions' option is not active
if (not settings.get('disable_completions', False) and
self.correct_syntax(view, syntax) and
self.completion_handler(view)):
return None
caret_pos = view.sel()[0].begin()
cur_scope = get_scope(view)
# let's see if Tab key expander should be disabled for current scope
banned_scopes = settings.get('disable_tab_abbreviations_for_scopes', '')
if banned_scopes and view.score_selector(caret_pos, banned_scopes):
return None
# Sometimes ST2 matcher may incorrectly filter scope context,
# check it against special regexp
banned_regexp = settings.get('disable_tab_abbreviations_for_regexp', None)
if banned_regexp and re.search(banned_regexp, cur_scope):
return None
return run_action(lambda i, sel: ctx.js().locals.pyRunAction('expand_abbreviation'))
# view.run_command('run_emmet_action',
# {'action':'expand_abbreviation'})
class ExpandAbbreviationByTab(sublime_plugin.TextCommand):
def run(self, edit, **kw):
if settings.get('use_old_tab_handler', False):
return
view = active_view()
h = TabAndCompletionsHandler()
if not h.expand_by_tab(view):
# try to mimic default Tab behaviour of Sublime Text
view.run_command('insert_best_completion', {
'default': '\t',
'exact': user_settings.get('tab_completion', True)
})
class TabExpandHandler(sublime_plugin.EventListener):
def on_query_context(self, view, key, op, operand, match_all):
if key != 'is_abbreviation':
return None
if settings.get('use_old_tab_handler', False):
h = TabAndCompletionsHandler()
return h.expand_by_tab(view)
return check_context()
def on_query_completions(self, view, prefix, locations):
h = TabAndCompletionsHandler()
if view.match_selector(locations[0], settings.get('css_completions_scope', '')) and check_context():
l = []
if settings.get('show_css_completions', False):
completions = ctx.js().locals.pyGetCSSCompletions()
if completions:
for p in completions:
l.append(('%s\t%s' % (p['k'], p['label']), p['v']))
if not l:
return []
return (l, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
if not h.correct_syntax(view) or settings.get('disable_completions', False):
return []
handler = h.completion_handler(view)
if handler:
pos = view.sel()[0].b
completions = handler(view, prefix, pos)
return completions
return []
class CommandsAsYouTypeBase(sublime_plugin.TextCommand):
input_message = "Enter Input"
default_input = ""
process_panel_input = lambda s, i: i.title()
# Note that this must be of form `Packages/$Package/Emmet.tmLanguage` on ST3
# NOT an absolute path!
panel_grammar = EMMET_GRAMMAR
def is_enabled(self):
return True
def run_command(self, edit, view, processed_input):
if '\n' in processed_input:
for sel in view.sel():
trailing = sublime.Region(sel.end(), view.line(sel).end())
if view.substr(trailing).isspace():
view.erase(edit, trailing)
if not is_python3:
processed_input = processed_input.decode('utf-8')
view.run_command('insert_snippet', { 'contents': processed_input })
def on_panel_change(self, abbr):
if not abbr and self.erase:
self.undo()
self.erase = False
return
def inner_insert():
self.view.run_command(self.name(), dict(panel_input=abbr))
# self.view.run_command('hide_auto_complete')
self.undo()
sublime.set_timeout(inner_insert, 0)
def undo(self):
if self.erase:
sublime.set_timeout(lambda: self.view.run_command('undo'), 0)
def remember_sels(self, view):
self._sels = list(view.sel())
self._sel_items = []
for sel in self._sels:
# selection should be unindented in order to get desired result
line = view.substr(view.line(sel))
s = view.substr(sel)
self._sel_items.append(unindent_text(s, get_line_padding(line)))
def on_panel_done(self, abbr):
pass
def run(self, edit, panel_input=None, **kwargs):
if panel_input is None:
self.setup(edit, self.view, **kwargs)
self.erase = False
panel = self.view.window().show_input_panel(
self.input_message,
self.default_input,
self.on_panel_done, # on_done
self.on_panel_change, # on_change
self.undo) # on_cancel
panel.sel().clear()
panel.sel().add(sublime.Region(0, panel.size()))
if self.panel_grammar:
panel.set_syntax_file(self.panel_grammar)
panel_setting = panel.settings().set
panel_setting('line_numbers', False)
panel_setting('gutter', False)
panel_setting('auto_complete', False)
panel_setting('tab_completion', False)
else:
self.run_on_input(edit, self.view, panel_input)
def setup(self, edit, view, **kwargs):
pass
def run_on_input(self, edit, view, panel_input):
view = self.view
cmd_input = self.process_panel_input(panel_input) or ''
try:
self.erase = self.run_command(edit, view, cmd_input) is not False
except:
pass
class WrapAsYouType(CommandsAsYouTypeBase):
default_input = 'div'
_prev_output = ''
input_message = "Enter Wrap Abbreviation: "
def setup(self, edit, view, **kwargs):
self._prev_output = ''
if len(view.sel()) == 1:
# capture wrapping context (parent HTML element)
# if there is only one selection
r = ctx.js().locals.pyCaptureWrappingRange()
if r:
view.sel().clear()
view.sel().add(sublime.Region(r[0], r[1]))
view.show(view.sel())
self.remember_sels(view)
# override method to correctly wrap abbreviations
def run_on_input(self, edit, view, abbr):
# def _real_insert(self, abbr):
# view = self.view
# self.edit = get_edit(view, self.edit_token)
self.erase = True
# restore selections
view.sel().clear()
for sel in self._sels:
view.sel().add(sel)
def ins(i, sel):
try:
self._prev_output = ctx.js().locals.pyWrapAsYouType(abbr, self._sel_items[i])
# self.run_command(view, output)
except Exception:
"dont litter the console"
self.run_command(edit, view, self._prev_output)
run_action(ins, view)
# if self.edit:
# view.end_edit(self.edit)
class ExpandAsYouType(WrapAsYouType):
default_input = 'div'
input_message = "Enter Abbreviation: "
def setup(self, edit, view, **kwargs):
# adjust selection to non-space bounds
sels = []
for s in view.sel():
text = view.substr(s)
a = s.a + len(text) - len(text.lstrip())
b = s.b - len(text) + len(text.rstrip())
sels.append(sublime.Region(a, b))
view.sel().clear()
for s in sels:
view.sel().add(s)
self.remember_sels(active_view())
class EnterKeyHandler(sublime_plugin.EventListener):
def on_query_context(self, view, key, op, operand, match_all):
if key != 'clear_fields_on_enter_key':
return None
if settings.get('clear_fields_on_enter_key', False):
view.run_command('clear_fields')
return True
class RenameTag(sublime_plugin.TextCommand):
def run(self, edit, **kw):
if not check_context(True):
return
view = active_view()
sels = list(view.sel())
sel_cleared = False
for s in sels:
ranges = ctx.js().locals.pyGetTagNameRanges(s.begin())
if ranges:
if not sel_cleared:
view.sel().clear()
sel_cleared = True
for r in ranges:
view.sel().add(sublime.Region(r[0], r[1]))
view.show(view.sel())
class EmmetInsertAttribute(sublime_plugin.TextCommand):
def run(self, edit, attribute=None, **kw):
if not attribute:
return
view = active_view()
prefix = ''
if view.sel():
sel = view.sel()[0]
if not view.substr(sublime.Region(sel.begin() - 1, sel.begin())).isspace():
prefix = ' '
view.run_command('insert_snippet', {'contents': '%s%s="$1"' % (prefix, attribute)})
class EmmetResetContext(sublime_plugin.TextCommand):
def run(self, edit, **kw):
update_settings()
def plugin_loaded():
sublime.set_timeout(init, 200)
##################
# Init plugin
if not is_python3:
init()
|
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from nfs import NfsCollector
################################################################################
class TestNfsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NfsCollector', {
'interval': 1
})
self.collector = NfsCollector(config, None)
def test_import(self):
self.assertTrue(NfsCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_proc_stat(self, publish_mock, open_mock):
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/net/rpc/nfs')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_rhel5(self, publish_mock):
NfsCollector.PROC = self.getFixturePath('rhel5-1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
NfsCollector.PROC = self.getFixturePath('rhel5-2')
self.collector.collect()
metrics = {
'net.packets': 0.0,
'net.tcpcnt': 0.0,
'net.tcpconn': 0.0,
'net.udpcnt': 0.0,
'rpc.authrefrsh': 0.0,
'rpc.calls': 8042864.0,
'rpc.retrans': 0.0,
'v2.create': 0.0,
'v2.fsstat': 0.0,
'v2.getattr': 0.0,
'v2.link': 0.0,
'v2.lookup': 0.0,
'v2.mkdir': 0.0,
'v2.null': 0.0,
'v2.read': 0.0,
'v2.readdir': 0.0,
'v2.readlink': 0.0,
'v2.remove': 0.0,
'v2.rename': 0.0,
'v2.rmdir': 0.0,
'v2.root': 0.0,
'v2.setattr': 0.0,
'v2.symlink': 0.0,
'v2.wrcache': 0.0,
'v2.write': 0.0,
'v3.access': 40672.0,
'v3.commit': 0.0,
'v3.create': 91.0,
'v3.fsinfo': 0.0,
'v3.fsstat': 20830.0,
'v3.getattr': 162507.0,
'v3.link': 0.0,
'v3.lookup': 89.0,
'v3.mkdir': 0.0,
'v3.mknod': 0.0,
'v3.null': 0.0,
'v3.pathconf': 0.0,
'v3.read': 6093419.0,
'v3.readdir': 4002.0,
'v3.readdirplus': 0.0,
'v3.readlink': 0.0,
'v3.remove': 9.0,
'v3.rename': 0.0,
'v3.rmdir': 0.0,
'v3.setattr': 8640.0,
'v3.symlink': 0.0,
'v3.write': 1712605.0,
'v4.access': 0.0,
'v4.close': 0.0,
'v4.commit': 0.0,
'v4.confirm': 0.0,
'v4.create': 0.0,
'v4.delegreturn': 0.0,
'v4.fs_locations': 0.0,
'v4.fsinfo': 0.0,
'v4.getacl': 0.0,
'v4.getattr': 0.0,
'v4.link': 0.0,
'v4.lock': 0.0,
'v4.lockt': 0.0,
'v4.locku': 0.0,
'v4.lookup': 0.0,
'v4.lookup_root': 0.0,
'v4.null': 0.0,
'v4.open': 0.0,
'v4.open_conf': 0.0,
'v4.open_dgrd': 0.0,
'v4.open_noat': 0.0,
'v4.pathconf': 0.0,
'v4.read': 0.0,
'v4.readdir': 0.0,
'v4.readlink': 0.0,
'v4.rel_lkowner': 0.0,
'v4.remove': 0.0,
'v4.rename': 0.0,
'v4.renew': 0.0,
'v4.server_caps': 0.0,
'v4.setacl': 0.0,
'v4.setattr': 0.0,
'v4.setclntid': 0.0,
'v4.statfs': 0.0,
'v4.symlink': 0.0,
'v4.write': 0.0
}
self.assertPublishedMany(publish_mock, metrics)
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_rhel6(self, publish_mock):
NfsCollector.PROC = self.getFixturePath('rhel6-1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
NfsCollector.PROC = self.getFixturePath('rhel6-2')
self.collector.collect()
metrics = {
'net.packets': 0.0,
'net.tcpcnt': 0.0,
'net.tcpconn': 0.0,
'net.udpcnt': 0.0,
'rpc.authrefrsh': 32.0,
'rpc.calls': 32.0,
'rpc.retrans': 0.0,
'v2.create': 0.0,
'v2.fsstat': 0.0,
'v2.getattr': 0.0,
'v2.link': 0.0,
'v2.lookup': 0.0,
'v2.mkdir': 0.0,
'v2.null': 0.0,
'v2.read': 0.0,
'v2.readdir': 0.0,
'v2.readlink': 0.0,
'v2.remove': 0.0,
'v2.rename': 0.0,
'v2.rmdir': 0.0,
'v2.root': 0.0,
'v2.setattr': 0.0,
'v2.symlink': 0.0,
'v2.wrcache': 0.0,
'v2.write': 0.0,
'v3.access': 6.0,
'v3.commit': 0.0,
'v3.create': 0.0,
'v3.fsinfo': 0.0,
'v3.fsstat': 17.0,
'v3.getattr': 7.0,
'v3.link': 0.0,
'v3.lookup': 0.0,
'v3.mkdir': 0.0,
'v3.mknod': 0.0,
'v3.null': 0.0,
'v3.pathconf': 0.0,
'v3.read': 0.0,
'v3.readdir': 0.0,
'v3.readdirplus': 0.0,
'v3.readlink': 0.0,
'v3.remove': 0.0,
'v3.rename': 0.0,
'v3.rmdir': 0.0,
'v3.setattr': 1.0,
'v3.symlink': 0.0,
'v3.write': 1.0,
'v4.access': 0.0,
'v4.close': 0.0,
'v4.commit': 0.0,
'v4.confirm': 0.0,
'v4.create': 0.0,
'v4.create_ses': 0.0,
'v4.delegreturn': 0.0,
'v4.destroy_ses': 0.0,
'v4.ds_write': 0.0,
'v4.exchange_id': 0.0,
'v4.fs_locations': 0.0,
'v4.fsinfo': 0.0,
'v4.get_lease_t': 0.0,
'v4.getacl': 0.0,
'v4.getattr': 0.0,
'v4.getdevinfo': 0.0,
'v4.getdevlist': 0.0,
'v4.layoutcommit': 0.0,
'v4.layoutget': 0.0,
'v4.layoutreturn': 0.0,
'v4.link': 0.0,
'v4.lock': 0.0,
'v4.lockt': 0.0,
'v4.locku': 0.0,
'v4.lookup': 0.0,
'v4.lookup_root': 0.0,
'v4.null': 0.0,
'v4.open': 0.0,
'v4.open_conf': 0.0,
'v4.open_dgrd': 0.0,
'v4.open_noat': 0.0,
'v4.pathconf': 0.0,
'v4.read': 0.0,
'v4.readdir': 0.0,
'v4.readlink': 0.0,
'v4.reclaim_comp': 0.0,
'v4.rel_lkowner': 0.0,
'v4.remove': 0.0,
'v4.rename': 0.0,
'v4.renew': 0.0,
'v4.sequence': 0.0,
'v4.server_caps': 0.0,
'v4.setacl': 0.0,
'v4.setattr': 0.0,
'v4.setclntid': 0.0,
'v4.statfs': 0.0,
'v4.symlink': 0.0,
'v4.write': 0.0,
}
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
|
"""This module generates tvtk (Traited VTK) classes from the
VTK-Python API.
"""
# Author: Prabhu Ramachandran
# Copyright (c) 2004-2007, Enthought, Inc.
# License: BSD Style.
import vtk
import os
import os.path
import zipfile
import tempfile
import shutil
import glob
from optparse import OptionParser
# Local imports -- these should be relative imports since these are
# imported before the package is installed.
from common import get_tvtk_name, camel2enthought
from wrapper_gen import WrapperGenerator
from special_gen import HelperGenerator
######################################################################
# `TVTKGenerator`
######################################################################
class TVTKGenerator:
"""Generates all the TVTK code."""
def __init__(self, out_dir=''):
"""Initializes the instance.
Parameters
----------
- out_dir - `string`
The output directory to generate code in. The directory is
created if it does not exist. A directory called
`tvtk_classes` is created inside this directory and all the
code is written here. Any existing code there is blindly
overwritten. If no out_dir is specified, a temporary one is
created using `tempfile.mkdtemp`.
"""
if not out_dir:
out_dir = tempfile.mkdtemp()
self.out_dir = os.path.join(out_dir, 'tvtk_classes')
if not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
self.zip_name = 'tvtk_classes.zip'
self.wrap_gen = WrapperGenerator()
self.helper_gen = HelperGenerator()
#################################################################
# `TVTKGenerator` interface.
#################################################################
def generate_code(self):
"""Generate all the wrapper code in `self.out_dir`.
"""
out_dir = self.out_dir
helper_gen = self.helper_gen
wrap_gen = self.wrap_gen
# Create an __init__.py file
f = open(os.path.join(out_dir, '__init__.py'), 'w')
f.close()
# Create a vtk_version.py file that contains VTK build
# information.
v = vtk.vtkVersion()
vtk_version = v.GetVTKVersion()[:3]
vtk_src_version = v.GetVTKSourceVersion()
code ="vtk_build_version = \'%s\'\n"%(vtk_version)
code += "vtk_build_src_version = \'%s\'\n"%(vtk_src_version)
f = open(os.path.join(out_dir, 'vtk_version.py'), 'w')
f.write(code)
f.close()
# Write the helper code header.
helper_file = open(os.path.join(out_dir, 'tvtk_helper.py'), 'w')
helper_gen.write_prelims(helper_file)
# Write the wrapper files.
tree = wrap_gen.get_tree().tree
#classes = dir(vtk)
classes = [x.name for x in wrap_gen.get_tree() \
if x.name.startswith('vtk') and \
not x.name.startswith('vtkQt') and \
not issubclass(getattr(vtk, x.name), object) ]
for nodes in tree:
for node in nodes:
if node.name in classes:
tvtk_name = get_tvtk_name(node.name)
self._write_wrapper_class(node, tvtk_name)
helper_gen.add_class(tvtk_name, helper_file)
helper_file.close()
def write_wrapper_classes(self, names):
"""Given VTK class names in the list `names`, write out the
wrapper classes to a suitable file. This is a convenience
method so one can generate just a few of the wrapper classes
if desired. This is useful when debugging. Please note that
the method also generates code for all the ancestors of the
specified classes.
"""
# Wrappers for the ancestors are generated in order to get the
# _updateable_traits_ information correctly.
nodes = []
for name in names:
node = self.wrap_gen.get_tree().get_node(name)
if node is None:
print 'ERROR: Cannot find class: %s' % name
continue
nodes.append(node)
# Get ancestors.
for node in nodes[:]:
anc = node.get_ancestors()
for i in anc:
if i not in nodes:
nodes.insert(0, i)
# Sort them as per their level.
nodes.sort(key=lambda x: x.level)
# Write code.
for node in nodes:
tvtk_name = get_tvtk_name(node.name)
self._write_wrapper_class(node, tvtk_name)
def build_zip(self, include_src=False):
"""Build the zip file (with name `self.zip_name`) in the
current directory.
Parameters
----------
- include_src : `bool` (default: False)
If True, also includes all the ``*.py`` files in the ZIP file.
By default only the ``*.pyc`` files are included.
"""
cwd = os.getcwd()
d = os.path.dirname(self.out_dir)
os.chdir(d)
z = zipfile.PyZipFile(self.zip_name, 'w',
zipfile.ZIP_DEFLATED)
if include_src:
l = glob.glob(os.path.join('tvtk_classes', '*.py'))
for x in l:
fname = os.path.basename(x)
z.write(x, 'tvtk_classes/%s'%fname)
z.writepy('tvtk_classes')
z.close()
zip_path = os.path.join(cwd, self.zip_name)
if os.path.exists(zip_path):
os.unlink(zip_path)
shutil.move(self.zip_name, cwd)
os.chdir(cwd)
def clean(self):
"""Delete the temporary directory where the code has been
generated.
"""
tmp_dir = os.path.dirname(self.out_dir)
d = os.listdir(tmp_dir)
ok = 0
if len(d) == 1 and d[0] == 'tvtk_classes':
ok = 1
if ok:
shutil.rmtree(tmp_dir)
else:
print "Not removing directory:", tmp_dir
print "It does not contain a tvtk_classes directory!"
#################################################################
# Non-public interface.
#################################################################
def _write_wrapper_class(self, node, tvtk_name):
"""Write the wrapper code to a file."""
# The only reason this method is separate is to generate code
# for an individual class when debugging.
fname = camel2enthought(tvtk_name) + '.py'
out = open(os.path.join(self.out_dir, fname), 'w')
self.wrap_gen.generate_code(node, out)
out.close()
######################################################################
# Utility functions.
######################################################################
def main():
usage = """usage: %prog [options] [vtk_classes]
The options are described below. An optional list of VTK classes for
which code is to be generated may be specified. If none are specified
code will be generated for all the VTK classes.
"""
parser = OptionParser(usage)
parser.add_option("-o", "--output-dir", action="store",
type="string", dest="out_dir", default='',
help="Output directory in which to generate code.")
parser.add_option("-n", "--no-clean", action="store_false",
dest="clean", default=True,
help="Do not clean the temporary directory.")
parser.add_option("-z", "--no-zipfile", action="store_false",
dest="zip", default=True,
help="Do not create a ZIP file.")
parser.add_option("-s", "--source", action="store_true",
dest="src", default=False,
help="Include source files (*.py) in addition to *.pyc files in the ZIP file.")
(options, args) = parser.parse_args()
# Now do stuff.
gen = TVTKGenerator(options.out_dir)
if len(args) == 0:
gen.generate_code()
else:
gen.write_wrapper_classes(args)
if options.zip:
gen.build_zip(options.src)
if options.clean:
gen.clean()
if __name__ == '__main__':
main()
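# Example invocation (illustrative; substitute the actual path of this script):
#   python <path-to-this-script> -o /tmp/tvtk_out -n -z vtkActor vtkPolyDataMapper
# generates wrappers only for the listed classes (plus their ancestors),
# keeps the temporary output directory (-n) and skips building tvtk_classes.zip (-z).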
|
|
import hashlib
import hmac
import uuid
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.signals import user_logged_in
import basket
import commonware.log
from django_browserid import get_audience
from django_statsd.clients import statsd
from rest_framework import status
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.generics import (CreateAPIView, DestroyAPIView,
RetrieveAPIView, RetrieveUpdateAPIView,
ListAPIView)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle
import amo
from mkt.users.models import UserProfile
from mkt.users.views import browserid_authenticate
from mkt.account.serializers import (AccountSerializer, FeedbackSerializer,
FxaLoginSerializer, LoginSerializer,
NewsletterSerializer,
PermissionsSerializer)
from mkt.api.authentication import (RestAnonymousAuthentication,
RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.authorization import AllowSelf, AllowOwner
from mkt.api.base import CORSMixin, MarketplaceView
from mkt.constants.apps import INSTALL_TYPE_USER
from mkt.site.mail import send_mail_jinja
from mkt.users.views import _fxa_authorize, get_fxa_session
from mkt.webapps.serializers import SimpleAppSerializer
from mkt.webapps.models import Webapp
log = commonware.log.getLogger('z.account')
def user_relevant_apps(user):
return {
'developed': list(user.addonuser_set.filter(
role=amo.AUTHOR_ROLE_OWNER).values_list('addon_id', flat=True)),
'installed': list(user.installed_set.values_list('addon_id',
flat=True)),
'purchased': list(user.purchase_ids()),
}
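# Illustrative shape of the mapping returned above (values are add-on ids):
#   {'developed': [42], 'installed': [7, 12], 'purchased': [3]}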
class MineMixin(object):
def get_object(self, queryset=None):
pk = self.kwargs.get('pk')
if pk == 'mine':
self.kwargs['pk'] = self.request.user.pk
return super(MineMixin, self).get_object(queryset)
class InstalledView(CORSMixin, MarketplaceView, ListAPIView):
cors_allowed_methods = ['get']
serializer_class = SimpleAppSerializer
permission_classes = [AllowSelf]
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
def get_queryset(self):
return Webapp.objects.no_cache().filter(
installed__user=self.request.user,
installed__install_type=INSTALL_TYPE_USER).order_by(
'-installed__created')
class CreateAPIViewWithoutModel(MarketplaceView, CreateAPIView):
"""
A base class for APIs that need to support a create-like action, but
without being tied to a Django Model.
"""
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication,
RestAnonymousAuthentication]
cors_allowed_methods = ['post']
permission_classes = (AllowAny,)
def response_success(self, request, serializer, data=None):
if data is None:
data = serializer.data
return Response(data, status=status.HTTP_201_CREATED)
def response_error(self, request, serializer):
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.DATA)
if serializer.is_valid():
data = self.create_action(request, serializer)
return self.response_success(request, serializer, data=data)
return self.response_error(request, serializer)
class AccountView(MineMixin, CORSMixin, RetrieveUpdateAPIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
cors_allowed_methods = ['get', 'patch', 'put']
model = UserProfile
permission_classes = (AllowOwner,)
serializer_class = AccountSerializer
class FeedbackView(CORSMixin, CreateAPIViewWithoutModel):
class FeedbackThrottle(UserRateThrottle):
THROTTLE_RATES = {
'user': '30/hour',
}
serializer_class = FeedbackSerializer
throttle_classes = (FeedbackThrottle,)
throttle_scope = 'user'
def create_action(self, request, serializer):
context_data = self.get_context_data(request, serializer)
sender = getattr(request.user, 'email', settings.NOBODY_EMAIL)
send_mail_jinja(u'Marketplace Feedback', 'account/email/feedback.txt',
context_data, from_email=sender,
recipient_list=[settings.MKT_FEEDBACK_EMAIL])
def get_context_data(self, request, serializer):
context_data = {
'user_agent': request.META.get('HTTP_USER_AGENT', ''),
'ip_address': request.META.get('REMOTE_ADDR', '')
}
context_data.update(serializer.data)
context_data['user'] = request.user
return context_data
def commonplace_token(email):
unique_id = uuid.uuid4().hex
consumer_id = hashlib.sha1(
email + settings.SECRET_KEY).hexdigest()
hm = hmac.new(
unique_id + settings.SECRET_KEY,
consumer_id, hashlib.sha512)
return ','.join((email, hm.hexdigest(), unique_id))
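# The token produced above is the comma-joined triple 'email,signature,unique_id',
# where the signature is an HMAC-SHA512 over the SHA1-derived consumer id, keyed
# with unique_id + SECRET_KEY (a description of this code, not a published spec).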
class FxaLoginView(CORSMixin, CreateAPIViewWithoutModel):
authentication_classes = []
serializer_class = FxaLoginSerializer
def create_action(self, request, serializer):
session = get_fxa_session(state=serializer.data['state'])
profile = _fxa_authorize(
session,
settings.FXA_CLIENT_SECRET,
request,
serializer.data['auth_response'])
if profile is None:
raise AuthenticationFailed('No profile.')
request.user = profile
request.groups = profile.groups.all()
# We want to return completely custom data, not the serializer's.
data = {
'error': None,
'token': commonplace_token(request.user.email),
'settings': {
'display_name': request.user.display_name,
'email': request.user.email,
}
}
# Serializers give up if they aren't passed an instance, so we
# do that here despite PermissionsSerializer not needing one
# really.
permissions = PermissionsSerializer(context={'request': request},
instance=True)
data.update(permissions.data)
# Add ids of installed/purchased/developed apps.
data['apps'] = user_relevant_apps(profile)
return data
class LoginView(CORSMixin, CreateAPIViewWithoutModel):
authentication_classes = []
serializer_class = LoginSerializer
def create_action(self, request, serializer):
with statsd.timer('auth.browserid.verify'):
profile, msg = browserid_authenticate(
request, serializer.data['assertion'],
browserid_audience=serializer.data['audience'] or
get_audience(request),
is_mobile=serializer.data['is_mobile'],
)
if profile is None:
# Authentication failure.
log.info('No profile: %s' % (msg or ''))
raise AuthenticationFailed('No profile.')
request.user = profile
request.groups = profile.groups.all()
auth.login(request, profile)
profile.log_login_attempt(True) # TODO: move this to the signal.
user_logged_in.send(sender=profile.__class__, request=request,
user=profile)
# We want to return completely custom data, not the serializer's.
data = {
'error': None,
'token': commonplace_token(request.user.email),
'settings': {
'display_name': request.user.display_name,
'email': request.user.email,
}
}
# Serializers give up if they aren't passed an instance, so we
# do that here despite PermissionsSerializer not needing one
# really.
permissions = PermissionsSerializer(context={'request': request},
instance=True)
data.update(permissions.data)
# Add ids of installed/purchased/developed apps.
data['apps'] = user_relevant_apps(profile)
return data
class LogoutView(CORSMixin, DestroyAPIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
permission_classes = (IsAuthenticated,)
cors_allowed_methods = ['delete']
def delete(self, request):
auth.logout(request)
return Response(status=status.HTTP_204_NO_CONTENT)
class NewsletterView(CORSMixin, CreateAPIViewWithoutModel):
class NewsletterThrottle(UserRateThrottle):
scope = 'newsletter'
THROTTLE_RATES = {
'newsletter': '30/hour',
}
serializer_class = NewsletterSerializer
throttle_classes = (NewsletterThrottle,)
def response_success(self, request, serializer, data=None):
return Response({}, status=status.HTTP_204_NO_CONTENT)
def create_action(self, request, serializer):
email = serializer.data['email']
newsletter = serializer.data['newsletter']
basket.subscribe(email, newsletter,
format='H', country=request.REGION.slug,
lang=request.LANG, optin='Y',
trigger_welcome='Y')
class PermissionsView(CORSMixin, MineMixin, RetrieveAPIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
cors_allowed_methods = ['get']
permission_classes = (AllowSelf,)
model = UserProfile
serializer_class = PermissionsSerializer
|
|
import sys
import random
import curses
class Cell(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.opened = False
self.checked = False
self.mine = False
class Command(object):
def __init__(self):
self.x = -1
self.y = -1
self.open_cell = False
self.check_cell = False
class Minesweeper(object):
def __init__(self, ncol, nrow, nmines):
self.NUM_X = ncol # number of columns (x-axis)
self.NUM_Y = nrow # number of rows (y-axis)
self.NUM_MINES = nmines
self.cells = {}
for i in xrange(self.NUM_X):
for j in xrange(self.NUM_Y):
cell = Cell(i, j)
self.cells[(i, j)] = cell
# arrange mines randomly
imines = random.sample(range(len(self.cells)), self.NUM_MINES)
for i, cell in enumerate(self.cells.values()):
if i in imines:
cell.mine = True
def get_cell(self, x, y):
return self.cells[(x, y)]
# number of columns
def get_num_x(self):
return self.NUM_X
# number of rows
def get_num_y(self):
return self.NUM_Y
# list the coordinates of the surrounding cells
def __surrounding_mines(self, x, y):
l = []
for i in [x-1, x, x+1]:
for j in [y-1, y, y+1]:
if i < 0:
continue
if i >= self.get_num_x():
continue
if j < 0:
continue
if j >= self.get_num_y():
continue
if i == x and j == y:
continue
l.append((i, j))
return l
# count surrounding mines
def count_mines(self, x, y):
ct = 0
l = self.__surrounding_mines(x, y)
for buf in l:
i = buf[0]
j = buf[1]
if self.cells[(i, j)].mine:
ct += 1
return ct
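# Illustrative example: on a 3x3 board whose only mine sits at (0, 0),
# count_mines(1, 1) returns 1 and count_mines(2, 2) returns 0.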
# returns True if the selected cell index is out of range
def eval_index_error(self, cmd):
return (cmd.x, cmd.y) not in self.cells
# returns True if the command is invalid
def eval_cmd_error(self, cmd):
if (cmd.x, cmd.y) not in self.cells:
return True
if cmd.open_cell and cmd.check_cell:
return True
if not cmd.open_cell and not cmd.check_cell:
return True
if self.cells[(cmd.x, cmd.y)].checked and cmd.open_cell:
return True
return False
def exec_cmd(self, cmd):
if self.eval_cmd_error(cmd):
return
if cmd.open_cell:
if self.cells[(cmd.x, cmd.y)].opened:
return
self.cells[(cmd.x, cmd.y)].opened = True
if self.count_mines(cmd.x, cmd.y) == 0:
l = self.__surrounding_mines(cmd.x, cmd.y)
for buf in l:
x = buf[0]
y = buf[1]
# use a fresh Command so the caller's cmd is not clobbered by the flood fill
ncmd = Command()
ncmd.x = x
ncmd.y = y
ncmd.open_cell = True
self.exec_cmd(ncmd)
if not self.cells[(cmd.x, cmd.y)].checked and cmd.check_cell:
self.cells[(cmd.x, cmd.y)].checked = True
elif self.cells[(cmd.x, cmd.y)].checked and cmd.check_cell:
self.cells[(cmd.x, cmd.y)].checked = False
# returns True if the player has lost
def eval_lose(self):
for cell in self.cells.values():
if cell.mine and cell.opened:
return True
return False
# returns True if the player has won
def eval_win(self):
if self.eval_lose():
return False
ok = 0
for cell in self.cells.values():
if cell.opened:
ok += 1
if cell.mine and cell.checked:
ok += 1
if ok == len(self.cells):
return True
return False
class View(object):
def __init__(self):
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
self.stdscr.keypad(1)
yx = self.stdscr.getmaxyx()
if yx[0] < 25 or yx[1] < 30:
self.stdscr.clear()
self.stdscr.addstr('window size too small.\n')
self.stdscr.addstr('please magnify your terminal window.')
self.stdscr.refresh()
self.stdscr.getch()
sys.exit()
def finalize(self):
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
# returns character according to status of a cell
def __cell_to_str(self, cell, minesweeper):
if cell.opened and cell.mine:
return 'X'
if cell.opened:
ct = minesweeper.count_mines(cell.x, cell.y)
if ct == 0:
return ' '
return str(ct)
if cell.checked:
return '!'
return '?'
def __draw_cells(self, minesweeper, text):
nx = minesweeper.get_num_x()
ny = minesweeper.get_num_y()
for x in xrange(nx):
self.stdscr.addstr(0, x+1, chr(ord('a') + x))
for y in xrange(ny):
self.stdscr.addstr(y+1, 0, str(y))
cmd = self.__eval_input(text)
for y in xrange(ny):
for x in xrange(nx):
attr = []
if (cmd.x == -1 or cmd.y == -1) and (x == cmd.x or y == cmd.y):
attr = [curses.A_REVERSE]
if x == cmd.x and y == cmd.y:
attr = [curses.A_REVERSE]
self.stdscr.addstr(
y+1, x+1, self.__cell_to_str(
minesweeper.get_cell(x, y), minesweeper), *attr)
def __draw_input(self, text, y):
self.stdscr.addstr(y, 0, '>')
self.stdscr.addstr(y, 2, text)
def __eval_input_char(self, c):
if c.isdigit():
return (-1, int(c))
elif c.isalpha():
return (ord(c) - ord('a'), -1)
return (-1, -1)
# evaluate input text and convert to Command object including x and y.
def __eval_input(self, text):
cmd = Command()
if len(text) == 0:
return cmd
if len(text) == 1:
buf = self.__eval_input_char(text[0])
cmd.x = buf[0]
cmd.y = buf[1]
return cmd
if len(text) == 2:
if text[0].isdigit() and text[1].isdigit():
return cmd
elif text[0].isalpha() and text[1].isalpha():
return cmd
buf0 = self.__eval_input_char(text[0])
buf1 = self.__eval_input_char(text[1])
cmd.x = max(buf0[0], buf1[0])
cmd.y = max(buf0[1], buf1[1])
return cmd
return cmd
# draw cells and ask for command from player
def draw_and_input(self, minesweeper):
text = ''
self.stdscr.clear()
self.__draw_cells(minesweeper, text)
self.stdscr.addstr(19, 0, 'select cell')
self.__draw_input(text, 20)
self.stdscr.refresh()
# ask player which cell to select.
while True:
c = self.stdscr.getch()
if c == curses.KEY_ENTER or c == 10:
cmd = self.__eval_input(text)
if cmd.x >= 0 and cmd.y >= 0:
break
text = ''
if c == curses.KEY_BACKSPACE:
text = text[:-1]
if c < 256 and c != 10:
text += chr(c)
# hit CTRL-Q to exit.
if c == 17:
self.finalize()
sys.exit()
self.stdscr.clear()
self.__draw_cells(minesweeper, text)
self.stdscr.addstr(19, 0, 'select cell')
self.__draw_input(text, 20)
self.stdscr.refresh()
cmd = self.__eval_input(text)
if minesweeper.eval_index_error(cmd):
return self.draw_and_input(minesweeper)
# ask player what to do
text_cmd = ''
while True:
self.stdscr.clear()
self.__draw_cells(minesweeper, text)
self.stdscr.addstr(19, 0, 'select cell')
self.__draw_input(text, 20)
if not minesweeper.get_cell(cmd.x, cmd.y).checked:
self.stdscr.addstr(21, 0, 'o: open, c: check')
else:
self.stdscr.addstr(21, 0, 'o: open, c: uncheck')
self.stdscr.addstr(22, 0, '> ' + str(text_cmd))
self.stdscr.refresh()
c = self.stdscr.getch()
if c == curses.KEY_ENTER or c == 10:
if text_cmd == 'c':
cmd.check_cell = True
break
elif text_cmd == 'o':
cmd.open_cell = True
break
elif c == ord('c'):
text_cmd = 'c'
elif c == ord('o'):
text_cmd = 'o'
else:
text_cmd = ''
return cmd
def draw_win(self, minesweeper):
while True:
self.stdscr.clear()
self.__draw_cells(minesweeper, '')
self.stdscr.addstr(20, 0, 'you win!')
self.stdscr.refresh()
c = self.stdscr.getch()
if c == curses.KEY_ENTER or c == 10:
break
def draw_lose(self, minesweeper):
while True:
self.stdscr.clear()
self.__draw_cells(minesweeper, '')
self.stdscr.addstr(20, 0, 'you lose!')
self.stdscr.refresh()
c = self.stdscr.getch()
if c == curses.KEY_ENTER or c == 10:
break
def main():
ncol = raw_input('number of columns > ')
nrow = raw_input('number of rows > ')
nmines = raw_input('number of mines > ')
err = False
if not ncol.isdigit():
err = True
if not nrow.isdigit():
err = True
if not nmines.isdigit():
err = True
if err:
print >>sys.stderr, 'invalid inputs'
return
if int(ncol) > 10:
print >>sys.stderr, 'field too large'
return
if int(nrow) > 10:
print >>sys.stderr, 'field too large'
return
if int(nmines) > int(ncol) * int(nrow):
print >>sys.stderr, 'invalid inputs'
return
minesweeper = Minesweeper(int(ncol), int(nrow), int(nmines))
view = View()
while True:
cmd = view.draw_and_input(minesweeper)
if minesweeper.eval_cmd_error(cmd):
continue
minesweeper.exec_cmd(cmd)
if minesweeper.eval_win():
view.draw_win(minesweeper)
break
elif minesweeper.eval_lose():
view.draw_lose(minesweeper)
break
view.finalize()
if __name__ == '__main__':
main()
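# How a turn works (summary of the curses UI above): type a column letter
# (a, b, ...) and a row digit in either order and press Enter to select a cell,
# then press 'o' + Enter to open it or 'c' + Enter to toggle its check mark;
# Ctrl-Q quits at the cell-selection prompt.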
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class ConstantTest(tf.test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=False):
tf_ans = tf.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=True):
tf_ans = tf.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
def testSComplex(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
np.complex64))
self._testAll(np.complex(
1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testString(self):
self._testCpu(np.array([tf.compat.as_bytes(str(x))
for x in np.arange(-15, 15)]).reshape([2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
with self.test_session():
val = tf.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.test_session():
val = tf.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.test_session():
val = tf.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
with tf.Graph().as_default():
c = tf.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
with tf.Graph().as_default():
c = tf.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
with tf.Graph().as_default():
c = tf.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
with tf.Graph().as_default():
c = tf.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
with tf.Graph().as_default():
with self.assertRaisesWithPredicateMatch(
ValueError,
lambda e: ("Too many elements provided. Needed at most 5, "
"but received 7" == str(e))):
tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
# pylint: enable=g-long-lambda
def testTooLargeConstant(self):
with tf.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = tf.constant(large_array)
def testTooLargeGraph(self):
with tf.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = tf.constant(large_array)
d = tf.constant(large_array)
with self.assertRaisesRegexp(
ValueError, "GraphDef cannot be larger than 2GB."):
g.as_graph_def()
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError,
"setting an array element with a sequence"):
c = tf.constant([[1, 2], [3]], dtype=tf.int32)
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = tf.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = tf.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(tf.test.TestCase):
def testAsTensorForTensorInput(self):
with tf.Graph().as_default():
t = tf.constant(10.0)
x = tf.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
with tf.Graph().as_default():
x = tf.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, tf.Tensor))
def testAsTensorForShapeInput(self):
with self.test_session():
x = tf.convert_to_tensor(tf.TensorShape([]))
self.assertEqual(tf.int32, x.dtype)
self.assertAllEqual([], x.eval())
x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]))
self.assertEqual(tf.int32, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.int64)
self.assertEqual(tf.int64, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
x = tf.reshape(tf.zeros([6]), tf.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())
with self.assertRaisesRegexp(ValueError, "partially known"):
tf.convert_to_tensor(tf.TensorShape(None))
with self.assertRaisesRegexp(ValueError, "partially known"):
tf.convert_to_tensor(tf.TensorShape([1, None, 64]))
with self.assertRaises(TypeError):
tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.float32)
def testAsTensorForDimensionInput(self):
with self.test_session():
x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1])
self.assertEqual(tf.int32, x.dtype)
self.assertAllEqual(2, x.eval())
x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.int64)
self.assertEqual(tf.int64, x.dtype)
self.assertAllEqual(2, x.eval())
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
tf.convert_to_tensor(tf.TensorShape(None)[1])
with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
tf.convert_to_tensor(tf.TensorShape([1, None, 64])[1])
with self.assertRaises(TypeError):
tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.float32)
class IdentityOpTest(tf.test.TestCase):
def testIdTensor(self):
with tf.Graph().as_default():
x = tf.constant(2.0, shape=[6], name="input")
id_op = tf.identity(x, name="id")
self.assertTrue(isinstance(id_op.op.inputs[0], tf.Tensor))
self.assertProtoEquals(
"name: 'id' op: 'Identity' input: 'input' "
"attr { key: 'T' value { type: DT_FLOAT } }", id_op.op.node_def)
class ZerosTest(tf.test.TestCase):
def _Zeros(self, shape):
with self.test_session():
ret = tf.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
def testConst(self):
self.assertTrue(np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] *
2)))
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
d = tf.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = tf.zeros(tf.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
with self.test_session():
d = tf.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = tf.zeros([2, 3])
self.assertEqual(z.dtype, tf.float32)
self.assertEqual([2, 3], z.get_shape())
z = tf.zeros(tf.shape(d))
self.assertEqual(z.dtype, tf.float32)
self.assertEqual([2, 3], z.get_shape())
# Test explicit type control
for dtype in [tf.float32, tf.float64, tf.int32,
tf.uint8, tf.int16, tf.int8,
tf.complex64, tf.int64]:
z = tf.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z = tf.zeros(tf.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
class ZerosLikeTest(tf.test.TestCase):
def _compareZeros(self, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
numpy_dtype = dtype.as_numpy_dtype
d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = tf.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[0] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
def testZerosLikeCPU(self):
for dtype in [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.complex64, tf.int64]:
self._compareZeros(dtype, False)
def testZerosLikeGPU(self):
for dtype in [tf.float32, tf.float64, tf.int32]:
self._compareZeros(dtype, True)
def testZerosLikePartialShape(self):
d = tf.placeholder(tf.float32, shape=[None, 4, None])
z = tf.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast to each other
with self.test_session():
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = tf.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(tf.test.TestCase):
def _Ones(self, shape):
with self.test_session():
ret = tf.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
d = tf.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = tf.ones(tf.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
with self.test_session():
d = tf.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = tf.ones([2, 3])
self.assertEqual(z.dtype, tf.float32)
self.assertEqual([2, 3], z.get_shape())
z = tf.ones(tf.shape(d))
self.assertEqual(z.dtype, tf.float32)
self.assertEqual([2, 3], z.get_shape())
# Test explicit type control
for dtype in [tf.float32, tf.float64, tf.int32,
tf.uint8, tf.int16, tf.int8,
tf.complex64, tf.int64]:
z = tf.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z = tf.ones(tf.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
class OnesLikeTest(tf.test.TestCase):
def testOnesLike(self):
for dtype in [tf.float32, tf.float64, tf.int32,
tf.uint8, tf.int16, tf.int8,
tf.complex64, tf.int64]:
numpy_dtype = dtype.as_numpy_dtype
with self.test_session():
# Creates a tensor of non-zero values with shape 2 x 3.
d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
        # Constructs a tensor of ones of the same dimensions and type as "d".
z_var = tf.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
def testOnesLikePartialShape(self):
d = tf.placeholder(tf.float32, shape=[None, 4, None])
z = tf.ones_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
class FillTest(tf.test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.fill(dims, val, name="fill")
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
# self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.test_session(use_gpu=False):
tf_ans = tf.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
with self.test_session():
for shape in (-1,), (2, -1), (-1, 2):
with self.assertRaises(ValueError):
tf.fill(shape, 7)
      # Using a placeholder defers the shape check to run time, so the negative
      # dimension is reported as an InvalidArgumentError instead of being caught
      # in Python during graph construction.
dims = tf.placeholder(tf.int32)
fill_t = tf.fill(dims, 3.0)
for shape in (-1,), (2, -1), (-1, 2):
with self.assertRaises(tf.errors.InvalidArgumentError):
fill_t.eval({dims: shape})
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
tf.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(ValueError):
tf.fill([3, 2], [1.0, 2.0])
# Partial dimension information.
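    # With a dims vector of known length but unknown values, the result has a
    # known rank (4) but every individual dimension stays unknown.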
f = tf.fill(
tf.placeholder(tf.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())
def testGradient(self):
with self.test_session():
in_v = tf.constant(5.0)
out_shape = [3, 2]
out_filled = tf.fill(out_shape, in_v)
err = tf.test.compute_gradient_error(in_v, [],
out_filled, out_shape)
self.assertLess(err, 1e-3)
class PlaceholderTest(tf.test.TestCase):
def testDtype(self):
with self.test_session():
p = tf.placeholder(tf.float32, name="p")
p_identity = tf.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
p_identity.eval()
def testShape(self):
with self.test_session():
p = tf.placeholder(tf.float32, shape=(10, 10), name="p")
p_identity = tf.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
feed_array)
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
r"shape \[10,10\]"):
p_identity.eval()
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :5]})
def testPartialShape(self):
with self.test_session():
p = tf.placeholder(tf.float32, shape=[None, 3], name="p")
p_identity = tf.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
feed_array)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
p_identity.eval(feed_dict={p: feed_array[:5, :2]})
def testControlDependency(self):
with self.test_session():
p = tf.placeholder(tf.int32, shape=[], name="p")
with tf.control_dependencies([p]):
c = tf.constant(5, tf.int32)
d = tf.mul(p, c)
self.assertEqual(10, d.eval(feed_dict={p: 2}))
def testBadShape(self):
with self.assertRaises(ValueError):
tf.placeholder(tf.float32, shape=(-1, 10))
def testTensorStr(self):
a = tf.placeholder(tf.float32, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
b = tf.placeholder(tf.int32, shape=(32, 40), name="b")
self.assertEqual(
"<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>",
repr(b))
c = tf.placeholder(tf.qint32, shape=(32, None, 2), name="c")
self.assertEqual(
"<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>",
repr(c))
class PlaceholderWithDefaultTest(tf.test.TestCase):
def testFullShape(self):
with self.test_session():
p = tf.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = tf.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], a.eval())
self.assertAllEqual([[3, 3], [3, 3]],
a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]})
def testPartialShape(self):
with self.test_session():
p = tf.placeholder_with_default([1, 2, 3], shape=[None])
a = tf.identity(p)
self.assertAllEqual([1, 2, 3], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[2, 2], [2, 2]]})
def testNoShape(self):
with self.test_session():
p = tf.placeholder_with_default([17], shape=None)
a = tf.identity(p)
self.assertAllEqual([17], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
self.assertAllEqual([[3, 3], [3, 3]],
a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
if __name__ == "__main__":
tf.test.main()
|
|
from PyQt4 import QtCore, QtGui, Qt
from EditorMisc import Constants
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 662)
MainWindow.setToolTip(_fromUtf8(""))
MainWindow.setStatusTip(_fromUtf8(""))
MainWindow.setWhatsThis(_fromUtf8(""))
MainWindow.setAccessibleName(_fromUtf8(""))
MainWindow.setAccessibleDescription(_fromUtf8(""))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
# central panel - label
self.add_central_label()
# central panel - sliders
self.verticalLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(300, 10, 181, 331))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.add_sliders()
# central panel - fears
self.gridLayoutWidget = QtGui.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(300, 341, 181, 103))
self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.add_fear_combo_boxes()
# left panel
self.verticalLayoutWidget_3 = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(10, 10, 175, 607)) # increase last param by 23 for each new checkbox
self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.add_scenario_combo_box()
self.add_set_bytes_button()
self.add_global_ghost_level_combo_box()
self.add_normal_check_boxes()
self.add_recommended_label()
self.add_recommended_check_boxes()
# right panel
self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(630, 10, 160, 38))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.add_mortal_combo_box()
# scenario panel
self.verticalLayoutWidget_4 = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget_4.setGeometry(QtCore.QRect(180, 10, 111, 186))
self.verticalLayoutWidget_4.setObjectName(_fromUtf8("verticalLayoutWidget_4"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.verticalLayoutWidget_4)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.add_scenario_label()
self.add_scenario_spin_boxes()
self.add_plasm_line_edit()
self.add_mood_combo_box()
self.add_manual_terror_check_box()
#
MainWindow.setCentralWidget(self.centralwidget)
self.add_menu_bar(MainWindow)
self.add_status_bar(MainWindow)
self.retranslateUi(MainWindow)
self.add_object_connections(MainWindow)
self.integrity_dialog = IntegrityCheckDialog(self)
# combo boxes
def add_scenario_combo_box(self):
self.comboBox_3 = QtGui.QComboBox(self.verticalLayoutWidget_3)
self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
self.comboBox_3.addItem("")
self.comboBox_3.setEnabled(False)
self.verticalLayout_3.addWidget(self.comboBox_3)
def add_mortal_combo_box(self):
self.comboBox_4 = QtGui.QComboBox(self.verticalLayoutWidget)
self.comboBox_4.setObjectName(_fromUtf8("comboBox_4"))
self.comboBox_4.addItem("")
self.comboBox_4.setEnabled(False)
self.verticalLayout.addWidget(self.comboBox_4)
def add_fear_combo_boxes(self):
self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.comboBox = CustomComboBox(self)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.setEnabled(False)
self.comboBox_2 = CustomComboBox(self)
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.setItemText(15, _fromUtf8(""))
self.comboBox_2.setEnabled(False)
self.comboBox_6 = CustomComboBox(self)
self.comboBox_6.setObjectName(_fromUtf8("comboBox_6"))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.addItem(_fromUtf8(""))
self.comboBox_6.setItemText(15, _fromUtf8(""))
self.comboBox_6.setEnabled(False)
self.gridLayout.addWidget(self.label_4, 0, 0, 1, 1)
self.gridLayout.addWidget(self.comboBox, 1, 0, 1, 1)
self.gridLayout.addWidget(self.comboBox_6, 1, 1, 1, 1)
self.gridLayout.addWidget(self.label_5, 2, 0, 1, 1)
self.gridLayout.addWidget(self.comboBox_2, 3, 0, 1, 1)
def add_global_ghost_level_combo_box(self):
self.comboBox_7 = QtGui.QComboBox(self.verticalLayoutWidget_3)
self.comboBox_7.setObjectName(_fromUtf8("comboBox_7"))
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.addItem("")
self.comboBox_7.setToolTip(Constants.GLOBAL_GHOST_LEVEL)
self.comboBox_7.setEnabled(False)
self.verticalLayout_3.addWidget(self.comboBox_7)
def add_mood_combo_box(self):
self.comboBox_5 = QtGui.QComboBox(self.verticalLayoutWidget_4)
self.comboBox_5.setObjectName(_fromUtf8("comboBox_5"))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.addItem(_fromUtf8(""))
self.comboBox_5.setEnabled(False)
self.comboBox_5.setHidden(True)
self.verticalLayout_4.addWidget(self.comboBox_5)
# check boxes
def add_normal_check_boxes(self):
self.checkBox1 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox1.setObjectName(_fromUtf8("checkBox_1"))
self.checkBox1.setToolTip(Constants.UNLIMITED_PLASM)
self.checkBox1.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox1)
self.checkBox2 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox2.setObjectName(_fromUtf8("checkBox_2"))
self.checkBox2.setToolTip(Constants.UNLIMITED_GOLDPLASM)
self.checkBox2.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox2)
self.checkBox3 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox3.setObjectName(_fromUtf8("checkBox_3"))
self.checkBox3.setToolTip(Constants.INSTANT_POWER_RECHARGE)
self.checkBox3.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox3)
self.checkBox18 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox18.setObjectName(_fromUtf8("checkBox_18"))
self.checkBox18.setToolTip(Constants.CONTINUOUS_POWER_RECASTING)
self.checkBox18.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox18)
self.checkBox4 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox4.setObjectName(_fromUtf8("checkBox_4"))
self.checkBox4.setToolTip(Constants.RESPONSIVE_EMPTY_PORTRAITS)
self.checkBox4.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox4)
self.checkBox5 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox5.setObjectName(_fromUtf8("checkBox_5"))
self.checkBox5.setToolTip(Constants.GHOST_CLONING)
self.checkBox5.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox5)
self.checkBox9 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox9.setObjectName(_fromUtf8("checkBox_9"))
self.checkBox9.setToolTip(Constants.FETTER_SHARING)
self.checkBox9.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox9)
self.checkBox6 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox6.setObjectName(_fromUtf8("checkBox_6"))
self.checkBox6.setToolTip(Constants.INSIDE_OUTSIDE_ON_ALL_GHOSTS)
self.checkBox6.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox6)
self.checkBox10 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox10.setObjectName(_fromUtf8("checkBox_10"))
self.checkBox10.setToolTip(Constants.MOVABLE_RESTLESS_GHOSTS)
self.checkBox10.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox10)
self.checkBox21 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox21.setObjectName(_fromUtf8("checkBox_21"))
self.checkBox21.setToolTip(Constants.BENCH_RESTLESS_GHOSTS)
self.checkBox21.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox21)
self.checkBox7 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox7.setObjectName(_fromUtf8("checkBox_7"))
self.checkBox7.setToolTip(Constants.IGNORE_WARDS)
self.checkBox7.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox7)
self.checkBox12 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox12.setObjectName(_fromUtf8("checkBox_12"))
self.checkBox12.setToolTip(Constants.UNCOVER_FEARS)
self.checkBox12.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox12)
self.checkBox14 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox14.setObjectName(_fromUtf8("checkBox_14"))
self.checkBox14.setToolTip(Constants.DISABLE_CALMING_EFFECTS)
self.checkBox14.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox14)
self.checkBox15 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox15.setObjectName(_fromUtf8("checkBox_15"))
self.checkBox15.setToolTip(Constants.UNLOCK_EXTRA_FEARS)
self.checkBox15.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox15)
self.checkBox16 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox16.setObjectName(_fromUtf8("checkBox_16"))
self.checkBox16.setToolTip(Constants.FIX_COLD_PHOBIA)
self.checkBox16.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox16)
self.checkBox20 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox20.setObjectName(_fromUtf8("checkBox_20"))
self.checkBox20.setToolTip(Constants.EXPLORATION_MODE)
self.checkBox20.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox20)
def add_recommended_check_boxes(self):
self.checkBox8 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox8.setObjectName(_fromUtf8("checkBox_8"))
self.checkBox8.setToolTip(Constants.DISABLE_FIRE_EXTINGUISHERS)
self.checkBox8.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox8)
self.checkBox11 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox11.setObjectName(_fromUtf8("checkBox_11"))
self.checkBox11.setToolTip(Constants.DISABLE_MADNESS_IMMUNITY)
self.checkBox11.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox11)
self.checkBox13 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox13.setObjectName(_fromUtf8("checkBox_13"))
self.checkBox13.setToolTip(Constants.UNLOCK_MISSING_FEARS)
self.checkBox13.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox13)
self.checkBox19 = QtGui.QCheckBox(self.verticalLayoutWidget_3)
self.checkBox19.setObjectName(_fromUtf8("checkBox_19"))
self.checkBox19.setToolTip(Constants.GHOST_RETRAINING)
self.checkBox19.setEnabled(False)
self.verticalLayout_3.addWidget(self.checkBox19)
def add_manual_terror_check_box(self):
self.checkBox17 = QtGui.QCheckBox(self.verticalLayoutWidget_4)
self.checkBox17.setObjectName(_fromUtf8("checkBox_17"))
self.checkBox17.setToolTip(Constants.MANUAL_TERROR)
self.checkBox17.setEnabled(False)
self.checkBox17.setHidden(True)
self.verticalLayout_4.addWidget(self.checkBox17)
# labels
def add_central_label(self):
self.label = QtGui.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(300, 0, 181, 41))
self.label.setTextFormat(QtCore.Qt.AutoText)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
def add_recommended_label(self):
self.line_1 = QtGui.QFrame(self.verticalLayoutWidget_3)
self.line_1.setFrameShape(QtGui.QFrame.HLine)
self.line_1.setFrameShadow(QtGui.QFrame.Sunken)
self.line_1.setObjectName(_fromUtf8("line_1"))
self.verticalLayout_3.addWidget(self.line_1)
self.label_3 = QtGui.QLabel(self.verticalLayoutWidget_3)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout_3.addWidget(self.label_3)
self.line_2 = QtGui.QFrame(self.verticalLayoutWidget_3)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.verticalLayout_3.addWidget(self.line_2)
def add_scenario_label(self):
self.label_2 = QtGui.QLabel(self.verticalLayoutWidget_4)
self.label_2.setTextFormat(QtCore.Qt.AutoText)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setWordWrap(True)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_2.setHidden(True)
self.verticalLayout_4.addWidget(self.label_2)
# misc
def add_set_bytes_button(self):
self.pushButton_9 = QtGui.QPushButton(self.verticalLayoutWidget_3)
self.pushButton_9.setObjectName(_fromUtf8("pushButton_9"))
self.pushButton_9.setEnabled(False)
self.verticalLayout_3.addWidget(self.pushButton_9)
def add_sliders(self):
spacerItem = QtGui.QSpacerItem(0, 22, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
self.verticalLayout_2.addItem(spacerItem)
self.horizontalSlider = QtGui.QSlider(self.verticalLayoutWidget_2)
self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider.setObjectName(_fromUtf8("horizontalSlider"))
self.horizontalSlider.setEnabled(False)
self.verticalLayout_2.addWidget(self.horizontalSlider)
self.horizontalSlider_4 = CustomSlider(self)
self.horizontalSlider_4.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_4.setObjectName(_fromUtf8("horizontalSlider_4"))
self.horizontalSlider_4.setEnabled(False)
self.verticalLayout_2.addWidget(self.horizontalSlider_4)
self.horizontalSlider_2 = CustomSlider(self)
self.horizontalSlider_2.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_2.setObjectName(_fromUtf8("horizontalSlider_2"))
self.horizontalSlider_2.setEnabled(False)
self.verticalLayout_2.addWidget(self.horizontalSlider_2)
self.horizontalSlider_3 = QtGui.QSlider(self.verticalLayoutWidget_2)
self.horizontalSlider_3.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_3.setObjectName(_fromUtf8("horizontalSlider_3"))
self.horizontalSlider_3.setEnabled(False)
self.verticalLayout_2.addWidget(self.horizontalSlider_3)
self.horizontalSlider.setMinimum(0)
self.horizontalSlider_2.setMinimum(0)
self.horizontalSlider_3.setMinimum(0)
self.horizontalSlider_4.setMinimum(0)
self.horizontalSlider.setMaximum(100)
self.horizontalSlider_2.setMaximum(100)
self.horizontalSlider_3.setMaximum(100)
self.horizontalSlider_4.setMaximum(100)
        self.horizontalSlider.setTickInterval(1)
        self.horizontalSlider_2.setTickInterval(1)
        self.horizontalSlider_3.setTickInterval(1)
        self.horizontalSlider_4.setTickInterval(1)
self.horizontalSlider.setSingleStep(0)
self.horizontalSlider_2.setSingleStep(0)
self.horizontalSlider_3.setSingleStep(0)
self.horizontalSlider_4.setSingleStep(0)
self.horizontalSlider.setPageStep(0)
self.horizontalSlider_2.setPageStep(0)
self.horizontalSlider_3.setPageStep(0)
self.horizontalSlider_4.setPageStep(0)
white = "rgb(255, 255, 255)"
red = "rgb(238, 156, 0)"
orange = "rgb(0, 178, 235)"
blue = "rgb(223, 0, 41)"
self.horizontalSlider.setStyleSheet(self.custom_slider_style_sheet(white))
self.horizontalSlider_2.setStyleSheet(self.custom_slider_style_sheet(red))
self.horizontalSlider_3.setStyleSheet(self.custom_slider_style_sheet(orange))
self.horizontalSlider_4.setStyleSheet(self.custom_slider_style_sheet(blue))
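    # Each stat slider gets its own groove colour and a black handle; both turn
    # transparent while the slider is disabled, so nothing is drawn until the
    # corresponding value can actually be edited.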
def custom_slider_style_sheet(self, color):
return '''
QSlider::groove:horizontal {
background-color: ''' + color + ''';
border: 1px solid;
height: 6px;
}
QSlider::groove:horizontal:disabled {
background-color: transparent;
}
QSlider::handle:horizontal {
background-color: black;
margin: -16px 0;
width: 10px;
}
QSlider::handle:horizontal:disabled {
background-color: transparent;
}'''
def add_scenario_spin_boxes(self):
self.spinBox = QtGui.QSpinBox(self.verticalLayoutWidget_4)
self.spinBox.setProperty("value", 0)
self.spinBox.setMinimum(0)
self.spinBox.setMaximum(8)
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.spinBox.setEnabled(False)
self.spinBox.setHidden(True)
self.verticalLayout_4.addWidget(self.spinBox)
self.spinBox_2 = QtGui.QDoubleSpinBox(self.verticalLayoutWidget_4)
self.spinBox_2.setProperty("value", 0)
self.spinBox_2.setMinimum(0)
self.spinBox_2.setMaximum(100)
self.spinBox_2.setDecimals(23)
self.spinBox_2.setObjectName(_fromUtf8("spinBox_2"))
self.spinBox_2.setEnabled(False)
self.spinBox_2.setHidden(True)
self.verticalLayout_4.addWidget(self.spinBox_2)
def add_plasm_line_edit(self):
self.lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget_4)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.lineEdit.setEnabled(False)
self.lineEdit.setHidden(True)
self.verticalLayout_4.addWidget(self.lineEdit)
def add_menu_bar(self, MainWindow):
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.actionOpen = QtGui.QAction(MainWindow)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
#self.actionOpen.setShortcut(QtGui.QKeySequence("Ctrl+Z"))
self.actionSave = QtGui.QAction(MainWindow)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
#self.actionSave.setShortcut(QtGui.QKeySequence("Ctrl+C"))
self.actionSave.setEnabled(False)
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionScripts = QtGui.QAction(MainWindow)
self.actionScripts.setObjectName(_fromUtf8("actionScripts"))
self.actionScripts.setEnabled(False)
self.actionReactions = QtGui.QAction(MainWindow)
self.actionReactions.setObjectName(_fromUtf8("actionReactions"))
self.actionReactions.setEnabled(False)
self.menubar.addAction(self.actionOpen)
self.menubar.addAction(self.actionSave)
self.menubar.addAction(self.actionScripts)
self.menubar.addAction(self.actionReactions)
self.menubar.addAction(self.actionAbout)
def add_status_bar(self, MainWindow):
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", Constants.VERSION, None))
self.label.setText(_translate("MainWindow", "Open the file", None))
self.label_2.setText(_translate("MainWindow", "Scenario name", None))
self.label_3.setText(_translate("MainWindow", "Recommended:", None))
self.label_4.setText(_translate("MainWindow", "Conscious", None))
self.label_5.setText(_translate("MainWindow", "Unconscious", None))
self.horizontalSlider.setStatusTip(_translate("MainWindow", "Willpower", None))
self.horizontalSlider_2.setStatusTip(_translate("MainWindow", "Insanity", None))
self.horizontalSlider_3.setStatusTip(_translate("MainWindow", "Belief", None))
self.horizontalSlider_4.setStatusTip(_translate("MainWindow", "Terror", None))
self.checkBox1.setText(_translate("MainWindow", "Unlimited Plasm", None))
self.checkBox2.setText(_translate("MainWindow", "Unlimited Gold Plasm", None))
self.checkBox3.setText(_translate("MainWindow", "Instant Power Recharge", None))
self.checkBox4.setText(_translate("MainWindow", "Responsive Empty Portraits", None))
self.checkBox5.setText(_translate("MainWindow", "Ghost Cloning", None))
self.checkBox6.setText(_translate("MainWindow", "Inside/Outside On All Ghosts", None))
self.checkBox7.setText(_translate("MainWindow", "Ignore Wards", None))
self.checkBox8.setText(_translate("MainWindow", "Disable Fire Extinguishers", None))
self.checkBox9.setText(_translate("MainWindow", "Fetter Sharing", None))
self.checkBox10.setText(_translate("MainWindow", "Movable Restless Ghosts", None))
self.checkBox11.setText(_translate("MainWindow", "Disable Madness Immunity", None))
self.checkBox12.setText(_translate("MainWindow", "Uncover Fears", None))
self.checkBox13.setText(_translate("MainWindow", "Unlock Missing Fears", None))
self.checkBox14.setText(_translate("MainWindow", "Disable Calming Effects", None))
self.checkBox15.setText(_translate("MainWindow", "Unlock Extra Fears", None))
self.checkBox16.setText(_translate("MainWindow", "Fix Cold Phobia", None))
self.checkBox17.setText(_translate("MainWindow", "Manual Terror", None))
self.checkBox18.setText(_translate("MainWindow", "Continuous Power Recasting", None))
self.checkBox19.setText(_translate("MainWindow", "Ghost Retraining", None))
self.checkBox20.setText(_translate("MainWindow", "Exploration Mode (Alpha)", None))
self.checkBox21.setText(_translate("MainWindow", "Bench Restless Ghosts", None))
self.comboBox.setStatusTip(_translate("MainWindow", "Conscious fear", None))
self.comboBox.setItemText(0, _translate("MainWindow", "none", None))
self.comboBox.setItemText(1, _translate("MainWindow", "blood", None))
self.comboBox.setItemText(2, _translate("MainWindow", "cold*", None))
self.comboBox.setItemText(3, _translate("MainWindow", "creepy", None))
self.comboBox.setItemText(4, _translate("MainWindow", "darkness", None))
self.comboBox.setItemText(5, _translate("MainWindow", "electrical", None))
self.comboBox.setItemText(6, _translate("MainWindow", "fire", None))
self.comboBox.setItemText(7, _translate("MainWindow", "hunted", None))
self.comboBox.setItemText(8, _translate("MainWindow", "noise", None))
self.comboBox.setItemText(9, _translate("MainWindow", "normal", None))
self.comboBox.setItemText(10, _translate("MainWindow", "storm", None))
self.comboBox.setItemText(11, _translate("MainWindow", "trapped", None))
self.comboBox.setItemText(12, _translate("MainWindow", "unclean", None))
self.comboBox.setItemText(13, _translate("MainWindow", "water", None))
self.comboBox.setItemText(14, _translate("MainWindow", "pursuit*", None))
self.comboBox.setItemText(15, _translate("MainWindow", "", None))
self.comboBox_2.setStatusTip(_translate("MainWindow", "Unconscious fear", None))
self.comboBox_2.setItemText(0, _translate("MainWindow", "none", None))
self.comboBox_2.setItemText(1, _translate("MainWindow", "blood", None))
self.comboBox_2.setItemText(2, _translate("MainWindow", "cold*", None))
self.comboBox_2.setItemText(3, _translate("MainWindow", "creepy", None))
self.comboBox_2.setItemText(4, _translate("MainWindow", "darkness", None))
self.comboBox_2.setItemText(5, _translate("MainWindow", "electrical", None))
self.comboBox_2.setItemText(6, _translate("MainWindow", "fire", None))
self.comboBox_2.setItemText(7, _translate("MainWindow", "hunted", None))
self.comboBox_2.setItemText(8, _translate("MainWindow", "noise", None))
self.comboBox_2.setItemText(9, _translate("MainWindow", "normal", None))
self.comboBox_2.setItemText(10, _translate("MainWindow", "storm", None))
self.comboBox_2.setItemText(11, _translate("MainWindow", "trapped", None))
self.comboBox_2.setItemText(12, _translate("MainWindow", "unclean", None))
self.comboBox_2.setItemText(13, _translate("MainWindow", "water", None))
self.comboBox_2.setItemText(14, _translate("MainWindow", "pursuit*", None))
self.comboBox_2.setItemText(15, _translate("MainWindow", "", None))
self.comboBox_6.setStatusTip(_translate("MainWindow", "Extra conscious fear", None))
self.comboBox_6.setItemText(0, _translate("MainWindow", "none", None))
self.comboBox_6.setItemText(1, _translate("MainWindow", "blood", None))
self.comboBox_6.setItemText(2, _translate("MainWindow", "cold*", None))
self.comboBox_6.setItemText(3, _translate("MainWindow", "creepy", None))
self.comboBox_6.setItemText(4, _translate("MainWindow", "darkness", None))
self.comboBox_6.setItemText(5, _translate("MainWindow", "electrical", None))
self.comboBox_6.setItemText(6, _translate("MainWindow", "fire", None))
self.comboBox_6.setItemText(7, _translate("MainWindow", "hunted", None))
self.comboBox_6.setItemText(8, _translate("MainWindow", "noise", None))
self.comboBox_6.setItemText(9, _translate("MainWindow", "normal", None))
self.comboBox_6.setItemText(10, _translate("MainWindow", "storm", None))
self.comboBox_6.setItemText(11, _translate("MainWindow", "trapped", None))
self.comboBox_6.setItemText(12, _translate("MainWindow", "unclean", None))
self.comboBox_6.setItemText(13, _translate("MainWindow", "water", None))
self.comboBox_6.setItemText(14, _translate("MainWindow", "pursuit*", None))
self.comboBox_6.setItemText(15, _translate("MainWindow", "", None))
self.comboBox_3.setStatusTip(_translate("MainWindow", "Scenarios", None))
self.comboBox_3.setItemText(0, _translate("MainWindow", "pick scenario", None))
self.comboBox_4.setStatusTip(_translate("MainWindow", "Mortals", None))
self.comboBox_4.setItemText(0, _translate("MainWindow", "pick mortal", None))
self.comboBox_5.setStatusTip(_translate("MainWindow", "Mood (scenario-wide)", None))
self.comboBox_5.setItemText(0, _translate("MainWindow", "none", None))
self.comboBox_5.setItemText(1, _translate("MainWindow", "neutral", None))
self.comboBox_5.setItemText(2, _translate("MainWindow", "friendly", None))
self.comboBox_5.setItemText(3, _translate("MainWindow", "angry", None))
self.comboBox_5.setItemText(4, _translate("MainWindow", "calm", None))
self.comboBox_5.setItemText(5, _translate("MainWindow", "rattled", None))
self.comboBox_5.setItemText(6, _translate("MainWindow", "terrified", None))
self.comboBox_5.setItemText(7, _translate("MainWindow", "insane", None))
self.comboBox_5.setItemText(8, _translate("MainWindow", " ", None))
self.comboBox_7.setItemText(0, _translate("MainWindow", "global ghost level: off", None))
self.comboBox_7.setItemText(1, _translate("MainWindow", "global ghost level: 0", None))
self.comboBox_7.setItemText(2, _translate("MainWindow", "global ghost level: 1", None))
self.comboBox_7.setItemText(3, _translate("MainWindow", "global ghost level: 2", None))
self.comboBox_7.setItemText(4, _translate("MainWindow", "global ghost level: 3", None))
self.comboBox_7.setItemText(5, _translate("MainWindow", "global ghost level: 4", None))
self.comboBox_7.setItemText(6, _translate("MainWindow", "global ghost level: 5", None))
self.comboBox_7.setItemText(7, _translate("MainWindow", "global ghost level: 6", None))
self.comboBox_7.setItemText(8, _translate("MainWindow", "global ghost level: 7", None))
self.comboBox_7.setItemText(9, _translate("MainWindow", "global ghost level: 8", None))
self.actionOpen.setText(_translate("MainWindow", "Open", None))
self.actionSave.setText(_translate("MainWindow", "Save", None))
self.actionAbout.setText(_translate("MainWindow", "About", None))
self.actionScripts.setText(_translate("MainWindow", "Scripts", None))
self.actionReactions.setText(_translate("MainWindow", "Reactions", None))
self.spinBox.setStatusTip(_translate("MainWindow", "Haunter Slots", None))
self.spinBox_2.setStatusTip(_translate("MainWindow", "Mean Terror", None))
self.lineEdit.setStatusTip(_translate("MainWindow", "Starting Plasm (estimate)", None))
self.pushButton_9.setText(_translate("MainWindow", "Set bytes @ address\nUSE AT OWN RISK", None))
def add_object_connections(self, MainWindow):
QtCore.QObject.connect(self.horizontalSlider, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), MainWindow.sliderMoved)
QtCore.QObject.connect(self.horizontalSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), MainWindow.sliderMoved)
QtCore.QObject.connect(self.horizontalSlider_3, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), MainWindow.sliderMoved)
QtCore.QObject.connect(self.horizontalSlider_4, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), MainWindow.sliderMoved)
QtCore.QObject.connect(self.horizontalSlider, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), MainWindow.setWillpower)
QtCore.QObject.connect(self.horizontalSlider_2, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), MainWindow.setInsanity)
QtCore.QObject.connect(self.horizontalSlider_3, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), MainWindow.setBelief)
QtCore.QObject.connect(self.horizontalSlider_4, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), MainWindow.setTerror)
QtCore.QObject.connect(self.checkBox1, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setUnlimitedPlasm)
QtCore.QObject.connect(self.checkBox2, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setUnlimitedGoldplasm)
QtCore.QObject.connect(self.checkBox3, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setInstantPowerRecharge)
QtCore.QObject.connect(self.checkBox4, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setResponsivePortraits)
QtCore.QObject.connect(self.checkBox5, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setGhostCloning)
QtCore.QObject.connect(self.checkBox6, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setInsideOutsideOnAll)
QtCore.QObject.connect(self.checkBox7, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setIgnoreWards)
QtCore.QObject.connect(self.checkBox8, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setDisableFireExtinguishers)
QtCore.QObject.connect(self.checkBox9, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setFetterSharing)
QtCore.QObject.connect(self.checkBox10, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setMovableRestlessGhosts)
QtCore.QObject.connect(self.checkBox11, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setDisableMadnessImmunity)
QtCore.QObject.connect(self.checkBox12, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setUncoverFears)
QtCore.QObject.connect(self.checkBox13, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setUnlockMissingFears)
QtCore.QObject.connect(self.checkBox14, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setDisableCalmingEffects)
QtCore.QObject.connect(self.checkBox15, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setUnlockExtraFears)
QtCore.QObject.connect(self.checkBox16, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setFixColdPhobia)
QtCore.QObject.connect(self.checkBox17, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setManualTerror)
QtCore.QObject.connect(self.checkBox18, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setContinuousPowerRecasting)
QtCore.QObject.connect(self.checkBox19, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setGhostRetraining)
QtCore.QObject.connect(self.checkBox20, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setExplorationMode)
QtCore.QObject.connect(self.checkBox21, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), MainWindow.setBenchRestlessGhosts)
QtCore.QObject.connect(self.comboBox, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), MainWindow.setConciousFear)
QtCore.QObject.connect(self.comboBox_2, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), MainWindow.setUnconciousFear)
QtCore.QObject.connect(self.comboBox_6, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), MainWindow.setExtraFear)
QtCore.QObject.connect(self.comboBox_3, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), MainWindow.scenarioChanged)
QtCore.QObject.connect(self.comboBox_4, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), MainWindow.mortalChanged)
QtCore.QObject.connect(self.comboBox_5, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), MainWindow.setMood)
QtCore.QObject.connect(self.comboBox_7, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), MainWindow.setGhostLevel)
QtCore.QObject.connect(self.actionOpen, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.open_data)
QtCore.QObject.connect(self.actionSave, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.save_data)
QtCore.QObject.connect(self.actionAbout, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.about)
QtCore.QObject.connect(self.actionScripts, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.show_scripts_window)
QtCore.QObject.connect(self.actionReactions, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.show_reactions_window)
QtCore.QObject.connect(self.spinBox, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), MainWindow.setHaunterSlots)
QtCore.QObject.connect(self.spinBox_2, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), MainWindow.setMeanTerror)
QtCore.QObject.connect(self.pushButton_9, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.setBytesAtAddress)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
class IntegrityCheckDialog(QtGui.QDialog):
def __init__(self, parent=None):
super(IntegrityCheckDialog, self).__init__(parent)
self.setupUi(self)
def setupUi(self, Dialog):
Dialog.setWindowTitle("Integrity Check")
self.horizontalLayout = QtGui.QHBoxLayout(Dialog)
self.splitter = QtGui.QSplitter(Dialog)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.label = QtGui.QLabel(self.splitter)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setWordWrap(True)
self.plainTextEdit = QtGui.QPlainTextEdit(self.splitter)
self.plainTextEdit.setFixedSize(360, 150)
self.plainTextEdit.setReadOnly(True)
self.plainTextEdit.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
self.label_2 = QtGui.QLabel(self.splitter)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setWordWrap(True)
self.horizontalLayout.addWidget(self.splitter)
class ScriptsWindow(QtGui.QDialog):
def __init__(self, parent, scripts):
super(ScriptsWindow, self).__init__(parent)
self.scripts = scripts
self.setupUi(self)
self.setupCheckBoxes()
def setupUi(self, Dialog):
Dialog.setWindowTitle("Scripts")
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.pushButton_1 = QtGui.QPushButton()
self.pushButton_2 = QtGui.QPushButton()
self.pushButton_3 = QtGui.QPushButton()
self.pushButton_4 = QtGui.QPushButton()
self.pushButton_1.setObjectName(_fromUtf8("pushButton_1"))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
self.pushButton_1.setText(_translate("MainWindow", "Sort by state", None))
self.pushButton_2.setText(_translate("MainWindow", "Sort by id", None))
self.pushButton_3.setText(_translate("MainWindow", "Sort by name", None))
self.pushButton_4.setText(_translate("MainWindow", "Sort by comment", None))
self.pushButton_1.clicked.connect(self.sort_by_state)
self.pushButton_2.clicked.connect(self.sort_by_id)
self.pushButton_3.clicked.connect(self.sort_by_name)
self.pushButton_4.clicked.connect(self.sort_by_comment)
self.button_area_content = QtGui.QWidget()
self.grid_layout_1 = QtGui.QGridLayout(self.button_area_content)
self.grid_layout_1.addWidget(self.pushButton_1, 0, 0)
self.grid_layout_1.addWidget(self.pushButton_2, 0, 1)
self.grid_layout_1.addWidget(self.pushButton_3, 0, 2)
self.grid_layout_1.addWidget(self.pushButton_4, 0, 3)
self.lineEdit = QtGui.QLineEdit()
self.pushButton_5 = QtGui.QPushButton()
self.pushButton_6 = QtGui.QPushButton()
self.pushButton_5.setObjectName(_fromUtf8("pushButton_5"))
self.pushButton_6.setObjectName(_fromUtf8("pushButton_6"))
self.pushButton_5.setText(_translate("MainWindow", "Search", None))
self.pushButton_6.setText(_translate("MainWindow", "Clear", None))
self.pushButton_5.clicked.connect(self.search)
self.pushButton_6.clicked.connect(self.clear)
self.search_area_content = QtGui.QWidget()
self.grid_layout_2 = QtGui.QGridLayout(self.search_area_content)
self.grid_layout_2.addWidget(self.lineEdit, 0, 0)
self.grid_layout_2.addWidget(self.pushButton_5, 0, 1)
self.grid_layout_2.addWidget(self.pushButton_6, 0, 2)
self.scroll_area_content = QtGui.QWidget()
self.scroll_area = QtGui.QScrollArea(self)
self.scroll_area.setWidget(self.scroll_area_content)
self.scroll_area.setWidgetResizable(True)
self.scroll_area.setFixedSize(600, 600)
self.grid_layout_3 = QtGui.QGridLayout(self.scroll_area_content)
self.verticalLayout.addWidget(self.search_area_content)
self.verticalLayout.addWidget(self.button_area_content)
self.verticalLayout.addWidget(self.scroll_area)
def setupCheckBoxes(self):
for elem in self.scripts:
checkbox_id = elem[2]
name = elem[3]
comment = elem[4]
text = "%s - %s%s" % (checkbox_id.zfill(3), name, comment)
checkbox = QtGui.QCheckBox()
checkbox.setObjectName(_fromUtf8("checkBox_scr_%s" % checkbox_id))
checkbox.setText(_translate("MainWindow", text, None))
checkbox.clicked.connect(self.parent().setScript)
self.grid_layout_3.addWidget(checkbox)
def sort_by_state(self):
states = dict()
regex = QtCore.QRegExp("checkBox_scr_\\d+")
checkboxes = self.findChildren(QtGui.QCheckBox, regex)
for checkbox in checkboxes:
self.grid_layout_3.removeWidget(checkbox)
obj_name = checkbox.objectName().split("_")
checkbox_id = str(obj_name[2])
states[checkbox_id] = checkbox.isChecked()
idx = 2
key = lambda x: states[x[idx]]
reverse = not states[self.scripts[0][idx]]
self.scripts.sort(key=key, reverse=reverse)
for elem in self.scripts:
checkbox_id = elem[2]
checkbox = self.findChild(QtGui.QCheckBox, "checkBox_scr_%s" % checkbox_id)
self.grid_layout_3.addWidget(checkbox)
def generic_sort(self, idx, key):
regex = QtCore.QRegExp("checkBox_scr_\\d+")
checkboxes = self.findChildren(QtGui.QCheckBox, regex)
for checkbox in checkboxes:
self.grid_layout_3.removeWidget(checkbox)
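        # Toggle the direction: if the scripts are already ordered ascending by
        # this column, re-sorting flips them to descending, and vice versa.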
reverse = self.scripts[0][idx] < self.scripts[-1][idx]
self.scripts.sort(key=key, reverse=reverse)
for elem in self.scripts:
checkbox_id = elem[2]
checkbox = self.findChild(QtGui.QCheckBox, "checkBox_scr_%s" % checkbox_id)
self.grid_layout_3.addWidget(checkbox)
def sort_by_id(self):
idx = 2
key = lambda x: int(x[idx])
self.generic_sort(idx, key)
def sort_by_name(self):
idx = 3
key = lambda x: x[idx].lower()
self.generic_sort(idx, key)
def sort_by_comment(self):
idx = 4
key = lambda x: x[idx].lower()
self.generic_sort(idx, key)
def search(self):
search_query = self.lineEdit.text()
regex = QtCore.QRegExp("checkBox_scr_\\d+")
checkboxes = self.findChildren(QtGui.QCheckBox, regex)
for checkbox in checkboxes:
checkbox.setVisible(True)
if not checkbox.text().contains(search_query, QtCore.Qt.CaseInsensitive):
checkbox.setVisible(False)
def clear(self):
self.lineEdit.clear()
regex = QtCore.QRegExp("checkBox_scr_\\d+")
checkboxes = self.findChildren(QtGui.QCheckBox, regex)
for checkbox in checkboxes:
checkbox.setVisible(True)
class ReactionsWindow(QtGui.QDialog):
def __init__(self, parent, reactions):
super(ReactionsWindow, self).__init__(parent)
self.reactions = reactions
self.setupUi(self)
self.setupComboBoxes()
def setupUi(self, Dialog):
Dialog.setWindowTitle("Reactions")
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.pushButton_1 = QtGui.QPushButton()
self.pushButton_1.setObjectName(_fromUtf8("pushButton_1"))
self.pushButton_1.setText(_translate("MainWindow", "Restore defaults", None))
self.pushButton_1.clicked.connect(self.parent().setReactionsToDefault)
self.button_area_content = QtGui.QWidget()
self.grid_layout_1 = QtGui.QGridLayout(self.button_area_content)
self.grid_layout_1.addWidget(self.pushButton_1, 0, 0)
self.scroll_area_content = QtGui.QWidget()
self.scroll_area = QtGui.QScrollArea(self)
self.scroll_area.setWidget(self.scroll_area_content)
self.scroll_area.setWidgetResizable(True)
self.scroll_area.setFixedSize(600, 600)
self.grid_layout_2 = QtGui.QGridLayout(self.scroll_area_content)
self.verticalLayout.addWidget(self.button_area_content)
self.verticalLayout.addWidget(self.scroll_area)
def setupComboBoxes(self):
label = QtGui.QLabel()
label.setText("CHANGE ALL")
label_empty = QtGui.QLabel()
combobox = QtGui.QComboBox()
combobox.setObjectName(_fromUtf8("comboBox_rea_all"))
combobox.addItems([x["reaction_name"] for x in self.reactions])
combobox.currentIndexChanged.connect(self.parent().setReactionAll)
combobox.setStyleSheet("QComboBox { color: rgba(0, 0, 0, 0) }")
self.grid_layout_2.addWidget(label, 0, 0)
self.grid_layout_2.addWidget(combobox, 0, 1)
self.grid_layout_2.addWidget(label_empty, 1, 0)
for idx, elem in enumerate(self.reactions):
name = elem["reaction_name"]
comment = elem["comment"].replace("\"", "")
label = QtGui.QLabel()
label.setText(name)
label.setToolTip(comment)
combobox = QtGui.QComboBox()
combobox.setObjectName(_fromUtf8("comboBox_rea_%d" % idx))
model = combobox.model()
for x in self.reactions:
item = QtGui.QStandardItem(x["reaction_name"])
if name != x["reaction_name"]:
item.setForeground(QtGui.QColor("blue"))
model.appendRow(item)
combobox.currentIndexChanged.connect(self.parent().setReaction)
self.grid_layout_2.addWidget(label, idx + 2, 0)
self.grid_layout_2.addWidget(combobox, idx + 2, 1)
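# Hovering a disabled stat slider highlights (in yellow) the checkbox that has to
# be ticked before that stat becomes editable; the highlight is cleared on leave.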
class CustomSlider(QtGui.QSlider):
def __init__(self, parent):
self.parent = parent
super(CustomSlider, self).__init__(parent)
def enterEvent(self, event):
if not self.isEnabled():
name = self.objectName()
if not self.parent.checkBox11.isChecked() and name == "horizontalSlider_2":
self.parent.checkBox11.setStyleSheet("background: yellow")
elif not self.parent.checkBox17.isChecked() and name == "horizontalSlider_4":
self.parent.checkBox17.setStyleSheet("background: yellow")
return super(CustomSlider, self).enterEvent(event)
def leaveEvent(self, event):
name = self.objectName()
if name == "horizontalSlider_2":
self.parent.checkBox11.setStyleSheet("background: none")
elif name == "horizontalSlider_4":
self.parent.checkBox17.setStyleSheet("background: none")
        return super(CustomSlider, self).leaveEvent(event)
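# The fear combo boxes give the same hint: hovering a disabled box highlights the
# checkbox that unlocks it.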
class CustomComboBox(QtGui.QComboBox):
def __init__(self, parent):
self.parent = parent
super(CustomComboBox, self).__init__(parent)
def enterEvent(self, event):
if not self.isEnabled():
name = self.objectName()
if not self.parent.checkBox13.isChecked() and (name == "comboBox" or name == "comboBox_2"):
self.parent.checkBox13.setStyleSheet("background: yellow")
elif not self.parent.checkBox15.isChecked() and name == "comboBox_6":
self.parent.checkBox15.setStyleSheet("background: yellow")
return super(CustomComboBox, self).enterEvent(event)
def leaveEvent(self, event):
name = self.objectName()
if name == "comboBox" or name == "comboBox_2":
self.parent.checkBox13.setStyleSheet("background: none")
elif name == "comboBox_6":
self.parent.checkBox15.setStyleSheet("background: none")
        return super(CustomComboBox, self).leaveEvent(event)
|
|
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Collection, Optional
import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_allclose
import gpflow
from gpflow.base import AnyNDArray, MeanAndVariance
from gpflow.conditionals import conditional, uncertain_conditional
from gpflow.config import default_float
from gpflow.mean_functions import Constant, Linear, MeanFunction, Zero
from gpflow.quadrature import mvnquad
from gpflow.utilities import training_loop
rng = np.random.RandomState(1)
# ------------------------------------------
# Helpers
# ------------------------------------------
class MomentMatchingSVGP(gpflow.models.SVGP):
def uncertain_predict_f_moment_matching(
self, Xmu: tf.Tensor, Xcov: tf.Tensor
) -> MeanAndVariance:
return uncertain_conditional(
Xmu,
Xcov,
self.inducing_variable,
self.kernel,
self.q_mu,
self.q_sqrt,
mean_function=self.mean_function,
white=self.whiten,
full_output_cov=self.full_output_cov,
)
def uncertain_predict_f_monte_carlo(
self, Xmu: tf.Tensor, Xchol: tf.Tensor, mc_iter: int = int(1e6)
) -> MeanAndVariance:
D_in = Xchol.shape[0]
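        # Draw inputs X ~ N(Xmu, Xchol Xchol^T) by transforming standard normal
        # samples with the Cholesky factor, then push every sample through the
        # posterior and estimate the predictive mean and covariance empirically.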
X_samples = Xmu + np.reshape(
Xchol[None, :, :] @ rng.randn(mc_iter, D_in)[:, :, None], [mc_iter, D_in]
)
F_mu, F_var = self.predict_f(X_samples)
F_samples = (F_mu + rng.randn(*F_var.shape) * (F_var ** 0.5)).numpy()
mean = np.mean(F_samples, axis=0)
covar = np.cov(F_samples.T)
return mean, covar
def gen_L(n: int, *shape: int) -> AnyNDArray:
return np.array([np.tril(rng.randn(*shape)) for _ in range(n)])
def gen_q_sqrt(D_out: int, *shape: int) -> tf.Tensor:
return tf.convert_to_tensor(
np.array([np.tril(rng.randn(*shape)) for _ in range(D_out)]),
dtype=default_float(),
)
def mean_function_factory(
mean_function_name: Optional[str], D_in: int, D_out: int
) -> Optional[MeanFunction]:
if mean_function_name == "Zero":
return Zero(output_dim=D_out)
elif mean_function_name == "Constant":
return Constant(c=rng.rand(D_out))
elif mean_function_name == "Linear":
return Linear(A=rng.rand(D_in, D_out), b=rng.rand(D_out))
else:
return None
# ------------------------------------------
# Data classes: storing constants
# ------------------------------------------
class Data:
N = 7
N_new = 2
D_out = 3
D_in = 1
X = np.linspace(-5, 5, N)[:, None] + rng.randn(N, 1)
Y = np.hstack([np.sin(X), np.cos(X), X ** 2])
Xnew_mu = rng.randn(N_new, 1)
Xnew_covar = np.zeros((N_new, 1, 1))
data = (X, Y)
class DataMC1(Data):
Y = np.hstack([np.sin(Data.X), np.sin(Data.X) * 2, Data.X ** 2])
data = (Data.X, Y)
class DataMC2(Data):
N = 7
N_new = 5
D_out = 4
D_in = 2
X = rng.randn(N, D_in)
Y = np.hstack([np.sin(X), np.sin(X)])
Xnew_mu = rng.randn(N_new, D_in)
L = gen_L(N_new, D_in, D_in)
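    # Each test covariance is assembled as L @ L.T from a random lower-triangular
    # factor, which guarantees a symmetric positive semi-definite matrix.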
Xnew_covar = np.array([l @ l.T for l in L])
data = (X, Y)
class DataQuad:
num_data = 10
num_ind = 10
D_in = 2
D_out = 3
H = 150
Xmu = tf.convert_to_tensor(rng.randn(num_data, D_in), dtype=default_float())
L = gen_L(num_data, D_in, D_in)
Xvar = tf.convert_to_tensor(np.array([l @ l.T for l in L]), dtype=default_float())
Z = rng.randn(num_ind, D_in)
q_mu = tf.convert_to_tensor(rng.randn(num_ind, D_out), dtype=default_float())
q_sqrt = gen_q_sqrt(D_out, num_ind, num_ind)
MEANS: Collection[Optional[str]] = ["Constant", "Linear", "Zero", None]
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("mean", MEANS)
def test_no_uncertainty(white: bool, mean: Optional[str]) -> None:
mean_function = mean_function_factory(mean, Data.D_in, Data.D_out)
kernel = gpflow.kernels.SquaredExponential(variance=rng.rand())
model = MomentMatchingSVGP(
kernel,
gpflow.likelihoods.Gaussian(),
num_latent_gps=Data.D_out,
mean_function=mean_function,
inducing_variable=Data.X.copy(),
whiten=white,
)
model.full_output_cov = False
training_loop(
model.training_loss_closure(Data.data),
optimizer=tf.optimizers.Adam(),
var_list=model.trainable_variables,
maxiter=100,
compile=True,
)
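    # With zero input covariance there is no input noise to integrate over, so the
    # uncertain conditional must reduce to the ordinary posterior prediction at
    # the input means.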
mean1, var1 = model.predict_f(Data.Xnew_mu)
mean2, var2 = model.uncertain_predict_f_moment_matching(
*map(tf.convert_to_tensor, [Data.Xnew_mu, Data.Xnew_covar])
)
assert_allclose(mean1, mean2)
for n in range(Data.N_new):
assert_allclose(var1[n, :], var2[n, ...])
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("mean", MEANS)
def test_monte_carlo_1_din(white: bool, mean: Optional[str]) -> None:
kernel = gpflow.kernels.SquaredExponential(variance=rng.rand())
mean_function = mean_function_factory(mean, DataMC1.D_in, DataMC1.D_out)
model = MomentMatchingSVGP(
kernel,
gpflow.likelihoods.Gaussian(),
num_latent_gps=DataMC1.D_out,
mean_function=mean_function,
inducing_variable=DataMC1.X.copy(),
whiten=white,
)
model.full_output_cov = True
training_loop(
model.training_loss_closure(DataMC1.data),
optimizer=tf.optimizers.Adam(),
var_list=model.trainable_variables,
maxiter=200,
compile=True,
)
mean1, var1 = model.uncertain_predict_f_moment_matching(
*map(tf.convert_to_tensor, [DataMC1.Xnew_mu, DataMC1.Xnew_covar])
)
for n in range(DataMC1.N_new):
mean2, var2 = model.uncertain_predict_f_monte_carlo(
DataMC1.Xnew_mu[n, ...], DataMC1.Xnew_covar[n, ...] ** 0.5
)
assert_allclose(mean1[n, ...], mean2, atol=1e-3, rtol=1e-1)
assert_allclose(var1[n, ...], var2, atol=1e-2, rtol=1e-1)
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("mean", MEANS)
def test_monte_carlo_2_din(white: bool, mean: Optional[str]) -> None:
kernel = gpflow.kernels.SquaredExponential(variance=rng.rand())
mean_function = mean_function_factory(mean, DataMC2.D_in, DataMC2.D_out)
model = MomentMatchingSVGP(
kernel,
gpflow.likelihoods.Gaussian(),
num_latent_gps=DataMC2.D_out,
mean_function=mean_function,
inducing_variable=DataMC2.X.copy(),
whiten=white,
)
model.full_output_cov = True
training_loop(
model.training_loss_closure(DataMC2.data),
optimizer=tf.optimizers.Adam(),
var_list=model.trainable_variables,
maxiter=100,
compile=True,
)
mean1, var1 = model.uncertain_predict_f_moment_matching(
*map(tf.convert_to_tensor, [DataMC2.Xnew_mu, DataMC2.Xnew_covar])
)
for n in range(DataMC2.N_new):
mean2, var2 = model.uncertain_predict_f_monte_carlo(
DataMC2.Xnew_mu[n, ...], DataMC2.L[n, ...]
)
assert_allclose(mean1[n, ...], mean2, atol=1e-2)
assert_allclose(var1[n, ...], var2, atol=1e-2)
@pytest.mark.parametrize("white", [True, False])
@pytest.mark.parametrize("mean", MEANS)
def test_quadrature(white: bool, mean: Optional[str]) -> None:
kernel = gpflow.kernels.SquaredExponential()
inducing_variable = gpflow.inducing_variables.InducingPoints(DataQuad.Z)
mean_function = mean_function_factory(mean, DataQuad.D_in, DataQuad.D_out)
effective_mean = mean_function or (lambda X: 0.0)
def conditional_fn(X: tf.Tensor) -> MeanAndVariance:
return conditional(
X,
inducing_variable,
kernel,
DataQuad.q_mu,
q_sqrt=DataQuad.q_sqrt,
white=white,
)
def mean_fn(X: tf.Tensor) -> tf.Tensor:
return conditional_fn(X)[0] + effective_mean(X)
def var_fn(X: tf.Tensor) -> tf.Tensor:
return conditional_fn(X)[1]
quad_args = (
DataQuad.Xmu,
DataQuad.Xvar,
DataQuad.H,
DataQuad.D_in,
(DataQuad.D_out,),
)
mean_quad = mvnquad(mean_fn, *quad_args)
var_quad = mvnquad(var_fn, *quad_args)
def mean_sq_fn(X: tf.Tensor) -> tf.Tensor:
return mean_fn(X) ** 2
mean_sq_quad = mvnquad(mean_sq_fn, *quad_args)
    # Law of total variance: Var[f] = E[Var[f|X]] + Var[E[f|X]]
    var_quad = var_quad + (mean_sq_quad - mean_quad ** 2)
mean_analytic, var_analytic = uncertain_conditional(
DataQuad.Xmu,
DataQuad.Xvar,
inducing_variable,
kernel,
DataQuad.q_mu,
DataQuad.q_sqrt,
mean_function=mean_function,
full_output_cov=False,
white=white,
)
assert_allclose(mean_quad, mean_analytic, rtol=1e-6)
assert_allclose(var_quad, var_analytic, rtol=1e-6)
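# The quadrature check above combines E[Var[f|X]] and Var[E[f|X]] using the law
# of total variance, Var[f] = E[Var[f|X]] + Var[E[f|X]].  A minimal numpy-only
# sketch of that identity (illustrative helper; not part of the test suite, and
# the sample sizes are arbitrary):
def _law_of_total_variance_sketch(n_outer: int = 2000, n_inner: int = 2000) -> None:
    rng_local = np.random.RandomState(0)
    x = rng_local.randn(n_outer)  # X ~ N(0, 1)
    f = x[:, None] + rng_local.randn(n_outer, n_inner)  # f | X ~ N(X, 1)
    total = f.var()  # overall variance of f, roughly Var(X) + 1 = 2
    decomposed = f.var(axis=1).mean() + f.mean(axis=1).var()
    assert_allclose(total, decomposed, rtol=1e-6)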
|
|
# PyMoBu - Python enhancement for Autodesk's MotionBuilder
# Copyright (C) 2010 Scott Englert
# scott@scottenglert.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Component module
Contains component classes and related functions
'''
import re
from pyfbsdk import FBComponent #@UnresolvedImport
from pyfbsdk import FBPropertyType #@UnresolvedImport
from pyfbsdk import FBMatrix #@UnresolvedImport
from pyfbsdk import FBVector3d #@UnresolvedImport
from pyfbsdk import FBModelTransformationType #@UnresolvedImport
from pyfbsdk import FBNamespaceAction #@UnresolvedImport
# eclipseSyntax
if False: from pyfbsdk_gen_doc import * #@UndefinedVariable @UnusedWildImport
def ConvertToPyMoBu(component):
'''Utility to convert a FB class to a PMB class'''
if isinstance(component, PMBComponent):
return component
    # walk the component's class hierarchy (method resolution order)
componentClasses = component.__class__.__mro__
for fbClass in componentClasses:
pmbClassName = fbClass.__name__.replace('FB', 'PMB')
try:
pmbClass = eval(pmbClassName)
except:
continue
return pmbClass.Convert(component)
# add this function to FBComponent
FBComponent.ConvertToPyMoBu = ConvertToPyMoBu
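# Illustrative usage (comment only; assumes 'model' is an existing FBModel in
# the current MotionBuilder scene):
#
#   pmbModel = model.ConvertToPyMoBu()          # returns a PMBModel wrapper
#   print pmbModel.GetTranslation(worldSpace=True)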
# -----------------------------------------------------
# PyMoBu Component Classes
# -----------------------------------------------------
class PMBComponent(object):
'''PyMoBu class for FBComponent'''
# property type dictionary {Name : [enum, internal name]}
kPropertyTypes = dict(Action = [FBPropertyType.kFBPT_Action, 'Action'],
Enum = [FBPropertyType.kFBPT_enum, 'Enum'],
Integer = [FBPropertyType.kFBPT_int,'Integer'],
Bool = [FBPropertyType.kFBPT_bool,'Bool'],
Double = [FBPropertyType.kFBPT_double,'Number'],
CharPtr = [FBPropertyType.kFBPT_charptr, 'String'],
Float = [FBPropertyType.kFBPT_float,'Float'],
Time = [FBPropertyType.kFBPT_Time, 'Time'],
Object = [FBPropertyType.kFBPT_object, 'Object'],
StringList = [FBPropertyType.kFBPT_stringlist, 'StringList'],
Vector4D = [FBPropertyType.kFBPT_Vector4D, 'Vector'],
Vector3D = [FBPropertyType.kFBPT_Vector3D, 'Vector'],
Vector2D = [FBPropertyType.kFBPT_Vector2D, 'Vector'],
ColorRGB = [FBPropertyType.kFBPT_ColorRGB, 'Color'],
ColorRGBA = [FBPropertyType.kFBPT_ColorRGBA, 'ColorAndAlpha'],
TimeSpan = [FBPropertyType.kFBPT_TimeSpan, 'Time'])
def __init__(self, component):
self.component = component
def __repr__(self):
name = getattr(self.component, 'LongName', self.component.Name)
return "%s('%s')" % (self.__class__.__name__, name)
def __str__(self):
'''Returns the full object name'''
return getattr(self.component, 'LongName', self.component.Name)
@property
def Name(self):
'''Returns the object name'''
        return self.component.Name
@property
def LongName(self):
'''Returns the full object name'''
return getattr(self.component, 'LongName', self.component.Name)
@classmethod
def Convert(cls, component):
return cls(component)
def ListProperties(self, pattern=None, _type=None, **kwargs):
'''
Returns a list of property names from the PropertyList with optional filters
@param pattern: list properties with specific names with optional wildcard *. Default is all.
@param _type: get properties of specific types. See self.kPropertyTypes.keys() for names
Optional parameters True/False for testing from FBProperty:
IsAnimatable
IsInternal
IsList
IsMaxClamp
IsMinClamp
IsObjectList
IsReadOnly
IsReferenceProperty
IsUserProperty
'''
# setup a test for the optional parameters
def passesOptionalTest(x):
for arg, challenge in kwargs.iteritems():
func = getattr(x, arg, None)
if func and func() != challenge:
return False
return True
# set up the name testing based on the pattern
if pattern:
# if there is a wild card in the pattern
if '*' in pattern:
pattern = pattern.replace('*', '.*')
# name testing function
passesNameTest = lambda x: re.match(pattern, x.GetName())
else:
passesNameTest = lambda x: pattern == x.GetName()
else:
passesNameTest = lambda x: True
# add type testing
if _type:
propertyType = self.kPropertyTypes[_type][0]
passesTypeTest = lambda x: x.GetPropertyType() == propertyType
else:
passesTypeTest = lambda x: True
properties = []
for p in self.component.PropertyList:
# odd bug that some items are None
if p is None:
continue
if not passesOptionalTest(p):
continue
if not passesTypeTest(p):
continue
if passesNameTest(p):
properties.append(p)
return properties
def GetPropertyValue(self, name):
'''Returns a property value from the components PropertyList'''
return self._findProperty(name).Data
def SetPropertyValue(self, name, value):
'''Sets a property value in the components PropertyList'''
self._findProperty(name).Data = value
def AddProperty(self, name, _type, animatable=True, user=True):
'''
Add a property to this component
@param name: the name of the property
@param _type: the data type of the property:
'''
if self.ListProperties(pattern=name):
raise Exception("Can not add property '%s'. Already exists on object '%'" % (name, self))
try:
typeData = self.kPropertyTypes[_type]
except KeyError:
raise Exception("Invalid property type '%s'. Valid types are: '%s'" % (_type, ', '.join(self.kPropertyTypes.keys())))
typeData.extend([animatable, user, None])
self.component.PropertyCreate(name, *typeData)
def RemoveProperty(self, name):
'''Remove a property from an object'''
_property = self._findProperty(name)
        # test whether we can remove a non-user property or not
if _property.IsUserProperty():
self.component.PropertyRemove(_property)
else:
raise Exception("Property is flagged as non-user. Unable to remove property '%s' from object '%s'" % (name, self))
def _findProperty(self, name):
        # similar to the native Find(), but raises an exception if the property isn't found
_property = self.component.PropertyList.Find(name)
if _property:
return _property
else:
raise Exception("Could not find property named '%s' for object '%s'" % (name, self))
def GetNamespace(self):
'''Returns the namespace of the object'''
namespace = re.match(".*:", getattr(self.component, 'LongName', self.component.Name))
if namespace:
return namespace.group()
def AddNamespace(self, namespace, hierarchy=True, toRight=False):
'''
Adds a namespace to the object
@param hierarchy: Apply this action to hierarchy. Default True
@param toRight: Add namespace to the right of other namespaces. Default False (left)
'''
from pyfbsdk import FBConstraint #@UnresolvedImport @Reimport
action = FBNamespaceAction.kFBConcatNamespace
if hierarchy and not isinstance(self.component, FBConstraint):
self.component.ProcessNamespaceHierarchy(action, namespace, None, toRight)
else:
self.component.ProcessObjectNamespace(action, namespace, None, toRight)
def SwapNamespace(self, newNamespace, oldNamespace, hierarchy=True):
'''
Swaps a new namespace with an existing namespace
@param hierarchy: Apply this action to hierarchy. Default True
'''
from pyfbsdk import FBConstraint #@Reimport @UnresolvedImport
action = FBNamespaceAction.kFBReplaceNamespace
if hierarchy and not isinstance(self.component, FBConstraint):
self.component.ProcessNamespaceHierarchy(action, newNamespace, oldNamespace)
else:
self.component.ProcessObjectNamespace(action, newNamespace, oldNamespace)
def StripNamespace(self, hierarchy=True):
'''
Removes all the namespaces
@param hierarchy: Apply this action to hierarchy. Default True
'''
from pyfbsdk import FBConstraint #@Reimport @UnresolvedImport
action = FBNamespaceAction.kFBRemoveAllNamespace
if hierarchy and not isinstance(self.component, FBConstraint):
self.component.ProcessNamespaceHierarchy(action, '')
else:
self.component.ProcessObjectNamespace(action, '')
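# Illustrative property workflow (comment only; 'pmb' is any PMBComponent
# wrapper and 'myWeight' is a made-up user property name):
#
#   pmb.AddProperty('myWeight', 'Double')                     # create a user property
#   pmb.SetPropertyValue('myWeight', 0.5)
#   userDoubles = pmb.ListProperties(_type='Double', IsUserProperty=True)
#   pmb.RemoveProperty('myWeight')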
class PMBBox(PMBComponent):
'''PyMobu class for FBBox'''
pass
class PMBModel(PMBBox):
'''PyMoBu class for FBModel'''
kInverseMatrixTypeDict = dict(Transformation = FBModelTransformationType.kModelInverse_Transformation,
Translation = FBModelTransformationType.kModelInverse_Translation,
Rotation = FBModelTransformationType.kModelInverse_Rotation,
Scaling = FBModelTransformationType.kModelInverse_Scaling)
# Center = FBModelTransformationType.kModelCenter,
# All = FBModelTransformationType.kModelAll)
kMatrixTypeDict = dict(Transformation = FBModelTransformationType.kModelTransformation,
Translation = FBModelTransformationType.kModelTranslation,
Rotation = FBModelTransformationType.kModelRotation,
Scaling = FBModelTransformationType.kModelScaling)
# ParentOffset = FBModelTransformationType.kModelParentOffset)
# Center = FBModelTransformationType.kModelCenter,
# All = FBModelTransformationType.kModelAll)
@property
def Children(self):
return self.component.Children
@property
def Parent(self):
return self.component.Parent
def SetInverseMatrix(self, matrix, worldSpace=False, _type='Transformation'):
'''
Set the inverse matrix
@param worldSpace: world space matrix (True/False) Default False
@param _type: matrix type (Transformation, Translation, Rotation, Scaling, Center, All)
'''
try:
self.component.SetMatrix(matrix, self.kInverseMatrixTypeDict[_type], worldSpace)
except KeyError:
raise Exception("Invalid vector type '%s'. Valid types are: %s" % (_type, ', '.join(self.kInverseMatrixTypeDict.keys())))
def SetMatrix(self, matrix, worldSpace=False, _type='Transformation'):
'''
Set the matrix
@param worldSpace: world space matrix (True/False) Default False
@param _type: matrix type (Transformation, Translation, Rotation, Scaling, Center, All)
'''
try:
self.component.SetMatrix(matrix, self.kMatrixTypeDict[_type], worldSpace)
except KeyError:
raise Exception("Invalid vector type '%s'. Valid types are: %s" % (_type, ', '.join(self.kMatrixTypeDict.keys())))
def GetAnimationNode(self, transform='Translation'):
'''
Get AnimationNode
@param transform: transformation type
'''
animationNode= None
if transform=='Translation':
animationNode = self.component.Translation.GetAnimationNode()
if transform=='Rotation':
animationNode = self.component.Rotation.GetAnimationNode()
if transform=='Scaling':
animationNode = self.component.Scaling.GetAnimationNode()
return animationNode
def GetInverseMatrix(self, worldSpace=False, _type='Transformation'):
'''
Get the inverse matrix
@param worldSpace: world space matrix (True/False) Default False
@param _type: matrix type (Transformation, Translation, Rotation, Scaling, Center, All)
'''
matrix = FBMatrix()
try:
self.component.GetMatrix(matrix, self.kInverseMatrixTypeDict[_type], worldSpace)
except KeyError:
raise Exception("Invalid vector type '%s'. Valid types are: %s" % (_type, ', '.join(self.kInverseMatrixTypeDict.keys())))
return matrix
def GetMatrix(self, worldSpace=False, _type='Transformation'):
'''
Get the matrix
@param worldSpace: world space matrix (True/False) Default False
@param _type: matrix type (Transformation, Translation, Rotation, Scaling, Center, All)
'''
matrix = FBMatrix()
try:
self.component.GetMatrix(matrix, self.kMatrixTypeDict[_type], worldSpace)
except KeyError:
raise Exception("Invalid vector type '%s'. Valid types are: %s" % (_type, ', '.join(self.kMatrixTypeDict.keys())))
return matrix
def GetTranslation(self, worldSpace=False):
'''
Get translation vector
@param worldSpace: world space vector (True/False) Default False
'''
vector = FBVector3d()
self.component.GetVector(vector, self.kMatrixTypeDict['Translation'], worldSpace)
return vector
def GetRotation(self, worldSpace=False):
'''
Get rotation vector
@param worldSpace: world space vector (True/False) Default False
'''
vector = FBVector3d()
self.component.GetVector(vector, self.kMatrixTypeDict['Rotation'], worldSpace)
return vector
def GetScale(self, worldSpace=False):
'''
Get scale vector
@param worldSpace: world space vector (True/False) Default False
'''
vector = FBVector3d()
self.component.GetVector(vector, self.kMatrixTypeDict['Scaling'], worldSpace)
return vector
def SetTranslation(self, vector, worldSpace=False):
'''
Set the translation vector
@param worldSpace: world space vector (True/False) Default False
'''
self.component.SetVector(vector, self.kMatrixTypeDict['Translation'], worldSpace)
def SetRotation(self, vector, worldSpace=False):
'''
Set the rotation vector
@param worldSpace: world space vector (True/False) Default False
'''
self.component.SetVector(vector, self.kMatrixTypeDict['Rotation'], worldSpace)
def SetScale(self, vector, worldSpace=False):
'''
Set the scale vector
@param worldSpace: world space vector (True/False) Default False
'''
self.component.SetVector(vector, self.kMatrixTypeDict['Scaling'], worldSpace)
# import other component modules
from pymobu.components.constraints import *
|
|
import csv
from urllib.request import Request, urlopen
import dateutil.parser
import datetime
import re
from sys import argv
from bs4 import BeautifulSoup
import scrape_util
default_sale, base_url, prefix = scrape_util.get_market(argv)
report_path = 'index_files/Page452.htm'
report_date_path = 'index_files/Page648.htm'
head_pattern = re.compile(r'([\d,]+) ?head', re.IGNORECASE)
strip_char = ';,. \n\t'
def get_sale_date():
"""Return the date of the sale."""
request = Request(
base_url + report_date_path,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
tables = soup.find_all('table')
sale_date = datetime.datetime(2005, 1, 1)
for x in range(len(tables) - 1, 0, -1):
a = tables[x].find_all('a')
if not a:
continue
else:
new_date_string = a[-1].get_text()
try:
new_sale_date = dateutil.parser.parse(new_date_string)
if new_sale_date > sale_date:
sale_date = new_sale_date
except TypeError:
pass
return sale_date
def get_sale_head(this_report):
"""Return the head of the sale."""
    text = this_report.find(text=head_pattern)
    if not text:
        return None
    head_match = head_pattern.search(text)
if head_match:
return head_match.group(1).replace(',','')
else:
return None
def is_sale(this_line):
"""Determine whether a given line describes a sale of cattle."""
has_number = re.search(r'[0-9]+', this_line)
has_colon = ':' in this_line
return bool(has_number) and has_colon
def get_sale_location(word):
"""Convert address strings into a list of address components."""
sale_location = ' '.join(word)
if ',' in sale_location:
sale_location = sale_location.split(',')
else:
match = re.search(r'(.*?)(' + scrape_util.state + ')', sale_location)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [sale_location]
return sale_location
def is_number(string):
"""Test whether a string is number-ish. Ignoring units like 'cwt' and 'hd'."""
if string:
string = re.sub(r'\$|[,-/]|cwt|he?a?d?', '', string, flags = re.IGNORECASE)
try:
float(string)
result = True
except ValueError:
result = False
else:
result = False
return result
def get_sale(line):
"""Convert the input into a dictionary, with keys matching
the CSV column headers in the scrape_util module.
"""
consignor_info, cattle_info = line.split(':')
consignor_info_list = consignor_info.split(',')
sale = {
'consignor_name': consignor_info_list.pop(0),
}
if consignor_info_list:
sale['consignor_city'] = consignor_info_list.pop().strip(strip_char)
weight_match = re.search(r'([0-9,]+)#', cattle_info)
if weight_match:
sale['cattle_avg_weight'] = weight_match.group(1).replace(',','')
cattle_info = cattle_info.replace(weight_match.group(),'')
key = 'cattle_price_cwt'
else:
key = 'cattle_price'
if re.search(r'\$', cattle_info):
cattle_string, price_string = cattle_info.split('$')
else:
        price_match = re.search(r'([0-9,.]+)$', cattle_info)
        if price_match:
            price_string = price_match.group(1)
            cattle_string = cattle_info.replace(price_match.group(), '')
        else:
            # no explicit price found; keep the full description and leave the
            # price missing instead of raising a NameError below
            price_string = ''
            cattle_string = cattle_info
sale['cattle_cattle'] = cattle_string.strip(strip_char)
try:
float(price_string.replace(',',''))
sale[key] = price_string.replace(',','').strip(strip_char)
except ValueError:
pass
sale = {k:v for k,v in sale.items() if v}
return sale
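# A minimal worked example of get_sale() on a hypothetical report line
# (the consignor and lot details are made up):
def example_sale():
    """Show how a single report line maps onto the CSV columns."""
    line = 'John Smith, Anytown: 10 blk strs 850# $165.50'
    return get_sale(line)
    # -> {'consignor_name': 'John Smith', 'consignor_city': 'Anytown',
    #     'cattle_avg_weight': '850', 'cattle_cattle': '10 blk strs',
    #     'cattle_price_cwt': '165.50'}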
def write_sale(line, this_default_sale, writer):
"""Extract sales from a list of report lines and write them to a CSV file."""
for this_line in line:
if is_sale(this_line):
sale = this_default_sale.copy()
sale.update(get_sale(this_line))
writer.writerow(sale)
def main():
# Collect individual reports into a list
request = Request(
base_url + report_path,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
report = [soup]
# Locate existing CSV files
archive = scrape_util.ArchiveFolder(argv, prefix)
# Write a CSV file for each report not in the archive
for this_report in report:
sale_date = get_sale_date()
io_name = archive.new_csv(sale_date)
# Stop iteration if this report is already archived
if not io_name:
break
sale_head = get_sale_head(this_report)
# Initialize the default sale dictionary
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
'sale_head': sale_head,
})
table = this_report.find_all('table')[-1]
# List each line of the report
line = []
for tr in table.find_all('tr'):
for td in tr.find_all('td'):
line.append(td.get_text().strip())
# Open a new CSV file and write each sale
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main()
|
|
import datetime
import os
import time
import torch
import torch.utils.data
from filelock import FileLock
from torch import nn
import torchvision
import ray
from ray.util.sgd.torch.examples.segmentation.coco_utils import get_coco
import ray.util.sgd.torch.examples.segmentation.transforms as T
import ray.util.sgd.torch.examples.segmentation.utils as utils
from ray.util.sgd.torch import TrainingOperator
from ray.util.sgd import TorchTrainer
try:
from apex import amp
except ImportError:
amp = None
def get_dataset(name,
image_set,
transform,
num_classes_only=False,
download="auto"):
def sbd(*args, **kwargs):
return torchvision.datasets.SBDataset(
*args, mode="segmentation", **kwargs)
paths = {
"voc": (os.path.expanduser("~/datasets01/VOC/060817/"),
torchvision.datasets.VOCSegmentation, 21),
"voc_aug": (os.path.expanduser("~/datasets01/SBDD/072318/"), sbd, 21),
"coco": (os.path.expanduser("~/datasets01/COCO/022719/"), get_coco, 21)
}
p, ds_fn, num_classes = paths[name]
if num_classes_only:
return None, num_classes
if download == "auto" and os.path.exists(p):
download = False
try:
ds = ds_fn(
p, download=download, image_set=image_set, transforms=transform)
except RuntimeError:
print("data loading failed. Retrying this.")
ds = ds_fn(p, download=True, image_set=image_set, transforms=transform)
return ds, num_classes
def get_transform(train):
base_size = 520
crop_size = 480
min_size = int((0.5 if train else 1.0) * base_size)
max_size = int((2.0 if train else 1.0) * base_size)
transforms = []
transforms.append(T.RandomResize(min_size, max_size))
if train:
transforms.append(T.RandomHorizontalFlip(0.5))
transforms.append(T.RandomCrop(crop_size))
transforms.append(T.ToTensor())
transforms.append(
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
return T.Compose(transforms)
def criterion(inputs, target):
losses = {}
for name, x in inputs.items():
losses[name] = nn.functional.cross_entropy(x, target, ignore_index=255)
if len(losses) == 1:
return losses["out"]
return losses["out"] + 0.5 * losses["aux"]
def get_optimizer(model, aux_loss):
params_to_optimize = [
{
"params": [
p for p in model.backbone.parameters() if p.requires_grad
]
},
{
"params": [
p for p in model.classifier.parameters() if p.requires_grad
]
},
]
if aux_loss:
params = [
p for p in model.aux_classifier.parameters() if p.requires_grad
]
params_to_optimize.append({"params": params, "lr": args.lr * 10})
optimizer = torch.optim.SGD(
params_to_optimize,
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
return optimizer
class SegOperator(TrainingOperator):
def setup(self, config):
args = config["args"]
# Create Data Loaders.
with FileLock(".ray.lock"):
# Within a machine, this code runs synchronously.
dataset, num_classes = get_dataset(
args.dataset, "train", get_transform(train=True))
config["num_classes"] = num_classes
dataset_test, _ = get_dataset(
args.dataset, "val", get_transform(train=False))
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.data_workers,
collate_fn=utils.collate_fn,
drop_last=True)
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=1,
num_workers=args.data_workers,
collate_fn=utils.collate_fn)
# Create model.
model = torchvision.models.segmentation.__dict__[args.model](
num_classes=config["num_classes"],
aux_loss=args.aux_loss,
pretrained=args.pretrained)
if config["num_workers"] > 1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# Create optimizer.
optimizer = get_optimizer(model, aux_loss=args.aux_loss)
# Register components.
self.model, self.optimizer = self.register(
models=model,
optimizers=optimizer,
train_loader=data_loader,
validation_loader=data_loader_test)
def train_batch(self, batch, batch_info):
image, target = batch
image, target = image.to(self.device), target.to(self.device)
if self.use_fp16_native:
with self._amp.autocast():
output = self.model(image)
loss = criterion(output, target)
else:
output = self.model(image)
loss = criterion(output, target)
self.optimizer.zero_grad()
if self.use_fp16_apex:
with self._amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.use_fp16_native:
self._amp_scaler.scale(loss).backward()
else:
loss.backward()
if self.use_fp16_native:
self._amp_scaler.step(self.optimizer)
self._amp_scaler.update()
else:
self.optimizer.step()
lr = self.optimizer.param_groups[0]["lr"]
return {"loss": loss.item(), "lr": lr, "num_samples": len(batch)}
def validate(self, data_loader, info=None):
self.model.eval()
confmat = utils.ConfusionMatrix(self.config["num_classes"])
with torch.no_grad():
for image, target in data_loader:
image, target = image.to(self.device), target.to(self.device)
if self.use_fp16_native:
with self._amp.autocast():
output = self.model(image)
else:
output = self.model(image)
output = output["out"]
confmat.update(target.flatten(), output.argmax(1).flatten())
confmat.reduce_from_all_processes()
return confmat
def main(args):
os.makedirs(args.output_dir, exist_ok=True)
print(args)
start_time = time.time()
config = {"args": args, "num_workers": args.num_workers}
trainer = TorchTrainer(
training_operator_cls=SegOperator,
use_tqdm=True,
use_fp16=True,
num_workers=config["num_workers"],
config=config,
use_gpu=torch.cuda.is_available())
for epoch in range(args.epochs):
trainer.train()
confmat = trainer.validate(reduce_results=False)[0]
print(confmat)
state_dict = trainer.state_dict()
state_dict.update(epoch=epoch, args=args)
torch.save(state_dict,
os.path.join(args.output_dir, f"model_{epoch}.pth"))
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"Training time {total_time_str}")
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="PyTorch Segmentation Training with RaySGD")
parser.add_argument(
"--address",
required=False,
default=None,
help="the address to use for connecting to a Ray cluster.")
parser.add_argument("--dataset", default="voc", help="dataset")
parser.add_argument("--model", default="fcn_resnet101", help="model")
parser.add_argument(
"--aux-loss", action="store_true", help="auxiliar loss")
parser.add_argument("--device", default="cuda", help="device")
parser.add_argument("-b", "--batch-size", default=8, type=int)
parser.add_argument(
"-n", "--num-workers", default=1, type=int, help="GPU parallelism")
parser.add_argument(
"--epochs",
default=30,
type=int,
metavar="N",
help="number of total epochs to run")
parser.add_argument(
"--data-workers",
default=16,
type=int,
metavar="N",
help="number of data loading workers (default: 16)")
parser.add_argument(
"--lr", default=0.01, type=float, help="initial learning rate")
parser.add_argument(
"--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay")
parser.add_argument("--output-dir", default=".", help="path where to save")
parser.add_argument(
"--pretrained",
dest="pretrained",
help="Use pre-trained models from the modelzoo",
action="store_true",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
ray.init(address=args.address)
main(args)
|
|
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import webob.exc
from xml.dom import minidom
from xml.parsers import expat
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hosts')
elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
elem.set('host_name')
elem.set('service')
return xmlutil.MasterTemplate(root, 1)
class HostUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('status')
root.set('maintenance_mode')
return xmlutil.MasterTemplate(root, 1)
class HostActionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('power_action')
return xmlutil.MasterTemplate(root, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
elem = xmlutil.make_flat_dict('resource', selector='host',
subselector='resource')
root.append(elem)
return xmlutil.MasterTemplate(root, 1)
class HostUpdateDeserializer(wsgi.XMLDeserializer):
def default(self, string):
try:
node = minidom.parseString(string)
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
updates = {}
updates_node = self.find_first_child_named(node, 'updates')
if updates_node is not None:
maintenance = self.find_first_child_named(updates_node,
'maintenance_mode')
if maintenance is not None:
updates[maintenance.tagName] = self.extract_text(maintenance)
status = self.find_first_child_named(updates_node, 'status')
if status is not None:
updates[status.tagName] = self.extract_text(status)
return dict(body=updates)
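# Example of the XML body this deserializer accepts (illustrative):
#
#   <updates>
#       <status>enable</status>
#       <maintenance_mode>disable</maintenance_mode>
#   </updates>
#
# default() returns {'body': {'status': 'enable', 'maintenance_mode': 'disable'}},
# which HostController.update() below validates and applies.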
def _list_hosts(req):
"""Returns a summary list of hosts, optionally filtering
by service type.
"""
context = req.environ['nova.context']
services = db.service_get_all(context, False)
zone = ''
if 'zone' in req.GET:
zone = req.GET['zone']
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for host in services:
hosts.append({"host_name": host['host'], 'service': host['topic'],
'zone': host['availability_zone']})
return hosts
def check_host(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, *args, **kwargs):
listed_hosts = _list_hosts(req)
hosts = [h["host_name"] for h in listed_hosts]
if id in hosts:
return fn(self, req, id, *args, **kwargs)
else:
message = _("Host '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=message)
return wrapped
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
authorize(req.environ['nova.context'])
return {'hosts': _list_hosts(req)}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostUpdateDeserializer)
@check_host
def update(self, req, id, body):
authorize(req.environ['nova.context'])
update_values = {}
for raw_key, raw_val in body.iteritems():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
if key == "status":
if val in ("enable", "disable"):
update_values['status'] = val.startswith("enable")
else:
explanation = _("Invalid status: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
elif key == "maintenance_mode":
if val not in ['enable', 'disable']:
explanation = _("Invalid mode: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
update_values['maintenance_mode'] = val == 'enable'
else:
explanation = _("Invalid update setting: '%s'") % raw_key
raise webob.exc.HTTPBadRequest(explanation=explanation)
# this is for handling multiple settings at the same time:
# the result dictionaries are merged in the first one.
# Note: the 'host' key will always be the same so it's
# okay that it gets overwritten.
update_setters = {'status': self._set_enabled_status,
'maintenance_mode': self._set_host_maintenance}
result = {}
for key, value in update_values.iteritems():
result.update(update_setters[key](req, id, value))
return result
def _set_host_maintenance(self, req, host, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
context = req.environ['nova.context']
LOG.audit(_("Putting host %(host)s in maintenance "
"mode %(mode)s.") % locals())
try:
result = self.api.set_host_maintenance(context, host, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {"host": host, "maintenance_mode": result}
def _set_enabled_status(self, req, host, enabled):
"""Sets the specified host's ability to accept new instances."""
context = req.environ['nova.context']
state = "enabled" if enabled else "disabled"
LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
try:
result = self.api.set_host_enabled(context, host=host,
enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {"host": host, "status": result}
def _host_power_action(self, req, host, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
try:
result = self.api.host_power_action(context, host=host,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {"host": host, "power_action": result}
@wsgi.serializers(xml=HostActionTemplate)
def startup(self, req, id):
return self._host_power_action(req, host=id, action="startup")
@wsgi.serializers(xml=HostActionTemplate)
def shutdown(self, req, id):
return self._host_power_action(req, host=id, action="shutdown")
@wsgi.serializers(xml=HostActionTemplate)
def reboot(self, req, id):
return self._host_power_action(req, host=id, action="reboot")
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param context: security context
:param host: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
host = id
context = req.environ['nova.context']
if not context.is_admin:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
# Getting compute node info and related instances info
try:
compute_ref = db.service_get_all_compute_by_host(context, host)
compute_ref = compute_ref[0]
except exception.ComputeHostNotFound:
raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
instance_refs = db.instance_get_all_by_host(context,
compute_ref['host'])
# Getting total available/used resource
compute_ref = compute_ref['compute_node'][0]
resources = [{'resource': {'host': host, 'project': '(total)',
'cpu': compute_ref['vcpus'],
'memory_mb': compute_ref['memory_mb'],
'disk_gb': compute_ref['local_gb']}},
{'resource': {'host': host, 'project': '(used_now)',
'cpu': compute_ref['vcpus_used'],
'memory_mb': compute_ref['memory_mb_used'],
'disk_gb': compute_ref['local_gb_used']}}]
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for i in instance_refs:
cpu_sum += i['vcpus']
mem_sum += i['memory_mb']
hdd_sum += i['root_gb'] + i['ephemeral_gb']
resources.append({'resource': {'host': host,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}})
# Getting usage resource per project
project_ids = [i['project_id'] for i in instance_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
vcpus = [i['vcpus'] for i in instance_refs
if i['project_id'] == project_id]
mem = [i['memory_mb'] for i in instance_refs
if i['project_id'] == project_id]
disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
if i['project_id'] == project_id]
resources.append({'resource': {'host': host,
'project': project_id,
'cpu': reduce(lambda x, y: x + y, vcpus),
'memory_mb': reduce(lambda x, y: x + y, mem),
'disk_gb': reduce(lambda x, y: x + y, disk)}})
return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration"""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/compute/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={'update': 'PUT'},
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
return resources
|
|
import unittest2
from .random_samples import \
ReservoirSampler, Reservoir, MultiplexedReservoir
from zounds.timeseries import TimeDimension, Seconds
from zounds.spectral import FrequencyDimension, FrequencyBand, LinearScale
from zounds.core import ArrayWithUnits, IdentityDimension
import numpy as np
class TestReservoir(unittest2.TestCase):
def test_nsamples_must_be_gt_zero(self):
self.assertRaises(ValueError, lambda: Reservoir(0))
def test_can_dictate_dtype(self):
r = Reservoir(100, dtype=np.float32)
r.add(np.ones(10, dtype=np.float64))
self.assertEqual(np.float32, r.get().dtype)
def test_reservoir_has_first_input_dtype_when_unspecified(self):
r = Reservoir(100)
r.add(np.ones(10, dtype=np.float64))
self.assertEqual(np.float64, r.get().dtype)
def test_raises_if_nsamples_is_not_int(self):
self.assertRaises(ValueError, lambda: Reservoir(1e2))
def test_array_has_correct_first_dimension(self):
r = Reservoir(100)
r.add(np.random.random_sample((10, 3)))
self.assertEqual(100, r.arr.shape[0])
def test_can_add_samples_larger_than_reservoir_size(self):
r = Reservoir(100)
r.add(np.random.random_sample((1000, 3)))
self.assertEqual(100, len(r.get()))
def test_array_has_correct_subsequent_dimensions(self):
r = Reservoir(100)
r.add(np.random.random_sample((10, 3, 2)))
self.assertEqual((3, 2), r.arr.shape[1:])
def test_array_with_units(self):
r = Reservoir(100)
frequency_dimension = FrequencyDimension(
LinearScale(FrequencyBand(100, 1000), 100))
samples = ArrayWithUnits(
np.ones((20, 100)),
[
TimeDimension(frequency=Seconds(1)),
frequency_dimension
])
r.add(samples)
mixed = r.get()
self.assertIsInstance(mixed, ArrayWithUnits)
self.assertEqual(100, mixed.shape[1])
self.assertIsInstance(mixed.dimensions[0], IdentityDimension)
self.assertIsInstance(mixed.dimensions[1], FrequencyDimension)
def test_reservoir_is_well_mixed(self):
r = Reservoir(100)
samples = np.arange(100)[..., None]
for i in range(0, 100, 10):
r.add(samples[i: i + 10])
mixed = r.get().squeeze()
diff = np.diff(mixed)
self.assertFalse(np.all(diff == 1))
def test_can_provide_explicit_indices_when_adding(self):
r = Reservoir(10)
samples = np.arange(10)[..., None]
r.add(samples, indices=samples.squeeze()[::-1])
mixed = r.get()
np.testing.assert_allclose(mixed.squeeze(), samples.squeeze()[::-1])
def test_raises_when_samples_and_explicit_indices_dont_match(self):
r = Reservoir(10)
samples = np.arange(10)[..., None]
self.assertRaises(
ValueError, lambda: r.add(samples, indices=samples.squeeze()[:5]))
def test_can_get_batch(self):
r = Reservoir(100)
samples = np.arange(100)[..., None]
for i in range(0, 100, 10):
r.add(samples[i: i + 10])
samples = r.get_batch(15)
self.assertEqual(15, samples.shape[0])
def test_raises_if_get_batch_is_larger_than_total_sample_size(self):
r = Reservoir(100)
samples = np.arange(100)[..., None]
for i in range(0, 100, 10):
r.add(samples[i: i + 10])
self.assertRaises(ValueError, lambda: r.get_batch(1000))
def test_raises_if_get_batch_is_larger_than_available_sample_size(self):
r = Reservoir(100)
samples = np.arange(100)[..., None]
for i in range(0, 50, 10):
r.add(samples[i: i + 10])
self.assertRaises(ValueError, lambda: r.get_batch(64))
class TestMultiplexedReservoir(unittest2.TestCase):
def test_is_consistent_across_keys(self):
r = MultiplexedReservoir(100)
samples = np.random.random_sample((10, 3))
r.add(dict(cat=samples, dog=samples))
mixed = r.get()
np.testing.assert_allclose(mixed['cat'], mixed['dog'])
def test_raises_when_wrong_set_of_keys_passed_to_add(self):
r = MultiplexedReservoir(100)
samples = np.random.random_sample((10, 3))
r.add(dict(cat=samples, dog=samples))
self.assertRaises(
ValueError, lambda: r.add(dict(rat=samples, frog=samples)))
def test_raises_when_some_keys_have_mismatched_lengths(self):
r = MultiplexedReservoir(100)
samples = np.random.random_sample((10, 3))
self.assertRaises(
ValueError, lambda: r.add(dict(cat=samples, dog=samples[:-1])))
def test_raises_when_some_keys_have_mismatched_lengths_second_add(self):
r = MultiplexedReservoir(100)
samples = np.random.random_sample((10, 3))
r.add(dict(cat=samples, dog=samples))
self.assertRaises(
ValueError, lambda: r.add(dict(cat=samples, dog=samples[:-1])))
def test_get_returns_dict_with_user_specified_keys(self):
r = MultiplexedReservoir(100)
samples = np.random.random_sample((10, 3))
d = dict(cat=samples, dog=samples)
r.add(d)
mixed = r.get()
self.assertEqual(set(d.keys()), set(mixed.keys()))
class TestReservoirSampler(unittest2.TestCase):
def test_can_sample_from_one_dimensional_feature(self):
sampler = ReservoirSampler(nsamples=10)
frequency_dimension = FrequencyDimension(
LinearScale(FrequencyBand(100, 1000), 100))
samples = ArrayWithUnits(
np.ones((20, 100)),
[
TimeDimension(frequency=Seconds(1)),
frequency_dimension
])
sampler._enqueue(samples, pusher=None)
reservoir = sampler._r
self.assertEqual((10, 100), reservoir.shape)
self.assertIsInstance(reservoir, ArrayWithUnits)
self.assertEqual(reservoir.dimensions[0], IdentityDimension())
self.assertEqual(reservoir.dimensions[1], frequency_dimension)
def test_can_wrap_samples(self):
sampler = ReservoirSampler(nsamples=10)
frequency_dimension = FrequencyDimension(
LinearScale(FrequencyBand(100, 1000), 100))
samples = ArrayWithUnits(
np.ones((2, 10, 100)),
[
TimeDimension(frequency=Seconds(10)),
TimeDimension(frequency=Seconds(1)),
frequency_dimension
])
sampler._enqueue(samples, pusher=None)
reservoir = sampler._r
self.assertEqual((10, 10, 100), reservoir.shape)
self.assertIsInstance(reservoir, ArrayWithUnits)
self.assertEqual(reservoir.dimensions[0], IdentityDimension())
self.assertEqual(reservoir.dimensions[1], samples.dimensions[1])
self.assertEqual(reservoir.dimensions[2], samples.dimensions[2])
def test_can_dequeue_when_reservoir_is_full(self):
sampler = ReservoirSampler(nsamples=10)
frequency_dimension = FrequencyDimension(
LinearScale(FrequencyBand(100, 1000), 100))
samples = ArrayWithUnits(
np.ones((10, 10, 100)),
[
TimeDimension(frequency=Seconds(10)),
TimeDimension(frequency=Seconds(1)),
frequency_dimension
])
sampler._enqueue(samples, pusher=None)
reservoir = sampler._dequeue()
self.assertEqual((10, 10, 100), reservoir.shape)
self.assertIsInstance(reservoir, ArrayWithUnits)
self.assertEqual(reservoir.dimensions[0], IdentityDimension())
self.assertEqual(reservoir.dimensions[1], samples.dimensions[1])
self.assertEqual(reservoir.dimensions[2], samples.dimensions[2])
def test_can_dequeue_when_reservoir_is_partially_full(self):
sampler = ReservoirSampler(nsamples=10)
frequency_dimension = FrequencyDimension(
LinearScale(FrequencyBand(100, 1000), 100))
samples = ArrayWithUnits(
np.ones((4, 10, 100)),
[
TimeDimension(frequency=Seconds(10)),
TimeDimension(frequency=Seconds(1)),
frequency_dimension
])
sampler._enqueue(samples, pusher=None)
reservoir = sampler._dequeue()
self.assertEqual((4, 10, 100), reservoir.shape)
self.assertIsInstance(reservoir, ArrayWithUnits)
self.assertEqual(reservoir.dimensions[0], IdentityDimension())
self.assertEqual(reservoir.dimensions[1], samples.dimensions[1])
self.assertEqual(reservoir.dimensions[2], samples.dimensions[2])
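# For reference, a minimal sketch of classic reservoir sampling (Algorithm R).
# This is the textbook technique the Reservoir/ReservoirSampler classes are
# exercising above; it is not zounds' own implementation.
def reservoir_sample_sketch(stream, k, rng=None):
    """Return k items sampled uniformly from an iterable of unknown length."""
    rng = rng or np.random.RandomState(0)
    reservoir = []
    for i, item in enumerate(stream):
        if i < k:
            reservoir.append(item)
        else:
            j = rng.randint(0, i + 1)  # uniform index in [0, i]
            if j < k:
                reservoir[j] = item
    return reservoir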
|
|
""" This module reimplements Python's native threading module using Panda
threading constructs. It's designed as a drop-in replacement for the
threading module for code that works with Panda; it is necessary because
in some compilation models, Panda's threading constructs are
incompatible with the OS-provided threads used by Python's thread
module.
Unlike threading.py, this module is a more explicit implementation of
Python's threading model, designed to more precisely emulate Python's
standard threading semantics. In fact, this is a bald-face copy of
Python's threading module from Python 2.5, with a few lines at the top
to import Panda's thread reimplementation instead of the system thread
module, and so it is therefore layered on top of Panda's thread
implementation. """
import sys as _sys
from direct.stdpy import thread
from direct.stdpy.thread import stack_size, _newname, _local as local
from panda3d import core
_sleep = core.Thread.sleep
from time import time as _time
from traceback import format_exc as _format_exc
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'Condition', 'currentThread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
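# Typical drop-in usage (illustrative): code that would normally say
# "import threading" imports this module instead, e.g.
#
#   from direct.stdpy import threading
#
#   def work():
#       pass
#
#   t = threading.Thread(target=work)
#   t.start()
#   t.join()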
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self.__verbose = verbose
def _note(self, format, *args):
if self.__verbose:
format = format % args
format = "%s: %s\n" % (
currentThread().getName(), format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
global _profile_hook
_profile_hook = func
def settrace(func):
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
return _RLock(*args, **kwargs)
class _RLock(_Verbose):
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
return "<%s(%s, %d)>" % (
self.__class__.__name__,
self.__owner and self.__owner.getName(),
self.__count)
def acquire(self, blocking=1):
me = currentThread()
if self.__owner is me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
me = currentThread()
assert self.__owner is me, "release() of un-acquire()d lock"
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self.__block.acquire()
self.__count, self.__owner = state
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner is currentThread()
def Condition(*args, **kwargs):
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self.__lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self.__waiters = []
def __enter__(self):
return self.__lock.__enter__()
def __exit__(self, *args):
return self.__lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
def _release_save(self):
self.__lock.release() # No state to save
def _acquire_restore(self, x):
self.__lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by currentThread.
# This method is called only if __lock doesn't have _is_owned().
if self.__lock.acquire(0):
self.__lock.release()
return False
else:
return True
def wait(self, timeout=None):
assert self._is_owned(), "wait() of un-acquire()d lock"
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive. The scheme here sleeps very
# little at first, longer as time goes on, but never longer
# than 20 times per second (or the timeout time remaining).
endtime = _time() + timeout
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = endtime - _time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, .05)
_sleep(delay)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
assert self._is_owned(), "notify() of un-acquire()d lock"
__waiters = self.__waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notifyAll(self):
self.notify(len(self.__waiters))
def Semaphore(*args, **kwargs):
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
assert value >= 0, "Semaphore initial value must be >= 0"
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__value = value
def acquire(self, blocking=1):
rc = False
self.__cond.acquire()
while self.__value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = self.__value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self.__value)
rc = True
self.__cond.release()
return rc
__enter__ = acquire
def release(self):
self.__cond.acquire()
self.__value = self.__value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self.__value)
self.__cond.notify()
self.__cond.release()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
if self._Semaphore__value >= self._initial_value:
raise ValueError("Semaphore released too many times")
return _Semaphore.release(self)
def Event(*args, **kwargs):
return _Event(*args, **kwargs)
class _Event(_Verbose):
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__flag = False
def isSet(self):
return self.__flag
def set(self):
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notifyAll()
finally:
self.__cond.release()
def clear(self):
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
finally:
self.__cond.release()
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self.__target = target
self.__name = str(name or _newname())
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__started = False
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self.__stderr = _sys.stderr
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return currentThread().isDaemon()
def __repr__(self):
assert self.__initialized, "Thread.__init__() was not called"
status = "initial"
if self.__started:
status = "started"
if self.__stopped:
status = "stopped"
if self.__daemonic:
status = status + " daemon"
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
assert self.__initialized, "Thread.__init__() not called"
assert not self.__started, "thread already started"
if __debug__:
self._note("%s.start(): starting thread", self)
_active_limbo_lock.acquire()
_limbo[self] = self
_active_limbo_lock.release()
_start_new_thread(self.__bootstrap, ())
self.__started = True
_sleep(0.000001) # 1 usec, to let the thread run (Solaris hack)
def run(self):
if self.__target:
self.__target(*self.__args, **self.__kwargs)
def __bootstrap(self):
try:
self.__started = True
_active_limbo_lock.acquire()
_active[_get_ident()] = self
del _limbo[self]
_active_limbo_lock.release()
if __debug__:
self._note("%s.__bootstrap(): thread started", self)
if _trace_hook:
self._note("%s.__bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s.__bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s.__bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s.__bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self.__stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.getName(), _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self.__exc_info()
try:
self.__stderr.write("Exception in thread " + self.getName() +
" (most likely raised during interpreter shutdown):\n")
self.__stderr.write("Traceback (most recent call last):\n")
while exc_tb:
self.__stderr.write(' File "%s", line %s, in %s\n' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name))
exc_tb = exc_tb.tb_next
self.__stderr.write("%s: %s\n" % (exc_type, exc_value))
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s.__bootstrap(): normal return", self)
finally:
self.__stop()
try:
self.__delete()
except:
pass
def __stop(self):
self.__block.acquire()
self.__stopped = True
self.__block.notifyAll()
self.__block.release()
def __delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with dummy_thread:
#
# Must take care to not raise an exception if dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). dummy_thread.get_ident() always returns -1 since
# there is only one thread if dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
_active_limbo_lock.acquire()
try:
try:
del _active[_get_ident()]
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
finally:
_active_limbo_lock.release()
def join(self, timeout=None):
assert self.__initialized, "Thread.__init__() not called"
assert self.__started, "cannot join thread before it is started"
assert self is not currentThread(), "cannot join current thread"
if __debug__:
if not self.__stopped:
self._note("%s.join(): waiting until thread stops", self)
self.__block.acquire()
try:
if timeout is None:
while not self.__stopped:
self.__block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self.__stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self.__block.release()
def getName(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__name
def setName(self, name):
assert self.__initialized, "Thread.__init__() not called"
self.__name = str(name)
def isAlive(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__started and not self.__stopped
def isDaemon(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
def setDaemon(self, daemonic):
assert self.__initialized, "Thread.__init__() not called"
assert not self.__started, "cannot set daemon status of active thread"
self.__daemonic = daemonic
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.isSet():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._Thread__started = True
_active_limbo_lock.acquire()
_active[_get_ident()] = self
_active_limbo_lock.release()
def _set_daemon(self):
return False
def _exitfunc(self):
self._Thread__stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._Thread__delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.isDaemon() and t.isAlive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls currentThread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from currentThread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread.__block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._Thread__block
self._Thread__started = True
_active_limbo_lock.acquire()
_active[_get_ident()] = self
_active_limbo_lock.release()
def _set_daemon(self):
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
try:
return _active[_get_ident()]
except KeyError:
##print "currentThread(): no current thread for", _get_ident()
return _DummyThread()
def activeCount():
_active_limbo_lock.acquire()
count = len(_active) + len(_limbo)
_active_limbo_lock.release()
return count
def enumerate():
_active_limbo_lock.acquire()
active = list(_active.values()) + list(_limbo.values())
_active_limbo_lock.release()
return active
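# A minimal sketch (kept as comments) of the module-level API defined in
# this file; the worker function and its argument are illustrative only.
#
#   def worker(n):
#       print "working on", n
#   t = Thread(target=worker, args=(42,))
#   t.start()
#   print currentThread().getName(), activeCount()
#   for th in enumerate():
#       print th
#   t.join()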
#from thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
## try:
## from thread import _local as local
## except ImportError:
## from _threading_local import local
# Self-test code
if __debug__:
def _test():
from collections import deque
class BoundedQueue(_Verbose):
def __init__(self, limit):
_Verbose.__init__(self)
self.mon = RLock()
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
self.queue = deque()
def put(self, item):
self.mon.acquire()
while len(self.queue) >= self.limit:
self._note("put(%s): queue full", item)
self.wc.wait()
self.queue.append(item)
self._note("put(%s): appended, length now %d",
item, len(self.queue))
self.rc.notify()
self.mon.release()
def get(self):
self.mon.acquire()
while not self.queue:
self._note("get(): queue empty")
self.rc.wait()
item = self.queue.popleft()
self._note("get(): got %s, %d left", item, len(self.queue))
self.wc.notify()
self.mon.release()
return item
class ProducerThread(Thread):
def __init__(self, queue, quota):
Thread.__init__(self, name="Producer")
self.queue = queue
self.quota = quota
def run(self):
from random import random
counter = 0
while counter < self.quota:
counter = counter + 1
self.queue.put("%s.%d" % (self.getName(), counter))
_sleep(random() * 0.00001)
class ConsumerThread(Thread):
def __init__(self, queue, count):
Thread.__init__(self, name="Consumer")
self.queue = queue
self.count = count
def run(self):
while self.count > 0:
item = self.queue.get()
print(item)
self.count = self.count - 1
NP = 3
QL = 4
NI = 5
Q = BoundedQueue(QL)
P = []
for i in range(NP):
t = ProducerThread(Q, NI)
t.setName("Producer-%d" % (i+1))
P.append(t)
C = ConsumerThread(Q, NI*NP)
for t in P:
t.start()
_sleep(0.000001)
C.start()
for t in P:
t.join()
C.join()
if __name__ == '__main__':
_test()
#!/usr/bin/python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2009-2014 Andre Kahles, Jonas Behr, Gunnar Raetsch
# Copyright (C) 2009-2011 Max Planck Society
# Copyright (C) 2012-2014 Memorial Sloan-Kettering Cancer Center
#
# SplAdder wrapper script to start the interpreter with the correct list of arguments
import sys
import os
import re
import scipy as sp
import cPickle
from modules import settings
from modules.core.spladdercore import spladder_core
from modules.alt_splice.collect import collect_events
from modules.alt_splice.analyze import analyze_events
from modules.count import count_graph_coverage_wrapper
from modules.editgraph import filter_by_edgecount
import modules.init as init
import modules.rproc as rp
from modules.merge import run_merge
from modules.helpers import *
def parse_options(argv):
"""Parses options from the command line """
from optparse import OptionParser, OptionGroup
parser = OptionParser()
required = OptionGroup(parser, 'MANDATORY')
required.add_option('-b', '--bams', dest='bams', metavar='FILE1,FILE2,...', help='alignment files in BAM format (comma separated list)', default='-')
required.add_option('-o', '--outdir', dest='outdir', metavar='DIR', help='output directory', default='-')
    required.add_option('-a', '--annotation', dest='annotation', metavar='FILE', help='file name for annotation in GTF or GFF3 format', default='-')
input = OptionGroup(parser, 'INPUT OPTIONS')
input.add_option('-P', '--primary_only', dest='primary_only', metavar='y|n', help='only use primary alignments [n]', default='n')
input.add_option('-X', '--var_aware', dest='var_aware', metavar='y|n', help='alignment files are variation aware (presence of XM and XG tags) [n]', default='n')
input.add_option('-L', '--label', dest='label', metavar='STRING', help='label for current experiment [-]', default='-')
input.add_option('-S', '--ref_strain', dest='refstrain', metavar='STRING', help='reference strain [-]', default='-')
    input.add_option('-n', '--readlen', dest='readlen', metavar='INT', type='int', help='read length (used for automatic confidence level settings) [36]', default=36)
input.add_option('-x', '--same_genome', dest='same_genome', metavar='y|n', help='input alignments share the same genome [y]', default='y')
input.add_option('-F', '--spladderfile', dest='spladderfile', metavar='FILE', help='use existing SplAdder output file as input (advanced) [-]', default='-')
output = OptionGroup(parser, 'OUTPUT OPTIONS')
output.add_option('-l', '--logfile', dest='logfile', metavar='FILE', help='log file name [stdout]', default='-')
output.add_option('-v', '--verbose', dest='verbose', metavar='y|n', help='verbosity', default='n')
output.add_option('-d', '--debug', dest='debug', metavar='y|n', help='use debug mode [n]', default='n')
graph = OptionGroup(parser, 'GRAPH OPTIONS')
graph.add_option('-c', '--confidence', dest='confidence', metavar='INT', type='int', help='confidence level (0 lowest to 3 highest) [3]', default=3)
graph.add_option('-I', '--iterations', dest='iterations', metavar='INT', type='int', help='number of iterations to insert new introns into the graph [5]', default=5)
graph.add_option('-M', '--merge_strat', dest='merge', metavar='<STRAT>', help='merge strategy, where <STRAT> is one of: merge_bams, merge_graphs, merge_all [merge_graphs]', default='merge_graphs')
graph.add_option('-i', '--insert_ir', dest='insert_ir', metavar='y|n', help='insert intron retentions [y]', default='y')
graph.add_option('-e', '--insert_es', dest='insert_es', metavar='y|n', help='insert cassette exons [y]', default='y')
graph.add_option('-E', '--insert_ni', dest='insert_ni', metavar='y|n', help='insert new intron edges [y]', default='y')
graph.add_option('-r', '--remove_se', dest='remove_se', metavar='y|n', help='remove short exons [n]', default='n')
graph.add_option('-V', '--validate_sg', dest='validate_sg', metavar='y|n', help='validate splice graph [n]', default='n')
graph.add_option('-s', '--re-infer_sg', dest='infer_sg', metavar='y|n', help='re-infer splice graph [n] (advanced)', default='n')
#optional.add_option('-', '--', dest='', metavar='', help='', default='-')
#optional.add_option('-u', '--user', dest='user', metavar='FILE', help='file with user settings [-]', default='-')
splice = OptionGroup(parser, 'SPLICE OPTIONS')
splice.add_option('-T', '--extract_as', dest='extract_as', metavar='y|n', help='extract alternative splicing events [y]', default='y')
splice.add_option('-A', '--curate_alt_prime', dest='curate_alt_prime', metavar='y|n', help='curate alt prime events [y]', default='y')
    splice.add_option('-t', '--event_types', dest='event_types', metavar='TYPE1,TYPE2,...', help='list of alternative splicing events to extract [exon_skip,intron_retention,alt_3prime,alt_5prime,mult_exon_skip]', default='exon_skip,intron_retention,alt_3prime,alt_5prime,mult_exon_skip')
#optional.add_option('-C', '--truncations', dest='truncations', metavar='y|n', help='truncation detection mode [n]', default='n')
#optional.add_option('-', '--', dest='', metavar='y|n', help='', default=False)
experimental = OptionGroup(parser, 'EXPERIMENTAL - BETA STATE')
experimental.add_option('-p', '--pyproc', dest='pyproc', metavar='y|n', help='use parallel implementation [n]', default='n')
    experimental.add_option('-R', '--replicates', dest='replicates', metavar='1,1,2,2,...', help='replicate structure of files (same number as alignment files) [all 1 - no replicates]', default='-')
experimental.add_option('-U', '--intron_cov', dest='intron_cov', metavar='y|n', help='count intron coverage [n]', default='n')
experimental.add_option('', '--sparse_bam', dest='sparse_bam', metavar='y|n', help='store BAM content as sparse representation for later use [n]', default='n')
experimental.add_option('', '--ignore_mismatches', dest='ignore_mismatches', metavar='y|n', help='ignore mismatches - does not filter by edit operations - does not require NM in BAM [n]', default='n')
experimental.add_option('', '--output_struc', dest='output_struc', metavar='y|n', help='outputs events in structured splicing syntax similar to astalavista [n]', default='n')
experimental.add_option('', '--parallel', dest='parallel', metavar='<INT>', type='int', help='use multiple processors [1]', default=1)
    experimental.add_option('-q', '--quantify_graph', dest='quantify_graph', metavar='y|n', help='quantify graph - implicitly set when -T is set [n]', default='n')
parser.add_option_group(required)
parser.add_option_group(input)
parser.add_option_group(output)
parser.add_option_group(graph)
parser.add_option_group(splice)
parser.add_option_group(experimental)
(options, args) = parser.parse_args()
if len(argv) < 2:
parser.print_help()
sys.exit(2)
options.parser = parser
return options
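### Example invocation of this wrapper (sketch; the script and file names
### are hypothetical, the options correspond to parse_options() above):
###   python spladder.py -b sample1.bam,sample2.bam -o spladder_out \
###                      -a annotation.gtf -c 3 -v y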
def spladder():
### get command line options
options = parse_options(sys.argv)
### parse parameters from options object
CFG = settings.parse_args(options)
### add dependencies provided in config section
#if 'paths' in CFG:
# for i in CFG['paths']:
# eval('import %s'% CFG['paths'][i])
### load confidence level settings
if not CFG['no_reset_conf']:
CFG = settings.set_confidence_level(CFG)
### do not compute components of merged set, if result file already exists
fn_out_merge = get_filename('fn_out_merge', CFG)
fn_out_merge_val = get_filename('fn_out_merge_val', CFG)
if not 'spladder_infile' in CFG and not os.path.exists(fn_out_merge):
### iterate over files, if merge strategy is single
if CFG['merge_strategy'] in ['single', 'merge_graphs']:
idxs = range(len(CFG['samples']))
else:
idxs = [0]
### set parallelization
if CFG['rproc']:
jobinfo = []
### create out-directory
if not os.path.exists(CFG['out_dirname']):
os.makedirs(CFG['out_dirname'])
### create spladder sub-directory
if not os.path.exists(os.path.join(CFG['out_dirname'], 'spladder')):
os.makedirs(os.path.join(CFG['out_dirname'], 'spladder'))
### pre-process annotation, if necessary
if CFG['anno_fname'].split('.')[-1] != 'pickle':
if not os.path.exists(CFG['anno_fname'] + '.pickle'):
if CFG['anno_fname'].split('.')[-1] in ['gff', 'gff3']:
(genes, CFG) = init.init_genes_gff3(CFG['anno_fname'], CFG, CFG['anno_fname'] + '.pickle')
elif CFG['anno_fname'].split('.')[-1] in ['gtf']:
(genes, CFG) = init.init_genes_gtf(CFG['anno_fname'], CFG, CFG['anno_fname'] + '.pickle')
else:
print >> sys.stderr, 'ERROR: Unknown annotation format. File needs to end in gtf or gff/gff3\nCurrent file: %s' % CFG['anno_fname']
sys.exit(1)
CFG['anno_fname'] += '.pickle'
        ### add annotation contigs into lookup table
if not 'genes' in CFG:
genes = cPickle.load(open(CFG['anno_fname'], 'r'))
else:
genes = CFG['genes']
CFG = init.append_chrms(sp.unique(sp.array([x.chr for x in genes], dtype='str')), CFG)
del genes
for idx in idxs:
CFG_ = dict()
if CFG['merge_strategy'] != 'merge_bams':
CFG_['bam_fnames'] = CFG['bam_fnames']
CFG_['samples'] = CFG['samples']
CFG['bam_fnames'] = CFG['bam_fnames'][idx]
CFG['samples'] = CFG['samples'][idx]
CFG['out_fname'] = '%s/spladder/genes_graph_conf%i.%s.pickle' % (CFG['out_dirname'], CFG['confidence_level'], CFG['samples'])
else:
CFG['out_fname'] = '%s/spladder/genes_graph_conf%i.%s.pickle' % (CFG['out_dirname'], CFG['confidence_level'], CFG['merge_strategy'])
### assemble out filename to check if we are already done
fn_out = CFG['out_fname']
if CFG['do_prune']:
fn_out = re.sub('.pickle$', '_pruned.pickle', fn_out)
if CFG['do_gen_isoforms']:
fn_out = re.sub('.pickle$', '_with_isoforms.pickle', fn_out)
if os.path.exists(fn_out):
print >> sys.stdout, '%s - All result files already exist.' % fn_out
else:
if CFG['rproc']:
jobinfo.append(rp.rproc('spladder_core', CFG, 15000, CFG['options_rproc'], 60*60))
else:
spladder_core(CFG)
for key in CFG_:
try:
CFG[key] = CFG_[key].copy()
except AttributeError:
CFG[key] = CFG_[key]
### collect results after parallelization
if CFG['rproc']:
rp.rproc_wait(jobinfo, 30, 1.0, -1)
### merge parts if necessary
if CFG['merge_strategy'] == 'merge_graphs':
run_merge(CFG)
if not 'spladder_infile' in CFG and CFG['validate_splicegraphs'] and not os.path.exists(fn_out_merge_val):
(genes, inserted) = cPickle.load(open(fn_out_merge, 'r'))
genes = filter_by_edgecount(genes, CFG)
cPickle.dump((genes, inserted), open(fn_out_merge_val, 'w'), -1)
del genes
### get count output file
fn_in_count = get_filename('fn_count_in', CFG)
fn_out_count = get_filename('fn_count_out', CFG)
### convert input BAMs to sparse arrays
if CFG['bam_to_sparse']:
for bfn in CFG['bam_fnames']:
if bfn.endswith('bam') and not os.path.exists(re.sub(r'.bam$', '', bfn) + '.npz'):
cnts = dict()
if not 'chrm_lookup' in CFG:
IN = pysam.Samfile(bfn, 'rb')
CFG = append_chrms([x['SN'] for x in parse_header(IN.text)['SQ']], CFG)
IN.close()
if CFG['parallel'] > 1:
pool = CFG['pool']
result = [pool.apply_async(summarize_chr, args=(bfn, str(chrm), CFG,)) for chrm in sorted(CFG['chrm_lookup'])]
while result:
tmp = result.pop(0).get()
cnts[tmp[0] + '_reads_row'] = tmp[1].row.astype('uint8')
cnts[tmp[0] + '_reads_col'] = tmp[1].col
cnts[tmp[0] + '_reads_dat'] = tmp[1].data
cnts[tmp[0] + '_reads_shp'] = tmp[1].shape
cnts[tmp[0] + '_introns_m'] = tmp[2]
cnts[tmp[0] + '_introns_p'] = tmp[3]
else:
for chrm in CFG['chrm_lookup']:
tmp = summarize_chr(bfn, str(chrm), CFG)
cnts[chrm + '_reads_row'] = tmp[1].row.astype('uint8')
cnts[chrm + '_reads_col'] = tmp[1].col
cnts[chrm + '_reads_dat'] = tmp[1].data
cnts[chrm + '_reads_shp'] = tmp[1].shape
cnts[chrm + '_introns_m'] = tmp[2]
cnts[chrm + '_introns_p'] = tmp[3]
sp.savez_compressed(re.sub(r'.bam$', '', bfn), **cnts)
elif CFG['verbose']:
print >> sys.stdout, 'Sparse BAM representation for %s already exists.' % bfn
### count segment graph
if CFG['run_as_analysis'] or CFG['count_segment_graph']:
if not os.path.exists(fn_out_count):
count_graph_coverage_wrapper(fn_in_count, fn_out_count, CFG)
### count intron coverage phenotype
if CFG['count_intron_cov']:
fn_out_intron_count = fn_out_count.replace('mat', 'introns.pickle')
count_intron_coverage_wrapper(fn_in_count, fn_out_intron_count, CFG)
### handle alternative splicing part
if CFG['run_as_analysis']:
collect_events(CFG)
for idx in range(len(CFG['event_types'])):
analyze_events(CFG, CFG['event_types'][idx])
if __name__ == "__main__":
spladder()