Dataset schema (each record below is one source file):

    column     type    length / classes
    ---------  ------  --------------------
    repo_name  string  length 5 to 100
    path       string  length 4 to 251
    copies     string  990 distinct values
    size       string  length 4 to 7
    content    string  length 499 to 1.05M
    license    string  15 distinct values
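For reference, a minimal sketch of reading one row of this schema, assuming the dump comes from a Hugging Face-style dataset; the dataset name used below is hypothetical, not taken from this document:

    # Minimal sketch, assuming a Hugging Face-style dataset with the schema
    # above; "example-org/github-code-sample" is a hypothetical name.
    from datasets import load_dataset

    ds = load_dataset("example-org/github-code-sample", split="train")
    row = ds[0]
    # Scalar metadata columns:
    print(row["repo_name"], row["path"], row["copies"], row["size"], row["license"])
    # "content" holds the full source file as a single string:
    print(row["content"][:200])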
=== Record 1 ===
repo_name: altairpearl/scikit-learn
path:      sklearn/utils/testing.py
copies:    5
size:      27577
content:
"""Testing utilities.""" # Copyright (c) 2011, 2012 # Authors: Pietro Berkes, # Andreas Muller # Mathieu Blondel # Olivier Grisel # Arnaud Joly # Denis Engemann # Giorgio Patrini # Thierry Guillemot # License: BSD 3 clause import os import inspect import pkgutil import warnings import sys import re import platform import struct import scipy as sp import scipy.io from functools import wraps from operator import itemgetter try: # Python 2 from urllib2 import urlopen from urllib2 import HTTPError except ImportError: # Python 3+ from urllib.request import urlopen from urllib.error import HTTPError import tempfile import shutil import os.path as op import atexit # WindowsError only exist on Windows try: WindowsError except NameError: WindowsError = None import sklearn from sklearn.base import BaseEstimator from sklearn.externals import joblib # Conveniently import all assertions in one place. from nose.tools import assert_equal from nose.tools import assert_not_equal from nose.tools import assert_true from nose.tools import assert_false from nose.tools import assert_raises from nose.tools import raises try: from nose.tools import assert_dict_equal except ImportError: # Not in old versions of nose, but is only for formatting anyway assert_dict_equal = assert_equal from nose import SkipTest from nose import with_setup from numpy.testing import assert_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_less from numpy.testing import assert_approx_equal import numpy as np from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin) from sklearn.cluster import DBSCAN __all__ = ["assert_equal", "assert_not_equal", "assert_raises", "assert_raises_regexp", "raises", "with_setup", "assert_true", "assert_false", "assert_almost_equal", "assert_array_equal", "assert_array_almost_equal", "assert_array_less", "assert_less", "assert_less_equal", "assert_greater", "assert_greater_equal", "assert_approx_equal"] try: from nose.tools import assert_in, assert_not_in except ImportError: # Nose < 1.0.0 def assert_in(x, container): assert_true(x in container, msg="%r in %r" % (x, container)) def assert_not_in(x, container): assert_false(x in container, msg="%r in %r" % (x, container)) try: from nose.tools import assert_raises_regex except ImportError: # for Python 2 def assert_raises_regex(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs): """Helper function to check for message patterns in exceptions.""" not_raised = False try: callable_obj(*args, **kwargs) not_raised = True except expected_exception as e: error_message = str(e) if not re.compile(expected_regexp).search(error_message): raise AssertionError("Error message should match pattern " "%r. %r does not." 
% (expected_regexp, error_message)) if not_raised: raise AssertionError("%s not raised by %s" % (expected_exception.__name__, callable_obj.__name__)) # assert_raises_regexp is deprecated in Python 3.4 in favor of # assert_raises_regex but lets keep the backward compat in scikit-learn with # the old name for now assert_raises_regexp = assert_raises_regex def _assert_less(a, b, msg=None): message = "%r is not lower than %r" % (a, b) if msg is not None: message += ": " + msg assert a < b, message def _assert_greater(a, b, msg=None): message = "%r is not greater than %r" % (a, b) if msg is not None: message += ": " + msg assert a > b, message def assert_less_equal(a, b, msg=None): message = "%r is not lower than or equal to %r" % (a, b) if msg is not None: message += ": " + msg assert a <= b, message def assert_greater_equal(a, b, msg=None): message = "%r is not greater than or equal to %r" % (a, b) if msg is not None: message += ": " + msg assert a >= b, message def assert_warns(warning_class, func, *args, **kw): """Test that a certain warning occurs. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. func : callable Calable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func` Returns ------- result : the return value of `func` """ # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Trigger a warning. result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = any(warning.category is warning_class for warning in w) if not found: raise AssertionError("%s did not give warning: %s( is %s)" % (func.__name__, warning_class, w)) return result def assert_warns_message(warning_class, message, func, *args, **kw): # very important to avoid uncontrolled state propagation """Test that a certain warning occurs and with a certain message. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. message : str | callable The entire message or a substring to test for. If callable, it takes a string as argument and will trigger an assertion error if it returns `False`. func : callable Calable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func`. Returns ------- result : the return value of `func` """ clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") if hasattr(np, 'VisibleDeprecationWarning'): # Let's not catch the numpy internal DeprecationWarnings warnings.simplefilter('ignore', np.VisibleDeprecationWarning) # Trigger a warning. 
result = func(*args, **kw) # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = [issubclass(warning.category, warning_class) for warning in w] if not any(found): raise AssertionError("No warning raised for %s with class " "%s" % (func.__name__, warning_class)) message_found = False # Checks the message of all warnings belong to warning_class for index in [i for i, x in enumerate(found) if x]: # substring will match, the entire message with typo won't msg = w[index].message # For Python 3 compatibility msg = str(msg.args[0] if hasattr(msg, 'args') else msg) if callable(message): # add support for certain tests check_in_message = message else: check_in_message = lambda msg: message in msg if check_in_message(msg): message_found = True break if not message_found: raise AssertionError("Did not receive the message you expected " "('%s') for <%s>, got: '%s'" % (message, func.__name__, msg)) return result # To remove when we support numpy 1.7 def assert_no_warnings(func, *args, **kw): # XXX: once we may depend on python >= 2.6, this can be replaced by the # warnings module context manager. # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] if len(w) > 0: raise AssertionError("Got warnings when calling %s: [%s]" % (func.__name__, ', '.join(str(warning) for warning in w))) return result def ignore_warnings(obj=None, category=Warning): """Context manager and decorator to ignore warnings. Note. Using this (in both variants) will clear all warnings from all python modules loaded. In case you need to test cross-module-warning-logging this is not your tool of choice. Parameters ---------- category : warning class, defaults to Warning. The category to filter. If Warning, all categories will be muted. Examples -------- >>> with ignore_warnings(): ... warnings.warn('buhuhuhu') >>> def nasty_warn(): ... warnings.warn('buhuhuhu') ... print(42) >>> ignore_warnings(nasty_warn)() 42 """ if callable(obj): return _IgnoreWarnings(category=category)(obj) else: return _IgnoreWarnings(category=category) class _IgnoreWarnings(object): """Improved and simplified Python warnings context manager and decorator. This class allows to ignore the warnings raise by a function. Copied from Python 2.7.5 and modified as required. Parameters ---------- category : tuple of warning class, defaut to Warning The category to filter. By default, all the categories will be muted. 
""" def __init__(self, category): self._record = True self._module = sys.modules['warnings'] self._entered = False self.log = [] self.category = category def __call__(self, fn): """Decorator to catch and hide warnings without visual nesting.""" @wraps(fn) def wrapper(*args, **kwargs): # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(): warnings.simplefilter("ignore", self.category) return fn(*args, **kwargs) return wrapper def __repr__(self): args = [] if self._record: args.append("record=True") if self._module is not sys.modules['warnings']: args.append("module=%r" % self._module) name = type(self).__name__ return "%s(%s)" % (name, ", ".join(args)) def __enter__(self): clean_warning_registry() # be safe and not propagate state + chaos warnings.simplefilter("ignore", self.category) if self._entered: raise RuntimeError("Cannot enter %r twice" % self) self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] self._showwarning = self._module.showwarning def __exit__(self, *exc_info): if not self._entered: raise RuntimeError("Cannot exit %r without entering first" % self) self._module.filters = self._filters self._module.showwarning = self._showwarning self.log[:] = [] clean_warning_registry() # be safe and not propagate state + chaos try: from nose.tools import assert_less except ImportError: assert_less = _assert_less try: from nose.tools import assert_greater except ImportError: assert_greater = _assert_greater def _assert_allclose(actual, desired, rtol=1e-7, atol=0, err_msg='', verbose=True): actual, desired = np.asanyarray(actual), np.asanyarray(desired) if np.allclose(actual, desired, rtol=rtol, atol=atol): return msg = ('Array not equal to tolerance rtol=%g, atol=%g: ' 'actual %s, desired %s') % (rtol, atol, actual, desired) raise AssertionError(msg) if hasattr(np.testing, 'assert_allclose'): assert_allclose = np.testing.assert_allclose else: assert_allclose = _assert_allclose def assert_raise_message(exceptions, message, function, *args, **kwargs): """Helper function to test error messages in exceptions. Parameters ---------- exceptions : exception or tuple of exception Name of the estimator function : callable Calable object to raise error *args : the positional arguments to `function`. **kw : the keyword arguments to `function` """ try: function(*args, **kwargs) except exceptions as e: error_message = str(e) if message not in error_message: raise AssertionError("Error message does not include the expected" " string: %r. Observed error message: %r" % (message, error_message)) else: # concatenate exception names if isinstance(exceptions, tuple): names = " or ".join(e.__name__ for e in exceptions) else: names = exceptions.__name__ raise AssertionError("%s not raised by %s" % (names, function.__name__)) def fake_mldata(columns_dict, dataname, matfile, ordering=None): """Create a fake mldata data set. Parameters ---------- columns_dict : dict, keys=str, values=ndarray Contains data as columns_dict[column_name] = array of data. dataname : string Name of data set. matfile : string or file object The file name string or the file-like object of the output file. ordering : list, default None List of column_names, determines the ordering in the data set. Notes ----- This function transposes all arrays, while fetch_mldata only transposes 'data', keep that into account in the tests. 
""" datasets = dict(columns_dict) # transpose all variables for name in datasets: datasets[name] = datasets[name].T if ordering is None: ordering = sorted(list(datasets.keys())) # NOTE: setting up this array is tricky, because of the way Matlab # re-packages 1D arrays datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)), dtype='object') for i, name in enumerate(ordering): datasets['mldata_descr_ordering'][0, i] = name scipy.io.savemat(matfile, datasets, oned_as='column') class mock_mldata_urlopen(object): def __init__(self, mock_datasets): """Object that mocks the urlopen function to fake requests to mldata. `mock_datasets` is a dictionary of {dataset_name: data_dict}, or {dataset_name: (data_dict, ordering). `data_dict` itself is a dictionary of {column_name: data_array}, and `ordering` is a list of column_names to determine the ordering in the data set (see `fake_mldata` for details). When requesting a dataset with a name that is in mock_datasets, this object creates a fake dataset in a StringIO object and returns it. Otherwise, it raises an HTTPError. """ self.mock_datasets = mock_datasets def __call__(self, urlname): dataset_name = urlname.split('/')[-1] if dataset_name in self.mock_datasets: resource_name = '_' + dataset_name from io import BytesIO matfile = BytesIO() dataset = self.mock_datasets[dataset_name] ordering = None if isinstance(dataset, tuple): dataset, ordering = dataset fake_mldata(dataset, resource_name, matfile, ordering) matfile.seek(0) return matfile else: raise HTTPError(urlname, 404, dataset_name + " is not available", [], None) def install_mldata_mock(mock_datasets): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets) def uninstall_mldata_mock(): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = urlopen # Meta estimators need another estimator to be instantiated. META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator", "MultiOutputRegressor", "MultiOutputClassifier", "OutputCodeClassifier", "OneVsRestClassifier", "RFE", "RFECV", "BaseEnsemble"] # estimators that there is no way to default-construct sensibly OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV", "SelectFromModel"] # some trange ones DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'TfidfTransformer', 'TfidfVectorizer', 'IsotonicRegression', 'OneHotEncoder', 'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier', 'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures', 'GaussianRandomProjectionHash', 'HashingVectorizer', 'CheckingClassifier', 'PatchExtractor', 'CountVectorizer', # GradientBoosting base estimators, maybe should # exclude them in another way 'ZeroEstimator', 'ScaledLogOddsEstimator', 'QuantileEstimator', 'MeanEstimator', 'LogOddsEstimator', 'PriorProbabilityEstimator', '_SigmoidCalibration', 'VotingClassifier'] def all_estimators(include_meta_estimators=False, include_other=False, type_filter=None, include_dont_test=False): """Get a list of all estimators from sklearn. This function crawls the module and gets all classes that inherit from BaseEstimator. Classes that are defined in test-modules are not included. By default meta_estimators such as GridSearchCV are also not included. 
Parameters ---------- include_meta_estimators : boolean, default=False Whether to include meta-estimators that can be constructed using an estimator as their first argument. These are currently BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier, OneVsRestClassifier, RFE, RFECV. include_other : boolean, default=False Wether to include meta-estimators that are somehow special and can not be default-constructed sensibly. These are currently Pipeline, FeatureUnion and GridSearchCV include_dont_test : boolean, default=False Whether to include "special" label estimator or test processors. type_filter : string, list of string, or None, default=None Which kind of estimators should be returned. If None, no filter is applied and all estimators are returned. Possible values are 'classifier', 'regressor', 'cluster' and 'transformer' to get estimators only of these specific types, or a list of these to get the estimators that fit at least one of the types. Returns ------- estimators : list of tuples List of (name, class), where ``name`` is the class name as string and ``class`` is the actuall type of the class. """ def is_abstract(c): if not(hasattr(c, '__abstractmethods__')): return False if not len(c.__abstractmethods__): return False return True all_classes = [] # get parent folder path = sklearn.__path__ for importer, modname, ispkg in pkgutil.walk_packages( path=path, prefix='sklearn.', onerror=lambda x: None): if (".tests." in modname): continue module = __import__(modname, fromlist="dummy") classes = inspect.getmembers(module, inspect.isclass) all_classes.extend(classes) all_classes = set(all_classes) estimators = [c for c in all_classes if (issubclass(c[1], BaseEstimator) and c[0] != 'BaseEstimator')] # get rid of abstract base classes estimators = [c for c in estimators if not is_abstract(c[1])] if not include_dont_test: estimators = [c for c in estimators if not c[0] in DONT_TEST] if not include_other: estimators = [c for c in estimators if not c[0] in OTHER] # possibly get rid of meta estimators if not include_meta_estimators: estimators = [c for c in estimators if not c[0] in META_ESTIMATORS] if type_filter is not None: if not isinstance(type_filter, list): type_filter = [type_filter] else: type_filter = list(type_filter) # copy filtered_estimators = [] filters = {'classifier': ClassifierMixin, 'regressor': RegressorMixin, 'transformer': TransformerMixin, 'cluster': ClusterMixin} for name, mixin in filters.items(): if name in type_filter: type_filter.remove(name) filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)]) estimators = filtered_estimators if type_filter: raise ValueError("Parameter type_filter must be 'classifier', " "'regressor', 'transformer', 'cluster' or " "None, got" " %s." % repr(type_filter)) # drop duplicates, sort for reproducibility # itemgetter is used to ensure the sort does not extend to the 2nd item of # the tuple return sorted(set(estimators), key=itemgetter(0)) def set_random_state(estimator, random_state=0): """Set random state of an estimator if it has the `random_state` param. Classes for whom random_state is deprecated are ignored. Currently DBSCAN is one such class. 
""" if isinstance(estimator, DBSCAN): return if "random_state" in estimator.get_params(): estimator.set_params(random_state=random_state) def if_matplotlib(func): """Test decorator that skips test if matplotlib not installed.""" @wraps(func) def run_test(*args, **kwargs): try: import matplotlib matplotlib.use('Agg', warn=False) # this fails if no $DISPLAY specified import matplotlib.pyplot as plt plt.figure() except ImportError: raise SkipTest('Matplotlib not available.') else: return func(*args, **kwargs) return run_test def skip_if_32bit(func): """Test decorator that skips tests on 32bit platforms.""" @wraps(func) def run_test(*args, **kwargs): bits = 8 * struct.calcsize("P") if bits == 32: raise SkipTest('Test skipped on 32bit platforms.') else: return func(*args, **kwargs) return run_test def if_not_mac_os(versions=('10.7', '10.8', '10.9'), message='Multi-process bug in Mac OS X >= 10.7 ' '(see issue #636)'): """Test decorator that skips test if OS is Mac OS X and its major version is one of ``versions``. """ warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed" " in 0.19: use the safer and more generic" " if_safe_multiprocessing_with_blas instead", DeprecationWarning) mac_version, _, _ = platform.mac_ver() skip = '.'.join(mac_version.split('.')[:2]) in versions def decorator(func): if skip: @wraps(func) def func(*args, **kwargs): raise SkipTest(message) return func return decorator def if_safe_multiprocessing_with_blas(func): """Decorator for tests involving both BLAS calls and multiprocessing. Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with some implementation of BLAS (or other libraries that manage an internal posix thread pool) can cause a crash or a freeze of the Python process. In practice all known packaged distributions (from Linux distros or Anaconda) of BLAS under Linux seems to be safe. So we this problem seems to only impact OSX users. This wrapper makes it possible to skip tests that can possibly cause this crash under OS X with. Under Python 3.4+ it is possible to use the `forkserver` start method for multiprocessing to avoid this issue. However it can cause pickling errors on interactively defined functions. It therefore not enabled by default. """ @wraps(func) def run_test(*args, **kwargs): if sys.platform == 'darwin': raise SkipTest( "Possible multi-process bug with some BLAS") return func(*args, **kwargs) return run_test def clean_warning_registry(): """Safe way to reset warnings.""" warnings.resetwarnings() reg = "__warningregistry__" for mod_name, mod in list(sys.modules.items()): if 'six.moves' in mod_name: continue if hasattr(mod, reg): getattr(mod, reg).clear() def check_skip_network(): if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)): raise SkipTest("Text tutorial requires large dataset download") def check_skip_travis(): """Skip test if being run on Travis.""" if os.environ.get('TRAVIS') == "true": raise SkipTest("This test needs to be skipped on Travis") def _delete_folder(folder_path, warn=False): """Utility function to cleanup a temporary folder if still existing. Copy from joblib.pool (for independence). 
""" try: if os.path.exists(folder_path): # This can fail under windows, # but will succeed when called by atexit shutil.rmtree(folder_path) except WindowsError: if warn: warnings.warn("Could not delete temporary folder %s" % folder_path) class TempMemmap(object): def __init__(self, data, mmap_mode='r'): self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_') self.mmap_mode = mmap_mode self.data = data def __enter__(self): fpath = op.join(self.temp_folder, 'data.pkl') joblib.dump(self.data, fpath) data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode) atexit.register(lambda: _delete_folder(self.temp_folder, warn=True)) return data_read_only def __exit__(self, exc_type, exc_val, exc_tb): _delete_folder(self.temp_folder) with_network = with_setup(check_skip_network) with_travis = with_setup(check_skip_travis)
license: bsd-3-clause

=== Record 2 ===
repo_name: thypad/brew
path:      examples/pruning.py
copies:    3
size:      1176
content:
import numpy as np

import sklearn
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.cross_validation import train_test_split

from brew.generation.bagging import Bagging
from brew.base import Ensemble, EnsembleClassifier
from brew.selection.pruning.epic import EPIC

N = 1000

dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)

X, y = datasets.make_hastie_10_2(n_samples=N, random_state=1)
for i, yi in enumerate(set(y)):
    y[y == yi] = i

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
                                                  test_size=0.30)

bag = Bagging(base_classifier=dt, n_classifiers=10)
bag.fit(X_val, y_val)

epic = EPIC()
epic.fit(bag.ensemble, X_test, y_test)

print('-----------------ERROR RATE----------------------')
for p in np.arange(0.1, 1.1, 0.1):
    ensemble = epic.get(p)
    mcs = EnsembleClassifier(Ensemble(classifiers=epic.get(p)),
                             selector=None)
    y_pred = mcs.predict(X_test)
    print('p={}, {}'.format(p, zero_one_loss(y_pred, y_test)))
print('------------------------------------------------')
license: mit

=== Record 3 ===
repo_name: motion2015/edx-platform
path:      lms/djangoapps/bulk_email/forms.py
copies:    92
size:      4591
content:
""" Defines a form for providing validation of CourseEmail templates. """ import logging from django import forms from django.core.exceptions import ValidationError from bulk_email.models import CourseEmailTemplate, COURSE_EMAIL_MESSAGE_BODY_TAG, CourseAuthorization from opaque_keys import InvalidKeyError from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey log = logging.getLogger(__name__) class CourseEmailTemplateForm(forms.ModelForm): """Form providing validation of CourseEmail templates.""" name = forms.CharField(required=False) class Meta(object): # pylint: disable=missing-docstring model = CourseEmailTemplate fields = ('html_template', 'plain_template', 'name') def _validate_template(self, template): """Check the template for required tags.""" index = template.find(COURSE_EMAIL_MESSAGE_BODY_TAG) if index < 0: msg = 'Missing tag: "{}"'.format(COURSE_EMAIL_MESSAGE_BODY_TAG) log.warning(msg) raise ValidationError(msg) if template.find(COURSE_EMAIL_MESSAGE_BODY_TAG, index + 1) >= 0: msg = 'Multiple instances of tag: "{}"'.format(COURSE_EMAIL_MESSAGE_BODY_TAG) log.warning(msg) raise ValidationError(msg) # TODO: add more validation here, including the set of known tags # for which values will be supplied. (Email will fail if the template # uses tags for which values are not supplied.) def clean_html_template(self): """Validate the HTML template.""" template = self.cleaned_data["html_template"] self._validate_template(template) return template def clean_plain_template(self): """Validate the plaintext template.""" template = self.cleaned_data["plain_template"] self._validate_template(template) return template def clean_name(self): """Validate the name field. Enforce uniqueness constraint on 'name' field""" # Note that we get back a blank string in the Form for an empty 'name' field # we want those to be set to None in Python and NULL in the database name = self.cleaned_data.get("name").strip() or None # if we are creating a new CourseEmailTemplate, then we need to # enforce the uniquess constraint as part of the Form validation if not self.instance.pk: try: CourseEmailTemplate.get_template(name) # already exists, this is no good raise ValidationError('Name of "{}" already exists, this must be unique.'.format(name)) except CourseEmailTemplate.DoesNotExist: # this is actually the successful validation pass return name class CourseAuthorizationAdminForm(forms.ModelForm): """Input form for email enabling, allowing us to verify data.""" class Meta(object): # pylint: disable=missing-docstring model = CourseAuthorization def clean_course_id(self): """Validate the course id""" cleaned_id = self.cleaned_data["course_id"] try: course_key = CourseKey.from_string(cleaned_id) except InvalidKeyError: try: course_key = SlashSeparatedCourseKey.from_deprecated_string(cleaned_id) except InvalidKeyError: msg = u'Course id invalid.' msg += u' --- Entered course id was: "{0}". '.format(cleaned_id) msg += 'Please recheck that you have supplied a valid course id.' raise forms.ValidationError(msg) if not modulestore().has_course(course_key): msg = u'COURSE NOT FOUND' msg += u' --- Entered course id was: "{0}". '.format(course_key.to_deprecated_string()) msg += 'Please recheck that you have supplied a valid course id.' 
raise forms.ValidationError(msg) # Now, try and discern if it is a Studio course - HTML editor doesn't work with XML courses is_studio_course = modulestore().get_modulestore_type(course_key) != ModuleStoreEnum.Type.xml if not is_studio_course: msg = "Course Email feature is only available for courses authored in Studio. " msg += '"{0}" appears to be an XML backed course.'.format(course_key.to_deprecated_string()) raise forms.ValidationError(msg) return course_key
license: agpl-3.0

=== Record 4 ===
repo_name: molmod/zeobuilder
path:      updateheaders.py
copies:    1
size:      3958
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--


from glob import glob
import os, sys


def strip_header(lines, closing):
    # search for the header closing line, e.g. '#--\n'
    counter = 0
    found = 0
    for line in lines:
        counter += 1
        if line == closing:
            found = 1
        elif found == 1:
            break
    if found:
        del lines[:counter-1]
    # If the header closing is not found, no headers are removed
    # add a header closing line
    lines.insert(0, closing)


def fix_python(lines, header_lines):
    # check if a shebang is present
    do_shebang = lines[0].startswith('#!')
    # remove the current header
    strip_header(lines, '#--\n')
    # add new header (insert must be in reverse order)
    for hline in header_lines[::-1]:
        lines.insert(0, ('# '+hline).strip() + '\n')
    # add a source code encoding line
    lines.insert(0, '# -*- coding: utf-8 -*-\n')
    if do_shebang:
        lines.insert(0, '#!/usr/bin/env python\n')


def fix_c(lines, header_lines):
    # check for an exception line
    for line in lines:
        if 'no_update_headers' in line:
            return
    # remove the current header
    strip_header(lines, '//--\n')
    # add new header (insert must be in reverse order)
    for hline in header_lines[::-1]:
        lines.insert(0, ('// '+hline).strip() + '\n')


def fix_f77(lines, header_lines):
    # check for an exception line
    for line in lines:
        if 'no_update_headers' in line:
            return
    # remove the current header
    strip_header(lines, '!--\n')
    # add new header (insert must be in reverse order)
    for hline in header_lines[::-1]:
        lines.insert(0, ('! '+hline).strip() + '\n')


def main(fns):
    fixers = [
        ('.py', fix_python),
        ('.pxd', fix_python),
        ('.pyx', fix_python),
        ('.c', fix_c),
        ('.cpp', fix_c),
        ('.h', fix_c),
        ('.pyf', fix_f77),
    ]
    f = open('HEADER')
    header_lines = f.readlines()
    f.close()
    for fn in fns:
        if not os.path.isfile(fn):
            continue
        for ext, fixer in fixers:
            if fn.endswith(ext):
                print 'Fixing ', fn
                f = file(fn)
                lines = f.readlines()
                f.close()
                fixer(lines, header_lines)
                f = file(fn, 'w')
                f.writelines(lines)
                f.close()
                break


if __name__ == '__main__':
    args = sys.argv[1:]
    main(args)
license: gpl-3.0

=== Record 5 ===
repo_name: apark263/tensorflow
path:      tensorflow/contrib/deprecated/summaries_test.py
copies:    22
size:      2246
content:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the deprecated summary ops in tf.contrib.deprecated."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.platform import test


class DeprecatedSummariesTest(test.TestCase):

  def testScalarSummary(self):
    with self.cached_session():
      c = constant_op.constant(3)
      s = logging_ops.scalar_summary('tag', c)
      self.assertEqual(s.op.type, u'ScalarSummary')

  def testHistogramSummary(self):
    with self.cached_session():
      c = constant_op.constant(3)
      s = logging_ops.histogram_summary('tag', c)
      self.assertEqual(s.op.type, u'HistogramSummary')

  def testImageSummary(self):
    with self.cached_session():
      i = array_ops.ones((5, 4, 4, 3))
      s = logging_ops.image_summary('tag', i)
      self.assertEqual(s.op.type, u'ImageSummary')

  def testAudioSummary(self):
    with self.cached_session():
      c = constant_op.constant(3.0)
      s = logging_ops.audio_summary('tag', c, sample_rate=8000)
      self.assertEqual(s.op.type, u'AudioSummaryV2')

  def testMergeSummary(self):
    with self.cached_session():
      c = constant_op.constant(3)
      a = logging_ops.scalar_summary('a', c)
      b = logging_ops.scalar_summary('b', c)
      s = logging_ops.merge_summary([a, b])
      self.assertEqual(s.op.type, u'MergeSummary')


if __name__ == '__main__':
  test.main()
license: apache-2.0

=== Record 6 ===
repo_name: 2013Commons/hue
path:      apps/filebrowser/src/filebrowser/conf.py
copies:    16
size:      1257
content:
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from django.utils.translation import ugettext_lazy as _

from desktop.lib.conf import Config

MAX_SNAPPY_DECOMPRESSION_SIZE = Config(
  key="max_snappy_decompression_size",
  help=_("Max snappy decompression size in bytes."),
  private=True,
  default=1024*1024*25,
  type=int)

ARCHIVE_UPLOAD_TEMPDIR = Config(
  key="archive_upload_tempdir",
  help=_("Location on local filesystem where the uploaded archives are temporarily stored."),
  default=None,
  type=str)
license: apache-2.0

=== Record 7 ===
repo_name: 2013Commons/hue
path:      desktop/core/ext-py/Django-1.4.5/django/contrib/localflavor/pt/forms.py
copies:    88
size:      1577
content:
""" PT-specific Form helpers """ import re from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import Field, RegexField from django.utils.encoding import smart_unicode from django.utils.translation import ugettext_lazy as _ phone_digits_re = re.compile(r'^(\d{9}|(00|\+)\d*)$') class PTZipCodeField(RegexField): default_error_messages = { 'invalid': _('Enter a zip code in the format XXXX-XXX.'), } def __init__(self, max_length=None, min_length=None, *args, **kwargs): super(PTZipCodeField, self).__init__(r'^(\d{4}-\d{3}|\d{7})$', max_length, min_length, *args, **kwargs) def clean(self,value): cleaned = super(PTZipCodeField, self).clean(value) if len(cleaned) == 7: return u'%s-%s' % (cleaned[:4],cleaned[4:]) else: return cleaned class PTPhoneNumberField(Field): """ Validate local Portuguese phone number (including international ones) It should have 9 digits (may include spaces) or start by 00 or + (international) """ default_error_messages = { 'invalid': _('Phone numbers must have 9 digits, or start by + or 00.'), } def clean(self, value): super(PTPhoneNumberField, self).clean(value) if value in EMPTY_VALUES: return u'' value = re.sub('(\.|\s)', '', smart_unicode(value)) m = phone_digits_re.search(value) if m: return u'%s' % value raise ValidationError(self.error_messages['invalid'])
license: apache-2.0

=== Record 8 ===
repo_name: rew4332/tensorflow
path:      tensorflow/python/training/proximal_gradient_descent_test.py
copies:    8
size:      6187
content:
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Proximal Gradient Descent operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf


class ProximalGradientDescentOptimizerTest(tf.test.TestCase):

  def testProximalGradientDescentwithoutRegularization(self):
    with self.test_session() as sess:
      var0 = tf.Variable([0.0, 0.0])
      var1 = tf.Variable([0.0, 0.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])
      opt = tf.train.ProximalGradientDescentOptimizer(
          3.0,
          l1_regularization_strength=0.0,
          l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()

      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([0.0, 0.0], v0_val)
      self.assertAllClose([0.0, 0.0], v1_val)

      # Run 3 steps Proximal Gradient Descent.
      for _ in range(3):
        update.run()

      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([-0.9, -1.8]), v0_val)
      self.assertAllClose(np.array([-0.09, -0.18]), v1_val)

  def testProximalGradientDescentwithoutRegularization2(self):
    with self.test_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([4.0, 3.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])
      opt = tf.train.ProximalGradientDescentOptimizer(
          3.0,
          l1_regularization_strength=0.0,
          l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()

      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)

      # Run 3 steps Proximal Gradient Descent
      for _ in range(3):
        update.run()

      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([0.1, 0.2]), v0_val)
      self.assertAllClose(np.array([3.91, 2.82]), v1_val)

  def testProximalGradientDescentWithL1_L2(self):
    with self.test_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([4.0, 3.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])
      opt = tf.train.ProximalGradientDescentOptimizer(
          3.0,
          l1_regularization_strength=0.001,
          l2_regularization_strength=2.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()

      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)

      # Run 10 steps Proximal Gradient Descent
      for _ in range(10):
        update.run()

      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([0.037125, 0.074625]), v0_val)
      self.assertAllClose(np.array([0.003375, 0.007125]), v1_val)

  def applyOptimizer(self, opt, steps=5, is_sparse=False):
    if is_sparse:
      var0 = tf.Variable([[1.0], [2.0]])
      var1 = tf.Variable([[3.0], [4.0]])
      grads0 = tf.IndexedSlices(tf.constant([0.1], shape=[1, 1]),
                                tf.constant([0]),
                                tf.constant([2, 1]))
      grads1 = tf.IndexedSlices(tf.constant([0.02], shape=[1, 1]),
                                tf.constant([1]),
                                tf.constant([2, 1]))
    else:
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([3.0, 4.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])

    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    tf.initialize_all_variables().run()

    sess = tf.get_default_session()
    v0_val, v1_val = sess.run([var0, var1])
    if is_sparse:
      self.assertAllClose([[1.0], [2.0]], v0_val)
      self.assertAllClose([[3.0], [4.0]], v1_val)
    else:
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([3.0, 4.0], v1_val)

    # Run ProximalAdagrad for a few steps
    for _ in range(steps):
      update.run()

    v0_val, v1_val = sess.run([var0, var1])
    return v0_val, v1_val

  def testEquivSparseGradientDescentwithoutRegularizaion(self):
    with self.test_session():
      val0, val1 = self.applyOptimizer(
          tf.train.ProximalGradientDescentOptimizer(
              3.0,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0),
          is_sparse=True)

    with self.test_session():
      val2, val3 = self.applyOptimizer(
          tf.train.GradientDescentOptimizer(3.0), is_sparse=True)

    self.assertAllClose(val0, val2)
    self.assertAllClose(val1, val3)

  def testEquivGradientDescentwithoutRegularizaion(self):
    with self.test_session():
      val0, val1 = self.applyOptimizer(
          tf.train.ProximalGradientDescentOptimizer(
              3.0,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0))

    with self.test_session():
      val2, val3 = self.applyOptimizer(
          tf.train.GradientDescentOptimizer(3.0))

    self.assertAllClose(val0, val2)
    self.assertAllClose(val1, val3)


if __name__ == "__main__":
  tf.test.main()
license: apache-2.0

=== Record 9 ===
repo_name: TUDelftNAS/SDN-NaaSPlatform
path:      NaaSPlatform/sFlow_Core_Interface_Monitoring_App.py
copies:    1
size:      13197
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015, Delft University of Technology, Faculty of Electrical Engineering, Mathematics and Computer Science, Network Architectures and Services and TNO, ICT - Service Enabling and Management, Mani Prashanth Varma Manthena, Niels van Adrichem, Casper van den Broek and F. A. Kuipers
#
# This file is part of NaaSPlatform.
#
# NaaSPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NaaSPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NaaSPlatform. If not, see <http://www.gnu.org/licenses/>.

# Network-as-a-Service (NaaS) platform's sFlow based core interface monitoring application

# Importing Python modules
import sys  # Python module for system (i.e. interpreter) specific parameters and functions
import select  # Python module for I/O completion waiting

# Importing NaaS platform's main application for performing NaaS related operations and functions
from Main_App import *


class sflow_network_core_interface_monitoring():
    # sFlow based network core interface monitoring application for detecting
    # high bandwidth link utilizations and failures in the network's core

    global url_sflow, ip_sflow, sflow_header, sflow_agents, sflow, ifin_metric_name, ifout_metric_name, ifin_thresh_name, ifout_thresh_name, ifin_thresh_data, ifout_thresh_data
    core_sflow_base = naas_arch().core_sflow_base_url()
    url_sflow = core_sflow_base['URL']
    ip_sflow = core_sflow_base['Host IP']
    sflow_header = naas_arch().sflow_api_header()
    sflow_agents = naas_arch().core_sflow_agents()
    sflow = sflow_api_calls()
    ifin_metric_name = 'ifinutilization'
    ifout_metric_name = 'ifoututilization'
    ifin_thresh_name = 'ifinuti'
    ifout_thresh_name = 'ifoututi'
    ifin_thresh_data = {}
    ifout_thresh_data = {}

    # Initializing the core network sFlow based interface monitoring application
    # for detecting high bandwidth link utilizations in the network's core
    def __init__(self):
        try:
            print '\n\nStarting a sFlow based core network interface monitoring application...\n\n'
            print '\n\nFor detecting high bandwidth link utilizations and link failures in the network\'s core...\n\n'
            print '\n\nEnter the following details in order to start the sFlow based core interface monitoring application...'
            print '\nNote: If you want to skip an entry, press the enter key\n\n'
            print '\n\nIncoming traffic monitoring metric name: ', ifin_metric_name
            print '\n\nOutgoing traffic monitoring metric name: ', ifout_metric_name
            print '\n\nSet the threshold limits/values (i.e. in % of total available link bandwidth) for the interface\'s incoming and outgoing traffic...\n\n'
            ifin_thresh_value = raw_input('Enter the Incoming Traffic Link Utilization Threshold Limit/Value (Default value = 10): ')
            ifout_thresh_value = raw_input('Enter the Outgoing Traffic Link Utilization Threshold Limit/Value (Default value = 10): ')
            if ifin_thresh_value == '':
                ifin_thresh_value = '10'
            if ifout_thresh_value == '':
                ifout_thresh_value = '10'
            print '\n\nIncoming traffic link utilization threshold limit/value in % of total available link bandwidth: ', ifin_thresh_value
            print '\n\nOutgoing traffic link utilization threshold limit/value in % of total available link bandwidth: ', ifout_thresh_value
            ifin_thresh_data['metric'] = ifin_metric_name
            ifin_thresh_data['value'] = ifin_thresh_value
            print '\n\n\nAdding a sFlow threshold (i.e. in % of total available link bandwidth) to detect a high bandwidth link utilization by the incoming traffic of an interface...\n'
            sflow.sflow_thresh_add(url_sflow, sflow_header, ifin_thresh_name, ifin_thresh_data)
            ifout_thresh_data['metric'] = ifout_metric_name
            ifout_thresh_data['value'] = ifout_thresh_value
            print '\n\n\nAdding a sFlow threshold (i.e. in % of total available link bandwidth) to detect a high bandwidth link utilization by the outgoing traffic of an interface...\n'
            sflow.sflow_thresh_add(url_sflow, sflow_header, ifout_thresh_name, ifout_thresh_data)
        except KeyboardInterrupt:
            print '\n\n\nSaving all the changes...'
            print '\nYou are now exiting the NaaS sFlow based core interface monitoring application...\n'
            sys.exit(0)
        except:
            print '\n\n\n***ERROR***: Found an exception, restart the application in order to check and debug the errors/exceptions...\n'
            print '\n\n\nSaving all the changes...'
            print '\nYou are now exiting the NaaS sFlow based core interface monitoring application...\n'
            sys.exit(0)

    # For retrieving high bandwidth utilization events as per the above initialized
    # simple core sflow interface monitoring application
    def int_high_uti_events(self):
        try:
            high_uti_events = {}
            events_filter = ''
            eventID = -1
            print '\n\n\nQuerying for High Bandwidth Link Utilization Events...\n'
            events_filter = 'eventID='
            events_filter += str(eventID)
            response = sflow.sflow_events(url_sflow, events_filter)
            if response.status_code == 200:
                events = response.json()
                if len(events) == 0:
                    print '\n\n***No Detected/Triggered High Bandwidth Link Utilization Events***\n\n'
                else:
                    d = 0
                    for e in events:
                        if (ifin_metric_name == e['metric'] or ifout_metric_name == e['metric']):
                            d += 1
                            high_uti_info = {}
                            print '\n\n\n***Detected a High Bandwidth Utilization Event***\n\n'
                            print 'High Bandwidth Utilization Event Number: ', e['eventID']
                            print 'High Bandwidth Utilization Event Type: ', e['metric']
                            print 'High Bandwidth Utilization Event Threshold Value in bytes per second: ', e['threshold']
                            print 'High Bandwidth Utilization Event Actual Value in % of Total Available Link Bandwidth: ', e['value']
                            print 'Checking the Status of the Detected High Bandwidth Utilization Event...'
                            agent = e['agent']
                            interface = e['dataSource']
                            metric = e['metric']
                            response = sflow.sflow_interface_metric_value(url_sflow, agent, interface, metric)
                            metric_val = response.json()
                            print '\n\n\n'
                            print 'Large Flow Event Current Status: '
                            print '\n\n\n'
                            if len(metric_val) > 0:
                                status = 'inactive'
                                for key in metric_val[0]:
                                    if key == 'metricValue':
                                        if metric_val[0]['metricValue'] >= int(e['threshold']):
                                            status = 'active'
                                            print '***The Detected High Bandwidth Utilization Event is Active***\n\n'
                                            print 'Agent :', e['agent']
                                            print 'Interface ID :', e['dataSource']
                                            print e['metric'], ': ', e['value']
                                            print 'Time Stamp: ', e['timestamp']
                                            high_uti_info = {
                                                'Agent': e['agent'],
                                                'Interface ID': e['dataSource'],
                                                'Metric': e['metric'],
                                                'Value': e['value'],
                                                'Time Stamp': e['timestamp']}
                                            m = 0
                                            for hui in high_uti_events:
                                                if (high_uti_events[hui]['Agent'] == e['agent'] and high_uti_events[hui]['Interface ID'] == e['dataSource'] and high_uti_events[hui]['Metric'] == e['metric']):
                                                    m += 1
                                            if m == 0:
                                                print '***Note: This Active High Bandwidth Utilization Event is the Latest/Unique Instance***\n\n'
                                                high_uti_events[e['eventID']] = high_uti_info
                                            else:
                                                print '\n\n***Note: This Active High Bandwidth Utilization Event is Not the Latest/Unique Instance***\n\n'
                                if status == 'inactive':
                                    print '\n\n***The Detected High Bandwidth Utilization Event is no Longer Active***\n\n'
                    if d == 0:
                        print '\n\n***No Detected/Triggered High Bandwidth Link Utilization Events***\n\n'
            return high_uti_events
        except KeyboardInterrupt:
            print '\n\n\nSaving all the changes...'
            print '\nYou are now exiting the NaaS sFlow based core interface monitoring application...\n'
            sys.exit(0)
        except:
            print '\n\n\n***ERROR***: Found an exception, restart the application in order to check and debug the errors/exceptions...\n'
            print '\n\n\nSaving all the changes...'
            print '\nYou are now exiting the NaaS sFlow based edge flow monitoring application...\n'
            sys.exit(0)

    # For retrieving link/interface failure events in the network core
    def int_fail_events(self):
        try:
            int_fail_events = {}
            print '\n\n\nQuerying for link/interface failure events...\n'
            agent = 'ALL'
            metric = 'ifoperstatus'
            response = sflow.sflow_metric_values(url_sflow, agent, metric)
            if response.status_code == 200:
                events = response.json()
                if len(events) == 0:
                    print '\n\n***No Detected/Triggered Link/Interface Failure Events***\n\n'
                i = 0
                d = 0
                for e in events:
                    if (e['metricValue'] == 'down'):
                        d += 1
                        i += 1
                        event_id = str(i)
                        int_fail_info = {}
                        print '\n\n\n***Detected a Link/Interface Failure Event***...\n\n'
                        print 'Link/Interface Failure Event Number: ', event_id
                        print 'Link/Interface Failure Type: ', e['metricName']
                        print 'Link/Interface Failure Status: ', e['metricValue']
                        print '\n\n\n'
                        print '\n\n\nPrinting the Link/Interface Failure Event information/details...\n\n'
                        print 'Agent :', e['agent']
                        print 'Interface ID :', e['dataSource']
                        print e['metricName'], ': ', e['metricValue']
                        print 'Last Updated: ', e['lastUpdate']
                        int_fail_info = {
                            'Agent': e['agent'],
                            'Interface ID': e['dataSource'],
                            'Metric': e['metricName'],
                            'Value': e['metricValue'],
                            'Last Updated': e['lastUpdate']}
                        int_fail_events[event_id] = int_fail_info
                if d == 0:
                    print '\n\n***No Detected/Triggered Link/Interface Failure Events***\n\n'
            return int_fail_events
        except KeyboardInterrupt:
            print '\n\n\nSaving all the changes...'
            print '\nYou are now exiting the NaaS sFlow based core interface monitoring application...\n'
            sys.exit(0)
        except:
            print '\n\n\n***ERROR***: Found an exception, restart the application in order to check and debug the errors/exceptions...\n'
            print '\n\n\nSaving all the changes...'
            print '\nYou are now exiting the NaaS sFlow based core interface monitoring application...\n'
            sys.exit(0)
license: gpl-3.0

=== Record 10 ===
repo_name: Joel-U/sparkle
path:      sparkle/gui/stim/explore_stim_editor.py
copies:    2
size:      6620
content:
from explore_stim_editor_form import Ui_ExploreStimEditor
from sparkle.QtWrapper import QtCore, QtGui
from sparkle.gui.stim.abstract_stim_editor import AbstractStimulusWidget
from sparkle.gui.stim.components.qcomponents import wrapComponent
from sparkle.gui.stim.explore_component_editor import ExploreComponentEditor
from sparkle.stim.types import get_stimuli_models
from sparkle.stim.types.stimuli_classes import Silence


class ExploreStimulusEditor(AbstractStimulusWidget):
    """Editor for StimulusModel used in search mode"""
    def __init__(self, parent=None):
        super(ExploreStimulusEditor, self).__init__(parent)
        self.ui = Ui_ExploreStimEditor()
        self.ui.setupUi(self)

        self.trackBtnGroup = QtGui.QButtonGroup()

        self.ui.addBtn.clicked.connect(self.addComponentEditor)
        self.ui.exNrepsSpnbx.valueChanged.connect(self.setReps)
        self.ui.exNrepsSpnbx.setKeyboardTracking(False)

        self.funit_fields.append(self.ui.aofsSpnbx)

        self.buttons = []
        self.stimuli_types = get_stimuli_models()
        self._allComponents = []
        self._model = None

    def setModel(self, model):
        "Sets the StimulusModel for this editor"
        self._model = model
        self.ui.aofsSpnbx.setValue(model.samplerate())
        # must be at least one component & delay
        # for row in range(1, model.rowCount()):
        #     delay = model.component(row, 0)
        #     self.ui.componentEditor

    def model(self):
        return self._model

    def setStimIndex(self, row, stimIndex):
        "Change out the component type in row to the one indexed by stimIndex"
        newcomp = self._allComponents[row][stimIndex]
        self._model.removeComponent(row, 1)
        self._model.insertComponent(newcomp, row, 1)

    def setDelay(self, row, delay):
        self._model.component(row, 0).setDuration(delay)
        self.valueChanged.emit()

    def repCount(self):
        return self.ui.exNrepsSpnbx.value()

    def setReps(self, reps):
        if self._model is not None:
            self._model.setRepCount(reps)
        self.ui.exNrepsSpnbx.setValue(reps)
        self.valueChanged.emit()

    def addComponentEditor(self):
        """Adds a new component to the model, and an editor for this component to this editor"""
        row = self._model.rowCount()

        comp_stack_editor = ExploreComponentEditor()
        self.ui.trackStack.addWidget(comp_stack_editor)

        idx_button = IndexButton(row)
        idx_button.pickMe.connect(self.ui.trackStack.setCurrentIndex)
        self.trackBtnGroup.addButton(idx_button)
        self.ui.trackBtnLayout.addWidget(idx_button)

        self.ui.trackStack.setCurrentIndex(row)

        comp_stack_editor.closePlease.connect(self.removeComponentEditor)

        delay = Silence()
        comp_stack_editor.delaySpnbx.setValue(delay.duration())
        self._model.insertComponent(delay, row, 0)

        self._allComponents.append([x() for x in self.stimuli_types if x.explore])
        for stim in self._allComponents[row]:
            editor = wrapComponent(stim).showEditor()
            comp_stack_editor.addWidget(editor, stim.name)

        exvocal = comp_stack_editor.widgetForName("Vocalization")
        if exvocal is not None:
            exvocal.filelistView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)

        initcomp = self._allComponents[row][0]
        self._model.insertComponent(initcomp, row, 1)

        self.buttons.append(idx_button)

        comp_stack_editor.exploreStimTypeCmbbx.currentIndexChanged.connect(lambda x: self.setStimIndex(row, x))
        comp_stack_editor.delaySpnbx.valueChanged.connect(lambda x: self.setDelay(row, x))
        comp_stack_editor.valueChanged.connect(self.valueChanged.emit)
        return comp_stack_editor

    def removeComponentEditor(self, widget):
        ntracks = self.ui.trackStack.count()
        index = self.ui.trackStack.indexOf(widget)
        self.ui.trackStack.removeWidget(widget)
        self._model.removeRow(index)
        # remove index button and adjust other numbers
        for idx in range(index + 1, ntracks):
            self.buttons[idx].setNum(idx - 1)
        self.ui.trackBtnLayout.removeWidget(self.buttons[index])
        btn = self.buttons.pop(index)
        btn.setVisible(False)
        btn.deleteLater()
        self.valueChanged.emit()
        if len(self.buttons) > 0:
            self.buttons[0].setChecked(True)

    def saveToObject(self):
        for icomp in range(self.ui.trackStack.count()):
            self.ui.trackStack.widget(icomp).currentWidget().saveToObject()
        self.ui.aofsSpnbx.setValue(self._model.samplerate())

    def samplerate(self):
        return self._model.samplerate()

    def verify(self, winsz):
        # have the stim check itself and report
        return self._model.verify(winsz)

    def allComponentWidgets(self):
        w = []
        for itrack in range(self.ui.trackStack.count()):
            w.extend(self.ui.trackStack.widget(itrack).widgets())
        return w

    def trackEditorWidgets(self):
        w = []
        for itrack in range(self.ui.trackStack.count()):
            w.append(self.ui.trackStack.widget(itrack))
        return w

    def saveTemplate(self):
        template = []
        for itrack in range(self.ui.trackStack.count()):
            comp_stack_editor = self.ui.trackStack.widget(itrack)
            template.append(comp_stack_editor.saveTemplate())
        return template

    def loadTemplate(self, template):
        # wipe any current editors and add according to template
        for i in range(self.ui.trackStack.count()):
            self.removeComponentEditor(self.ui.trackStack.widget(i))
        for track in template:
            comp_stack_editor = self.addComponentEditor()
            comp_stack_editor.loadTemplate(track)

    def closeEvent(self, event):
        pass


class IndexButton(QtGui.QPushButton):
    """Custom button for explore editor to toggle between tracks"""
    pickMe = QtCore.Signal(int)

    def __init__(self, num):
        super(IndexButton, self).__init__("Track {}".format(num + 1))
        self.num = num
        self.setCheckable(True)
        self.setChecked(True)
        self.toggled.connect(self.toggletoggle)

    def toggletoggle(self, checked):
        if checked:
            self.pickMe.emit(self.num)

    def setNum(self, num):
        self.num = num
        self.setText("Track {}".format(num + 1))


if __name__ == '__main__':
    app = QtGui.QApplication([])
    editor = ExploreStimulusEditor()
    editor.show()
    app.exec_()
gpl-3.0
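A minimal usage sketch for the ExploreStimulusEditor above, under stated assumptions: it presumes a StimulusModel importable from sparkle.stim.stimulus_model (that path is not confirmed by this file) and a running QApplication.

from sparkle.QtWrapper import QtGui
from sparkle.stim.stimulus_model import StimulusModel  # assumed import path, not shown in the file above

app = QtGui.QApplication([])
model = StimulusModel()        # hypothetical empty model instance
editor = ExploreStimulusEditor()
editor.setModel(model)         # pushes the model samplerate into the spinbox
editor.addComponentEditor()    # one track: a Silence delay plus the first explore stimulus
editor.setReps(5)              # forwarded to model.setRepCount()
editor.show()
app.exec_()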
salad/salad
salad/terrains/browser.py
2
1725
from lettuce import before, world, after from splinter.browser import Browser from salad.logger import logger @before.all def setup_master_browser(): try: browser = world.drivers[0] remote_url = world.remote_url except (AttributeError, IndexError): browser = 'firefox' remote_url = None try: capabilities = world.remote_capabilities except AttributeError: capabilities = {} world.master_browser = setup_browser(browser, remote_url, **capabilities) world.browser = world.master_browser def setup_browser(browser, url=None, **capabilities): logger.info("Setting up browser %s..." % browser) try: if url: logger.warn(capabilities) browser = Browser('remote', url=url, browser=browser, **capabilities) else: browser = Browser(browser) except Exception as e: logger.warn("Error starting up %s: %s" % (browser, e)) raise return browser @before.each_scenario def clear_alternative_browsers(step): world.browser = world.master_browser world.browsers = [] @after.each_scenario def reset_to_parent_frame(step): if hasattr(world, "parent_browser"): world.browser = world.parent_browser @after.each_scenario def restore_browser(step): for browser in world.browsers: teardown_browser(browser) @after.all def teardown_master_browser(total): teardown_browser(world.master_browser) def teardown_browser(browser): name = browser.driver_name logger.info("Tearing down browser %s..." % name) try: browser.quit() except Exception as e: logger.warn("Error tearing down %s: %s" % (name, e))
bsd-3-clause
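A hedged example of lettuce steps built on the terrain above; the step patterns and page text are illustrative, not part of salad itself. world.browser is the splinter Browser the terrain sets up.

from lettuce import step, world

@step(r'I visit "(.*)"')
def i_visit(step, url):
    world.browser.visit(url)                    # splinter Browser API

@step(r'I should see "(.*)"')
def i_should_see(step, text):
    assert world.browser.is_text_present(text)  # splinter text lookup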
dkentw/robotframework
utest/writer/test_formatters.py
28
7846
import unittest from robot.parsing.model import TestCaseTable, TestCaseFileSettingTable from robot.writer.formatters import TxtFormatter, TsvFormatter, PipeFormatter from robot.writer.htmlformatter import HtmlFormatter, HtmlCell from robot.utils.asserts import assert_equals, assert_true class TestTxtFormatter(unittest.TestCase): def setUp(self): self._formatter = TxtFormatter(6) def test_escaping_whitespace(self): assert_equals(self._formatter._escape(['so me']), ['so \\ me']) assert_equals(self._formatter._escape([' ']), [' \\ \\ ']) def test_replacing_newlines(self): assert_equals(self._formatter._escape(['so\nme']), ['so me']) def test_escaping_consecutive_spaces(self): settings = TestCaseFileSettingTable(None) settings.force_tags.value = ['f 1'] assert_equals(list(self._formatter.format_table(settings))[0], ['Force Tags ', 'f \\ 1']) def test_escaping_empty_intermediate_cells(self): settings = TestCaseFileSettingTable(None) settings.suite_setup.name = 'Run' settings.suite_setup.args = ['', 'baby'] assert_equals(list(self._formatter.format_table(settings))[0][1:], ['Run', '\\', 'baby']) def test_aligned_header_cells_are_not_escaped(self): table = TestCaseTable(None) table.set_header(['test case', 'cus tom', 'header']) table.add('Test case with a long name').add_step(['keyword here', 'args']) assert_equals(self._formatter.format_header(table), ['*** test case *** ', 'cus \\ tom ', 'header']) class TestPipeFormatter(unittest.TestCase): def test_escaping_pipes(self): formatter = PipeFormatter(7) assert_equals(formatter._escape(['so | me']), ['so \\| me']) assert_equals(formatter._escape(['|so|me|']), ['|so|me|']) assert_equals(formatter._escape(['so |']), ['so \\|']) assert_equals(formatter._escape(['| so']), ['\\| so']) def test_empty_cell(self): settings = TestCaseFileSettingTable(None) settings.force_tags.value = ['f1', '', 'f3'] assert_equals(list(PipeFormatter(4).format_table(settings))[0], ['Force Tags ', 'f1', ' ', 'f3']) class TestTsvFormatter(unittest.TestCase): def setUp(self): self._formatter = TsvFormatter(6) def test_replacing_newlines(self): assert_equals(self._formatter._format_row(['so\nme'])[0], 'so me') def test_escaping_tabs(self): assert_equals(self._formatter._format_row(['so\tme'])[0], 'so\\tme') def test_escaping_consecutive_spaces(self): assert_equals(self._formatter._format_row(['so me'])[0], 'so \ me') class TestHtmlFormatter(unittest.TestCase): def setUp(self): self._formatter = HtmlFormatter(5) def test_setting_table_doc(self): table = TestCaseFileSettingTable(None) table.set_header('Settings') table.doc.value = 'Some documentation' formatted = list(self._formatter.format_table(table)) assert_equals(self._rows_to_text(formatted), [['Documentation', 'Some documentation']]) assert_equals(formatted[0][1].attributes, {'colspan': '4', 'class': 'colspan4'}) def test_test_name_row_formatting(self): table = self._create_test_table() test = table.add('A Test') test.tags.value = ['t1', 't2', 't3', 't4'] formatted = self._rows(table) assert_equals(len(formatted), 2, formatted) assert_equals(formatted[0], ['<a name="test_A Test">A Test</a>', '[Tags]', 't1', 't2', 't3']) assert_equals(formatted[1], ['', '...', 't4', '', '']) def test_test_documentation_colspan(self): table = self._create_test_table() test = table.add('Test') test.doc.value = 'Some doc' assert_equals(self._rows(table)[0], ['<a name="test_Test">Test</a>', '[Documentation]', 'Some doc']) assert_equals(list(self._formatter.format_table(table))[0][2].attributes, {'colspan': '3', 'class': 'colspan3'}) def 
test_test_documentation_with_comment(self): table = self._create_test_table() test = table.add('Test') test.doc.value = 'Some doc' test.doc._set_comment('a comment') assert_equals(self._rows(table)[0], ['<a name="test_Test">Test</a>', '[Documentation]', 'Some doc', '# a comment', '']) assert_equals(list(self._formatter.format_table(table))[0][2].attributes, {}) def test_testcase_table_custom_headers(self): self._check_header_length([], 1) self._check_header_length(['a', 'b', 'ceee dee'], 4) self._check_header_length(['akjsakjskjd kjsda kdjs'], 2) self._check_header_length([str(i) for i in range(1000)], 1001) def test_header_width_matches_widest_row(self): table = self._create_test_table(['h', 'e']) test = table.add('Some test') test.add_step(['kw', 'arg1', 'arg2', 'arg3']) assert_equals(len(self._formatter.format_header(table)), 5) def _check_header_length(self, headers, expected_length): table = self._create_test_table(headers) assert_equals(len(self._formatter.format_header(table)), expected_length) def test_testcase_table_header_colspan(self): self._assert_header_colspan([], 5) self._assert_header_colspan(['a', 'b'], 1) def _assert_header_colspan(self, header, expected_colspan): table = self._create_test_table(header) row = self._formatter.format_header(table) assert_equals(row[0].attributes['colspan'], str(expected_colspan)) def test_escaping_consecutive_spaces(self): assert_equals(self._formatter._format_row(['so me'])[0].content, 'so \ me') def test_number_of_columns_is_max_of_header_and_row_widths(self): table = self._create_test_table(['a', 'b']) test = table.add('Test') test.add_step(['Log Many', 'kukka', 'nen']) self._check_row_lengths(table, 4) table = self._create_test_table(['a', 'b', 'c']) test = table.add('Test') test.add_step(['No Operation']) self._check_row_lengths(table, 4) def _check_row_lengths(self, table, expected_length): rows = list(self._formatter.format_table(table)) assert_true(len(rows) > 0) for row in rows: assert_equals(len(row), expected_length) def _rows(self, table): return self._rows_to_text(self._formatter.format_table(table)) def _rows_to_text(self, rows): return [[cell.content for cell in row] for row in rows] def _create_test_table(self, additional_headers=()): table = TestCaseTable(None) table.set_header(['Test Cases'] + list(additional_headers)) return table def test_add_br_to_newlines(self): original = """This is real new line: here we have a single backslash n: \\n and here backslash + newline: \\\n and here bslash blash n \\\\n and bslash x 3 n \\\\\\n """ expected = 'This is real new line:\n here we have a single backslash n: \\n<br>\nand here backslash + newline: \\\n and here bslash blash n \\\\n and bslash x 3 n \\\\\\n<br>\n' assert_equals(HtmlCell(original).content, expected) def test_br_to_newlines_without_whitespace(self): original = r"Here there is no space after backslash-n: '\n'" assert_equals(HtmlCell(original).content, original.replace('\\n', '\\n<br>\n')) def test_no_br_to_double_backslashes(self): original = r"Here there is double backslash-n: \\n " assert_equals(HtmlCell(original).content, original) if __name__ == "__main__": unittest.main()
apache-2.0
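A short sketch of the escaping behavior the tests above exercise; it calls the same private helper the tests call, and the expected value mirrors the assertion in test_replacing_newlines.

from robot.writer.formatters import TxtFormatter

formatter = TxtFormatter(6)            # same column count as the test setUp above
print(formatter._escape(['so\nme']))   # newline collapsed to a space: ['so me']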
sharad/calibre
src/calibre/web/jsbrowser/browser.py
1
27404
#!/usr/bin/env python # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os, pprint, time, uuid, re from cookielib import Cookie from threading import current_thread from PyQt5.QtWebKit import QWebSettings, QWebElement from PyQt5.QtWebKitWidgets import QWebPage, QWebView from PyQt5.Qt import ( QObject, QNetworkAccessManager, QNetworkDiskCache, QCoreApplication, QNetworkProxy, QNetworkProxyFactory, QEventLoop, QUrl, pyqtSignal, QDialog, QVBoxLayout, QSize, QNetworkCookieJar, Qt, pyqtSlot, QPixmap) from calibre import USER_AGENT, prints, get_proxies, get_proxy_info, prepare_string_for_xml from calibre.constants import ispy3, cache_dir from calibre.ptempfile import PersistentTemporaryDirectory from calibre.utils.logging import ThreadSafeLog from calibre.gui2 import must_use_qt from calibre.web.jsbrowser.forms import FormsMixin, default_timeout class Timeout(Exception): pass class LoadError(Exception): pass class ElementNotFound(ValueError): pass class NotAFile(ValueError): pass class WebPage(QWebPage): # {{{ def __init__(self, log, confirm_callback=None, prompt_callback=None, user_agent=USER_AGENT, enable_developer_tools=False, parent=None): QWebPage.__init__(self, parent) self.log = log self.user_agent = user_agent if user_agent else USER_AGENT self.confirm_callback = confirm_callback self.prompt_callback = prompt_callback self.setForwardUnsupportedContent(True) self.unsupportedContent.connect(self.on_unsupported_content) settings = self.settings() if enable_developer_tools: settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True) QWebSettings.enablePersistentStorage(os.path.join(cache_dir(), 'webkit-persistence')) QWebSettings.setMaximumPagesInCache(0) self.bridge_name = 'b' + uuid.uuid4().get_hex() self.mainFrame().javaScriptWindowObjectCleared.connect( self.add_window_objects) self.dom_loaded = False def add_window_objects(self): self.dom_loaded = False mf = self.mainFrame() mf.addToJavaScriptWindowObject(self.bridge_name, self) mf.evaluateJavaScript('document.addEventListener( "DOMContentLoaded", %s.content_loaded, false )' % self.bridge_name) def load_url(self, url): self.dom_loaded = False url = QUrl(url) self.mainFrame().load(url) self.ready_state # Without this, DOMContentLoaded does not fire for file:// URLs @pyqtSlot() def content_loaded(self): self.dom_loaded = True def userAgentForUrl(self, url): return self.user_agent def javaScriptAlert(self, frame, msg): if self.view() is not None: return QWebPage.javaScriptAlert(self, frame, msg) prints('JSBrowser alert():', unicode(msg)) def javaScriptConfirm(self, frame, msg): if self.view() is not None: return QWebPage.javaScriptConfirm(self, frame, msg) if self.confirm_callback is not None: return self.confirm_callback(unicode(msg)) return True def javaScriptConsoleMessage(self, msg, lineno, source_id): prints('JSBrowser msg():%s:%s:'%(unicode(source_id), lineno), unicode(msg)) def javaScriptPrompt(self, frame, msg, default_value, *args): if self.view() is not None: return QWebPage.javaScriptPrompt(self, frame, msg, default_value, *args) if self.prompt_callback is None: return (False, default_value) if ispy3 else False value = self.prompt_callback(unicode(msg), unicode(default_value)) ok = value is not None if ispy3: return ok, value if ok: result = args[0] result.clear() result.append(value) return ok 
@pyqtSlot(result=bool) def shouldInterruptJavaScript(self): if self.view() is not None: return QWebPage.shouldInterruptJavaScript(self) return True def on_unsupported_content(self, reply): reply.abort() self.log.warn('Unsupported content, ignoring: %s'%reply.url()) @property def ready_state(self): return unicode(self.mainFrame().evaluateJavaScript('document.readyState') or '') @pyqtSlot(QPixmap) def transfer_image(self, img): self.saved_img = img def get_image(self, qwe_or_selector): qwe = qwe_or_selector if not isinstance(qwe, QWebElement): qwe = self.mainFrame().findFirstElement(qwe) if qwe.isNull(): raise ValueError('Failed to find element with selector: %r' % qwe_or_selector) self.saved_img = QPixmap() qwe.evaluateJavaScript('%s.transfer_image(this)' % self.bridge_name) try: return self.saved_img finally: del self.saved_img # }}} class ProxyFactory(QNetworkProxyFactory): # {{{ def __init__(self, log): QNetworkProxyFactory.__init__(self) proxies = get_proxies() self.proxies = {} for scheme, proxy_string in proxies.iteritems(): scheme = scheme.lower() info = get_proxy_info(scheme, proxy_string) if info is None: continue hn, port = info['hostname'], info['port'] if not hn or not port: continue log.debug('JSBrowser using proxy:', pprint.pformat(info)) pt = {'socks5':QNetworkProxy.Socks5Proxy}.get(scheme, QNetworkProxy.HttpProxy) proxy = QNetworkProxy(pt, hn, port) un, pw = info['username'], info['password'] if un: proxy.setUser(un) if pw: proxy.setPassword(pw) self.proxies[scheme] = proxy self.default_proxy = QNetworkProxy(QNetworkProxy.DefaultProxy) def queryProxy(self, query): scheme = unicode(query.protocolTag()).lower() return [self.proxies.get(scheme, self.default_proxy)] # }}} class NetworkAccessManager(QNetworkAccessManager): # {{{ OPERATION_NAMES = {getattr(QNetworkAccessManager, '%sOperation'%x) : x.upper() for x in ('Head', 'Get', 'Put', 'Post', 'Delete', 'Custom') } report_reply_signal = pyqtSignal(object) def __init__(self, log, disk_cache_size=50, parent=None): QNetworkAccessManager.__init__(self, parent) self.reply_count = 0 self.log = log if disk_cache_size > 0: self.cache = QNetworkDiskCache(self) self.cache.setCacheDirectory(PersistentTemporaryDirectory(prefix='disk_cache_')) self.cache.setMaximumCacheSize(int(disk_cache_size * 1024 * 1024)) self.setCache(self.cache) self.sslErrors.connect(self.on_ssl_errors) self.pf = ProxyFactory(log) self.setProxyFactory(self.pf) self.finished.connect(self.on_finished) self.cookie_jar = QNetworkCookieJar() self.setCookieJar(self.cookie_jar) self.main_thread = current_thread() self.report_reply_signal.connect(self.report_reply, type=Qt.QueuedConnection) def on_ssl_errors(self, reply, errors): reply.ignoreSslErrors() def createRequest(self, operation, request, data): url = unicode(request.url().toString(QUrl.None)) operation_name = self.OPERATION_NAMES[operation] debug = [] debug.append(('Request: %s %s' % (operation_name, url))) for h in request.rawHeaderList(): try: d = ' %s: %s' % (h, request.rawHeader(h)) except: d = ' %r: %r' % (h, request.rawHeader(h)) debug.append(d) if data is not None: raw = data.peek(1024) try: raw = raw.decode('utf-8') except: raw = repr(raw) debug.append(' Request data: %s'%raw) self.log.debug('\n'.join(debug)) return QNetworkAccessManager.createRequest(self, operation, request, data) def on_finished(self, reply): if current_thread() is not self.main_thread: # This method was called in a thread created by Qt. The python # interpreter may not be in a safe state, so dont do anything # more. 
This signal is queued which means the reply wont be # reported unless someone spins the event loop. So far, I have only # seen this happen when doing Ctrl+C in the console. self.report_reply_signal.emit(reply) else: self.report_reply(reply) def report_reply(self, reply): reply_url = unicode(reply.url().toString(QUrl.None)) self.reply_count += 1 err = reply.error() if err: l = self.log.debug if err == reply.OperationCanceledError else self.log.warn l("Reply error: %s - %d (%s)" % (reply_url, err, unicode(reply.errorString()))) else: debug = [] debug.append("Reply successful: %s" % reply_url) for h in reply.rawHeaderList(): try: d = ' %s: %s' % (h, reply.rawHeader(h)) except: d = ' %r: %r' % (h, reply.rawHeader(h)) debug.append(d) self.log.debug('\n'.join(debug)) def py_cookies(self): for c in self.cookie_jar.allCookies(): name, value = map(bytes, (c.name(), c.value())) domain = bytes(c.domain()) initial_dot = domain_specified = domain.startswith(b'.') secure = bool(c.isSecure()) path = unicode(c.path()).strip().encode('utf-8') expires = c.expirationDate() is_session_cookie = False if expires.isValid(): expires = expires.toTime_t() else: expires = None is_session_cookie = True path_specified = True if not path: path = b'/' path_specified = False c = Cookie(0, # version name, value, None, # port False, # port specified domain, domain_specified, initial_dot, path, path_specified, secure, expires, is_session_cookie, None, # Comment None, # Comment URL {} # rest ) yield c # }}} class LoadWatcher(QObject): # {{{ def __init__(self, page, parent=None): QObject.__init__(self, parent) self.is_loading = True self.loaded_ok = None page.loadFinished.connect(self) self.page = page def __call__(self, ok): self.loaded_ok = ok self.is_loading = False self.page.loadFinished.disconnect(self) self.page = None # }}} class BrowserView(QDialog): # {{{ def __init__(self, page, parent=None): QDialog.__init__(self, parent) self.l = l = QVBoxLayout(self) self.setLayout(l) self.webview = QWebView(self) l.addWidget(self.webview) self.resize(QSize(1024, 768)) self.webview.setPage(page) # }}} class Browser(QObject, FormsMixin): ''' Browser (WebKit with no GUI). This browser is NOT thread safe. Use it in a single thread only! If you need to run downloads in parallel threads, use multiple browsers (after copying the cookies). ''' def __init__(self, # Logging. If None, uses a default log, which does not output # debugging info log=None, # Receives a string and returns True/False. By default, returns # True for all strings confirm_callback=None, # Prompt callback. Receives a msg string and a default value # string. Should return the user input value or None if the user # canceled the prompt. By default returns None. prompt_callback=None, # User agent to be used user_agent=USER_AGENT, # The size (in MB) of the on disk cache. Note that because the disk # cache cannot be shared between different instances, we currently # use a temporary dir for the cache, which is deleted on # program exit. Set to zero to disable cache. 
disk_cache_size=50, # Enable Inspect element functionality enable_developer_tools=False, # Verbosity verbosity=0, # The default timeout (in seconds) default_timeout=30 ): must_use_qt() QObject.__init__(self) FormsMixin.__init__(self) if log is None: log = ThreadSafeLog() if verbosity: log.filter_level = log.DEBUG self.log = log self.default_timeout = default_timeout self.page = WebPage(log, confirm_callback=confirm_callback, prompt_callback=prompt_callback, user_agent=user_agent, enable_developer_tools=enable_developer_tools, parent=self) self.nam = NetworkAccessManager(log, disk_cache_size=disk_cache_size, parent=self) self.page.setNetworkAccessManager(self.nam) @property def user_agent(self): return self.page.user_agent def _wait_for_load(self, timeout, url=None): timeout = self.default_timeout if timeout is default_timeout else timeout loop = QEventLoop(self) start_time = time.time() end_time = start_time + timeout lw = LoadWatcher(self.page, parent=self) while lw.is_loading and end_time > time.time(): if not loop.processEvents(): time.sleep(0.01) if lw.is_loading: raise Timeout('Loading of %r took longer than %d seconds'%( url, timeout)) return lw.loaded_ok def _wait_for_replies(self, reply_count, timeout): final_time = time.time() + (self.default_timeout if timeout is default_timeout else timeout) loop = QEventLoop(self) while (time.time() < final_time and self.nam.reply_count < reply_count): loop.processEvents() time.sleep(0.1) if self.nam.reply_count < reply_count: raise Timeout('Waiting for replies took longer than %d seconds' % timeout) def run_for_a_time(self, timeout): final_time = time.time() + timeout loop = QEventLoop(self) while (time.time() < final_time): if not loop.processEvents(): time.sleep(0.1) def wait_for_element(self, selector, timeout=default_timeout): timeout = self.default_timeout if timeout is default_timeout else timeout start_time = time.time() while self.css_select(selector) is None: self.run_for_a_time(0.1) if time.time() - start_time > timeout: raise Timeout('DOM failed to load in %.1g seconds' % timeout) return self.css_select(selector) def visit(self, url, timeout=default_timeout): ''' Open the page specified in URL and wait for it to complete loading. Note that when this method returns, there may still be javascript that needs to execute (this method returns when the loadFinished() signal is called on QWebPage). This method will raise a Timeout exception if loading takes more than timeout seconds. Returns True if loading was successful, False otherwise. ''' self.current_form = None self.page.load_url(url) return self._wait_for_load(timeout, url) def back(self, wait_for_load=True, timeout=default_timeout): ''' Like clicking the back button in the browser. Waits for loading to complete. This method will raise a Timeout exception if loading takes more than timeout seconds. Returns True if loading was successful, False otherwise. 
''' self.page.triggerAction(self.page.Back) if wait_for_load: return self._wait_for_load(timeout) def stop(self): 'Stop loading of current page' self.page.triggerAction(self.page.Stop) def stop_scheduled_refresh(self): 'Stop any scheduled page refresh/reloads' self.page.triggerAction(self.page.StopScheduledPageRefresh) def reload(self, bypass_cache=False): action = self.page.ReloadAndBypassCache if bypass_cache else self.page.Reload self.page.triggerAction(action) @property def dom_ready(self): return self.page.dom_loaded def wait_till_dom_ready(self, timeout=default_timeout, url=None): timeout = self.default_timeout if timeout is default_timeout else timeout start_time = time.time() while not self.dom_ready: if time.time() - start_time > timeout: raise Timeout('Loading of %r took longer than %d seconds'%( url, timeout)) self.run_for_a_time(0.1) def start_load(self, url, timeout=default_timeout, selector=None): ''' Start the loading of the page at url and return once the DOM is ready, sub-resources such as scripts/stylesheets/images/etc. may not have all loaded. ''' self.current_form = None self.page.load_url(url) if selector is not None: self.wait_for_element(selector, timeout=timeout) else: self.wait_till_dom_ready(timeout=timeout, url=url) def click(self, qwe_or_selector, wait_for_load=True, ajax_replies=0, timeout=default_timeout): ''' Click the :class:`QWebElement` pointed to by qwe_or_selector. :param wait_for_load: If you know that the click is going to cause a new page to be loaded, set this to True to have the method block until the new page is loaded :para ajax_replies: Number of replies to wait for after clicking a link that triggers some AJAX interaction ''' initial_count = self.nam.reply_count qwe = qwe_or_selector if not isinstance(qwe, QWebElement): qwe = self.css_select(qwe) if qwe is None: raise ElementNotFound('Failed to find element with selector: %r' % qwe_or_selector) js = ''' var e = document.createEvent('MouseEvents'); e.initEvent( 'click', true, true ); this.dispatchEvent(e); ''' qwe.evaluateJavaScript(js) if ajax_replies > 0: reply_count = initial_count + ajax_replies self._wait_for_replies(reply_count, timeout) elif wait_for_load and not self._wait_for_load(timeout): raise LoadError('Clicking resulted in a failed load') def click_text_link(self, text_or_regex, selector='a[href]', wait_for_load=True, ajax_replies=0, timeout=default_timeout): target = None for qwe in self.page.mainFrame().findAllElements(selector): src = unicode(qwe.toPlainText()) if hasattr(text_or_regex, 'match') and text_or_regex.search(src): target = qwe break if src.lower() == text_or_regex.lower(): target = qwe break if target is None: raise ElementNotFound('No element matching %r with text %s found'%( selector, text_or_regex)) return self.click(target, wait_for_load=wait_for_load, ajax_replies=ajax_replies, timeout=timeout) def css_select(self, selector, all=False): if all: return tuple(self.page.mainFrame().findAllElements(selector).toList()) ans = self.page.mainFrame().findFirstElement(selector) if ans.isNull(): ans = None return ans def get_image(self, qwe_or_selector): ''' Return the image identified by qwe_or_selector as a QPixmap. If no such image exists, the returned pixmap will be null. 
''' return self.page.get_image(qwe_or_selector) def get_cached(self, url): iod = self.nam.cache.data(QUrl(url)) if iod is not None: try: return bytes(bytearray(iod.readAll())) finally: # Ensure the IODevice is closed right away, so that the # underlying file can be deleted if the space is needed, # otherwise on windows the file stays locked iod.close() del iod def wait_for_resources(self, urls, timeout=default_timeout): timeout = self.default_timeout if timeout is default_timeout else timeout start_time = time.time() ans = {} urls = set(urls) def get_resources(): for url in tuple(urls): raw = self.get_cached(url) if raw is not None: ans[url] = raw urls.discard(url) while urls and time.time() - start_time < timeout and not self.load_completed: get_resources() if urls: self.run_for_a_time(0.1) if urls: get_resources() return ans @property def load_completed(self): return self.page.ready_state in {'complete', 'completed'} def get_resource(self, url, rtype='img', use_cache=True, timeout=default_timeout): ''' Download a resource (image/stylesheet/script). The resource is downloaded by visiting an simple HTML page that contains only that resource. The resource is then returned from the cache (therefore, to use this method you must not disable the cache). If use_cache is True then the cache is queried before loading the resource. This can result in a stale object if the resource has changed on the server, however, it is a big performance boost in the common case, by avoiding a roundtrip to the server. The resource is returned as a bytestring or None if it could not be loaded. ''' if not hasattr(self.nam, 'cache'): raise RuntimeError('Cannot get resources when the cache is disabled') if use_cache: ans = self.get_cached(url) if ans is not None: return ans try: tag = { 'img': '<img src="%s">', 'link': '<link href="%s"></link>', 'script': '<script src="%s"></script>', }[rtype] % prepare_string_for_xml(url, attribute=True) except KeyError: raise ValueError('Unknown resource type: %s' % rtype) self.page.mainFrame().setHtml( '''<!DOCTYPE html><html><body><div>{0}</div></body></html>'''.format(tag)) self._wait_for_load(timeout) ans = self.get_cached(url) if ans is not None: return ans def download_file(self, url_or_selector_or_qwe, timeout=60): ''' Download unsupported content: i.e. files the browser cannot handle itself or files marked for saving as files by the website. Useful if you want to download something like an epub file after authentication. You can pass in either the url to the file to be downloaded, or a selector that points to an element to be clicked on the current page which will cause the file to be downloaded. 
''' ans = [False, None, []] loop = QEventLoop(self) start_time = time.time() end_time = start_time + timeout self.page.unsupportedContent.disconnect(self.page.on_unsupported_content) try: def download(reply): if ans[0]: reply.abort() # We only handle the first unsupported download return ans[0] = True while not reply.isFinished() and end_time > time.time(): if not loop.processEvents(): time.sleep(0.01) raw = bytes(bytearray(reply.readAll())) if raw: ans[-1].append(raw) if not reply.isFinished(): ans[1] = Timeout('Loading of %r took longer than %d seconds'%(url_or_selector_or_qwe, timeout)) ans[-1].append(bytes(bytearray(reply.readAll()))) self.page.unsupportedContent.connect(download) if hasattr(url_or_selector_or_qwe, 'rstrip') and re.match('[a-z]+://', url_or_selector_or_qwe) is not None: # We have a URL self.page.mainFrame().load(QUrl(url_or_selector_or_qwe)) else: self.click(url_or_selector_or_qwe, wait_for_load=False) lw = LoadWatcher(self.page) while not ans[0] and lw.is_loading and end_time > time.time(): if not loop.processEvents(): time.sleep(0.01) if not ans[0]: raise NotAFile('%r does not point to a downloadable file. You can only' ' use this method to download files that the browser cannot handle' ' natively. Or files that are marked with the ' ' content-disposition: attachment header' % url_or_selector_or_qwe) if ans[1] is not None: raise ans[1] return b''.join(ans[-1]) finally: self.page.unsupportedContent.disconnect() self.page.unsupportedContent.connect(self.page.on_unsupported_content) def show_browser(self): ''' Show the currently loaded web page in a window. Useful for debugging. ''' if getattr(QCoreApplication.instance(), 'headless', False): raise RuntimeError('Cannot show browser when running in a headless Qt application') view = BrowserView(self.page) view.exec_() @property def cookies(self): ''' Return all the cookies set currently as :class:`Cookie` objects. Returns expired cookies as well. ''' return list(self.nam.py_cookies()) @property def html(self): return unicode(self.page.mainFrame().toHtml()) def blank(self): try: self.visit('about:blank', timeout=0.01) except Timeout: pass def close(self): self.stop() self.blank() self.stop() self.nam.setCache(QNetworkDiskCache()) self.nam.cache = None self.nam = self.page = None def __enter__(self): pass def __exit__(self, *args): self.close()
gpl-3.0
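A hedged usage sketch for the headless Browser above. It only runs inside a Qt-enabled calibre environment (the constructor calls must_use_qt()), and the URL is illustrative.

from calibre.web.jsbrowser.browser import Browser

br = Browser(verbosity=1)
if br.visit('http://example.com', timeout=30):  # True when loadFinished reports success
    print(br.html[:200])                        # DOM serialized after javascript ran
    print(len(br.cookies), 'cookies')           # cookielib Cookie objects
br.close()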
BurnYourPc/PageRankImplementation
src/pagerankutils/utils.py
1
1817
from src.parse import htmlParser as parse from src.purifier import purifier as pure from src.uniq_purifier import unqpurifier as unqpure from src.createPdf import pdfcreator as PdfC import numpy as np from numpy import linalg as LA from copy import deepcopy def getAready(A, n): for j in range(n): nz = np.count_nonzero(A[:, j]) for i in range(n): if (A[i, j] != 0): A[i, j] = A[i, j] / nz return A def removeSpiderTraps(A, n): A = 0.8 * A A2 = np.zeros((n, n), dtype=float) A2 = (1 / n) + A2 A2 = 0.2 * A2 A = A + A2 return A def getRank(A, n): r = np.zeros((n, 1), dtype=float) for i in range(n): r[i] = 1 / n rnew = np.dot(A, r) while (LA.norm(rnew - r, 2) > 0.000001): r = rnew rnew = np.dot(A, rnew) return rnew def rankUrls(urls, n): # we do the same as in the siteGui function A = np.zeros((n, n), dtype=float) urls2 = urls col = 0 for link in urls: basetocheck = unqpure.getBaseToCheck(link) checkin, outlinks = unqpure.find_outlinks(link, False, basetocheck, 1) print(outlinks) if (checkin): A[col, col] = 1 counter = 0 for otherLink in urls2: if otherLink != link: counter = counter + 1 if otherLink in outlinks: A[counter, col] = 1 col = col + 1 print(A) A = getAready(A, n) print(A) A = removeSpiderTraps(A, n) r = getRank(A, n) print("The ranking of the sites is:") print(r) return r def is_ascii(s): # returns True when the url contains non-ascii characters (e.g. "–" rather than "-"), i.e. when it is *not* pure ascii despite the name isascii = len(s) != len(s.encode()) return isascii
gpl-3.0
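A worked example of the rank pipeline above on a hand-built 3-page link matrix; the matrix itself is illustrative (column j holds the outlinks of page j), and the helpers are the module-level functions defined above.

import numpy as np

n = 3
A = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 1., 0.]])
A = getAready(A, n)            # divide each nonzero column entry by that column's out-degree
A = removeSpiderTraps(A, n)    # damping: 0.8*A + 0.2*(1/n) uniform teleport
r = getRank(A, n)              # power iteration until the 2-norm change drops below 1e-6
print(r)                       # one importance score per page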
niker/elitekernel_oxp
tools/perf/scripts/python/syscall-counts.py
11176
1522
# system call counts # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40s %10d\n" % (syscall_name(id), val),
gpl-2.0
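How the script above is typically driven, per its own usage string; shown as comments since these are shell commands, and the sleep duration is illustrative.

# Record system-wide raw syscall entry events, then summarise them:
#   perf record -e raw_syscalls:sys_enter -a sleep 10
#   perf script -s syscall-counts.py
# Restrict the totals to a single command, per the [comm] usage above:
#   perf script -s syscall-counts.py bash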
cgrates/cgrplanner
languages/fr.py
140
7935
# coding: utf8 { '!langcode!': 'fr', '!langname!': 'Français', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN', '%s %%{row} deleted': '%s lignes supprimées', '%s %%{row} updated': '%s lignes mises à jour', '%s selected': '%s sélectionné', '%Y-%m-%d': '%Y-%m-%d', '%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', 'About': 'À propos', 'Access Control': "Contrôle d'accès", 'Administrative Interface': "Interface d'administration", 'Administrative interface': "Interface d'administration", 'Ajax Recipes': 'Recettes Ajax', 'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé", 'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?', 'Authentication': 'Authentification', 'Available Databases and Tables': 'Bases de données et tables disponibles', 'Buy this book': 'Acheter ce livre', 'cache': 'cache', 'Cache': 'Cache', 'Cache Keys': 'Clés de cache', 'Cannot be empty': 'Ne peut pas être vide', 'change password': 'changer le mot de passe', 'Check to delete': 'Cliquez pour supprimer', 'Check to delete:': 'Cliquez pour supprimer:', 'Clear CACHE?': 'Vider le CACHE?', 'Clear DISK': 'Vider le DISQUE', 'Clear RAM': 'Vider la RAM', 'Client IP': 'IP client', 'Community': 'Communauté', 'Components and Plugins': 'Composants et Plugins', 'Controller': 'Contrôleur', 'Copyright': 'Copyright', 'Created By': 'Créé par', 'Created On': 'Créé le', 'Current request': 'Demande actuelle', 'Current response': 'Réponse actuelle', 'Current session': 'Session en cours', 'customize me!': 'personnalisez-moi!', 'data uploaded': 'données téléchargées', 'Database': 'base de données', 'Database %s select': 'base de données %s selectionnée', 'db': 'bdd', 'DB Model': 'Modèle BDD', 'Delete:': 'Supprimer:', 'Demo': 'Démo', 'Deployment Recipes': 'Recettes de déploiement', 'Description': 'Description', 'design': 'design', 'DISK': 'DISQUE', 'Disk Cache Keys': 'Clés de cache du disque', 'Disk Cleared': 'Disque vidé', 'Documentation': 'Documentation', "Don't know what to do?": 'Vous ne savez pas quoi faire?', 'done!': 'fait!', 'Download': 'Téléchargement', 'E-mail': 'E-mail', 'Edit': 'Éditer', 'Edit current record': "Modifier l'enregistrement courant", 'edit profile': 'modifier le profil', 'Edit This App': 'Modifier cette application', 'Email and SMS': 'Email et SMS', 'enter an integer between %(min)g and %(max)g': 'entrez un entier entre %(min)g et %(max)g', 'Errors': 'Erreurs', 'export as csv file': 'exporter sous forme de fichier csv', 'FAQ': 'FAQ', 'First name': 'Prénom', 'Forms and Validators': 'Formulaires et Validateurs', 'Free Applications': 'Applications gratuites', 'Function disabled': 'Fonction désactivée', 'Group ID': 'Groupe ID', 'Groups': 'Groupes', 'Hello World': 'Bonjour le monde', 'Home': 'Accueil', 'How did you get here?': 'Comment êtes-vous arrivé ici?', 'import': 'import', 'Import/Export': 'Importer/Exporter', 'Index': 'Index', 'insert new': 'insérer un nouveau', 'insert new %s': 'insérer un nouveau %s', 'Internal State': 'État interne', 'Introduction': 'Introduction', 'Invalid email': 'E-mail invalide', 'Invalid Query': 'Requête Invalide', 'invalid request': 'requête invalide', 'Is Active': 'Est actif', 'Key': 'Clé', 'Last name': 'Nom', 'Layout': 'Mise en page', 'Layout Plugins': 'Plugins de mise en page', 'Layouts': 'Mises 
en page', 'Live chat': 'Chat en direct', 'Live Chat': 'Chat en direct', 'login': 'connectez-vous', 'Login': 'Connectez-vous', 'logout': 'déconnectez-vous', 'lost password': 'mot de passe perdu', 'Lost Password': 'Mot de passe perdu', 'Lost password?': 'Mot de passe perdu?', 'lost password?': 'mot de passe perdu?', 'Main Menu': 'Menu principal', 'Manage Cache': 'Gérer le Cache', 'Menu Model': 'Menu modèle', 'Modified By': 'Modifié par', 'Modified On': 'Modifié le', 'My Sites': 'Mes sites', 'Name': 'Nom', 'New Record': 'Nouvel enregistrement', 'new record inserted': 'nouvel enregistrement inséré', 'next 100 rows': '100 prochaines lignes', 'No databases in this application': "Cette application n'a pas de bases de données", 'Object or table name': 'Objet ou nom de table', 'Online examples': 'Exemples en ligne', 'or import from csv file': "ou importer d'un fichier CSV", 'Origin': 'Origine', 'Other Plugins': 'Autres Plugins', 'Other Recipes': 'Autres recettes', 'Overview': 'Présentation', 'Password': 'Mot de passe', "Password fields don't match": 'Les mots de passe ne correspondent pas', 'Plugins': 'Plugins', 'Powered by': 'Alimenté par', 'Preface': 'Préface', 'previous 100 rows': '100 lignes précédentes', 'Python': 'Python', 'Query:': 'Requête:', 'Quick Examples': 'Exemples Rapides', 'RAM': 'RAM', 'RAM Cache Keys': 'Clés de cache de la RAM', 'Ram Cleared': 'Ram vidée', 'Readme': 'Lisez-moi', 'Recipes': 'Recettes', 'Record': 'enregistrement', 'record does not exist': "l'archive n'existe pas", 'Record ID': "ID d'enregistrement", 'Record id': "id d'enregistrement", 'Register': "S'inscrire", 'register': "s'inscrire", 'Registration identifier': "Identifiant d'enregistrement", 'Registration key': "Clé d'enregistrement", 'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)', 'Request reset password': 'Demande de réinitialiser le mot clé', 'Reset Password key': 'Réinitialiser le mot clé', 'Resources': 'Ressources', 'Role': 'Rôle', 'Rows in Table': 'Lignes du tableau', 'Rows selected': 'Lignes sélectionnées', 'Semantic': 'Sémantique', 'Services': 'Services', 'Size of cache:': 'Taille du cache:', 'state': 'état', 'Statistics': 'Statistiques', 'Stylesheet': 'Feuille de style', 'submit': 'soumettre', 'Submit': 'Soumettre', 'Support': 'Support', 'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?', 'Table': 'tableau', 'Table name': 'Nom du tableau', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "requête" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.', 'The Core': 'Le noyau', 'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s', 'The Views': 'Les Vues', 'This App': 'Cette Appli', 'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage", 'Time in Cache (h:m:s)': 'Temps en Cache (h:m:s)', 'Timestamp': 'Horodatage', 'Twitter': 'Twitter', 'unable to parse csv file': "incapable d'analyser le fichier cvs", 'Update:': 'Mise à jour:', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) 
pour NOT afin de construire des requêtes plus complexes.', 'User %(id)s Logged-in': 'Utilisateur %(id)s connecté', 'User %(id)s Registered': 'Utilisateur %(id)s enregistré', 'User ID': 'ID utilisateur', 'User Voice': "Voix de l'utilisateur", 'Verify Password': 'Vérifiez le mot de passe', 'Videos': 'Vidéos', 'View': 'Présentation', 'Web2py': 'Web2py', 'Welcome': 'Bienvenue', 'Welcome %s': 'Bienvenue %s', 'Welcome to web2py': 'Bienvenue à web2py', 'Welcome to web2py!': 'Bienvenue à web2py!', 'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s', 'You are successfully running web2py': 'Vous exécutez avec succès web2py', 'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins", 'You visited the url %s': "Vous avez visité l'URL %s", }
gpl-3.0
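A simplified sketch of how web2py consumes a languages/*.py file like the one above: the file body is a plain dict from msgid to translation, and %-placeholders survive lookup. The tiny T() helper below is an illustration, not web2py's real gluon.languages machinery; the two entries are taken from the dict above.

fr = {'%s selected': '%s sélectionné', 'Welcome %s': 'Bienvenue %s'}

def T(msg, *args):
    s = fr.get(msg, msg)            # fall back to the msgid when untranslated
    return s % args if args else s

print(T('Welcome %s', 'Marie'))     # Bienvenue Marie
print(T('%s selected', 3))          # 3 sélectionné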
ZhangXinNan/tensorflow
tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_benchmark.py
45
9869
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark for fused conv2d bias and activation op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op from tensorflow.python.client import session as session_lib from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def build_conv_bias_relu_graph(device, input_shape, filter_shape, strides, padding, num_iters, data_format): """builds a graph containing a sequence of conv2d operations. Args: device: String, the device to run on. input_shape: Shape of the input tensor. filter_shape: Shape of the filter tensor. strides: A list of ints. 1-D of length 4. The stride of sliding window for each dimension of input. padding: A string from: "SAME", "VALID". The type of padding algorithm to use. num_iters: number of iterations to run conv2d. data_format: data format string of input, 'NHWC' and 'NCHW' are supported. Returns: An array of tensors to run() """ if data_format == "NCHW": input_shape = [ input_shape[0], input_shape[3], input_shape[1], input_shape[2] ] with ops.device("/%s:0" % device): inp = variables.Variable(random_ops.truncated_normal(input_shape)) filt = variables.Variable(random_ops.truncated_normal(filter_shape)) bias_shape = [filter_shape[-1]] bias = variables.Variable(random_ops.truncated_normal(bias_shape)) outputs = [] conv2d_out = nn_ops.conv2d( inp, filt, strides, padding, data_format=data_format) bias_out = nn_ops.bias_add(conv2d_out, bias, data_format=data_format) relu_out = nn_ops.relu(bias_out) outputs.append(relu_out) for _ in range(1, num_iters): with ops.control_dependencies([relu_out]): conv2d_out = nn_ops.conv2d( inp, filt, strides, padding, data_format=data_format) bias_out = nn_ops.bias_add(conv2d_out, bias, data_format=data_format) relu_out = nn_ops.relu(bias_out) outputs.append(relu_out) return control_flow_ops.group(*outputs) def build_fused_conv_bias_relu_graph(device, input_shape, filter_shape, strides, padding, num_iters, data_format): """builds a graph containing a sequence of conv2d operations. Args: device: String, the device to run on. input_shape: Shape of the input tensor. filter_shape: Shape of the filter tensor. strides: A list of ints. 1-D of length 4. The stride of sliding window for each dimension of input. padding: A string from: "SAME", "VALID". The type of padding algorithm to use. num_iters: number of iterations to run conv2d. data_format: data format string of input, 'NHWC' and 'NCHW' are supported. 
Returns: An array of tensors to run() """ if data_format == "NCHW": input_shape = [ input_shape[0], input_shape[3], input_shape[1], input_shape[2] ] with ops.device("/%s:0" % device): inp = variables.Variable(random_ops.truncated_normal(input_shape)) filt = variables.Variable(random_ops.truncated_normal(filter_shape)) bias_shape = [filter_shape[-1]] bias = variables.Variable(random_ops.truncated_normal(bias_shape)) outputs = [] fused_out = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation( inp, filt, bias, strides, padding, data_format=data_format, activation_mode="Relu") outputs.append(fused_out) for _ in range(1, num_iters): with ops.control_dependencies([fused_out]): # pylint: disable=g-line-too-long fused_out = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation( # pylint: disable=line-too-long inp, filt, bias, strides, padding, data_format=data_format, activation_mode="Relu") outputs.append(fused_out) return control_flow_ops.group(*outputs) class FusedConv2DBiasActivationBenchmark(test.Benchmark): """Benchmark conv2d!""" def _run_graph(self, device, input_shape, filter_shape, strides, padding, num_iters, data_format): """Runs the graph and prints its execution time. Args: device: String, the device to run on. input_shape: Shape of the input tensor. filter_shape: Shape of the filter tensor. strides: A list of ints. 1-D of length 4. The stride of sliding window for each dimension of input. padding: A string from: "SAME", "VALID". The type of padding algorithm to use. num_iters: Number of iterations to run the benchmark. data_format: data format string of input, 'NHWC' and 'NCHW' are supported. Returns: The duration of the run in seconds. """ graph = ops.Graph() with graph.as_default(): outputs = build_fused_conv_bias_relu_graph(device, input_shape, filter_shape, strides, padding, num_iters, data_format) with session_lib.Session(graph=graph) as session: variables.global_variables_initializer().run() # warmup runs session.run(outputs) start_time = time.time() session.run(outputs) duration = (time.time() - start_time) / num_iters print("%s inputshape:%s filtershape:%s strides:%s padding:%s " "%d iters: %.8f sec" % (device, str(input_shape).replace(" ", ""), str(filter_shape).replace(" ", ""), str(strides).replace(" ", ""), padding, num_iters, duration)) name_template = ( "conv2d_{device}_input_shape_{inputshape}_filter_shape_{filtershape}_" "strides_{strides}_padding_{padding}") self.report_benchmark( name=name_template.format( device=device, inputshape=str(input_shape).replace(" ", ""), filtershape=str(filter_shape).replace(" ", ""), strides=str(strides).replace(" ", ""), padding=padding).replace(" ", ""), iters=num_iters, wall_time=duration) return duration def benchmark_fused_conv2d_bias_activation(self): stride = [1, 1, 1, 1] paddings = ["VALID", "SAME"] data_formats = ["NHWC", "NCHW"] resnet50_input_shapes = [[64, 14, 14, 256], [64, 14, 14, 256], [ 64, 14, 14, 1024 ], [64, 55, 55, 64], [64, 28, 28, 128], [64, 28, 28, 128], [64, 55, 55, 64], [64, 7, 7, 512], [64, 7, 7, 512], [64, 28, 28, 512], [64, 55, 55, 256], [64, 7, 7, 2048]] resnet50_filter_shapes = [[1, 1, 256, 1024], [3, 3, 256, 256], [ 1, 1, 1024, 256 ], [1, 1, 64, 256], [1, 1, 128, 512], [3, 3, 128, 128], [3, 3, 64, 64], [ 3, 3, 512, 512 ], [1, 1, 512, 2048], [1, 1, 512, 128], [1, 1, 256, 64], [1, 1, 2048, 512]] inception3_input_shapes = [[64, 17, 17, 768], [64, 35, 35, 96], [ 64, 35, 35, 288 ], [64, 8, 8, 384], [64, 8, 8, 384], [64, 17, 17, 192], [64, 35, 35,
64], [ 64, 17, 17, 192 ], [64, 17, 17, 160], [64, 17, 17, 160], [64, 17, 17, 768], [ 64, 35, 35, 256 ], [64, 35, 35, 48], [64, 35, 35, 192], [64, 17, 17, 128], [ 64, 17, 17, 160 ], [64, 8, 8, 448], [64, 17, 17, 128], [64, 17, 17, 768], [64, 17, 17, 160]] inception3_filter_shapes = [[1, 1, 768, 192], [3, 3, 96, 96], [ 1, 1, 288, 64 ], [1, 3, 384, 384], [3, 1, 384, 384], [7, 1, 192, 192], [3, 3, 64, 96], [ 1, 7, 192, 192 ], [7, 1, 160, 160], [1, 7, 160, 160], [1, 1, 768, 160], [1, 1, 256, 64], [ 5, 5, 48, 64 ], [1, 1, 192, 64], [1, 7, 128, 128], [1, 7, 160, 192], [3, 3, 448, 384], [7, 1, 128, 128], [1, 1, 768, 128], [7, 1, 160, 192]] print("fused conv2d bias activation benchmark using resnet50's shapes:") for ishape, fshape in zip(resnet50_input_shapes, resnet50_filter_shapes): for padding in paddings: for data_format in data_formats: self._run_graph("gpu", ishape, fshape, stride, padding, 80, data_format) print("fused conv2d bias activation benchmark using inception3's shapes:") for ishape, fshape in zip(inception3_input_shapes, inception3_filter_shapes): for padding in paddings: for data_format in data_formats: self._run_graph("gpu", ishape, fshape, stride, padding, 80, data_format) if __name__ == "__main__": test.main()
apache-2.0
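A quick sanity check on the shapes the benchmark above sweeps: with stride 1 throughout, the standard SAME/VALID output-size rules give the conv output resolution. Plain Python arithmetic, no TensorFlow calls.

import math

def out_size(in_size, filt, stride, padding):
    if padding == "SAME":
        return math.ceil(in_size / stride)           # SAME pads to preserve size/stride
    return math.ceil((in_size - filt + 1) / stride)  # VALID: no padding

# First resnet50 pair above: 64x14x14x256 input with a 1x1x256x1024 filter.
print(out_size(14, 1, 1, "VALID"))  # 14
print(out_size(14, 1, 1, "SAME"))   # 14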
moto-timo/ironpython3
Src/StdLib/Lib/test/test_optparse.py
84
62259
# # Test suite for Optik. Supplied by Johannes Gijsbers # (taradino@softhome.net) -- translated from the original Optik # test suite to this PyUnit-based version. # # $Id$ # import sys import os import re import copy import unittest from io import StringIO from test import support from optparse import make_option, Option, \ TitledHelpFormatter, OptionParser, OptionGroup, \ SUPPRESS_USAGE, OptionError, OptionConflictError, \ BadOptionError, OptionValueError, Values from optparse import _match_abbrev from optparse import _parse_num retype = type(re.compile('')) class InterceptedError(Exception): def __init__(self, error_message=None, exit_status=None, exit_message=None): self.error_message = error_message self.exit_status = exit_status self.exit_message = exit_message def __str__(self): return self.error_message or self.exit_message or "intercepted error" class InterceptingOptionParser(OptionParser): def exit(self, status=0, msg=None): raise InterceptedError(exit_status=status, exit_message=msg) def error(self, msg): raise InterceptedError(error_message=msg) class BaseTest(unittest.TestCase): def assertParseOK(self, args, expected_opts, expected_positional_args): """Assert the options are what we expected when parsing arguments. Otherwise, fail with a nicely formatted message. Keyword arguments: args -- A list of arguments to parse with OptionParser. expected_opts -- The options expected. expected_positional_args -- The positional arguments expected. Returns the options and positional args for further testing. """ (options, positional_args) = self.parser.parse_args(args) optdict = vars(options) self.assertEqual(optdict, expected_opts, """ Options are %(optdict)s. Should be %(expected_opts)s. Args were %(args)s.""" % locals()) self.assertEqual(positional_args, expected_positional_args, """ Positional arguments are %(positional_args)s. Should be %(expected_positional_args)s. Args were %(args)s.""" % locals ()) return (options, positional_args) def assertRaises(self, func, args, kwargs, expected_exception, expected_message): """ Assert that the expected exception is raised when calling a function, and that the right error message is included with that exception. Arguments: func -- the function to call args -- positional arguments to `func` kwargs -- keyword arguments to `func` expected_exception -- exception that should be raised expected_message -- expected exception message (or pattern if a compiled regex object) Returns the exception raised for further testing. """ if args is None: args = () if kwargs is None: kwargs = {} try: func(*args, **kwargs) except expected_exception as err: actual_message = str(err) if isinstance(expected_message, retype): self.assertTrue(expected_message.search(actual_message), """\ expected exception message pattern: /%s/ actual exception message: '''%s''' """ % (expected_message.pattern, actual_message)) else: self.assertEqual(actual_message, expected_message, """\ expected exception message: '''%s''' actual exception message: '''%s''' """ % (expected_message, actual_message)) return err else: self.fail("""expected exception %(expected_exception)s not raised called %(func)r with args %(args)r and kwargs %(kwargs)r """ % locals ()) # -- Assertions used in more than one class -------------------- def assertParseFail(self, cmdline_args, expected_output): """ Assert the parser fails with the expected message. Caller must ensure that self.parser is an InterceptingOptionParser. 
""" try: self.parser.parse_args(cmdline_args) except InterceptedError as err: self.assertEqual(err.error_message, expected_output) else: self.assertFalse("expected parse failure") def assertOutput(self, cmdline_args, expected_output, expected_status=0, expected_error=None): """Assert the parser prints the expected output on stdout.""" save_stdout = sys.stdout try: try: sys.stdout = StringIO() self.parser.parse_args(cmdline_args) finally: output = sys.stdout.getvalue() sys.stdout = save_stdout except InterceptedError as err: self.assertTrue( isinstance(output, str), "expected output to be an ordinary string, not %r" % type(output)) if output != expected_output: self.fail("expected: \n'''\n" + expected_output + "'''\nbut got \n'''\n" + output + "'''") self.assertEqual(err.exit_status, expected_status) self.assertEqual(err.exit_message, expected_error) else: self.assertFalse("expected parser.exit()") def assertTypeError(self, func, expected_message, *args): """Assert that TypeError is raised when executing func.""" self.assertRaises(func, args, None, TypeError, expected_message) def assertHelp(self, parser, expected_help): actual_help = parser.format_help() if actual_help != expected_help: raise self.failureException( 'help text failure; expected:\n"' + expected_help + '"; got:\n"' + actual_help + '"\n') # -- Test make_option() aka Option ------------------------------------- # It's not necessary to test correct options here. All the tests in the # parser.parse_args() section deal with those, because they're needed # there. class TestOptionChecks(BaseTest): def setUp(self): self.parser = OptionParser(usage=SUPPRESS_USAGE) def assertOptionError(self, expected_message, args=[], kwargs={}): self.assertRaises(make_option, args, kwargs, OptionError, expected_message) def test_opt_string_empty(self): self.assertTypeError(make_option, "at least one option string must be supplied") def test_opt_string_too_short(self): self.assertOptionError( "invalid option string 'b': must be at least two characters long", ["b"]) def test_opt_string_short_invalid(self): self.assertOptionError( "invalid short option string '--': must be " "of the form -x, (x any non-dash char)", ["--"]) def test_opt_string_long_invalid(self): self.assertOptionError( "invalid long option string '---': " "must start with --, followed by non-dash", ["---"]) def test_attr_invalid(self): self.assertOptionError( "option -b: invalid keyword arguments: bar, foo", ["-b"], {'foo': None, 'bar': None}) def test_action_invalid(self): self.assertOptionError( "option -b: invalid action: 'foo'", ["-b"], {'action': 'foo'}) def test_type_invalid(self): self.assertOptionError( "option -b: invalid option type: 'foo'", ["-b"], {'type': 'foo'}) self.assertOptionError( "option -b: invalid option type: 'tuple'", ["-b"], {'type': tuple}) def test_no_type_for_action(self): self.assertOptionError( "option -b: must not supply a type for action 'count'", ["-b"], {'action': 'count', 'type': 'int'}) def test_no_choices_list(self): self.assertOptionError( "option -b/--bad: must supply a list of " "choices for type 'choice'", ["-b", "--bad"], {'type': "choice"}) def test_bad_choices_list(self): typename = type('').__name__ self.assertOptionError( "option -b/--bad: choices must be a list of " "strings ('%s' supplied)" % typename, ["-b", "--bad"], {'type': "choice", 'choices':"bad choices"}) def test_no_choices_for_type(self): self.assertOptionError( "option -b: must not supply choices for type 'int'", ["-b"], {'type': 'int', 'choices':"bad"}) def 
test_no_const_for_action(self): self.assertOptionError( "option -b: 'const' must not be supplied for action 'store'", ["-b"], {'action': 'store', 'const': 1}) def test_no_nargs_for_action(self): self.assertOptionError( "option -b: 'nargs' must not be supplied for action 'count'", ["-b"], {'action': 'count', 'nargs': 2}) def test_callback_not_callable(self): self.assertOptionError( "option -b: callback not callable: 'foo'", ["-b"], {'action': 'callback', 'callback': 'foo'}) def dummy(self): pass def test_callback_args_no_tuple(self): self.assertOptionError( "option -b: callback_args, if supplied, " "must be a tuple: not 'foo'", ["-b"], {'action': 'callback', 'callback': self.dummy, 'callback_args': 'foo'}) def test_callback_kwargs_no_dict(self): self.assertOptionError( "option -b: callback_kwargs, if supplied, " "must be a dict: not 'foo'", ["-b"], {'action': 'callback', 'callback': self.dummy, 'callback_kwargs': 'foo'}) def test_no_callback_for_action(self): self.assertOptionError( "option -b: callback supplied ('foo') for non-callback option", ["-b"], {'action': 'store', 'callback': 'foo'}) def test_no_callback_args_for_action(self): self.assertOptionError( "option -b: callback_args supplied for non-callback option", ["-b"], {'action': 'store', 'callback_args': 'foo'}) def test_no_callback_kwargs_for_action(self): self.assertOptionError( "option -b: callback_kwargs supplied for non-callback option", ["-b"], {'action': 'store', 'callback_kwargs': 'foo'}) def test_no_single_dash(self): self.assertOptionError( "invalid long option string '-debug': " "must start with --, followed by non-dash", ["-debug"]) self.assertOptionError( "option -d: invalid long option string '-debug': must start with" " --, followed by non-dash", ["-d", "-debug"]) self.assertOptionError( "invalid long option string '-debug': " "must start with --, followed by non-dash", ["-debug", "--debug"]) class TestOptionParser(BaseTest): def setUp(self): self.parser = OptionParser() self.parser.add_option("-v", "--verbose", "-n", "--noisy", action="store_true", dest="verbose") self.parser.add_option("-q", "--quiet", "--silent", action="store_false", dest="verbose") def test_add_option_no_Option(self): self.assertTypeError(self.parser.add_option, "not an Option instance: None", None) def test_add_option_invalid_arguments(self): self.assertTypeError(self.parser.add_option, "invalid arguments", None, None) def test_get_option(self): opt1 = self.parser.get_option("-v") self.assertIsInstance(opt1, Option) self.assertEqual(opt1._short_opts, ["-v", "-n"]) self.assertEqual(opt1._long_opts, ["--verbose", "--noisy"]) self.assertEqual(opt1.action, "store_true") self.assertEqual(opt1.dest, "verbose") def test_get_option_equals(self): opt1 = self.parser.get_option("-v") opt2 = self.parser.get_option("--verbose") opt3 = self.parser.get_option("-n") opt4 = self.parser.get_option("--noisy") self.assertTrue(opt1 is opt2 is opt3 is opt4) def test_has_option(self): self.assertTrue(self.parser.has_option("-v")) self.assertTrue(self.parser.has_option("--verbose")) def assertTrueremoved(self): self.assertTrue(self.parser.get_option("-v") is None) self.assertTrue(self.parser.get_option("--verbose") is None) self.assertTrue(self.parser.get_option("-n") is None) self.assertTrue(self.parser.get_option("--noisy") is None) self.assertFalse(self.parser.has_option("-v")) self.assertFalse(self.parser.has_option("--verbose")) self.assertFalse(self.parser.has_option("-n")) self.assertFalse(self.parser.has_option("--noisy")) 
self.assertTrue(self.parser.has_option("-q")) self.assertTrue(self.parser.has_option("--silent")) def test_remove_short_opt(self): self.parser.remove_option("-n") self.assertTrueremoved() def test_remove_long_opt(self): self.parser.remove_option("--verbose") self.assertTrueremoved() def test_remove_nonexistent(self): self.assertRaises(self.parser.remove_option, ('foo',), None, ValueError, "no such option 'foo'") @support.impl_detail('Relies on sys.getrefcount', cpython=True) def test_refleak(self): # If an OptionParser is carrying around a reference to a large # object, various cycles can prevent it from being GC'd in # a timely fashion. destroy() breaks the cycles to ensure stuff # can be cleaned up. big_thing = [42] refcount = sys.getrefcount(big_thing) parser = OptionParser() parser.add_option("-a", "--aaarggh") parser.big_thing = big_thing parser.destroy() #self.assertEqual(refcount, sys.getrefcount(big_thing)) del parser self.assertEqual(refcount, sys.getrefcount(big_thing)) class TestOptionValues(BaseTest): def setUp(self): pass def test_basics(self): values = Values() self.assertEqual(vars(values), {}) self.assertEqual(values, {}) self.assertNotEqual(values, {"foo": "bar"}) self.assertNotEqual(values, "") dict = {"foo": "bar", "baz": 42} values = Values(defaults=dict) self.assertEqual(vars(values), dict) self.assertEqual(values, dict) self.assertNotEqual(values, {"foo": "bar"}) self.assertNotEqual(values, {}) self.assertNotEqual(values, "") self.assertNotEqual(values, []) class TestTypeAliases(BaseTest): def setUp(self): self.parser = OptionParser() def test_str_aliases_string(self): self.parser.add_option("-s", type="str") self.assertEqual(self.parser.get_option("-s").type, "string") def test_type_object(self): self.parser.add_option("-s", type=str) self.assertEqual(self.parser.get_option("-s").type, "string") self.parser.add_option("-x", type=int) self.assertEqual(self.parser.get_option("-x").type, "int") # Custom type for testing processing of default values. 
_time_units = { 's' : 1, 'm' : 60, 'h' : 60*60, 'd' : 60*60*24 } def _check_duration(option, opt, value): try: if value[-1].isdigit(): return int(value) else: return int(value[:-1]) * _time_units[value[-1]] except (ValueError, IndexError): raise OptionValueError( 'option %s: invalid duration: %r' % (opt, value)) class DurationOption(Option): TYPES = Option.TYPES + ('duration',) TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER) TYPE_CHECKER['duration'] = _check_duration class TestDefaultValues(BaseTest): def setUp(self): self.parser = OptionParser() self.parser.add_option("-v", "--verbose", default=True) self.parser.add_option("-q", "--quiet", dest='verbose') self.parser.add_option("-n", type="int", default=37) self.parser.add_option("-m", type="int") self.parser.add_option("-s", default="foo") self.parser.add_option("-t") self.parser.add_option("-u", default=None) self.expected = { 'verbose': True, 'n': 37, 'm': None, 's': "foo", 't': None, 'u': None } def test_basic_defaults(self): self.assertEqual(self.parser.get_default_values(), self.expected) def test_mixed_defaults_post(self): self.parser.set_defaults(n=42, m=-100) self.expected.update({'n': 42, 'm': -100}) self.assertEqual(self.parser.get_default_values(), self.expected) def test_mixed_defaults_pre(self): self.parser.set_defaults(x="barf", y="blah") self.parser.add_option("-x", default="frob") self.parser.add_option("-y") self.expected.update({'x': "frob", 'y': "blah"}) self.assertEqual(self.parser.get_default_values(), self.expected) self.parser.remove_option("-y") self.parser.add_option("-y", default=None) self.expected.update({'y': None}) self.assertEqual(self.parser.get_default_values(), self.expected) def test_process_default(self): self.parser.option_class = DurationOption self.parser.add_option("-d", type="duration", default=300) self.parser.add_option("-e", type="duration", default="6m") self.parser.set_defaults(n="42") self.expected.update({'d': 300, 'e': 360, 'n': 42}) self.assertEqual(self.parser.get_default_values(), self.expected) self.parser.set_process_default_values(False) self.expected.update({'d': 300, 'e': "6m", 'n': "42"}) self.assertEqual(self.parser.get_default_values(), self.expected) class TestProgName(BaseTest): """ Test that %prog expands to the right thing in usage, version, and help strings. """ def assertUsage(self, parser, expected_usage): self.assertEqual(parser.get_usage(), expected_usage) def assertVersion(self, parser, expected_version): self.assertEqual(parser.get_version(), expected_version) def test_default_progname(self): # Make sure that program name taken from sys.argv[0] by default. 
save_argv = sys.argv[:] try: sys.argv[0] = os.path.join("foo", "bar", "baz.py") parser = OptionParser("%prog ...", version="%prog 1.2") expected_usage = "Usage: baz.py ...\n" self.assertUsage(parser, expected_usage) self.assertVersion(parser, "baz.py 1.2") self.assertHelp(parser, expected_usage + "\n" + "Options:\n" " --version show program's version number and exit\n" " -h, --help show this help message and exit\n") finally: sys.argv[:] = save_argv def test_custom_progname(self): parser = OptionParser(prog="thingy", version="%prog 0.1", usage="%prog arg arg") parser.remove_option("-h") parser.remove_option("--version") expected_usage = "Usage: thingy arg arg\n" self.assertUsage(parser, expected_usage) self.assertVersion(parser, "thingy 0.1") self.assertHelp(parser, expected_usage + "\n") class TestExpandDefaults(BaseTest): def setUp(self): self.parser = OptionParser(prog="test") self.help_prefix = """\ Usage: test [options] Options: -h, --help show this help message and exit """ self.file_help = "read from FILE [default: %default]" self.expected_help_file = self.help_prefix + \ " -f FILE, --file=FILE read from FILE [default: foo.txt]\n" self.expected_help_none = self.help_prefix + \ " -f FILE, --file=FILE read from FILE [default: none]\n" def test_option_default(self): self.parser.add_option("-f", "--file", default="foo.txt", help=self.file_help) self.assertHelp(self.parser, self.expected_help_file) def test_parser_default_1(self): self.parser.add_option("-f", "--file", help=self.file_help) self.parser.set_default('file', "foo.txt") self.assertHelp(self.parser, self.expected_help_file) def test_parser_default_2(self): self.parser.add_option("-f", "--file", help=self.file_help) self.parser.set_defaults(file="foo.txt") self.assertHelp(self.parser, self.expected_help_file) def test_no_default(self): self.parser.add_option("-f", "--file", help=self.file_help) self.assertHelp(self.parser, self.expected_help_none) def test_default_none_1(self): self.parser.add_option("-f", "--file", default=None, help=self.file_help) self.assertHelp(self.parser, self.expected_help_none) def test_default_none_2(self): self.parser.add_option("-f", "--file", help=self.file_help) self.parser.set_defaults(file=None) self.assertHelp(self.parser, self.expected_help_none) def test_float_default(self): self.parser.add_option( "-p", "--prob", help="blow up with probability PROB [default: %default]") self.parser.set_defaults(prob=0.43) expected_help = self.help_prefix + \ " -p PROB, --prob=PROB blow up with probability PROB [default: 0.43]\n" self.assertHelp(self.parser, expected_help) def test_alt_expand(self): self.parser.add_option("-f", "--file", default="foo.txt", help="read from FILE [default: *DEFAULT*]") self.parser.formatter.default_tag = "*DEFAULT*" self.assertHelp(self.parser, self.expected_help_file) def test_no_expand(self): self.parser.add_option("-f", "--file", default="foo.txt", help="read from %default file") self.parser.formatter.default_tag = None expected_help = self.help_prefix + \ " -f FILE, --file=FILE read from %default file\n" self.assertHelp(self.parser, expected_help) # -- Test parser.parse_args() ------------------------------------------ class TestStandard(BaseTest): def setUp(self): options = [make_option("-a", type="string"), make_option("-b", "--boo", type="int", dest='boo'), make_option("--foo", action="append")] self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, option_list=options) def test_required_value(self): self.assertParseFail(["-a"], "-a option requires 1 argument") def 
test_invalid_integer(self): self.assertParseFail(["-b", "5x"], "option -b: invalid integer value: '5x'") def test_no_such_option(self): self.assertParseFail(["--boo13"], "no such option: --boo13") def test_long_invalid_integer(self): self.assertParseFail(["--boo=x5"], "option --boo: invalid integer value: 'x5'") def test_empty(self): self.assertParseOK([], {'a': None, 'boo': None, 'foo': None}, []) def test_shortopt_empty_longopt_append(self): self.assertParseOK(["-a", "", "--foo=blah", "--foo="], {'a': "", 'boo': None, 'foo': ["blah", ""]}, []) def test_long_option_append(self): self.assertParseOK(["--foo", "bar", "--foo", "", "--foo=x"], {'a': None, 'boo': None, 'foo': ["bar", "", "x"]}, []) def test_option_argument_joined(self): self.assertParseOK(["-abc"], {'a': "bc", 'boo': None, 'foo': None}, []) def test_option_argument_split(self): self.assertParseOK(["-a", "34"], {'a': "34", 'boo': None, 'foo': None}, []) def test_option_argument_joined_integer(self): self.assertParseOK(["-b34"], {'a': None, 'boo': 34, 'foo': None}, []) def test_option_argument_split_negative_integer(self): self.assertParseOK(["-b", "-5"], {'a': None, 'boo': -5, 'foo': None}, []) def test_long_option_argument_joined(self): self.assertParseOK(["--boo=13"], {'a': None, 'boo': 13, 'foo': None}, []) def test_long_option_argument_split(self): self.assertParseOK(["--boo", "111"], {'a': None, 'boo': 111, 'foo': None}, []) def test_long_option_short_option(self): self.assertParseOK(["--foo=bar", "-axyz"], {'a': 'xyz', 'boo': None, 'foo': ["bar"]}, []) def test_abbrev_long_option(self): self.assertParseOK(["--f=bar", "-axyz"], {'a': 'xyz', 'boo': None, 'foo': ["bar"]}, []) def test_defaults(self): (options, args) = self.parser.parse_args([]) defaults = self.parser.get_default_values() self.assertEqual(vars(defaults), vars(options)) def test_ambiguous_option(self): self.parser.add_option("--foz", action="store", type="string", dest="foo") self.assertParseFail(["--f=bar"], "ambiguous option: --f (--foo, --foz?)") def test_short_and_long_option_split(self): self.assertParseOK(["-a", "xyz", "--foo", "bar"], {'a': 'xyz', 'boo': None, 'foo': ["bar"]}, []) def test_short_option_split_long_option_append(self): self.assertParseOK(["--foo=bar", "-b", "123", "--foo", "baz"], {'a': None, 'boo': 123, 'foo': ["bar", "baz"]}, []) def test_short_option_split_one_positional_arg(self): self.assertParseOK(["-a", "foo", "bar"], {'a': "foo", 'boo': None, 'foo': None}, ["bar"]) def test_short_option_consumes_separator(self): self.assertParseOK(["-a", "--", "foo", "bar"], {'a': "--", 'boo': None, 'foo': None}, ["foo", "bar"]) self.assertParseOK(["-a", "--", "--foo", "bar"], {'a': "--", 'boo': None, 'foo': ["bar"]}, []) def test_short_option_joined_and_separator(self): self.assertParseOK(["-ab", "--", "--foo", "bar"], {'a': "b", 'boo': None, 'foo': None}, ["--foo", "bar"]), def test_hyphen_becomes_positional_arg(self): self.assertParseOK(["-ab", "-", "--foo", "bar"], {'a': "b", 'boo': None, 'foo': ["bar"]}, ["-"]) def test_no_append_versus_append(self): self.assertParseOK(["-b3", "-b", "5", "--foo=bar", "--foo", "baz"], {'a': None, 'boo': 5, 'foo': ["bar", "baz"]}, []) def test_option_consumes_optionlike_string(self): self.assertParseOK(["-a", "-b3"], {'a': "-b3", 'boo': None, 'foo': None}, []) def test_combined_single_invalid_option(self): self.parser.add_option("-t", action="store_true") self.assertParseFail(["-test"], "no such option: -e") class TestBool(BaseTest): def setUp(self): options = [make_option("-v", "--verbose", action="store_true", 
dest="verbose", default=''), make_option("-q", "--quiet", action="store_false", dest="verbose")] self.parser = OptionParser(option_list = options) def test_bool_default(self): self.assertParseOK([], {'verbose': ''}, []) def test_bool_false(self): (options, args) = self.assertParseOK(["-q"], {'verbose': 0}, []) self.assertTrue(options.verbose is False) def test_bool_true(self): (options, args) = self.assertParseOK(["-v"], {'verbose': 1}, []) self.assertTrue(options.verbose is True) def test_bool_flicker_on_and_off(self): self.assertParseOK(["-qvq", "-q", "-v"], {'verbose': 1}, []) class TestChoice(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.add_option("-c", action="store", type="choice", dest="choice", choices=["one", "two", "three"]) def test_valid_choice(self): self.assertParseOK(["-c", "one", "xyz"], {'choice': 'one'}, ["xyz"]) def test_invalid_choice(self): self.assertParseFail(["-c", "four", "abc"], "option -c: invalid choice: 'four' " "(choose from 'one', 'two', 'three')") def test_add_choice_option(self): self.parser.add_option("-d", "--default", choices=["four", "five", "six"]) opt = self.parser.get_option("-d") self.assertEqual(opt.type, "choice") self.assertEqual(opt.action, "store") class TestCount(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.v_opt = make_option("-v", action="count", dest="verbose") self.parser.add_option(self.v_opt) self.parser.add_option("--verbose", type="int", dest="verbose") self.parser.add_option("-q", "--quiet", action="store_const", dest="verbose", const=0) def test_empty(self): self.assertParseOK([], {'verbose': None}, []) def test_count_one(self): self.assertParseOK(["-v"], {'verbose': 1}, []) def test_count_three(self): self.assertParseOK(["-vvv"], {'verbose': 3}, []) def test_count_three_apart(self): self.assertParseOK(["-v", "-v", "-v"], {'verbose': 3}, []) def test_count_override_amount(self): self.assertParseOK(["-vvv", "--verbose=2"], {'verbose': 2}, []) def test_count_override_quiet(self): self.assertParseOK(["-vvv", "--verbose=2", "-q"], {'verbose': 0}, []) def test_count_overriding(self): self.assertParseOK(["-vvv", "--verbose=2", "-q", "-v"], {'verbose': 1}, []) def test_count_interspersed_args(self): self.assertParseOK(["--quiet", "3", "-v"], {'verbose': 1}, ["3"]) def test_count_no_interspersed_args(self): self.parser.disable_interspersed_args() self.assertParseOK(["--quiet", "3", "-v"], {'verbose': 0}, ["3", "-v"]) def test_count_no_such_option(self): self.assertParseFail(["-q3", "-v"], "no such option: -3") def test_count_option_no_value(self): self.assertParseFail(["--quiet=3", "-v"], "--quiet option does not take a value") def test_count_with_default(self): self.parser.set_default('verbose', 0) self.assertParseOK([], {'verbose':0}, []) def test_count_overriding_default(self): self.parser.set_default('verbose', 0) self.assertParseOK(["-vvv", "--verbose=2", "-q", "-v"], {'verbose': 1}, []) class TestMultipleArgs(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.add_option("-p", "--point", action="store", nargs=3, type="float", dest="point") def test_nargs_with_positional_args(self): self.assertParseOK(["foo", "-p", "1", "2.5", "-4.3", "xyz"], {'point': (1.0, 2.5, -4.3)}, ["foo", "xyz"]) def test_nargs_long_opt(self): self.assertParseOK(["--point", "-1", "2.5", "-0", "xyz"], {'point': (-1.0, 2.5, -0.0)}, ["xyz"]) def test_nargs_invalid_float_value(self): self.assertParseFail(["-p", 
"1.0", "2x", "3.5"], "option -p: " "invalid floating-point value: '2x'") def test_nargs_required_values(self): self.assertParseFail(["--point", "1.0", "3.5"], "--point option requires 3 arguments") class TestMultipleArgsAppend(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.add_option("-p", "--point", action="store", nargs=3, type="float", dest="point") self.parser.add_option("-f", "--foo", action="append", nargs=2, type="int", dest="foo") self.parser.add_option("-z", "--zero", action="append_const", dest="foo", const=(0, 0)) def test_nargs_append(self): self.assertParseOK(["-f", "4", "-3", "blah", "--foo", "1", "666"], {'point': None, 'foo': [(4, -3), (1, 666)]}, ["blah"]) def test_nargs_append_required_values(self): self.assertParseFail(["-f4,3"], "-f option requires 2 arguments") def test_nargs_append_simple(self): self.assertParseOK(["--foo=3", "4"], {'point': None, 'foo':[(3, 4)]}, []) def test_nargs_append_const(self): self.assertParseOK(["--zero", "--foo", "3", "4", "-z"], {'point': None, 'foo':[(0, 0), (3, 4), (0, 0)]}, []) class TestVersion(BaseTest): def test_version(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, version="%prog 0.1") save_argv = sys.argv[:] try: sys.argv[0] = os.path.join(os.curdir, "foo", "bar") self.assertOutput(["--version"], "bar 0.1\n") finally: sys.argv[:] = save_argv def test_no_version(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.assertParseFail(["--version"], "no such option: --version") # -- Test conflicting default values and parser.parse_args() ----------- class TestConflictingDefaults(BaseTest): """Conflicting default values: the last one should win.""" def setUp(self): self.parser = OptionParser(option_list=[ make_option("-v", action="store_true", dest="verbose", default=1)]) def test_conflict_default(self): self.parser.add_option("-q", action="store_false", dest="verbose", default=0) self.assertParseOK([], {'verbose': 0}, []) def test_conflict_default_none(self): self.parser.add_option("-q", action="store_false", dest="verbose", default=None) self.assertParseOK([], {'verbose': None}, []) class TestOptionGroup(BaseTest): def setUp(self): self.parser = OptionParser(usage=SUPPRESS_USAGE) def test_option_group_create_instance(self): group = OptionGroup(self.parser, "Spam") self.parser.add_option_group(group) group.add_option("--spam", action="store_true", help="spam spam spam spam") self.assertParseOK(["--spam"], {'spam': 1}, []) def test_add_group_no_group(self): self.assertTypeError(self.parser.add_option_group, "not an OptionGroup instance: None", None) def test_add_group_invalid_arguments(self): self.assertTypeError(self.parser.add_option_group, "invalid arguments", None, None) def test_add_group_wrong_parser(self): group = OptionGroup(self.parser, "Spam") group.parser = OptionParser() self.assertRaises(self.parser.add_option_group, (group,), None, ValueError, "invalid OptionGroup (wrong parser)") def test_group_manipulate(self): group = self.parser.add_option_group("Group 2", description="Some more options") group.set_title("Bacon") group.add_option("--bacon", type="int") self.assertTrue(self.parser.get_option_group("--bacon"), group) # -- Test extending and parser.parse_args() ---------------------------- class TestExtendAddTypes(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, option_class=self.MyOption) self.parser.add_option("-a", None, type="string", dest="a") self.parser.add_option("-f", "--file", 
type="file", dest="file") def tearDown(self): if os.path.isdir(support.TESTFN): os.rmdir(support.TESTFN) elif os.path.isfile(support.TESTFN): os.unlink(support.TESTFN) class MyOption (Option): def check_file(option, opt, value): if not os.path.exists(value): raise OptionValueError("%s: file does not exist" % value) elif not os.path.isfile(value): raise OptionValueError("%s: not a regular file" % value) return value TYPES = Option.TYPES + ("file",) TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER) TYPE_CHECKER["file"] = check_file def test_filetype_ok(self): support.create_empty_file(support.TESTFN) self.assertParseOK(["--file", support.TESTFN, "-afoo"], {'file': support.TESTFN, 'a': 'foo'}, []) def test_filetype_noexist(self): self.assertParseFail(["--file", support.TESTFN, "-afoo"], "%s: file does not exist" % support.TESTFN) def test_filetype_notfile(self): os.mkdir(support.TESTFN) self.assertParseFail(["--file", support.TESTFN, "-afoo"], "%s: not a regular file" % support.TESTFN) class TestExtendAddActions(BaseTest): def setUp(self): options = [self.MyOption("-a", "--apple", action="extend", type="string", dest="apple")] self.parser = OptionParser(option_list=options) class MyOption (Option): ACTIONS = Option.ACTIONS + ("extend",) STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) def take_action(self, action, dest, opt, value, values, parser): if action == "extend": lvalue = value.split(",") values.ensure_value(dest, []).extend(lvalue) else: Option.take_action(self, action, dest, opt, parser, value, values) def test_extend_add_action(self): self.assertParseOK(["-afoo,bar", "--apple=blah"], {'apple': ["foo", "bar", "blah"]}, []) def test_extend_add_action_normal(self): self.assertParseOK(["-a", "foo", "-abar", "--apple=x,y"], {'apple': ["foo", "bar", "x", "y"]}, []) # -- Test callbacks and parser.parse_args() ---------------------------- class TestCallback(BaseTest): def setUp(self): options = [make_option("-x", None, action="callback", callback=self.process_opt), make_option("-f", "--file", action="callback", callback=self.process_opt, type="string", dest="filename")] self.parser = OptionParser(option_list=options) def process_opt(self, option, opt, value, parser_): if opt == "-x": self.assertEqual(option._short_opts, ["-x"]) self.assertEqual(option._long_opts, []) self.assertTrue(parser_ is self.parser) self.assertTrue(value is None) self.assertEqual(vars(parser_.values), {'filename': None}) parser_.values.x = 42 elif opt == "--file": self.assertEqual(option._short_opts, ["-f"]) self.assertEqual(option._long_opts, ["--file"]) self.assertTrue(parser_ is self.parser) self.assertEqual(value, "foo") self.assertEqual(vars(parser_.values), {'filename': None, 'x': 42}) setattr(parser_.values, option.dest, value) else: self.fail("Unknown option %r in process_opt." % opt) def test_callback(self): self.assertParseOK(["-x", "--file=foo"], {'filename': "foo", 'x': 42}, []) def test_callback_help(self): # This test was prompted by SF bug #960515 -- the point is # not to inspect the help text, just to make sure that # format_help() doesn't crash. 
parser = OptionParser(usage=SUPPRESS_USAGE) parser.remove_option("-h") parser.add_option("-t", "--test", action="callback", callback=lambda: None, type="string", help="foo") expected_help = ("Options:\n" " -t TEST, --test=TEST foo\n") self.assertHelp(parser, expected_help) class TestCallbackExtraArgs(BaseTest): def setUp(self): options = [make_option("-p", "--point", action="callback", callback=self.process_tuple, callback_args=(3, int), type="string", dest="points", default=[])] self.parser = OptionParser(option_list=options) def process_tuple(self, option, opt, value, parser_, len, type): self.assertEqual(len, 3) self.assertTrue(type is int) if opt == "-p": self.assertEqual(value, "1,2,3") elif opt == "--point": self.assertEqual(value, "4,5,6") value = tuple(map(type, value.split(","))) getattr(parser_.values, option.dest).append(value) def test_callback_extra_args(self): self.assertParseOK(["-p1,2,3", "--point", "4,5,6"], {'points': [(1,2,3), (4,5,6)]}, []) class TestCallbackMeddleArgs(BaseTest): def setUp(self): options = [make_option(str(x), action="callback", callback=self.process_n, dest='things') for x in range(-1, -6, -1)] self.parser = OptionParser(option_list=options) # Callback that meddles in rargs, largs def process_n(self, option, opt, value, parser_): # option is -3, -5, etc. nargs = int(opt[1:]) rargs = parser_.rargs if len(rargs) < nargs: self.fail("Expected %d arguments for %s option." % (nargs, opt)) dest = parser_.values.ensure_value(option.dest, []) dest.append(tuple(rargs[0:nargs])) parser_.largs.append(nargs) del rargs[0:nargs] def test_callback_meddle_args(self): self.assertParseOK(["-1", "foo", "-3", "bar", "baz", "qux"], {'things': [("foo",), ("bar", "baz", "qux")]}, [1, 3]) def test_callback_meddle_args_separator(self): self.assertParseOK(["-2", "foo", "--"], {'things': [('foo', '--')]}, [2]) class TestCallbackManyArgs(BaseTest): def setUp(self): options = [make_option("-a", "--apple", action="callback", nargs=2, callback=self.process_many, type="string"), make_option("-b", "--bob", action="callback", nargs=3, callback=self.process_many, type="int")] self.parser = OptionParser(option_list=options) def process_many(self, option, opt, value, parser_): if opt == "-a": self.assertEqual(value, ("foo", "bar")) elif opt == "--apple": self.assertEqual(value, ("ding", "dong")) elif opt == "-b": self.assertEqual(value, (1, 2, 3)) elif opt == "--bob": self.assertEqual(value, (-666, 42, 0)) def test_many_args(self): self.assertParseOK(["-a", "foo", "bar", "--apple", "ding", "dong", "-b", "1", "2", "3", "--bob", "-666", "42", "0"], {"apple": None, "bob": None}, []) class TestCallbackCheckAbbrev(BaseTest): def setUp(self): self.parser = OptionParser() self.parser.add_option("--foo-bar", action="callback", callback=self.check_abbrev) def check_abbrev(self, option, opt, value, parser): self.assertEqual(opt, "--foo-bar") def test_abbrev_callback_expansion(self): self.assertParseOK(["--foo"], {}, []) class TestCallbackVarArgs(BaseTest): def setUp(self): options = [make_option("-a", type="int", nargs=2, dest="a"), make_option("-b", action="store_true", dest="b"), make_option("-c", "--callback", action="callback", callback=self.variable_args, dest="c")] self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, option_list=options) def variable_args(self, option, opt, value, parser): self.assertTrue(value is None) value = [] rargs = parser.rargs while rargs: arg = rargs[0] if ((arg[:2] == "--" and len(arg) > 2) or (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")): break else: 
value.append(arg) del rargs[0] setattr(parser.values, option.dest, value) def test_variable_args(self): self.assertParseOK(["-a3", "-5", "--callback", "foo", "bar"], {'a': (3, -5), 'b': None, 'c': ["foo", "bar"]}, []) def test_consume_separator_stop_at_option(self): self.assertParseOK(["-c", "37", "--", "xxx", "-b", "hello"], {'a': None, 'b': True, 'c': ["37", "--", "xxx"]}, ["hello"]) def test_positional_arg_and_variable_args(self): self.assertParseOK(["hello", "-c", "foo", "-", "bar"], {'a': None, 'b': None, 'c':["foo", "-", "bar"]}, ["hello"]) def test_stop_at_option(self): self.assertParseOK(["-c", "foo", "-b"], {'a': None, 'b': True, 'c': ["foo"]}, []) def test_stop_at_invalid_option(self): self.assertParseFail(["-c", "3", "-5", "-a"], "no such option: -5") # -- Test conflict handling and parser.parse_args() -------------------- class ConflictBase(BaseTest): def setUp(self): options = [make_option("-v", "--verbose", action="count", dest="verbose", help="increment verbosity")] self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, option_list=options) def show_version(self, option, opt, value, parser): parser.values.show_version = 1 class TestConflict(ConflictBase): """Use the default conflict resolution for Optik 1.2: error.""" def assertTrueconflict_error(self, func): err = self.assertRaises( func, ("-v", "--version"), {'action' : "callback", 'callback' : self.show_version, 'help' : "show version"}, OptionConflictError, "option -v/--version: conflicting option string(s): -v") self.assertEqual(err.msg, "conflicting option string(s): -v") self.assertEqual(err.option_id, "-v/--version") def test_conflict_error(self): self.assertTrueconflict_error(self.parser.add_option) def test_conflict_error_group(self): group = OptionGroup(self.parser, "Group 1") self.assertTrueconflict_error(group.add_option) def test_no_such_conflict_handler(self): self.assertRaises( self.parser.set_conflict_handler, ('foo',), None, ValueError, "invalid conflict_resolution value 'foo'") class TestConflictResolve(ConflictBase): def setUp(self): ConflictBase.setUp(self) self.parser.set_conflict_handler("resolve") self.parser.add_option("-v", "--version", action="callback", callback=self.show_version, help="show version") def test_conflict_resolve(self): v_opt = self.parser.get_option("-v") verbose_opt = self.parser.get_option("--verbose") version_opt = self.parser.get_option("--version") self.assertTrue(v_opt is version_opt) self.assertTrue(v_opt is not verbose_opt) self.assertEqual(v_opt._long_opts, ["--version"]) self.assertEqual(version_opt._short_opts, ["-v"]) self.assertEqual(version_opt._long_opts, ["--version"]) self.assertEqual(verbose_opt._short_opts, []) self.assertEqual(verbose_opt._long_opts, ["--verbose"]) def test_conflict_resolve_help(self): self.assertOutput(["-h"], """\ Options: --verbose increment verbosity -h, --help show this help message and exit -v, --version show version """) def test_conflict_resolve_short_opt(self): self.assertParseOK(["-v"], {'verbose': None, 'show_version': 1}, []) def test_conflict_resolve_long_opt(self): self.assertParseOK(["--verbose"], {'verbose': 1}, []) def test_conflict_resolve_long_opts(self): self.assertParseOK(["--verbose", "--version"], {'verbose': 1, 'show_version': 1}, []) class TestConflictOverride(BaseTest): def setUp(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.set_conflict_handler("resolve") self.parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", help="don't do anything") 
self.parser.add_option("--dry-run", "-n", action="store_const", const=42, dest="dry_run", help="dry run mode") def test_conflict_override_opts(self): opt = self.parser.get_option("--dry-run") self.assertEqual(opt._short_opts, ["-n"]) self.assertEqual(opt._long_opts, ["--dry-run"]) def test_conflict_override_help(self): self.assertOutput(["-h"], """\ Options: -h, --help show this help message and exit -n, --dry-run dry run mode """) def test_conflict_override_args(self): self.assertParseOK(["-n"], {'dry_run': 42}, []) # -- Other testing. ---------------------------------------------------- _expected_help_basic = """\ Usage: bar.py [options] Options: -a APPLE throw APPLEs at basket -b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing -h, --help show this help message and exit """ _expected_help_long_opts_first = """\ Usage: bar.py [options] Options: -a APPLE throw APPLEs at basket --boo=NUM, -b NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing --help, -h show this help message and exit """ _expected_help_title_formatter = """\ Usage ===== bar.py [options] Options ======= -a APPLE throw APPLEs at basket --boo=NUM, -b NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing --help, -h show this help message and exit """ _expected_help_short_lines = """\ Usage: bar.py [options] Options: -a APPLE throw APPLEs at basket -b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing -h, --help show this help message and exit """ _expected_very_help_short_lines = """\ Usage: bar.py [options] Options: -a APPLE throw APPLEs at basket -b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing -h, --help show this help message and exit """ class TestHelp(BaseTest): def setUp(self): self.parser = self.make_parser(80) def make_parser(self, columns): options = [ make_option("-a", type="string", dest='a', metavar="APPLE", help="throw APPLEs at basket"), make_option("-b", "--boo", type="int", dest='boo', metavar="NUM", help= "shout \"boo!\" NUM times (in order to frighten away " "all the evil spirits that cause trouble and mayhem)"), make_option("--foo", action="append", type="string", dest='foo', help="store FOO in the foo list for later fooing"), ] # We need to set COLUMNS for the OptionParser constructor, but # we must restore its original value -- otherwise, this test # screws things up for other tests when it's part of the Python # test suite. with support.EnvironmentVarGuard() as env: env['COLUMNS'] = str(columns) return InterceptingOptionParser(option_list=options) def assertHelpEquals(self, expected_output): save_argv = sys.argv[:] try: # Make optparse believe bar.py is being executed. 
sys.argv[0] = os.path.join("foo", "bar.py") self.assertOutput(["-h"], expected_output) finally: sys.argv[:] = save_argv def test_help(self): self.assertHelpEquals(_expected_help_basic) def test_help_old_usage(self): self.parser.set_usage("Usage: %prog [options]") self.assertHelpEquals(_expected_help_basic) def test_help_long_opts_first(self): self.parser.formatter.short_first = 0 self.assertHelpEquals(_expected_help_long_opts_first) def test_help_title_formatter(self): with support.EnvironmentVarGuard() as env: env["COLUMNS"] = "80" self.parser.formatter = TitledHelpFormatter() self.assertHelpEquals(_expected_help_title_formatter) def test_wrap_columns(self): # Ensure that wrapping respects $COLUMNS environment variable. # Need to reconstruct the parser, since that's the only time # we look at $COLUMNS. self.parser = self.make_parser(60) self.assertHelpEquals(_expected_help_short_lines) self.parser = self.make_parser(0) self.assertHelpEquals(_expected_very_help_short_lines) def test_help_unicode(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE) self.parser.add_option("-a", action="store_true", help="ol\u00E9!") expect = """\ Options: -h, --help show this help message and exit -a ol\u00E9! """ self.assertHelpEquals(expect) def test_help_unicode_description(self): self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE, description="ol\u00E9!") expect = """\ ol\u00E9! Options: -h, --help show this help message and exit """ self.assertHelpEquals(expect) def test_help_description_groups(self): self.parser.set_description( "This is the program description for %prog. %prog has " "an option group as well as single options.") group = OptionGroup( self.parser, "Dangerous Options", "Caution: use of these options is at your own risk. " "It is believed that some of them bite.") group.add_option("-g", action="store_true", help="Group option.") self.parser.add_option_group(group) expect = """\ Usage: bar.py [options] This is the program description for bar.py. bar.py has an option group as well as single options. Options: -a APPLE throw APPLEs at basket -b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the evil spirits that cause trouble and mayhem) --foo=FOO store FOO in the foo list for later fooing -h, --help show this help message and exit Dangerous Options: Caution: use of these options is at your own risk. It is believed that some of them bite. -g Group option. """ self.assertHelpEquals(expect) self.parser.epilog = "Please report bugs to /dev/null." 
self.assertHelpEquals(expect + "\nPlease report bugs to /dev/null.\n") class TestMatchAbbrev(BaseTest): def test_match_abbrev(self): self.assertEqual(_match_abbrev("--f", {"--foz": None, "--foo": None, "--fie": None, "--f": None}), "--f") def test_match_abbrev_error(self): s = "--f" wordmap = {"--foz": None, "--foo": None, "--fie": None} self.assertRaises( _match_abbrev, (s, wordmap), None, BadOptionError, "ambiguous option: --f (--fie, --foo, --foz?)") class TestParseNumber(BaseTest): def setUp(self): self.parser = InterceptingOptionParser() self.parser.add_option("-n", type=int) self.parser.add_option("-l", type=int) def test_parse_num_fail(self): self.assertRaises( _parse_num, ("", int), {}, ValueError, re.compile(r"invalid literal for int().*: '?'?")) self.assertRaises( _parse_num, ("0xOoops", int), {}, ValueError, re.compile(r"invalid literal for int().*: s?'?0xOoops'?")) def test_parse_num_ok(self): self.assertEqual(_parse_num("0", int), 0) self.assertEqual(_parse_num("0x10", int), 16) self.assertEqual(_parse_num("0XA", int), 10) self.assertEqual(_parse_num("010", int), 8) self.assertEqual(_parse_num("0b11", int), 3) self.assertEqual(_parse_num("0b", int), 0) def test_numeric_options(self): self.assertParseOK(["-n", "42", "-l", "0x20"], { "n": 42, "l": 0x20 }, []) self.assertParseOK(["-n", "0b0101", "-l010"], { "n": 5, "l": 8 }, []) self.assertParseFail(["-n008"], "option -n: invalid integer value: '008'") self.assertParseFail(["-l0b0123"], "option -l: invalid integer value: '0b0123'") self.assertParseFail(["-l", "0x12x"], "option -l: invalid integer value: '0x12x'") def test_main(): support.run_unittest(__name__) if __name__ == '__main__': test_main()
apache-2.0
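The DurationOption defined in the test file above is the canonical optparse extension recipe: subclass Option, extend TYPES, and register a checker function in a copied TYPE_CHECKER map. A minimal self-contained sketch of the same technique follows; unlike the test's _check_duration, it also catches KeyError, which the original would let escape on an unknown unit suffix.

import copy
from optparse import Option, OptionParser, OptionValueError

TIME_UNITS = {'s': 1, 'm': 60, 'h': 60 * 60, 'd': 60 * 60 * 24}

def check_duration(option, opt, value):
    # "90" -> 90 seconds, "5m" -> 300; bad input raises the standard
    # optparse error (the KeyError catch covers e.g. "5x").
    try:
        if value[-1].isdigit():
            return int(value)
        return int(value[:-1]) * TIME_UNITS[value[-1]]
    except (ValueError, KeyError, IndexError):
        raise OptionValueError('option %s: invalid duration: %r' % (opt, value))

class DurationOption(Option):
    TYPES = Option.TYPES + ('duration',)
    TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER)
    TYPE_CHECKER['duration'] = check_duration

parser = OptionParser(option_class=DurationOption)
parser.add_option('-t', '--timeout', type='duration', default='5m',
                  help='timeout as N, Ns, Nm, Nh or Nd')
options, args = parser.parse_args(['--timeout', '90s'])
assert options.timeout == 90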
cboling/xos
xos/core/models/xosmodel.py
1
2211
import os from django.db import models, transaction from core.models import PlCoreBase from core.models.plcorebase import StrippedCharField # XOS: Serves as the root of the build system class XOS(PlCoreBase): name = StrippedCharField(max_length=200, unique=True, help_text="Name of XOS", default="XOS") ui_port = models.IntegerField(help_text="Port for XOS UI", default=80) bootstrap_ui_port = models.IntegerField(help_text="Port for XOS Bootstrap UI", default=81) db_container_name = StrippedCharField(max_length=200, help_text="name of XOS db container", default="xos_db") docker_project_name = StrippedCharField(max_length=200, help_text="docker project name") enable_build = models.BooleanField(help_text="True if Onboarding Synchronizer should build XOS as necessary", default=True) frontend_only = models.BooleanField(help_text="If True, XOS will not start synchronizer containers", default=False) source_ui_image = StrippedCharField(max_length=200, default="adtran/xos") def __unicode__(self): return u'%s' % (self.name) def __init__(self, *args, **kwargs): super(XOS, self).__init__(*args, **kwargs) def save(self, *args, **kwds): super(XOS, self).save(*args, **kwds) # def can_update(self, user): # return user.can_update_site(self.site, allow=['tech']) def rebuild(self): with transaction.atomic(): for service_controller in self.service_controllers.all(): for scr in service_controller.service_controller_resources.all(): scr.save() service_controller.save() self.save() class XOSVolume(PlCoreBase): xos = models.ForeignKey(XOS, related_name='volumes', help_text="The XOS object for this Volume") container_path = StrippedCharField(max_length=1024, unique=True, help_text="Path of Volume in Container") host_path = StrippedCharField(max_length=1024, help_text="Path of Volume in Host") read_only = models.BooleanField(default=False, help_text="True if mount read-only") def __unicode__(self): return u'%s' % (self.container_path)
apache-2.0
engineer0x47/SCONS
build/lib/SCons/Tool/sgic++.py
2
1995
"""SCons.Tool.sgic++ Tool-specific initialization for MIPSpro C++ on SGI. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001 - 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/sgic++.py 2014/08/24 12:12:31 garyo" import SCons.Util cplusplus = __import__('c++', globals(), locals(), []) def generate(env): """Add Builders and construction variables for SGI MIPS C++ to an Environment.""" cplusplus.generate(env) env['CXX'] = 'CC' env['CXXFLAGS'] = SCons.Util.CLVar('-LANG:std') env['SHCXX'] = '$CXX' env['SHOBJSUFFIX'] = '.o' env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1 def exists(env): return env.Detect('CC') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
mit
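As the module docstring above says, tool modules like sgic++ are activated through an Environment's tools list rather than imported directly; generate() then injects the construction variables. A hedged SConstruct-style sketch of that selection, only meaningful on an SGI host where the MIPSpro CC compiler actually exists, with 'hello.cpp' a hypothetical source file:

# SConstruct -- illustrative only
env = Environment(tools=['default', 'sgic++'])

# generate(env) above has now set, among others:
#   env['CXX']      == 'CC'
#   env['CXXFLAGS'] == CLVar('-LANG:std')
#   env['SHCXX']    == '$CXX'
env.Program('hello', ['hello.cpp'])

# exists(env) is what lets generic tool detection skip this module
# when no 'CC' binary is found on the PATH.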
cliffe/SecGen
modules/utilities/unix/ctf/metactf/files/repository/src_angr/14_angr_shared_library/scaffold14.py
4
2549
# The shared library has the function validate, which takes a string and returns # either true (1) or false (0). The binary calls this function. If it returns # true, the program prints "Good Job." otherwise, it prints "Try again." # # Note: When you run this script, make sure you run it on # lib14_angr_shared_library.so, not the executable. This level is intended to # teach how to analyse binary formats that are not typical executables. import angr import claripy import sys def main(argv): path_to_binary = ??? # The shared library is compiled with position-independent code. You will need # to specify the base address. All addresses in the shared library will be # base + offset, where offset is their address in the file. # (!) base = ??? project = angr.Project(path_to_binary, load_options={ 'main_opts' : { 'custom_base_addr' : base } }) # Initialize any symbolic values here; you will need at least one to pass to # the validate function. ... # Begin the state at the beginning of the validate function, as if it was # called by the program. Determine the parameters needed to call validate and # replace 'parameters...' with bitvectors holding the values you wish to pass. # Recall that 'claripy.BVV(value, size_in_bits)' constructs a bitvector # initialized to a single value. # Remember to add the base value you specified at the beginning to the # function address! # Hint: int validate(char* buffer, int length) { ... # Another hint: the password is 8 bytes long. # (!) validate_function_address = ??? initial_state = project.factory.call_state(validate_function_address, parameters...) # You will need to add code to inject a symbolic value into the program at the # end of the function that constrains eax to equal true (value of 1) just # before the function returns. There are multiple ways to do this: # 1. Use a hook. # 2. Search for the address just before the function returns and then # constrain eax (this may require putting code elsewhere) ... simulation = project.factory.simgr(initial_state) success_address = ??? simulation.explore(find=success_address) if simulation.found: solution_state = simulation.found[0] # Determine where the program places the return value, and constrain it so # that it is true. Then, solve for the solution and print it. # (!) solution = ??? print solution else: raise Exception('Could not find the solution') if __name__ == '__main__': main(sys.argv)
gpl-3.0
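The scaffold's comments describe the whole approach: load the position-independent library at a chosen base, start a call_state at base plus the offset of validate(), and constrain the returned eax to true. Because the file is a fill-in-the-blanks exercise, its ??? markers are left untouched above; the sketch below (Python 3 syntax, though the scaffold itself is Python 2 era) only illustrates the shape of a finished solution, and every concrete address in it -- base, function offsets, buffer location -- is a made-up placeholder, not the level's real values:

import angr
import claripy

BASE = 0x4000000              # placeholder load address
VALIDATE = BASE + 0x6d7       # placeholder offset of validate()
BEFORE_RET = BASE + 0x783     # placeholder address just before 'ret'

project = angr.Project('lib14_angr_shared_library.so',
                       load_options={'main_opts': {'custom_base_addr': BASE}})

# int validate(char *buffer, int length): pass a pointer to 8 symbolic
# bytes and the length 8, mirroring the hints in the scaffold.
buffer_address = claripy.BVV(0x3000000, 32)   # placeholder scratch memory
password = claripy.BVS('password', 8 * 8)

state = project.factory.call_state(VALIDATE, buffer_address,
                                   claripy.BVV(8, 32))
state.memory.store(buffer_address, password)

simulation = project.factory.simgr(state)
simulation.explore(find=BEFORE_RET)

if simulation.found:
    found = simulation.found[0]
    found.add_constraints(found.regs.eax != 0)   # validate(...) == true
    print(found.solver.eval(password, cast_to=bytes))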
DataDog/integrations-core
singlestore/datadog_checks/singlestore/config_models/defaults.py
1
2331
# (C) Datadog, Inc. 2021-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) # This file is autogenerated. # To change this file you should edit assets/configuration/spec.yaml and then run the following commands: # ddev -x validate config -s <INTEGRATION_NAME> # ddev -x validate models -s <INTEGRATION_NAME> from datadog_checks.base.utils.models.fields import get_default_field_value def shared_global_custom_queries(field, value): return get_default_field_value(field, value) def shared_service(field, value): return get_default_field_value(field, value) def instance_collect_system_metrics(field, value): return False def instance_connect_timeout(field, value): return 10 def instance_custom_queries(field, value): return get_default_field_value(field, value) def instance_disable_generic_tags(field, value): return False def instance_empty_default_hostname(field, value): return False def instance_metric_patterns(field, value): return get_default_field_value(field, value) def instance_min_collection_interval(field, value): return 15 def instance_only_custom_queries(field, value): return False def instance_password(field, value): return get_default_field_value(field, value) def instance_port(field, value): return 3306 def instance_read_timeout(field, value): return get_default_field_value(field, value) def instance_service(field, value): return get_default_field_value(field, value) def instance_tags(field, value): return get_default_field_value(field, value) def instance_tls_ca_cert(field, value): return get_default_field_value(field, value) def instance_tls_cert(field, value): return get_default_field_value(field, value) def instance_tls_private_key(field, value): return get_default_field_value(field, value) def instance_tls_private_key_password(field, value): return get_default_field_value(field, value) def instance_tls_validate_hostname(field, value): return True def instance_tls_verify(field, value): return True def instance_use_global_custom_queries(field, value): return 'true' def instance_use_tls(field, value): return False def instance_username(field, value): return get_default_field_value(field, value)
bsd-3-clause
MountainWei/nova
nova/tests/unit/api/openstack/compute/test_fping.py
19
5754
# Copyright 2011 Grid Dynamics # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import webob from nova.api.openstack.compute import fping as fping_v21 from nova.api.openstack.compute.legacy_v2.contrib import fping from nova import exception from nova import test from nova.tests.unit.api.openstack import fakes import nova.utils FAKE_UUID = fakes.FAKE_UUID def execute(*cmd, **args): return "".join(["%s is alive" % ip for ip in cmd[1:]]) class FpingTestV21(test.TestCase): controller_cls = fping_v21.FpingController def setUp(self): super(FpingTestV21, self).setUp() self.flags(verbose=True, use_ipv6=False) return_server = fakes.fake_instance_get() return_servers = fakes.fake_instance_get_all_by_filters() self.stubs.Set(nova.db, "instance_get_all_by_filters", return_servers) self.stubs.Set(nova.db, "instance_get_by_uuid", return_server) self.stubs.Set(nova.utils, "execute", execute) self.stubs.Set(self.controller_cls, "check_fping", lambda self: None) self.controller = self.controller_cls() def _get_url(self): return "/v2/1234" def test_fping_index(self): req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping") res_dict = self.controller.index(req) self.assertIn("servers", res_dict) for srv in res_dict["servers"]: for key in "project_id", "id", "alive": self.assertIn(key, srv) def test_fping_index_policy(self): req = fakes.HTTPRequest.blank(self._get_url() + "os-fping?all_tenants=1") self.assertRaises(exception.Forbidden, self.controller.index, req) req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping?all_tenants=1") req.environ["nova.context"].is_admin = True res_dict = self.controller.index(req) self.assertIn("servers", res_dict) def test_fping_index_include(self): req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping") res_dict = self.controller.index(req) ids = [srv["id"] for srv in res_dict["servers"]] req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping?include=%s" % ids[0]) res_dict = self.controller.index(req) self.assertEqual(len(res_dict["servers"]), 1) self.assertEqual(res_dict["servers"][0]["id"], ids[0]) def test_fping_index_exclude(self): req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping") res_dict = self.controller.index(req) ids = [srv["id"] for srv in res_dict["servers"]] req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping?exclude=%s" % ",".join(ids[1:])) res_dict = self.controller.index(req) self.assertEqual(len(res_dict["servers"]), 1) self.assertEqual(res_dict["servers"][0]["id"], ids[0]) def test_fping_show(self): req = fakes.HTTPRequest.blank(self._get_url() + "os-fping/%s" % FAKE_UUID) res_dict = self.controller.show(req, FAKE_UUID) self.assertIn("server", res_dict) srv = res_dict["server"] for key in "project_id", "id", "alive": self.assertIn(key, srv) @mock.patch('nova.db.instance_get_by_uuid') def test_fping_show_with_not_found(self, mock_get_instance): mock_get_instance.side_effect = exception.InstanceNotFound( instance_id='') req = fakes.HTTPRequest.blank(self._get_url() + 
"os-fping/%s" % FAKE_UUID) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, FAKE_UUID) class FpingTestV2(FpingTestV21): controller_cls = fping.FpingController class FpingPolicyEnforcementV21(test.NoDBTestCase): def setUp(self): super(FpingPolicyEnforcementV21, self).setUp() self.controller = fping_v21.FpingController() self.req = fakes.HTTPRequest.blank('') def common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules(rule) exc = self.assertRaises( exception.PolicyNotAuthorized, func, *arg, **kwarg) self.assertEqual( "Policy doesn't allow %s to be performed." % rule.popitem()[0], exc.format_message()) def test_list_policy_failed(self): rule = {"os_compute_api:os-fping": "project:non_fake"} self.common_policy_check(rule, self.controller.index, self.req) self.req.GET.update({"all_tenants": "True"}) rule = {"os_compute_api:os-fping:all_tenants": "project:non_fake"} self.common_policy_check(rule, self.controller.index, self.req) def test_show_policy_failed(self): rule = {"os_compute_api:os-fping": "project:non_fake"} self.common_policy_check( rule, self.controller.show, self.req, FAKE_UUID)
apache-2.0
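The fixture above makes the test hermetic by swapping nova.utils.execute for a fake whose output claims every host "is alive", so no real fping binary ever runs. The same stubbing technique with nothing but the standard library; every name here is invented for the illustration:

import unittest
from unittest import mock

def fping(*hosts):
    # Imagine this shells out to the real fping binary.
    raise RuntimeError('would call the real fping')

def alive_hosts(hosts):
    """Return the subset of hosts that fping reports as alive."""
    output = fping(*hosts)
    return [line.split()[0] for line in output.splitlines()
            if line.endswith('is alive')]

class AliveHostsTest(unittest.TestCase):
    @mock.patch(__name__ + '.fping')
    def test_alive(self, fake_fping):
        fake_fping.return_value = '10.0.0.1 is alive\n10.0.0.2 is unreachable'
        self.assertEqual(alive_hosts(['10.0.0.1', '10.0.0.2']), ['10.0.0.1'])

if __name__ == '__main__':
    unittest.main()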
Split-Screen/android_kernel_huawei_msm8928
tools/perf/scripts/python/syscall-counts.py
11176
1522
# system call counts # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40s %10d\n" % (syscall_name(id), val),
gpl-2.0
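The handler's try/except TypeError dance exists because perf's autodict returns an empty nested dict for a missing key, which cannot be incremented. In plain Python the equivalent tally is a collections.defaultdict, sketched here with made-up event ids rather than real trace data:

from collections import defaultdict

syscalls = defaultdict(int)

# Stand-in for the raw_syscalls:sys_enter handler above; the ids are
# fabricated, not real syscall numbers from a trace.
for event_id in [0, 1, 1, 3, 1, 0]:
    syscalls[event_id] += 1

for event_id, count in sorted(syscalls.items(),
                              key=lambda kv: kv[1], reverse=True):
    print('%-10d %10d' % (event_id, count))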
mabuchilab/QNET
tests/algebra/test_commutative_hs_order.py
1
5340
import pytest import sympy from qnet.algebra.core.hilbert_space_algebra import LocalSpace from qnet.algebra.core.operator_algebra import ( OperatorSymbol, tr) from qnet.algebra.library.fock_operators import Phase, Displace from qnet.utils.ordering import ( DisjunctCommutativeHSOrder, FullCommutativeHSOrder, expr_order_key) from qnet.algebra.core.state_algebra import BraKet, KetBra, BasisKet from qnet.algebra.core.scalar_algebra import ScalarValue, Zero, One def test_scalar_expr_order_key(): """Test that expr_order_key for ScalarValue instances compares just like the wrapped values, in particular for Zero and One""" half = ScalarValue(0.5) two = ScalarValue(2.0) alpha = ScalarValue(sympy.symbols('alpha')) neg_two = ScalarValue(-2.0) neg_alpha = ScalarValue(-sympy.symbols('alpha')) key_half = expr_order_key(half) key_two = expr_order_key(two) key_one = expr_order_key(One) key_zero = expr_order_key(Zero) key_alpha = expr_order_key(alpha) key_neg_two = expr_order_key(neg_two) key_neg_alpha = expr_order_key(neg_alpha) assert key_half < key_two assert key_half < key_one assert key_zero < key_half assert key_zero < key_one assert key_neg_two < key_zero # comparison with symbolic should go by string representation, with the # nice side-effect that negative symbols are smaller than positive numbers assert key_one < key_alpha assert key_neg_alpha < key_one assert key_zero < key_alpha assert key_neg_alpha < key_zero assert key_two < key_alpha assert key_neg_alpha < key_two assert str(-2.0) < "alpha" assert key_neg_two < key_alpha assert str(-2.0) < "-alpha" assert key_neg_two < key_neg_alpha def disjunct_commutative_test_data(): A1 = OperatorSymbol("A", hs=1) B1 = OperatorSymbol("B", hs=1) C1 = OperatorSymbol("C", hs=1) A2 = OperatorSymbol("A", hs=2) B2 = OperatorSymbol("B", hs=2) A3 = OperatorSymbol("A", hs=3) B4 = OperatorSymbol("B", hs=4) tr_A1 = tr(A1, over_space=1) tr_A2 = tr(A2, over_space=2) A1_m = OperatorSymbol("A", hs=LocalSpace(1, order_index=2)) B1_m = OperatorSymbol("B", hs=LocalSpace(1, order_index=2)) B2_m = OperatorSymbol("B", hs=LocalSpace(2, order_index=1)) ket_0 = BasisKet(0, hs=1) ket_1 = BasisKet(1, hs=1) ketbra = KetBra(ket_0, ket_1) braket = BraKet(ket_1, ket_1) return [ ([B2, B1, A1], [B1, A1, B2]), ([B2_m, B1_m, A1_m], [B2_m, B1_m, A1_m]), ([B1_m, A1_m, B2_m], [B2_m, B1_m, A1_m]), ([B1, A2, C1, tr_A2], [tr_A2, B1, C1, A2]), ([A1, B1+B2], [A1, B1+B2]), ([B1+B2, A1], [B1+B2, A1]), ([A3+B4, A1+A2], [A1+A2, A3+B4]), ([A1+A2, A3+B4], [A1+A2, A3+B4]), ([B4+A3, A2+A1], [A1+A2, A3+B4]), ([tr_A2, tr_A1], [tr_A1, tr_A2]), ([A2, ketbra, A1], [ketbra, A1, A2]), ([A2, braket, A1], [braket, A1, A2]), ] def full_commutative_test_data(): A1 = OperatorSymbol("A", hs=1) B1 = OperatorSymbol("B", hs=1) C1 = OperatorSymbol("C", hs=1) A2 = OperatorSymbol("A", hs=2) B2 = OperatorSymbol("B", hs=2) A3 = OperatorSymbol("A", hs=3) B4 = OperatorSymbol("B", hs=4) tr_A1 = tr(A1, over_space=1) tr_A2 = tr(A2, over_space=2) A1_m = OperatorSymbol("A", hs=LocalSpace(1, order_index=2)) B1_m = OperatorSymbol("B", hs=LocalSpace(1, order_index=2)) B2_m = OperatorSymbol("B", hs=LocalSpace(2, order_index=1)) ket_0 = BasisKet(0, hs=1) ket_1 = BasisKet(1, hs=1) ketbra = KetBra(ket_0, ket_1) braket = BraKet(ket_1, ket_1) a = sympy.symbols('a') Ph = lambda phi: Phase(phi, hs=1) Ph2 = lambda phi: Phase(phi, hs=2) D = lambda alpha: Displace(alpha, hs=1) return [ ([B2, B1, A1], [A1, B1, B2]), ([B2_m, B1_m, A1_m], [B2_m, A1_m, B1_m]), ([B1_m, A1_m, B2_m], [B2_m, A1_m, B1_m]), ([B1, A2, C1,
tr_A2], [tr_A2, B1, C1, A2]), ([A1, B1+B2], [A1, B1+B2]), ([B1+B2, A1], [A1, B1+B2]), ([A3+B4, A1+A2], [A1+A2, A3+B4]), ([A1+A2, A3+B4], [A1+A2, A3+B4]), ([B4+A3, A2+A1], [A1+A2, A3+B4]), ([tr_A2, tr_A1], [tr_A1, tr_A2]), ([A2, ketbra, A1], [ketbra, A1, A2]), ([A2, braket, A1], [braket, A1, A2]), ([A2, 0.5*A1, 2*A1, A1, a*A1, -3*A1], [0.5*A1, A1, 2*A1, -3*A1, a*A1, A2]), ([Ph(1), Ph(0.5), D(2), D(0.1)], [D(0.1), D(2), Ph(0.5), Ph(1)]), ([Ph(1), Ph2(1), Ph(0.5)], [Ph(0.5), Ph(1), Ph2(1)]), ([Ph(a), Ph(1)], [Ph(1), Ph(a)]), ] @pytest.mark.parametrize('unsorted_args, sorted_args', disjunct_commutative_test_data()) def test_disjunct_commutative_hs_order(unsorted_args, sorted_args): res = sorted(unsorted_args, key=DisjunctCommutativeHSOrder) assert res == sorted_args @pytest.mark.parametrize('unsorted_args, sorted_args', full_commutative_test_data()) def test_full_commutative_hs_order(unsorted_args, sorted_args): res = sorted(unsorted_args, key=FullCommutativeHSOrder) assert res == sorted_args
mit
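The ordering tests above sort expression lists by wrapping each element in a key class whose `__lt__` drives `sorted`. A minimal standalone sketch of that pattern, with illustrative stand-in names (`Symbol`, `ByNameThenSpace`) rather than QNET's actual classes:

import functools

@functools.total_ordering
class ByNameThenSpace:
    """Sort key: order first by Hilbert-space label, then by symbol name."""
    def __init__(self, op):
        self.op = op

    def __eq__(self, other):
        return (self.op.hs, self.op.name) == (other.op.hs, other.op.name)

    def __lt__(self, other):
        return (self.op.hs, self.op.name) < (other.op.hs, other.op.name)

class Symbol:
    def __init__(self, name, hs):
        self.name, self.hs = name, hs

    def __repr__(self):
        return '%s(%d)' % (self.name, self.hs)

ops = [Symbol('B', 2), Symbol('B', 1), Symbol('A', 1)]
assert [repr(op) for op in sorted(ops, key=ByNameThenSpace)] == ['A(1)', 'B(1)', 'B(2)']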
brittanystoroz/kitsune
kitsune/questions/urls.py
16
5279
from django.conf.urls import patterns, url from django.contrib.contenttypes.models import ContentType from kitsune.questions.feeds import ( QuestionsFeed, AnswersFeed, TaggedQuestionsFeed) from kitsune.questions.models import Question, Answer from kitsune.flagit import views as flagit_views urlpatterns = patterns( 'kitsune.questions.views', url(r'^$', 'product_list', name='questions.home'), url(r'^/answer-preview-async$', 'answer_preview_async', name='questions.answer_preview_async'), url(r'^/dashboard/metrics$', 'metrics', name='questions.metrics'), url(r'^/dashboard/metrics/(?P<locale_code>[^/]+)$', 'metrics', name='questions.locale_metrics'), # AAQ url(r'^/new$', 'aaq', name='questions.aaq_step1'), url(r'^/new/confirm$', 'aaq_confirm', name='questions.aaq_confirm'), url(r'^/new/(?P<product_key>[\w\-]+)$', 'aaq_step2', name='questions.aaq_step2'), url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)$', 'aaq_step3', name='questions.aaq_step3'), url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)/search$', 'aaq_step4', name='questions.aaq_step4'), url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)/form$', 'aaq_step5', name='questions.aaq_step5'), # AAQ flow for Marketplace url(r'^/marketplace$', 'marketplace', name='questions.marketplace_aaq'), url(r'^/marketplace/success$', 'marketplace_success', name='questions.marketplace_aaq_success'), url(r'^/marketplace/refund$', 'marketplace_refund', name='questions.marketplace_refund'), url(r'^/marketplace/developer-request$', 'marketplace_developer_request', name='questions.marketplace_developer_request'), url(r'^/marketplace/(?P<category_slug>[\w\-]+)$', 'marketplace_category', name='questions.marketplace_aaq_category'), # TODO: Factor out `/(?P<question_id>\d+)` below url(r'^/(?P<question_id>\d+)$', 'question_details', name='questions.details'), url(r'^/(?P<question_id>\d+)/edit$', 'edit_question', name='questions.edit_question'), url(r'^/(?P<question_id>\d+)/edit-details$', 'edit_details', name='questions.edit_details'), url(r'^/(?P<question_id>\d+)/reply$', 'reply', name='questions.reply'), url(r'^/(?P<question_id>\d+)/delete$', 'delete_question', name='questions.delete'), url(r'^/(?P<question_id>\d+)/lock$', 'lock_question', name='questions.lock'), url(r'^/(?P<question_id>\d+)/archive$', 'archive_question', name='questions.archive'), url(r'^/(?P<question_id>\d+)/delete/(?P<answer_id>\d+)$', 'delete_answer', name='questions.delete_answer'), url(r'^/(?P<question_id>\d+)/edit/(?P<answer_id>\d+)$', 'edit_answer', name='questions.edit_answer'), url(r'^/(?P<question_id>\d+)/solve/(?P<answer_id>\d+)$', 'solve', name='questions.solve'), url(r'^/(?P<question_id>\d+)/unsolve/(?P<answer_id>\d+)$', 'unsolve', name='questions.unsolve'), url(r'^/(?P<question_id>\d+)/vote$', 'question_vote', name='questions.vote'), url(r'^/(?P<question_id>\d+)/vote/(?P<answer_id>\d+)$', 'answer_vote', name='questions.answer_vote'), url(r'^/(?P<question_id>\d+)/add-tag$', 'add_tag', name='questions.add_tag'), url(r'^/(?P<question_id>\d+)/remove-tag$', 'remove_tag', name='questions.remove_tag'), url(r'^/(?P<question_id>\d+)/add-tag-async$', 'add_tag_async', name='questions.add_tag_async'), url(r'^/(?P<question_id>\d+)/remove-tag-async$', 'remove_tag_async', name='questions.remove_tag_async'), url(r'^/(?P<question_id>\d+)/screen-share/$', 'screen_share', name='questions.screen_share'), # Feeds # Note: this needs to be above questions.list because "feed" # matches the product slug regex. 
url(r'^/feed$', QuestionsFeed(), name='questions.feed'), url(r'^/(?P<question_id>\d+)/feed$', AnswersFeed(), name='questions.answers.feed'), url(r'^/tagged/(?P<tag_slug>[\w\-]+)/feed$', TaggedQuestionsFeed(), name='questions.tagged_feed'), # Mark as spam url(r'^/mark_spam$', 'mark_spam', name='questions.mark_spam'), url(r'^/unmark_spam$', 'unmark_spam', name='questions.unmark_spam'), # Question lists url(r'^/(?P<product_slug>[\w+\-\,]+)$', 'question_list', name='questions.list'), # Flag content ("Report this post") url(r'^/(?P<object_id>\d+)/flag$', flagit_views.flag, {'content_type': ContentType.objects.get_for_model(Question).id}, name='questions.flag'), url(r'^/(?P<question_id>\d+)/flag/(?P<object_id>\d+)$', flagit_views.flag, {'content_type': ContentType.objects.get_for_model(Answer).id}, name='questions.answer_flag'), # Subscribe by email url(r'^/(?P<question_id>\d+)/watch$', 'watch_question', name='questions.watch'), url(r'^/(?P<question_id>\d+)/unwatch$', 'unwatch_question', name='questions.unwatch'), url(r'^/confirm/(?P<watch_id>\d+)/(?P<secret>\w+)$', 'activate_watch', name='questions.activate_watch'), url(r'^/unsubscribe/(?P<watch_id>\d+)/(?P<secret>\w+)$', 'unsubscribe_watch', name='questions.unsubscribe'), )
bsd-3-clause
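The routes above lean on named regex groups that Django passes to the views as keyword arguments. A quick standalone check of one of those patterns using only the `re` module (no Django required); the sample path is made up:

import re

# Same regex as the questions.answer_vote route above.
pattern = re.compile(r'^/(?P<question_id>\d+)/vote/(?P<answer_id>\d+)$')
match = pattern.match('/1234/vote/5678')
assert match is not None
assert match.groupdict() == {'question_id': '1234', 'answer_id': '5678'}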
mabuchilab/QNET
src/qnet/algebra/core/super_operator_algebra.py
1
21383
""" The specification of a quantum mechanics symbolic super-operator algebra. See :ref:`super_operator_algebra` for more details. """ from abc import ABCMeta from collections import OrderedDict, defaultdict from numpy import (array as np_array, sqrt as np_sqrt) from numpy.linalg import eigh from sympy import (I, Matrix as SympyMatrix, sqrt) from .abstract_algebra import Operation from .abstract_quantum_algebra import ( ScalarTimesQuantumExpression, QuantumExpression, QuantumSymbol, QuantumOperation, QuantumPlus, QuantumTimes, QuantumAdjoint, QuantumDerivative) from .algebraic_properties import ( assoc, filter_neutral, match_replace, match_replace_binary, orderby, delegate_to_method, collect_summands) from .exceptions import BadLiouvillianError, CannotSymbolicallyDiagonalize from .hilbert_space_algebra import TrivialSpace from .matrix_algebra import Matrix from .operator_algebra import ( Operator, OperatorPlus, ZeroOperator, sympyOne, Adjoint, PseudoInverse) from .scalar_algebra import is_scalar, One from ...utils.ordering import DisjunctCommutativeHSOrder, KeyTuple from ...utils.singleton import Singleton, singleton_object __all__ = [ 'SPost', 'SPre', 'ScalarTimesSuperOperator', 'SuperAdjoint', 'SuperOperator', 'SuperOperatorPlus', 'SuperOperatorSymbol', 'SuperOperatorTimes', 'SuperOperatorTimesOperator', 'anti_commutator', 'commutator', 'lindblad', 'liouvillian', 'liouvillian_normal_form', 'IdentitySuperOperator', 'ZeroSuperOperator', 'SuperOperatorDerivative'] __private__ = ['SuperCommutativeHSOrder'] # anything not in __all__ must be in __private__ ############################################################################### # Abstract base classes ############################################################################### class SuperOperator(QuantumExpression, metaclass=ABCMeta): """Base class for super-operators""" def __mul__(self, other): if isinstance(other, Operator): return SuperOperatorTimesOperator.create(self, other) else: return super().__mul__(other) ############################################################################### # Superoperator algebra elements ############################################################################### class SuperOperatorSymbol(QuantumSymbol, SuperOperator): """Symbolic super-operator See :class:`.QuantumSymbol`. 
""" pass @singleton_object class IdentitySuperOperator(SuperOperator, metaclass=Singleton): """Neutral element for product of super-operators""" _order_index = 2 @property def space(self): return TrivialSpace @property def args(self): return tuple() def _diff(self, sym): return ZeroSuperOperator def _adjoint(self): return self def _expand(self): return self @singleton_object class ZeroSuperOperator(SuperOperator, metaclass=Singleton): """Neutral element for sum of super-operators""" _order_index = 2 @property def space(self): return TrivialSpace @property def args(self): return tuple() def _diff(self, sym): return self def _adjoint(self): return self def _expand(self): return self ############################################################################### # Algebra Operations ############################################################################### class SuperOperatorPlus(QuantumPlus, SuperOperator): """A sum of super-operators""" _neutral_element = ZeroSuperOperator _binary_rules = OrderedDict() simplifications = [assoc, orderby, collect_summands, match_replace_binary] class SuperCommutativeHSOrder(DisjunctCommutativeHSOrder): """Ordering class that acts like DisjunctCommutativeHSOrder, but also commutes any `SPost` and `SPre`""" def __lt__(self, other): if isinstance(self.op, SPre) and isinstance(other.op, SPost): return True elif isinstance(self.op, SPost) and isinstance(other.op, SPre): return False else: return DisjunctCommutativeHSOrder.__lt__(self, other) class SuperOperatorTimes(QuantumTimes, SuperOperator): """Product of super-operators""" _neutral_element = IdentitySuperOperator _binary_rules = OrderedDict() # see end of module simplifications = [assoc, orderby, filter_neutral, match_replace_binary] order_key = SuperCommutativeHSOrder @classmethod def create(cls, *ops): # TODO: Add this functionality to QuantumTimes if any(o == ZeroSuperOperator for o in ops): return ZeroSuperOperator return super().create(*ops) class ScalarTimesSuperOperator(SuperOperator, ScalarTimesQuantumExpression): """Product of a :class:`.Scalar` coefficient and a :class:`SuperOperator`""" def _adjoint(self): pass _rules = OrderedDict() # see end of module simplifications = [match_replace, ] # def _pseudo_inverse(self): # c, t = self.operands # return t.pseudo_inverse() / c class SuperAdjoint(QuantumAdjoint, SuperOperator): r"""Adjoint of a super-operator The mathematical notation for this is typically .. math:: {\rm SuperAdjoint}(\mathcal{L}) =: \mathcal{L}^* and for any super operator :math:`\mathcal{L}`, its super-adjoint :math:`\mathcal{L}^*` satisfies for any pair of operators :math:`M,N`: .. 
math:: {\rm Tr}[M (\mathcal{L}N)] = {\rm Tr}[(\mathcal{L}^*M) N] """ simplifications = [delegate_to_method('_adjoint')] def __init__(self, operand): super().__init__(operand) class SPre(SuperOperator, Operation): """Linear pre-multiplication operator Acting ``SPre(A)`` on an operator ``B`` just yields the product ``A * B``. """ _rules = OrderedDict() # see end of module simplifications = [match_replace, ] _order_name = 'A_SPre' # "SPre" should go before "SPost" @property def space(self): return self.operands[0].space def _expand(self): oe = self.operands[0].expand() if isinstance(oe, OperatorPlus): return sum(SPre.create(oet) for oet in oe.operands) return SPre.create(oe) def _simplify_scalar(self, func): return self.create(self.operands[0].simplify_scalar(func=func)) def _diff(self, sym): return SuperOperatorDerivative(self, {sym: 1}) def _adjoint(self): return SPost(self.operands[0]) class SPost(SuperOperator, Operation): """Linear post-multiplication operator Acting ``SPost(A)`` on an operator ``B`` just yields the reversed product ``B * A``. """ _order_index = -1 _rules = OrderedDict() # see end of module simplifications = [match_replace, ] _order_name = 'B_SPost' # "SPost" should go after "SPre" @property def space(self): return self.operands[0].space def _expand(self): oe = self.operands[0].expand() if isinstance(oe, OperatorPlus): return sum(SPost.create(oet) for oet in oe.operands) return SPost.create(oe) def _simplify_scalar(self, func): return self.create(self.operands[0].simplify_scalar(func=func)) def _diff(self, sym): return SuperOperatorDerivative(self, {sym: 1}) def _adjoint(self): return SPre(self.operands[0]) class SuperOperatorTimesOperator(Operator, Operation): """Application of a super-operator to an operator The result of this operation is an :class:`Operator`. """ _rules = OrderedDict() # see end of module simplifications = [match_replace, ] def __init__(self, sop, op): assert isinstance(sop, SuperOperator) assert isinstance(op, Operator) super().__init__(sop, op) @property def space(self): return self.sop.space * self.op.space @property def sop(self): return self.operands[0] @property def op(self): return self.operands[1] def _expand(self): sop, op = self.operands sope, ope = sop.expand(), op.expand() if isinstance(sope, SuperOperatorPlus): sopet = sope.operands else: sopet = (sope, ) if isinstance(ope, OperatorPlus): opet = ope.operands else: opet = (ope, ) return sum(st * ot for st in sopet for ot in opet) def _series_expand(self, param, about, order): sop, op = self.sop, self.op ope = op.series_expand(param, about, order) return tuple(sop * opet for opet in ope) def _simplify_scalar(self, func): sop, op = self.sop, self.op return sop.simplify_scalar(func=func) * op.simplify_scalar(func=func) def _adjoint(self): return Adjoint(self) def _pseudo_inverse(self): return PseudoInverse(self) def _diff(self, sym): return self.sop.diff(sym) * self.op + self.sop * self.op.diff(sym) class SuperOperatorDerivative(QuantumDerivative, SuperOperator): """Symbolic partial derivative of a super-operator See :class:`.QuantumDerivative`. """ pass ############################################################################### # Constructor Routines ############################################################################### def commutator(A, B=None): """Commutator of `A` and `B` If ``B != None``, return the commutator :math:`[A,B]`, otherwise return the super-operator :math:`[A,\cdot]`. 
The super-operator :math:`[A,\cdot]` maps any other operator ``B`` to the commutator :math:`[A, B] = A B - B A`. Args: A: The first operator to form the commutator of. B: The second operator to form the commutator of, or None. Returns: SuperOperator: The linear superoperator :math:`[A,\cdot]` """ if B: return A * B - B * A return SPre(A) - SPost(A) def anti_commutator(A, B=None): """Anti-commutator of `A` and `B` If ``B != None``, return the anti-commutator :math:`\{A,B\}`, otherwise return the super-operator :math:`\{A,\cdot\}`. The super-operator :math:`\{A,\cdot\}` maps any other operator ``B`` to the anti-commutator :math:`\{A, B\} = A B + B A`. Args: A: The first operator to form all anti-commutators of. B: The second operator to form the anti-commutator of, or None. Returns: SuperOperator: The linear superoperator :math:`\{A,\cdot\}` """ if B: return A * B + B * A return SPre(A) + SPost(A) def lindblad(C): """Return the super-operator Lindblad term of the Lindblad operator `C` Return ``SPre(C) * SPost(C.adjoint()) - (1/2) * anti_commutator(C.adjoint()*C)``. These are the super-operators :math:`\mathcal{D}[C]` that form the collapse terms of a Master-Equation. Applied to an operator :math:`X` they yield .. math:: \mathcal{D}[C] X = C X C^\dagger - {1\over 2} (C^\dagger C X + X C^\dagger C) Args: C (Operator): The associated collapse operator Returns: SuperOperator: The Lindblad collapse generator. """ if is_scalar(C): return ZeroSuperOperator return ( SPre(C) * SPost(C.adjoint()) - (sympyOne/2) * anti_commutator(C.adjoint() * C)) def liouvillian(H, Ls=None): r"""Return the Liouvillian super-operator associated with `H` and `Ls` The Liouvillian :math:`\mathcal{L}` generates the Markovian dynamics of a system via the master equation: .. math:: \dot{\rho} = \mathcal{L}\rho = -i[H,\rho] + \sum_{j=1}^n \mathcal{D}[L_j] \rho Args: H (Operator): The associated Hamilton operator Ls (sequence or Matrix): A sequence of Lindblad operators. Returns: SuperOperator: The Liouvillian super-operator. """ if Ls is None: Ls = [] elif isinstance(Ls, Matrix): Ls = Ls.matrix.ravel().tolist() summands = [-I * commutator(H), ] summands.extend([lindblad(L) for L in Ls]) return SuperOperatorPlus.create(*summands) ############################################################################### # Auxiliary routines ############################################################################### def liouvillian_normal_form(L, symbolic=False): r"""Return a Hamilton operator ``H`` and a minimal list of collapse operators ``Ls`` that generate the Liouvillian ``L``. A Liouvillian defined by a hermitian Hamilton operator :math:`H` and a vector of collapse operators :math:`\mathbf{L} = (L_1, L_2, \dots L_n)^T` is invariant under the following two operations: .. math:: \left(H, \mathbf{L}\right) & \mapsto \left(H + {1\over 2i}\left(\mathbf{w}^\dagger \mathbf{L} - \mathbf{L}^\dagger \mathbf{w}\right), \mathbf{L} + \mathbf{w} \right) \\ \left(H, \mathbf{L}\right) & \mapsto \left(H, \mathbf{U}\mathbf{L}\right)\\ where :math:`\mathbf{w}` is just a vector of complex numbers and :math:`\mathbf{U}` is a complex unitary matrix. It turns out that for quantum optical circuit models the set of collapse operators is often linearly dependent. This routine tries to find a representation of the Liouvillian in terms of a Hamilton operator ``H`` with as few non-zero collapse operators ``Ls`` as possible. 
Consider the following example, which results from a two-port linear cavity with a coherent input into the first port: >>> kappa_1, kappa_2 = sympy.symbols('kappa_1, kappa_2', positive=True) >>> Delta = sympy.symbols('Delta', real=True) >>> alpha = sympy.symbols('alpha') >>> H = (Delta * Create(hs=1) * Destroy(hs=1) + ... (sqrt(kappa_1) / (2 * I)) * ... (alpha * Create(hs=1) - alpha.conjugate() * Destroy(hs=1))) >>> Ls = [sqrt(kappa_1) * Destroy(hs=1) + alpha, ... sqrt(kappa_2) * Destroy(hs=1)] >>> LL = liouvillian(H, Ls) >>> Hnf, Lsnf = liouvillian_normal_form(LL) >>> print(ascii(Hnf)) -I*alpha*sqrt(kappa_1) * a^(1)H + I*sqrt(kappa_1)*conjugate(alpha) * a^(1) + Delta * a^(1)H * a^(1) >>> len(Lsnf) 1 >>> print(ascii(Lsnf[0])) sqrt(kappa_1 + kappa_2) * a^(1) In terms of the ensemble dynamics this final system is equivalent. Note that this function will only work for proper Liouvillians. Args: L (SuperOperator): The Liouvillian Returns: tuple: ``(H, Ls)`` Raises: .BadLiouvillianError """ L = L.expand() if isinstance(L, SuperOperatorPlus): spres = [] sposts = [] collapse_form = defaultdict(lambda: defaultdict(int)) for s in L.operands: if isinstance(s, ScalarTimesSuperOperator): coeff, term = s.operands else: coeff, term = One, s if isinstance(term, SPre): spres.append(coeff * term.operands[0]) elif isinstance(term, SPost): sposts.append(coeff * term.operands[0]) else: if (not isinstance(term, SuperOperatorTimes) or len(term.operands) != 2 or not (isinstance(term.operands[0], SPre) and isinstance(term.operands[1], SPost))): raise BadLiouvillianError( "All terms of the Liouvillian need to be of form " "SPre(X), SPost(X) or SPre(X)*SPost(X): This term " "is in violation {!s}".format(term)) spreL, spostL = term.operands Li, Ljd = spreL.operands[0], spostL.operands[0] try: complex(coeff) except (ValueError, TypeError): symbolic = True coeff = coeff.simplify_scalar() collapse_form[Li][Ljd] = coeff basis = sorted(collapse_form.keys()) warn_msg = ("Warning: the Liouvillian is probably malformed: " "The coefficients of SPre({!s})*SPost({!s}) and " "SPre({!s})*SPost({!s}) should be complex conjugates " "of each other") for ii, Li in enumerate(basis): for Lj in basis[ii:]: cij = collapse_form[Li][Lj.adjoint()] cji = collapse_form[Lj][Li.adjoint()] if cij != 0 or cji != 0: diff = (cij.conjugate() - cji) try: diff = complex(diff) if abs(diff) > 1e-6: print(warn_msg.format(Li, Lj.adjoint(), Lj, Li.adjoint())) except ValueError: symbolic = True if diff.simplify(): print("Warning: the Liouvillian may be malformed, " "convert to numerical representation") final_Lis = [] if symbolic: if len(basis) == 1: l1 = basis[0] kappa1 = collapse_form[l1][l1.adjoint()] final_Lis = [sqrt(kappa1) * l1] sdiff = (l1.adjoint() * l1 * kappa1 / 2) spres.append(sdiff) sposts.append(sdiff) # elif len(basis) == 2: # l1, l2 = basis # kappa_1 = collapse_form[l1][l1.adjoint()] # kappa_2 = collapse_form[l2][l2.adjoint()] # kappa_12 = collapse_form[l1][l2.adjoint()] # kappa_21 = collapse_form[l2][l1.adjoint()] ## assert (kappa_12.conjugate() - kappa_21) == 0 else: M = SympyMatrix(len(basis), len(basis), lambda i, j: collapse_form[basis[i]][basis[j].adjoint()]) # First check if M is already diagonal (sympy does not handle # this well, for some reason) diag = True for i in range(len(basis)): for j in range(i): if M[i, j].apply_rules() != 0 or M[j, i].apply_rules() != 0: diag = False break if not diag: break if diag: for bj in basis: final_Lis.append( bj * sqrt(collapse_form[bj][bj.adjoint()])) sdiff = (bj.adjoint() * bj * 
collapse_form[bj][bj.adjoint()]/2) spres.append(sdiff) sposts.append(sdiff) # Try sympy algo else: try: data = M.eigenvects() for evalue, multiplicity, ebasis in data: if not evalue: continue for b in ebasis: new_L = (sqrt(evalue) * sum(cj[0] * Lj for (cj, Lj) in zip(b.tolist(), basis))).expand() final_Lis.append(new_L) sdiff = (new_L.adjoint() * new_L / 2).expand() spres.append(sdiff) sposts.append(sdiff) except NotImplementedError: raise CannotSymbolicallyDiagonalize(( "The matrix {} is too hard to diagonalize " "symbolically. Please try converting to fully " "numerical representation.").format(M)) else: M = np_array([[complex(collapse_form[Li][Lj.adjoint()]) for Lj in basis] for Li in basis]) vals, vecs = eigh(M) for sv, vec in zip(np_sqrt(vals), vecs.transpose()): new_L = sum((sv * ci) * Li for (ci, Li) in zip(vec, basis)) final_Lis.append(new_L) sdiff = (.5 * new_L.adjoint() * new_L).expand() spres.append(sdiff) sposts.append(sdiff) miHspre = sum(spres) iHspost = sum(sposts) if (((miHspre + iHspost) is not ZeroOperator) or ((miHspre.adjoint() + miHspre) is not ZeroOperator)): print("Warning, potentially malformed Liouvillian {!s}".format(L)) final_H = (I * miHspre).expand() return final_H, final_Lis else: if L is ZeroSuperOperator: return ZeroOperator, [] raise BadLiouvillianError(str(L)) SuperOperator._zero = ZeroSuperOperator SuperOperator._one = IdentitySuperOperator SuperOperator._base_cls = SuperOperator SuperOperator._scalar_times_expr_cls = ScalarTimesSuperOperator SuperOperator._plus_cls = SuperOperatorPlus SuperOperator._times_cls = SuperOperatorTimes SuperOperator._adjoint_cls = SuperAdjoint SuperOperator._indexed_sum_cls = None # TODO SuperOperator._derivative_cls = SuperOperatorDerivative
mit
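A hedged usage sketch for the constructors defined above. It assumes a QNET installation in which `Create` and `Destroy` live in `qnet.algebra.library.fock_operators` (the test module earlier in this dump imports `Phase` and `Displace` from there); it is an unverified sketch, not a doctest from the package:

import sympy
from qnet.algebra.library.fock_operators import Create, Destroy
from qnet.algebra.core.super_operator_algebra import (
    SPre, SPost, commutator, lindblad, liouvillian)

kappa = sympy.symbols('kappa', positive=True)
a = Destroy(hs=1)

sop = commutator(a)                             # with B=None: the superoperator SPre(a) - SPost(a)
dissipator = lindblad(sympy.sqrt(kappa) * a)    # collapse term D[sqrt(kappa) * a]
H = Create(hs=1) * Destroy(hs=1)                # number operator as Hamiltonian
L = liouvillian(H, Ls=[sympy.sqrt(kappa) * a])  # -i[H, .] plus the dissipator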
kevinkahn/softconsole
keys/keymodules/onoff.py
1
6387
import debug import logsupport from screens import screen import screens.supportscreens as supportscreens from utils import utilities, displayupdate from keys.keyspecs import KeyTypes from keys.keyutils import _resolvekeyname, BrightnessPossible from logsupport import ConsoleWarning from keyspecs.toucharea import ManualKeyDesc import functools class OnOffKey(ManualKeyDesc): def __init__(self, thisscreen, keysection, kn, keytype='ONOFF'): keyname, self.Hub = _resolvekeyname(kn, thisscreen.DefaultHubObj) # self.ControlObj = None # object on which to make operation calls self.DisplayObj = None # object whose state is reflected in key self.oldval = '####' debug.debugPrint('Screen', " New ", keytype, " Key Desc ", keyname) self.statebasedkey = True ManualKeyDesc.__init__(self, thisscreen, keysection, keyname) if keyname == '*Action*': keyname = self.NodeName # special case for alert screen action keys that always have same name self.ControlObj, self.DisplayObj = self.Hub.GetNode(keyname, self.SceneProxy) if self.ControlObjUndefined(): debug.debugPrint('Screen', "Screen", keyname, "unbound") logsupport.Logs.Log('Key Binding missing: ' + self.name, severity=ConsoleWarning) if hasattr(self.ControlObj, 'SendOnPct'): self.AllowSlider = True if self.Verify: self.VerifyScreen = supportscreens.VerifyScreen(self, self.GoMsg, self.NoGoMsg, self.KeyPressAction, thisscreen, self.KeyColorOff, thisscreen.BackgroundColor, thisscreen.CharColor, self.State, thisscreen.HubInterestList) self.Proc = self.VerifyScreen.Invoke else: self.Proc = self.KeyPressAction self.ProcDblTap = self.KeyPressActionDbl if self.AllowSlider: screen.AddUndefaultedParams(self, keysection, SlideOrientation=0) self.SliderScreen = supportscreens.SliderScreen(self, self.KeyCharColorOn, self.KeyColor, self.ControlObj.GetBrightness, self.UpdateBrightness, orientation=self.SlideOrientation) if hasattr(self.ControlObj, 'IdleSend'): self.SliderScreen.RequestIdles(self.ControlObj.IdleSend, 2) self.ProcLong = self.SliderScreen.Invoke else: self.ProcLong = self.IgnoreLong if keytype == 'ONOFF': self.KeyAction = 'OnOff' elif keytype == 'ON': self.KeyAction = 'On' else: self.KeyAction = 'Off' utilities.register_example("OnOffKey", self) def ConnectandGetNameOverride(self, keyname, keysection): screen.AddUndefaultedParams(self, keysection, SceneProxy='', NodeName='') if keyname == '*Action*': keyname = self.NodeName # special case for alert screen action keys that always have same name self.ControlObj, self.DisplayObj = self.Hub.GetNode(keyname, self.SceneProxy) try: if self.ControlObj.FriendlyName != '': return [self.ControlObj.FriendlyName] except AttributeError: # noinspection PyAttributeOutsideInit return [self.name] def HandleNodeEvent(self, evnt): if not isinstance(evnt.value, int): try: if evnt.node.split('.')[0] == 'scene': evnt.value = 1 else: logsupport.Logs.Log( "Node event to key {} on screen {} with non integer state: {}".format(self.name, self.Screen.name, evnt), severity=ConsoleWarning) evnt.value = 1 except Exception as E: logsupport.Logs.Log( 'Exception handling node event key {} on screen {} event {} exception {}'.format(self.name, self.Screen.name, evnt, E), severity=ConsoleWarning) evnt.value = 1 oldunknown = self.UnknownState self.State = not (evnt.value == 0) # K is off (false) only if state is 0 self.UnknownState = True if evnt.value == -1 else False # if self.UnknownState: # add node to unknowns list for hub # self.ControlObj.Hub.AddToUnknowns(self.ControlObj,evnt) # elif oldunknown and not self.UnknownState: # 
self.ControlObj.Hub.DeleteFromUnknowns(self.ControlObj,evnt) self.PaintKey() displayupdate.updatedisplay() def FinishKey(self, center, size, firstfont=0, shrink=True): super(OnOffKey, self).FinishKey(center, size, firstfont, shrink) if self.DisplayObj is not None: self.Screen.AddToHubInterestList(self.Hub, self.DisplayObj.address, self) def InitDisplay(self): debug.debugPrint("Screen", "OnOffKey Key.InitDisplay ", self.Screen.name, self.name) state = self.Hub.GetCurrentStatus(self.DisplayObj) if state is None: logsupport.Logs.Log("No state available for key: " + self.name + ' on screen: ' + self.Screen.name) state = -1 self.State = False else: self.State = not (state == 0) # K is off (false) only if state is 0 self.UnknownState = True if state == -1 else False super().InitDisplay() def KeyPressAction(self): if self.KeyAction == "OnOff": self.State = not self.State elif self.KeyAction == "On": self.State = True elif self.KeyAction == "Off": self.State = False if not self.ControlObjUndefined(): poststate = self.ControlObj.SendOnOffCommand(self.State) if poststate is not None: self.State = poststate self.ScheduleBlinkKey(self.Blink) else: logsupport.Logs.Log("Screen: " + self.Screen.name + " press unbound key: " + self.name, severity=ConsoleWarning) self.ScheduleBlinkKey(20) def KeyPressActionDbl(self): if self.KeyAction == "OnOff": self.State = not self.State elif self.KeyAction == "On": self.State = True elif self.KeyAction == "Off": self.State = False if not self.ControlObjUndefined(): self.ControlObj.SendOnOffFastCommand(self.State) self.ScheduleBlinkKey(self.Blink) else: logsupport.Logs.Log("Screen: " + self.Screen.name + " press unbound key: " + self.name, severity=ConsoleWarning) self.ScheduleBlinkKey(20) def IgnoreLong(self): logsupport.Logs.Log('Ignore long press for screen {}, key {}'.format(self.Screen.name, self.name)) self.ScheduleBlinkKey(5) def ProcLong(self): self.State = True self.SliderScreen.Invoke() def UpdateBrightness(self, brtpct, final=False): # print('Update brt {}'.format(brtpct)) self.ControlObj.SendOnPct(brtpct, final=final) BrightnessPossible.append(OnOffKey) KeyTypes['ONOFF'] = functools.partial(OnOffKey, keytype='ONOFF') KeyTypes['ON'] = functools.partial(OnOffKey, keytype='ON') KeyTypes['OFF'] = functools.partial(OnOffKey, keytype='OFF')
apache-2.0
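The module above closes by registering one class under several key types via `functools.partial`. That registry pattern in isolation, with an illustrative `Key` class standing in for the console's `ManualKeyDesc`:

import functools

class Key:
    def __init__(self, name, keytype='ONOFF'):
        self.name = name
        self.keytype = keytype

# One constructor, three registered variants differing only in a bound kwarg.
KeyTypes = {
    'ONOFF': functools.partial(Key, keytype='ONOFF'),
    'ON': functools.partial(Key, keytype='ON'),
    'OFF': functools.partial(Key, keytype='OFF'),
}

key = KeyTypes['ON']('porch-light')
assert key.keytype == 'ON'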
mrquim/repository.mrquim
script.module.exodus/lib/resources/lib/modules/dom_parser.py
35
5326
""" Based on Parsedom for XBMC plugins Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re from collections import namedtuple DomMatch = namedtuple('DOMMatch', ['attrs', 'content']) re_type = type(re.compile('')) def __get_dom_content(html, name, match): if match.endswith('/>'): return '' # override tag name with tag from match if possible tag = re.match('<([^\s/>]+)', match) if tag: name = tag.group(1) start_str = '<%s' % name end_str = "</%s" % name # start/end tags without matching case cause issues start = html.find(match) end = html.find(end_str, start) pos = html.find(start_str, start + 1) while pos < end and pos != -1: # Ignore too early </endstr> return tend = html.find(end_str, end + len(end_str)) if tend != -1: end = tend pos = html.find(start_str, pos + 1) if start == -1 and end == -1: result = '' elif start > -1 and end > -1: result = html[start + len(match):end] elif end > -1: result = html[:end] elif start > -1: result = html[start + len(match):] else: result = '' return result def __get_dom_elements(item, name, attrs): if not attrs: pattern = '(<%s(?:\s[^>]*>|/?>))' % name this_list = re.findall(pattern, item, re.M | re.S | re.I) else: last_list = None for key, value in attrs.iteritems(): value_is_regex = isinstance(value, re_type) value_is_str = isinstance(value, basestring) pattern = '''(<{tag}[^>]*\s{key}=(?P<delim>['"])(.*?)(?P=delim)[^>]*>)'''.format(tag=name, key=key) re_list = re.findall(pattern, item, re.M | re.S | re.I) if value_is_regex: this_list = [r[0] for r in re_list if re.match(value, r[2])] else: temp_value = [value] if value_is_str else value this_list = [r[0] for r in re_list if set(temp_value) <= set(r[2].split(' '))] if not this_list: has_space = (value_is_regex and ' ' in value.pattern) or (value_is_str and ' ' in value) if not has_space: pattern = '''(<{tag}[^>]*\s{key}=((?:[^\s>]|/>)*)[^>]*>)'''.format(tag=name, key=key) re_list = re.findall(pattern, item, re.M | re.S | re.I) if value_is_regex: this_list = [r[0] for r in re_list if re.match(value, r[1])] else: this_list = [r[0] for r in re_list if value == r[1]] if last_list is None: last_list = this_list else: last_list = [item for item in this_list if item in last_list] this_list = last_list return this_list def __get_attribs(element): attribs = {} for match in re.finditer('''\s+(?P<key>[^=]+)=\s*(?:(?P<delim>["'])(?P<value1>.*?)(?P=delim)|(?P<value2>[^"'][^>\s]*))''', element): match = match.groupdict() value1 = match.get('value1') value2 = match.get('value2') value = value1 if value1 is not None else value2 if value is None: continue attribs[match['key'].lower().strip()] = value return attribs def parse_dom(html, name='', attrs=None, req=False, exclude_comments=False): if attrs is None: attrs = {} name = name.strip() if isinstance(html, unicode) or isinstance(html, DomMatch): html = [html] elif isinstance(html, str): try: html = [html.decode("utf-8")] # Replace with chardet 
thingy except: try: html = [html.decode("utf-8", "replace")] except: html = [html] elif not isinstance(html, list): return '' if not name: return '' if not isinstance(attrs, dict): return '' if req: if not isinstance(req, list): req = [req] req = set([key.lower() for key in req]) all_results = [] for item in html: if isinstance(item, DomMatch): item = item.content if exclude_comments: item = re.sub(re.compile('<!--.*?-->', re.DOTALL), '', item) results = [] for element in __get_dom_elements(item, name, attrs): attribs = __get_attribs(element) if req and not req <= set(attribs.keys()): continue temp = __get_dom_content(item, name, element).strip() results.append(DomMatch(attribs, temp)) item = item[item.find(temp, item.find(element)):] all_results += results return all_results
gpl-2.0
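A hedged usage sketch for `parse_dom` above. The module is Python 2 code (`unicode`, `basestring`, `iteritems`), so the sketch uses Python 2 syntax; the import path is shortened and the HTML snippet is made up:

from dom_parser import parse_dom

html = u'<div class="item"><a href="/x">One</a></div>' \
       u'<div class="item"><a href="/y">Two</a></div>'

for div in parse_dom(html, 'div', attrs={'class': 'item'}):
    # each result is a DOMMatch namedtuple with .attrs and .content
    for link in parse_dom(div, 'a', req='href'):
        print link.attrs['href'], link.content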
matan1008/xmlstruct
tests/test_repeaters.py
1
2510
# coding=utf-8 from xmlstruct import Int, Range, GreedyRange, Array from xmlstruct.exceptions import RangeError import pytest def test_range_build(): xml_range = Range("test", 2, 4, Int("testint")) obj = [1, 2, 3] assert xml_range.build(obj) == r"<test><testint>1</testint><testint>2</testint><testint>3</testint></test>" def test_range_parse(): xml_range = Range("test", 2, 4, Int("testint")) obj = [1, 2, 3] assert xml_range.parse(r"<test><testint>1</testint><testint>2</testint><testint>3</testint></test>") == obj def test_range_build_less(): xml_range = Range("test", 2, 4, Int("testint")) obj = [1] with pytest.raises(RangeError): xml_range.build(obj) def test_range_parse_less(): xml_range = Range("test", 2, 4, Int("testint")) with pytest.raises(RangeError): xml_range.parse(r"<test><testint>1</testint></test>") def test_range_build_more(): xml_range = Range("test", 0, 2, Int("testint")) obj = [1, 2, 3] with pytest.raises(RangeError): xml_range.build(obj) def test_range_parse_more(): xml_range = Range("test", 0, 2, Int("testint")) with pytest.raises(RangeError): xml_range.parse(r"<test><testint>1</testint><testint>2</testint><testint>3</testint></test>") def test_range_build_zero(): xml_range = Range("test", 0, 2, Int("testint")) obj = [] assert xml_range.build(obj) in (r"<test></test>", "<test />") def test_range_parse_zero(): xml_range = Range("test", 0, 2, Int("testint")) obj = [] assert xml_range.parse(r"<test></test>") == obj assert xml_range.parse("<test />") == obj def test_greedy_range_build(): xml_greedy_range = GreedyRange("test", Int("testint")) obj = [1, 2, 3] assert xml_greedy_range.build(obj) == r"<test><testint>1</testint><testint>2</testint><testint>3</testint></test>" def test_greedy_range_parse(): xml_greedy_range = GreedyRange("test", Int("testint")) obj = [1, 2, 3] assert xml_greedy_range.parse(r"<test><testint>1</testint><testint>2</testint><testint>3</testint></test>") == obj def test_array_build(): xml_array = Array("test", 3, Int("testint")) obj = [1, 2, 3] assert xml_array.build(obj) == r"<test><testint>1</testint><testint>2</testint><testint>3</testint></test>" def test_array_parse(): xml_array = Array("test", 3, Int("testint")) obj = [1, 2, 3] assert xml_array.parse(r"<test><testint>1</testint><testint>2</testint><testint>3</testint></test>") == obj
mit
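The build/parse round trip these tests exercise, shown once in isolation (it mirrors the assertions above, so it only presumes the xmlstruct package itself):

from xmlstruct import Array, Int

xml_array = Array("test", 3, Int("testint"))
document = xml_array.build([1, 2, 3])
assert document == "<test><testint>1</testint><testint>2</testint><testint>3</testint></test>"
assert xml_array.parse(document) == [1, 2, 3]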
mrquim/repository.mrquim
repo/script.module.pycryptodome/lib/Crypto/Random/random.py
6
5349
# -*- coding: utf-8 -*- # # Random/random.py : Strong alternative for the standard 'random' module # # Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net> # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """A cryptographically strong version of Python's standard "random" module.""" __all__ = ['StrongRandom', 'getrandbits', 'randrange', 'randint', 'choice', 'shuffle', 'sample'] from Crypto import Random class StrongRandom(object): def __init__(self, rng=None, randfunc=None): if randfunc is None and rng is None: self._randfunc = None elif randfunc is not None and rng is None: self._randfunc = randfunc elif randfunc is None and rng is not None: self._randfunc = rng.read else: raise ValueError("Cannot specify both 'rng' and 'randfunc'") def getrandbits(self, k): """Return a python long integer with k random bits.""" if self._randfunc is None: self._randfunc = Random.new().read mask = (1 << k) - 1 return mask & bytes_to_long(self._randfunc(ceil_div(k, 8))) def randrange(self, *args): """randrange([start,] stop[, step]): Return a randomly-selected element from range(start, stop, step).""" if len(args) == 3: (start, stop, step) = args elif len(args) == 2: (start, stop) = args step = 1 elif len(args) == 1: (stop,) = args start = 0 step = 1 else: raise TypeError("randrange expected at most 3 arguments, got %d" % (len(args),)) if (not isinstance(start, (int, long)) or not isinstance(stop, (int, long)) or not isinstance(step, (int, long))): raise TypeError("randrange requires integer arguments") if step == 0: raise ValueError("randrange step argument must not be zero") num_choices = ceil_div(stop - start, step) if num_choices < 0: num_choices = 0 if num_choices < 1: raise ValueError("empty range for randrange(%r, %r, %r)" % (start, stop, step)) # Pick a random number in the range of possible numbers r = num_choices while r >= num_choices: r = self.getrandbits(size(num_choices)) return start + (step * r) def randint(self, a, b): """Return a random integer N such that a <= N <= b.""" if not isinstance(a, (int, long)) or not isinstance(b, (int, long)): raise TypeError("randint requires integer arguments") N = self.randrange(a, b+1) assert a <= N <= b return N def choice(self, seq): """Return a random element from a (non-empty) sequence. If the seqence is empty, raises IndexError. """ if len(seq) == 0: raise IndexError("empty sequence") return seq[self.randrange(len(seq))] def shuffle(self, x): """Shuffle the sequence in place.""" # Fisher-Yates shuffle. 
O(n) # See http://en.wikipedia.org/wiki/Fisher-Yates_shuffle # Working backwards from the end of the array, we choose a random item # from the remaining items until all items have been chosen. for i in xrange(len(x)-1, 0, -1): # iterate from len(x)-1 downto 1 j = self.randrange(0, i+1) # choose random j such that 0 <= j <= i x[i], x[j] = x[j], x[i] # exchange x[i] and x[j] def sample(self, population, k): """Return a k-length list of unique elements chosen from the population sequence.""" num_choices = len(population) if k > num_choices: raise ValueError("sample larger than population") retval = [] selected = {} # we emulate a set using a dict here for i in xrange(k): r = None while r is None or selected.has_key(r): r = self.randrange(num_choices) retval.append(population[r]) selected[r] = 1 return retval _r = StrongRandom() getrandbits = _r.getrandbits randrange = _r.randrange randint = _r.randint choice = _r.choice shuffle = _r.shuffle sample = _r.sample # These are at the bottom to avoid problems with recursive imports from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes, size # vim:set ts=4 sw=4 sts=4 expandtab:
gpl-2.0
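Typical use of the module above: the same call shapes as the standard `random` module, backed by `Crypto.Random`'s strong RNG. As shipped here the code is Python 2 (`xrange`, `has_key`, `long`), so the sketch assumes a Python 2 interpreter:

from Crypto.Random import random

print random.getrandbits(128)     # 128 cryptographically strong bits
print random.randint(1, 6)        # inclusive bounds, like the stdlib
print random.choice(['a', 'b', 'c'])
deck = range(52)
random.shuffle(deck)              # Fisher-Yates, in place
print random.sample(deck, 5)      # 5 unique elements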
jiss-software/jiss-rendering-service
core/BaseHandler.py
1
2488
import tornado.web import logging from JsonEncoders import JsonEncoders from json import dumps class BaseHandler(tornado.web.RequestHandler): logger = logging.getLogger(__name__) def __init__(self, application, request, **kwargs): super(BaseHandler, self).__init__(application, request, **kwargs) def get_allowed_methods(self): return [ "GET", "PUT", "POST", "DELETE", "OPTIONS" ] def get_allowed_headers(self): return [ "Origin", "X-Requested-With", "Content-Type", "Accept", "Cache-Control", "Referer", "User-Agent", "Accept-Encoding", "Accept-Language", "X-Jiss-Session", "X-Jiss-Context", "X-Jiss-Language", "X-Jiss-Calculation-Type", "X-Jiss-Issuer", ] def get_allowed_type(self): return "application/json" def set_default_headers(self): common_headers = { "Allow": ", ".join(self.get_allowed_methods()), "Accept": self.get_allowed_type(), "Accept-Charset": "utf-8", "Access-Control-Allow-Origin": "*", "Access-Control-Max-Age": "1728000", "Access-Control-Allow-Methods": ", ".join(self.get_allowed_methods()), "Access-Control-Allow-Headers": ", ".join(self.get_allowed_headers()) } for key, value in common_headers.iteritems(): self.add_header(key, value) def response_error(self, text, code=500): self.set_header('Content-Type', 'application/json') self.set_status(code) self.write(self._dumps({'error': text})) def response_json(self, data, code=200): response = self._dumps(data) self.set_header('Content-Type', 'application/json') self.set_status(code) self.write(response) self.finish() self.logger.info('Response: %s' % response) def response_file(self, path, type='application/x-binary'): self.set_header('Content-Type', type) with open(path, 'rb') as f: data = f.read() self.write(data) self.finish() self.logger.info('Response file: %s' % path) def options(self, **ignore): self.set_status(200) self.finish() def _dumps(self, data): return dumps(data, cls=JsonEncoders)
apache-2.0
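A sketch of wiring the handler above into a Tornado application; `PingHandler`, the `/ping` route, and the port are illustrative, and it assumes the service's `core` package is importable (the module itself is Python 2 era, note `iteritems`):

import tornado.ioloop
import tornado.web

from core.BaseHandler import BaseHandler

class PingHandler(BaseHandler):
    def get(self):
        # response_json sets the content type, writes, logs and finishes
        self.response_json({'status': 'ok'})

if __name__ == '__main__':
    app = tornado.web.Application([(r'/ping', PingHandler)])
    app.listen(8888)
    tornado.ioloop.IOLoop.instance().start()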
savoirfairelinux/OpenUpgrade
addons/im_livechat/im_livechat.py
38
9656
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import json import random import jinja2 import openerp import openerp.addons.im.im as im from openerp.osv import osv, fields from openerp import tools from openerp import http from openerp.http import request env = jinja2.Environment( loader=jinja2.PackageLoader('openerp.addons.im_livechat', "."), autoescape=False ) env.filters["json"] = json.dumps class LiveChatController(http.Controller): def _auth(self, db): reg = openerp.modules.registry.RegistryManager.get(db) uid = request.uid return reg, uid @http.route('/im_livechat/loader', auth="public") def loader(self, **kwargs): p = json.loads(kwargs["p"]) db = p["db"] channel = p["channel"] user_name = p.get("user_name", None) reg, uid = self._auth(db) with reg.cursor() as cr: info = reg.get('im_livechat.channel').get_info_for_chat_src(cr, uid, channel) info["db"] = db info["channel"] = channel info["userName"] = user_name return request.make_response(env.get_template("loader.js").render(info), headers=[('Content-Type', "text/javascript")]) @http.route('/im_livechat/web_page', auth="public") def web_page(self, **kwargs): p = json.loads(kwargs["p"]) db = p["db"] channel = p["channel"] reg, uid = self._auth(db) with reg.cursor() as cr: script = reg.get('im_livechat.channel').read(cr, uid, channel, ["script"])["script"] info = reg.get('im_livechat.channel').get_info_for_chat_src(cr, uid, channel) info["script"] = script return request.make_response(env.get_template("web_page.html").render(info), headers=[('Content-Type', "text/html")]) @http.route('/im_livechat/available', type='json', auth="public") def available(self, db, channel): reg, uid = self._auth(db) with reg.cursor() as cr: return len(reg.get('im_livechat.channel').get_available_users(cr, uid, channel)) > 0 class im_livechat_channel(osv.osv): _name = 'im_livechat.channel' def _get_default_image(self, cr, uid, context=None): image_path = openerp.modules.get_module_resource('im_livechat', 'static/src/img', 'default.png') return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64')) def _get_image(self, cr, uid, ids, name, args, context=None): result = dict.fromkeys(ids, False) for obj in self.browse(cr, uid, ids, context=context): result[obj.id] = tools.image_get_resized_images(obj.image) return result def _set_image(self, cr, uid, id, name, value, args, context=None): return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context) def _are_you_inside(self, cr, uid, ids, name, arg, context=None): res = {} for record in self.browse(cr, uid, ids, context=context): res[record.id] = False for user in 
record.user_ids: if user.id == uid: res[record.id] = True break return res def _script(self, cr, uid, ids, name, arg, context=None): res = {} for record in self.browse(cr, uid, ids, context=context): res[record.id] = env.get_template("include.html").render({ "url": self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url'), "parameters": {"db":cr.dbname, "channel":record.id}, }) return res def _web_page(self, cr, uid, ids, name, arg, context=None): res = {} for record in self.browse(cr, uid, ids, context=context): res[record.id] = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url') + \ "/im_livechat/web_page?p=" + json.dumps({"db":cr.dbname, "channel":record.id}) return res _columns = { 'name': fields.char(string="Channel Name", size=200, required=True), 'user_ids': fields.many2many('res.users', 'im_livechat_channel_im_user', 'channel_id', 'user_id', string="Users"), 'are_you_inside': fields.function(_are_you_inside, type='boolean', string='Are you inside the matrix?', store=False), 'script': fields.function(_script, type='text', string='Script', store=False), 'web_page': fields.function(_web_page, type='url', string='Web Page', store=False, size="200"), 'button_text': fields.char(string="Text of the Button", size=200), 'input_placeholder': fields.char(string="Chat Input Placeholder", size=200), 'default_message': fields.char(string="Welcome Message", size=200, help="This is an automated 'welcome' message that your visitor will see when they initiate a new chat session."), # image: all image fields are base64 encoded and PIL-supported 'image': fields.binary("Photo", help="This field holds the image used as photo for the group, limited to 1024x1024px."), 'image_medium': fields.function(_get_image, fnct_inv=_set_image, string="Medium-sized photo", type="binary", multi="_get_image", store={ 'im_livechat.channel': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10), }, help="Medium-sized photo of the group. It is automatically "\ "resized as a 128x128px image, with aspect ratio preserved. "\ "Use this field in form views or some kanban views."), 'image_small': fields.function(_get_image, fnct_inv=_set_image, string="Small-sized photo", type="binary", multi="_get_image", store={ 'im_livechat.channel': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10), }, help="Small-sized photo of the group. It is automatically "\ "resized as a 64x64px image, with aspect ratio preserved. "\ "Use this field anywhere a small image is required."), } def _default_user_ids(self, cr, uid, context=None): return [(6, 0, [uid])] _defaults = { 'button_text': "Have a Question? 
Chat with us.", 'input_placeholder': "How may I help you?", 'default_message': '', 'user_ids': _default_user_ids, 'image': _get_default_image, } def get_available_users(self, cr, uid, channel_id, context=None): channel = self.browse(cr, openerp.SUPERUSER_ID, channel_id, context=context) im_user_ids = self.pool.get("im.user").search(cr, uid, [["user_id", "in", [user.id for user in channel.user_ids]]], context=context) users = [] for iuid in im_user_ids: imuser = self.pool.get("im.user").browse(cr, uid, iuid, context=context) if imuser.im_status: users.append(imuser) return users def get_session(self, cr, uid, channel_id, uuid, context=None): self.pool.get("im.user").get_my_id(cr, uid, uuid, context=context) users = self.get_available_users(cr, openerp.SUPERUSER_ID, channel_id, context=context) if len(users) == 0: return False user_id = random.choice(users).id session = self.pool.get("im.session").session_get(cr, uid, [user_id], uuid, context=context) self.pool.get("im.session").write(cr, openerp.SUPERUSER_ID, session.get("id"), {'channel_id': channel_id}, context=context) return session.get("id") def test_channel(self, cr, uid, channel, context=None): if not channel: return {} return { 'url': self.browse(cr, uid, channel[0], context=context or {}).web_page, 'type': 'ir.actions.act_url' } def get_info_for_chat_src(self, cr, uid, channel, context=None): url = self.pool.get('ir.config_parameter').get_param(cr, openerp.SUPERUSER_ID, 'web.base.url') chan = self.browse(cr, uid, channel, context=context) return { "url": url, 'buttonText': chan.button_text, 'inputPlaceholder': chan.input_placeholder, 'defaultMessage': chan.default_message, "channelName": chan.name, } def join(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'user_ids': [(4, uid)]}) return True def quit(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'user_ids': [(3, uid)]}) return True class im_session(osv.osv): _inherit = 'im.session' _columns = { 'channel_id': fields.many2one("im_livechat.channel", "Channel"), }
agpl-3.0
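The module above builds a jinja2 `Environment` with a custom `json` filter so templates can inline serialized values. The same pattern in isolation, with a `DictLoader` standing in for the addon's `PackageLoader` so the sketch runs anywhere jinja2 is installed:

import json
import jinja2

env = jinja2.Environment(
    loader=jinja2.DictLoader({'loader.js': 'var info = {{ info|json }};'}),
    autoescape=False,
)
env.filters['json'] = json.dumps

print(env.get_template('loader.js').render(info={'db': 'test', 'channel': 1}))
# -> var info = {"db": "test", "channel": 1};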
cfe-lab/MiCall
micall/tests/test_aln2counts.py
1
103374
from io import StringIO import unittest import yaml from micall.core.aln2counts import InsertionWriter, SeedAmino, \ ReportAmino, ConsensusBuilder, ReportNucleotide, SeedNucleotide from micall.tests.test_aln2counts_report import create_sequence_report, prepare_reads LANDMARKS_YAML = """\ - seed_pattern: R1-.* coordinates: R1-seed landmarks: # Extra 3 positions for stop codon to get dropped. - {name: R1, start: 1, end: 12, colour: steelblue} """ # noinspection DuplicatedCode class SequenceReportTest(unittest.TestCase): def setUp(self): self.maxDiff = None self.report = create_sequence_report() self.report_file = StringIO() self.detail_report_file = StringIO() def testEmptyAminoReport(self): expected_text = "" self.report.write_amino_counts(self.report_file) self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testEmptyNucReport(self): expected_text = "" self.report.write_nuc_counts(self.report_file) self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusFromTwoReads(self): """ The second read is out voted by the first one. CCC -> P GGG -> G """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTT R1-seed,15,0,1,0,CCCGGG """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence R1-seed,15,MAX,0,AAATTT R1-seed,15,0.100,0,MMMKKK """ self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusExactTie(self): """ There is an exact tie between sequences. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,5,0,AAATTT R1-seed,15,0,5,0,CCCGGG """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence R1-seed,15,MAX,0,MMMKKK R1-seed,15,0.100,0,MMMKKK """ self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusWithOffset(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,3,AAATTT R1-seed,15,0,1,7,TTGGG """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence R1-seed,15,MAX,3,AAATTTGGG R1-seed,15,0.100,3,AAATTTGGG """ self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusFromPartialContig(self): """ Contigs with the -partial suffix report consensus. 
""" # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ 1-R2-seed-partial,15,0,9,0,AAATTT """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence 1-R2-seed-partial,15,MAX,0,AAATTT 1-R2-seed-partial,15,0.100,0,AAATTT """ self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusLowQualitySections(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,3,NNNTTT R1-seed,15,0,1,7,TTNGG """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence R1-seed,15,MAX,6,TTTxGG R1-seed,15,0.100,6,TTTxGG """ self.report.consensus_min_coverage = 1 self.report.consensus_builder.consensus_min_coverage = 1 self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusLowQuality(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,3,NNNNNN R1-seed,15,0,1,7,NNNNN """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence """ self.report.consensus_min_coverage = 1 self.report.consensus_builder.consensus_min_coverage = 1 self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusLowCoverageInMiddle(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTTGGG R1-seed,15,0,1,0,AAAT R1-seed,15,0,1,6,GGG """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence R1-seed,15,MAX,0,AAATxxGGG R1-seed,15,0.100,0,AAATxxGGG """ self.report.consensus_min_coverage = 10 self.report.consensus_builder.consensus_min_coverage = 10 self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusLowCoverageAtStart(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTTGGG R1-seed,15,0,1,4,TTGGG """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence R1-seed,15,MAX,4,TTGGG R1-seed,15,0.100,4,TTGGG """ self.report.consensus_min_coverage = 10 self.report.consensus_builder.consensus_min_coverage = 10 self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusLowCoverageAtEnd(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTTGGG R1-seed,15,0,1,0,AAAT """) expected_text = """\ region,q-cutoff,consensus-percent-cutoff,offset,sequence R1-seed,15,MAX,0,AAAT R1-seed,15,0.100,0,AAAT """ self.report.consensus_min_coverage = 10 self.report.consensus_builder.consensus_min_coverage = 10 self.report.write_consensus_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusAllFromTwoReads(self): """ The second read is out voted by the first one. 
CCC -> P GGG -> G """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTT R1-seed,15,0,1,0,CCCGGG """) expected_text = """\ seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence R1-seed,,15,MAX,0,,AAATTT R1-seed,R1,15,MAX,0,0,AAATTT """ self.report.write_consensus_all_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus_all() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusAllExactTie(self): """ Exact ties still result in mixtures. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,5,0,AAATTT R1-seed,15,0,5,0,CCCGGG """) expected_text = """\ seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence R1-seed,,15,MAX,0,,MMMKKK R1-seed,R1,15,MAX,0,0,MMMKKK """ self.report.write_consensus_all_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus_all() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusAllWithOffset(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,3,AAATTT R1-seed,15,0,1,7,TTGGG """) expected_text = """\ seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence R1-seed,,15,MAX,3,,AAATTTGGG R1-seed,R1,15,MAX,3,0,AAATTTGGG """ self.report.write_consensus_all_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus_all() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusAllLowQualitySections(self): """Low-quality bases still get reported as x.""" # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,3,NNNTTT R1-seed,15,0,1,7,TTNGG """) expected_text = """\ seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence R1-seed,,15,MAX,6,,TTTxGG R1-seed,R1,15,MAX,6,3,TTTxGG """ self.report.consensus_min_coverage = 1 self.report.consensus_builder.consensus_min_coverage = 1 self.report.write_consensus_all_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus_all() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusAllLowQuality(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,3,NNNNNN R1-seed,15,0,1,7,NNNNN """) expected_text = """\ seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence """ self.report.consensus_min_coverage = 1 self.report.consensus_builder.consensus_min_coverage = 1 self.report.write_consensus_all_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus_all() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusAllLowCoverageInMiddle(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTTGGG R1-seed,15,0,1,0,AAAT R1-seed,15,0,1,6,GGG """) expected_text = """\ seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence R1-seed,,15,MAX,0,,AAATTTGGG R1-seed,R1,15,MAX,0,0,AAATTTGGG """ self.report.consensus_min_coverage = 10 self.report.consensus_builder.consensus_min_coverage = 10 self.report.write_consensus_all_header(self.report_file) self.report.read(aligned_reads) self.report.write_consensus_all() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testConsensusAllLowCoverageAtStart(self): # refname,qcut,rank,count,offset,seq 
        aligned_reads = prepare_reads("""\
R1-seed,15,0,9,0,AAATTTGGG
R1-seed,15,0,1,4,TTGGG
""")
        expected_text = """\
seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence
R1-seed,,15,MAX,0,,AAATTTGGG
R1-seed,R1,15,MAX,0,0,AAATTTGGG
"""
        self.report.consensus_min_coverage = 10
        self.report.consensus_builder.consensus_min_coverage = 10
        self.report.write_consensus_all_header(self.report_file)
        self.report.read(aligned_reads)
        self.report.write_consensus_all()
        self.assertMultiLineEqual(expected_text, self.report_file.getvalue())

    def testConsensusAllLowCoverageAtEnd(self):
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("""\
R1-seed,15,0,9,0,AAATTTGGG
R1-seed,15,0,1,0,AAAT
""")
        expected_text = """\
seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence
R1-seed,,15,MAX,0,,AAATTTGGG
R1-seed,R1,15,MAX,0,0,AAATTTGGG
"""
        self.report.consensus_min_coverage = 10
        self.report.consensus_builder.consensus_min_coverage = 10
        self.report.write_consensus_all_header(self.report_file)
        self.report.read(aligned_reads)
        self.report.write_consensus_all()
        self.assertMultiLineEqual(expected_text, self.report_file.getvalue())

    def testConsensusAllMapToMultipleRegions(self):
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("""\
R7-seed,15,0,9,0,AAATTTCAGACCCCACGAGAGCAT
R7-seed,15,0,1,0,AAATTGCAGACCCCACGAGAGCAT
""")
        expected_text = """\
seed,region,q-cutoff,consensus-percent-cutoff,seed-offset,region-offset,sequence
R7-seed,,15,MAX,0,,AAATTTCAGACCCCACGAGAGCAT
R7-seed,R7a,15,MAX,0,0,AAATTTCAG
R7-seed,R7b,15,MAX,15,0,CGAGAGCAT
"""
        self.report.consensus_min_coverage = 10
        self.report.consensus_builder.consensus_min_coverage = 10
        self.report.write_consensus_all_header(self.report_file)
        self.report.read(aligned_reads)
        self.report.write_consensus_all()
        self.assertMultiLineEqual(expected_text, self.report_file.getvalue())

    def testMultiplePrefixAminoReport(self):
        """ Assemble counts from three contigs to two references.

        Contig 1-R1 AAATTT -> KF
        Contig 2-R2 GGCCCG -> GP
        Contig 3-R1 TTTAGG -> FR

        Contig 1 and 3 should combine into R1 with KFR.
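        At position 2, the F counts from contigs 1 and 3 combine: 5 + 2 = 7.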
""" # refname,qcut,rank,count,offset,seq aligned_reads1 = prepare_reads("1-R1-seed,15,0,5,0,AAATTT") aligned_reads2 = prepare_reads("2-R2-seed,15,0,4,0,GGCCCG") aligned_reads3 = prepare_reads("3-R1-seed,15,0,2,0,TTTAGG") expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5 R1-seed,R1,15,,2,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7 R1-seed,R1,15,4,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,2 R2-seed,R2,15,1,3,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4 R2-seed,R2,15,4,4,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4 """ expected_detail_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage 1-R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5 1-R1-seed,R1,15,4,2,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5 2-R2-seed,R2,15,1,3,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4 2-R2-seed,R2,15,4,4,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4 3-R1-seed,R1,15,1,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2 3-R1-seed,R1,15,4,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,2 """ self.report.write_amino_header(self.report_file) self.report.write_amino_detail_header(self.detail_report_file) self.report.read(aligned_reads1) self.report.write_amino_detail_counts() self.report.combine_reports() self.report.read(aligned_reads2) self.report.write_amino_detail_counts() self.report.combine_reports() self.report.read(aligned_reads3) self.report.write_amino_detail_counts() self.report.combine_reports() self.report.write_amino_counts() self.assertMultiLineEqual(expected_detail_text, self.detail_report_file.getvalue()) self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) # noinspection DuplicatedCode def testMultiplePrefixPartialDeletionAminoReport(self): """ Assemble counts from multiple contigs. 

        Contig 1-R1 AAATTT -> KF
        Contig 2-R1 TT-AGG -> fR (partial deletion)
        Contig 3-R1 AAA---AGG -> K-R (full deletion)
        """
        # refname,qcut,rank,count,offset,seq
        aligned_reads1 = prepare_reads("1-R1-seed,15,0,5,0,AAATTT")
        aligned_reads2 = prepare_reads("2-R1-seed,15,0,2,0,TT-AGG")
        aligned_reads3 = prepare_reads("3-R1-seed,15,0,3,0,AAATTTAGG\n"
                                       "3-R1-seed,15,0,1,0,AAA---AGG")
        expected_text = """\
seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\
A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage
R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9
R1-seed,R1,15,,2,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,0,9
R1-seed,R1,15,,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,6
"""
        expected_detail_text = """\
seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\
A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage
1-R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5
1-R1-seed,R1,15,4,2,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5
2-R1-seed,R1,15,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0
2-R1-seed,R1,15,4,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,2
3-R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4
3-R1-seed,R1,15,4,2,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,4
3-R1-seed,R1,15,7,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,4
"""
        self.report.write_amino_header(self.report_file)
        self.report.write_amino_detail_header(self.detail_report_file)
        self.report.read(aligned_reads1)
        self.report.write_amino_detail_counts()
        self.report.combine_reports()
        self.report.read(aligned_reads2)
        self.report.write_amino_detail_counts()
        self.report.combine_reports()
        self.report.read(aligned_reads3)
        self.report.write_amino_detail_counts()
        self.report.combine_reports()
        self.report.write_amino_counts()
        self.assertEqual(expected_detail_text,
                         self.detail_report_file.getvalue())
        self.assertEqual(expected_text, self.report_file.getvalue())

    def testMultiplePrefixNucleotideReport(self):
        """ Assemble counts from three contigs to two references.

        Contig 1-R1 AAATTT -> KF
        Contig 2-R2 GGCCCG -> GP
        Contig 3-R1 TTTAGG -> FR

        Contig 1 and 3 should combine into R1 with KFR.
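        The combined T counts at positions 4 to 6 are 5 + 2 = 7.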
""" # refname,qcut,rank,count,offset,seq aligned_reads1 = prepare_reads("1-R1-seed,15,0,5,0,AAATTT") aligned_reads2 = prepare_reads("2-R2-seed,15,0,4,0,GGCCCG") aligned_reads3 = prepare_reads("3-R1-seed,15,0,2,0,TTTAGG") expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,1,5,0,0,0,0,0,0,0,0,5 R1-seed,R1,15,2,2,2,5,0,0,0,0,0,0,0,0,5 R1-seed,R1,15,3,3,3,5,0,0,0,0,0,0,0,0,5 R1-seed,R1,15,,4,4,0,0,0,7,0,0,0,0,0,7 R1-seed,R1,15,,5,5,0,0,0,7,0,0,0,0,0,7 R1-seed,R1,15,,6,6,0,0,0,7,0,0,0,0,0,7 R1-seed,R1,15,4,7,7,2,0,0,0,0,0,0,0,0,2 R1-seed,R1,15,5,8,8,0,0,2,0,0,0,0,0,0,2 R1-seed,R1,15,6,9,9,0,0,2,0,0,0,0,0,0,2 R2-seed,R2,15,1,7,7,0,0,4,0,0,0,0,0,0,4 R2-seed,R2,15,2,8,8,0,0,4,0,0,0,0,0,0,4 R2-seed,R2,15,3,9,9,0,4,0,0,0,0,0,0,0,4 R2-seed,R2,15,4,10,10,0,4,0,0,0,0,0,0,0,4 R2-seed,R2,15,5,11,11,0,4,0,0,0,0,0,0,0,4 R2-seed,R2,15,6,12,12,0,0,4,0,0,0,0,0,0,4 """ expected_detail_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage 1-R1-seed,R1,15,1,1,1,5,0,0,0,0,0,0,0,0,5 1-R1-seed,R1,15,2,2,2,5,0,0,0,0,0,0,0,0,5 1-R1-seed,R1,15,3,3,3,5,0,0,0,0,0,0,0,0,5 1-R1-seed,R1,15,4,4,4,0,0,0,5,0,0,0,0,0,5 1-R1-seed,R1,15,5,5,5,0,0,0,5,0,0,0,0,0,5 1-R1-seed,R1,15,6,6,6,0,0,0,5,0,0,0,0,0,5 2-R2-seed,R2,15,1,7,7,0,0,4,0,0,0,0,0,0,4 2-R2-seed,R2,15,2,8,8,0,0,4,0,0,0,0,0,0,4 2-R2-seed,R2,15,3,9,9,0,4,0,0,0,0,0,0,0,4 2-R2-seed,R2,15,4,10,10,0,4,0,0,0,0,0,0,0,4 2-R2-seed,R2,15,5,11,11,0,4,0,0,0,0,0,0,0,4 2-R2-seed,R2,15,6,12,12,0,0,4,0,0,0,0,0,0,4 3-R1-seed,R1,15,1,4,4,0,0,0,2,0,0,0,0,0,2 3-R1-seed,R1,15,2,5,5,0,0,0,2,0,0,0,0,0,2 3-R1-seed,R1,15,3,6,6,0,0,0,2,0,0,0,0,0,2 3-R1-seed,R1,15,4,7,7,2,0,0,0,0,0,0,0,0,2 3-R1-seed,R1,15,5,8,8,0,0,2,0,0,0,0,0,0,2 3-R1-seed,R1,15,6,9,9,0,0,2,0,0,0,0,0,0,2 """ self.report.write_nuc_header(self.report_file) self.report.write_nuc_detail_header(self.detail_report_file) self.report.read(aligned_reads1) self.report.write_nuc_detail_counts() self.report.combine_reports() self.report.read(aligned_reads2) self.report.write_nuc_detail_counts() self.report.combine_reports() self.report.read(aligned_reads3) self.report.write_nuc_detail_counts() self.report.combine_reports() self.report.write_nuc_counts() assert self.detail_report_file.getvalue() == expected_detail_text assert self.report_file.getvalue() == expected_text def testNucleotideDetailReportOnlyPartials(self): """ The only contig is a partial BLAST match, not reported. 
""" # refname,qcut,rank,count,offset,seq aligned_reads1 = prepare_reads("1-R1-seed-partial,15,0,5,0,AAATTT") aligned_reads2 = prepare_reads("2-R2-seed,15,0,4,0,GGCCCG") aligned_reads3 = prepare_reads("3-R1-seed,15,0,2,0,TTTAGG") expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R2-seed,R2,15,1,7,7,0,0,4,0,0,0,0,0,0,4 R2-seed,R2,15,2,8,8,0,0,4,0,0,0,0,0,0,4 R2-seed,R2,15,3,9,9,0,4,0,0,0,0,0,0,0,4 R2-seed,R2,15,4,10,10,0,4,0,0,0,0,0,0,0,4 R2-seed,R2,15,5,11,11,0,4,0,0,0,0,0,0,0,4 R2-seed,R2,15,6,12,12,0,0,4,0,0,0,0,0,0,4 R1-seed,R1,15,1,4,4,0,0,0,2,0,0,0,0,0,2 R1-seed,R1,15,2,5,5,0,0,0,2,0,0,0,0,0,2 R1-seed,R1,15,3,6,6,0,0,0,2,0,0,0,0,0,2 R1-seed,R1,15,4,7,7,2,0,0,0,0,0,0,0,0,2 R1-seed,R1,15,5,8,8,0,0,2,0,0,0,0,0,0,2 R1-seed,R1,15,6,9,9,0,0,2,0,0,0,0,0,0,2 """ expected_detail_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage 2-R2-seed,R2,15,1,7,7,0,0,4,0,0,0,0,0,0,4 2-R2-seed,R2,15,2,8,8,0,0,4,0,0,0,0,0,0,4 2-R2-seed,R2,15,3,9,9,0,4,0,0,0,0,0,0,0,4 2-R2-seed,R2,15,4,10,10,0,4,0,0,0,0,0,0,0,4 2-R2-seed,R2,15,5,11,11,0,4,0,0,0,0,0,0,0,4 2-R2-seed,R2,15,6,12,12,0,0,4,0,0,0,0,0,0,4 3-R1-seed,R1,15,1,4,4,0,0,0,2,0,0,0,0,0,2 3-R1-seed,R1,15,2,5,5,0,0,0,2,0,0,0,0,0,2 3-R1-seed,R1,15,3,6,6,0,0,0,2,0,0,0,0,0,2 3-R1-seed,R1,15,4,7,7,2,0,0,0,0,0,0,0,0,2 3-R1-seed,R1,15,5,8,8,0,0,2,0,0,0,0,0,0,2 3-R1-seed,R1,15,6,9,9,0,0,2,0,0,0,0,0,0,2 """ self.report.write_nuc_header(self.report_file) self.report.write_nuc_detail_header(self.detail_report_file) self.report.read(aligned_reads1) self.report.write_nuc_detail_counts() self.report.combine_reports() self.report.read(aligned_reads2) self.report.write_nuc_detail_counts() self.report.combine_reports() self.report.read(aligned_reads3) self.report.write_nuc_detail_counts() self.report.combine_reports() self.report.write_nuc_counts() assert self.detail_report_file.getvalue() == expected_detail_text assert self.report_file.getvalue() == expected_text def testAminoReportForPartialContig(self): """ Contigs with the -partial suffix shouldn't be reported. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ 1-R1-seed-partial,15,0,9,0,AAATTT """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage """ self.report.write_amino_header(self.report_file) self.report.read(aligned_reads) self.report.write_amino_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testSoftClippingNucleotideReport(self): """ Combine the soft clipping data with the read counts. 
""" # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,2,ATTTA """) clipping = StringIO("""\ refname,pos,count R1-seed,1,9 R1-seed,2,9 R1-seed,8,9 """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,,1,1,0,0,0,0,0,0,0,9,0,0 R1-seed,R1,15,,2,2,0,0,0,0,0,0,0,9,0,0 R1-seed,R1,15,3,3,3,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,4,4,4,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,5,5,5,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,6,6,6,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,7,7,7,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,,8,8,0,0,0,0,0,0,0,9,0,0 """ self.report.read_clipping(clipping) self.report.write_nuc_header(self.report_file) self.report.read(aligned_reads) self.report.write_nuc_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testSoftClippingAminoReport(self): """ Combine the soft clipping data with the read counts. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,2,ATTTA """) clipping = StringIO("""\ refname,pos,count R1-seed,1,9 R1-seed,2,9 R1-seed,8,9 """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0 R1-seed,R1,15,4,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,7,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0 """ self.report.read_clipping(clipping) self.report.write_amino_header(self.report_file) self.report.read(aligned_reads) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_amino_counts() assert self.report_file.getvalue() == expected_text def testSoftClippingAminoReportMoreOffset(self): """ Combine the soft clipping data with the read counts. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,5,TTTAGG """) clipping = StringIO("""\ refname,pos,count R1-seed,3,9 R1-seed,4,9 R1-seed,5,9 R1-seed,11,9 """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0 R1-seed,R1,15,6,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,9,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,9,0,9 """ self.report.read_clipping(clipping) self.report.write_amino_header(self.report_file) self.report.read(aligned_reads) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_amino_counts() assert self.report_file.getvalue() == expected_text def testMultiplePrefixSoftClippingAminoReport(self): """ Combine the soft clipping data with the read counts. """ """ Assemble counts from three contigs to two references. Contig 1-R1 AAATTT -> KF Contig 2-R2 GGCCCG -> GP Contig 3-R1 TTTAGG -> FR Contig 1 and 3 should combine into R1 with KFR. 
""" # refname,qcut,rank,count,offset,seq aligned_reads1 = prepare_reads("1-R1-seed,15,0,5,0,AAATTT") aligned_reads2 = prepare_reads("2-R2-seed,15,0,4,0,GGCCCG") aligned_reads3 = prepare_reads("3-R1-seed,15,0,2,0,TTTAGG") clipping = StringIO("""\ refname,pos,count 1-R1-seed,7,5 3-R1-seed,-1,2 """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,5 R1-seed,R1,15,,2,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7 R1-seed,R1,15,4,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,5,0,2 R2-seed,R2,15,1,3,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4 R2-seed,R2,15,4,4,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4 """ self.report.read_clipping(clipping) self.report.write_amino_header(self.report_file) self.report.write_amino_detail_header(self.detail_report_file) self.report.write_nuc_header(StringIO()) self.report.read(aligned_reads1) self.report.write_nuc_counts() self.report.write_amino_detail_counts() self.report.combine_reports() self.report.read(aligned_reads2) self.report.write_nuc_counts() self.report.write_amino_detail_counts() self.report.combine_reports() self.report.read(aligned_reads3) self.report.write_nuc_counts() self.report.write_amino_detail_counts() self.report.combine_reports() self.report.write_amino_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testInsertionBetweenReadAndConsensusNucleotideReport(self): """ Combine the soft clipping data with the read counts. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTT """) conseq_ins_csv = StringIO("""\ qname,fwd_rev,refname,pos,insert,qual Example_read_1,F,R1-seed,3,AAC,AAA Example_read_3,F,R2-seed,6,GTA,AAA Example_read_2,F,R1-seed,3,AAC,AAA Example_read_2,R,R1-seed,3,AAC,AAA """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,1,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,2,2,2,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,3,3,3,9,0,0,0,0,0,2,0,0,9 R1-seed,R1,15,4,4,4,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,5,5,5,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,6,6,6,0,0,0,9,0,0,0,0,0,9 """ self.report.read_insertions(conseq_ins_csv) self.report.write_nuc_header(self.report_file) self.report.read(aligned_reads) self.report.write_nuc_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) # noinspection DuplicatedCode def testInsertionBetweenReadAndConsensusAminoReport(self): """ Combine the soft clipping data with the read counts. 
""" # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTT """) conseq_ins_csv = StringIO("""\ qname,fwd_rev,refname,pos,insert,qual Example_read_1,F,R1-seed,3,AAC,AAA Example_read_2,F,R1-seed,3,AAC,AAA """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,9 R1-seed,R1,15,4,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 """ self.report.read_insertions(conseq_ins_csv) self.report.write_amino_header(self.report_file) self.report.read(aligned_reads) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() # calculates ins counts self.report.write_amino_counts() assert self.report_file.getvalue() == expected_text def testSubstitutionAtBoundary(self): """ In this sample, there are nine identical reads with six codons. ATG -> M GCA -> A AAC -> N TGG -> W ATC -> I AAT -> N GGG -> G The R4 coordinate reference is SING, so its first position will not map. However, the ING should map, so the first position should get treated as a substitution. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R4-seed,15,0,9,0,ATGGCAAACTGGATCAATGGG """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R4-seed,R4,15,10,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,9 R4-seed,R4,15,13,2,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R4-seed,R4,15,16,3,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R4-seed,R4,15,19,4,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 """ self.report.write_amino_header(self.report_file) self.report.read(aligned_reads) self.report.write_amino_counts() assert self.report_file.getvalue() == expected_text def testCoverageSummary(self): """ R1 has coverage 9. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTTCGA """) expected_summary = dict(avg_coverage=9.0, coverage_region='R1', region_width=3) summary = {} self.report.read(aligned_reads) self.report.write_amino_header(StringIO()) self.report.write_amino_counts(coverage_summary=summary) self.assertEqual(expected_summary, summary) def testCoverageSummaryNotImproved(self): """ R2 has coverage 9, and R1 had coverage 50. Report R1. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R2-seed,15,0,9,0,AAATTTCGA """) expected_summary = dict(avg_coverage=50.0, coverage_region='R1', region_width=3) summary = dict(expected_summary) self.report.read(aligned_reads) self.report.write_amino_header(StringIO()) self.report.write_amino_counts(coverage_summary=summary) self.assertEqual(expected_summary, summary) def testCoverageSummaryNoCoverage(self): """ Stuff mapped to the seed, but didn't align with the coordinate region. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,10,TGGTGGTGG """) expected_summary = {} summary = {} self.report.read(aligned_reads) self.report.write_amino_header(StringIO()) self.report.write_amino_counts(coverage_summary=summary) self.assertEqual(expected_summary, summary) def testOffsetNucleotideReport(self): """ The first row provides alignment so the partial codon at the start of the second row will map to the reference. 
""" # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,1,3,TTT R1-seed,15,0,8,5,TCGA """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,4,4,4,0,0,0,1,0,0,0,0,0,1 R1-seed,R1,15,5,5,5,0,0,0,1,0,0,0,0,0,1 R1-seed,R1,15,6,6,6,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,7,7,7,0,8,0,0,0,0,0,0,0,8 R1-seed,R1,15,8,8,8,0,0,8,0,0,0,0,0,0,8 R1-seed,R1,15,9,9,9,8,0,0,0,0,0,0,0,0,8 """ self.report.read(aligned_reads) self.report.write_nuc_header(self.report_file) self.report.write_nuc_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testPartialCodonNucleotideReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATT """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,1,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,2,2,2,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,3,3,3,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,4,4,4,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,5,5,5,0,0,0,9,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_nuc_header(self.report_file) self.report.write_nuc_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testPartialStartCodonNucleotideReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,TTAGG """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,5,5,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,2,6,6,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,3,7,7,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,4,8,8,0,0,9,0,0,0,0,0,0,9 R1-seed,R1,15,5,9,9,0,0,9,0,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_nuc_header(self.report_file) self.report.write_nuc_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testReadPairGapInMiddleOfAminoReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,0,AAATTTCAGACCCCAnnnnnnnnnTACTAC """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R3-seed,R3,15,1,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,4,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,7,3,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,10,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,13,5,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertEqual(expected_text, self.report_file.getvalue()) def testLowQualityNucleotideReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATNT """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,1,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,2,2,2,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,3,3,3,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,4,4,4,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,5,5,5,0,0,0,0,9,0,0,0,0,0 R1-seed,R1,15,6,6,6,0,0,0,9,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_nuc_header(self.report_file) self.report.write_nuc_counts() self.assertMultiLineEqual(expected_text, 
self.report_file.getvalue()) def testLowQualityAminoReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATNT """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,4,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0 """ self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testPartialDeletionAminoReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAAT-T """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,4,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0 """ self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testShiftedReadingFrameAminoReport(self): """ The seed's reading frame doesn't match the coordinate reference's reading frame, so there is an extra nucleotide at the beginning of the reads. It will try padding the first codon to see which of the three possible reading frames gives the highest alignment score. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,GAAATTTCGA """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,2,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,5,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,8,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertEqual(expected_text, self.report_file.getvalue()) def testShiftedReadingFrameNucleotideReport(self): """ The seed's reading frame doesn't match the coordinate reference's reading frame, so there is an extra nucleotide at the beginning of the reads. It will try padding the first codon to see which of the three possible reading frames gives the highest alignment score. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,GAAATTTCGA """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,2,1,1,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,3,2,2,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,4,3,3,9,0,0,0,0,0,0,0,0,9 R1-seed,R1,15,5,4,4,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,6,5,5,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,7,6,6,0,0,0,9,0,0,0,0,0,9 R1-seed,R1,15,8,7,7,0,9,0,0,0,0,0,0,0,9 R1-seed,R1,15,9,8,8,0,0,9,0,0,0,0,0,0,9 R1-seed,R1,15,10,9,9,9,0,0,0,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_nuc_header(self.report_file) self.report.write_nuc_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testDeletionNucleotideReport(self): """ Coordinate sequence is KFGPR, and this aligned read is KFPR. 
        Must be a deletion in the read with respect to the seed reference.
        """
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("""\
R1-seed,15,0,9,0,AAA---AGG
""")
        expected_text = """\
seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\
A,C,G,T,N,del,ins,clip,v3_overlap,coverage
R1-seed,R1,15,1,1,1,9,0,0,0,0,0,0,0,0,9
R1-seed,R1,15,2,2,2,9,0,0,0,0,0,0,0,0,9
R1-seed,R1,15,3,3,3,9,0,0,0,0,0,0,0,0,9
R1-seed,R1,15,4,4,4,0,0,0,0,0,9,0,0,0,9
R1-seed,R1,15,5,5,5,0,0,0,0,0,9,0,0,0,9
R1-seed,R1,15,6,6,6,0,0,0,0,0,9,0,0,0,9
R1-seed,R1,15,7,7,7,9,0,0,0,0,0,0,0,0,9
R1-seed,R1,15,8,8,8,0,0,9,0,0,0,0,0,0,9
R1-seed,R1,15,9,9,9,0,0,9,0,0,0,0,0,0,9
"""
        self.report.read(aligned_reads)
        self.report.write_nuc_header(self.report_file)
        self.report.write_nuc_counts()
        self.assertMultiLineEqual(expected_text, self.report_file.getvalue())

    def testDeletionBetweenSeedAndCoordinateNucleotideReport(self):
        """ Coordinate sequence is KFQTPREH, and this aligned read is
        KFQPREH.

        Must be a deletion in the seed reference with respect to the
        coordinate reference.
        """
        self.report.landmarks = yaml.safe_load("""\
- seed_pattern: R3-.*
  coordinates: R3-seed
  landmarks:
    - {name: R3, start: 1, end: 27}
""")
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("""\
R3-seed,15,0,9,0,AAATTTCAGCCACGAGAGCAT
""")
        expected_text = """\
seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\
A,C,G,T,N,del,ins,clip,v3_overlap,coverage
R3-seed,R3,15,1,1,1,9,0,0,0,0,0,0,0,0,9
R3-seed,R3,15,2,2,2,9,0,0,0,0,0,0,0,0,9
R3-seed,R3,15,3,3,3,9,0,0,0,0,0,0,0,0,9
R3-seed,R3,15,4,4,4,0,0,0,9,0,0,0,0,0,9
R3-seed,R3,15,5,5,5,0,0,0,9,0,0,0,0,0,9
R3-seed,R3,15,6,6,6,0,0,0,9,0,0,0,0,0,9
R3-seed,R3,15,7,7,7,0,9,0,0,0,0,0,0,0,9
R3-seed,R3,15,8,8,8,9,0,0,0,0,0,0,0,0,9
R3-seed,R3,15,9,9,9,0,0,9,0,0,0,0,0,0,9
R3-seed,R3,15,,10,10,0,0,0,0,0,9,0,0,0,9
R3-seed,R3,15,,11,11,0,0,0,0,0,9,0,0,0,9
R3-seed,R3,15,,12,12,0,0,0,0,0,9,0,0,0,9
R3-seed,R3,15,10,13,13,0,9,0,0,0,0,0,0,0,9
R3-seed,R3,15,11,14,14,0,9,0,0,0,0,0,0,0,9
R3-seed,R3,15,12,15,15,9,0,0,0,0,0,0,0,0,9
R3-seed,R3,15,13,16,16,0,9,0,0,0,0,0,0,0,9
R3-seed,R3,15,14,17,17,0,0,9,0,0,0,0,0,0,9
R3-seed,R3,15,15,18,18,9,0,0,0,0,0,0,0,0,9
R3-seed,R3,15,16,19,19,0,0,9,0,0,0,0,0,0,9
R3-seed,R3,15,17,20,20,9,0,0,0,0,0,0,0,0,9
R3-seed,R3,15,18,21,21,0,0,9,0,0,0,0,0,0,9
R3-seed,R3,15,19,22,22,0,9,0,0,0,0,0,0,0,9
R3-seed,R3,15,20,23,23,9,0,0,0,0,0,0,0,0,9
R3-seed,R3,15,21,24,24,0,0,0,9,0,0,0,0,0,9
"""
        self.report.read(aligned_reads)
        self.report.write_nuc_header(self.report_file)
        self.report.write_nuc_counts()
        self.assertEqual(expected_text, self.report_file.getvalue())

    def testDeletionBetweenSeedAndCoordinateAminoReport(self):
        """ Coordinate sequence is KFQTPREH, and this aligned read is
        KFQPREH.

        Must be a deletion in the seed reference with respect to the
        coordinate reference.
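        The missing T shows up as nine deletions at amino position 4.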
""" self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R3-.* coordinates: R3-seed landmarks: - {name: R3, start: 1, end: 27} """) # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,0,AAATTTCAGCCACGAGAGCAT """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R3-seed,R3,15,1,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,4,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,7,3,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,9 R3-seed,R3,15,10,5,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,13,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,16,7,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,19,8,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertEqual(expected_text, self.report_file.getvalue()) def testDeletionBetweenSeedAndConsensusAminoReport(self): """ Coordinate and consensus are KFGPR, but seed is KFPR. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R5-seed,15,0,9,0,AAATTTGGCCCCCGACCTCAGGTCACTCTTTGG """) # seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos, # A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R5-seed,R5,15,1,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,4,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,7,3,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,10,4,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,13,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,16,6,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,19,7,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,22,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,25,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,9 R5-seed,R5,15,28,10,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 """ self.report.write_amino_header(self.report_file) self.report.read(aligned_reads) self.report.write_amino_counts() self.assertEqual(expected_text, self.report_file.getvalue()) def testDeletionWithMinorityVariant(self): """ Aligned reads are mostly K-R, but some are KFR. Must be a deletion in the sample with respect to the seed reference, but some variants in the sample do not have that deletion. 
""" # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,5,0,AAA---AGG R1-seed,15,0,2,0,AAATTTAGG """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7 R1-seed,R1,15,4,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,7 R1-seed,R1,15,7,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,7 """ self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertEqual(expected_text, self.report_file.getvalue()) def testDeletionNotAlignedToCodons(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,5,0,AAAA---GG """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1,15,1,1,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5 R1-seed,R1,15,4,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,5 R1-seed,R1,15,7,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,5 """ self.report.remap_conseqs = {'R1-seed': 'AAATTTAGG'} self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertEqual(expected_text, self.report_file.getvalue()) def testInsertionBetweenSeedAndCoordinateAminoReport(self): """ Coordinate sequence is KFQTPREH, and this aligned read is HERKFQTGPREH. The G must be an insertion in the seed reference with respect to the coordinate reference. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,0,CATGAGCGAAAATTTCAGACTGGGCCCCGAGAGCAT """) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R3-.* coordinates: R3-seed landmarks: # Extra 3 positions for stop codons to get dropped, one codon overlaps. - {name: R3, start: 1, end: 27, colour: lightblue} """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R3-seed,R3,15,10,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,13,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,16,3,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,19,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,9,0,0,9 R3-seed,R3,15,25,5,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,28,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,31,7,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,34,8,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 """ expected_insertions = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R3-seed,MAX,R3,12,12,21,GGG R3-seed,0.100,R3,12,12,21,GGG """ self.report.read(aligned_reads) self.report.write_insertions() self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() # calculates insertion counts self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) self.assertEqual(expected_insertions, self.report.insert_writer.insert_file.getvalue()) def testInsertionBetweenSeedAndCoordinateNucleotideReport(self): """ Coordinate sequence is KFQTPREH, and this aligned read is HERKFQTGPREH. 
The G must be an insertion in the seed reference with respect to the coordinate reference. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,0,CATGAGCGAAAATTTCAGACTGGGCCCCGAGAGCAT """) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R3-.* coordinates: R3-seed landmarks: # Extra 3 positions for stop codons to get dropped, one codon overlaps. - {name: R3, start: 1, end: 27, colour: lightblue} """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.nuc.pos,genome.pos,\ A,C,G,T,N,del,ins,clip,v3_overlap,coverage R3-seed,R3,15,10,1,1,9,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,11,2,2,9,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,12,3,3,9,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,13,4,4,0,0,0,9,0,0,0,0,0,9 R3-seed,R3,15,14,5,5,0,0,0,9,0,0,0,0,0,9 R3-seed,R3,15,15,6,6,0,0,0,9,0,0,0,0,0,9 R3-seed,R3,15,16,7,7,0,9,0,0,0,0,0,0,0,9 R3-seed,R3,15,17,8,8,9,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,18,9,9,0,0,9,0,0,0,0,0,0,9 R3-seed,R3,15,19,10,10,9,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,20,11,11,0,9,0,0,0,0,0,0,0,9 R3-seed,R3,15,21,12,12,0,0,0,9,0,0,9,0,0,9 R3-seed,R3,15,25,13,13,0,9,0,0,0,0,0,0,0,9 R3-seed,R3,15,26,14,14,0,9,0,0,0,0,0,0,0,9 R3-seed,R3,15,27,15,15,0,9,0,0,0,0,0,0,0,9 R3-seed,R3,15,28,16,16,0,9,0,0,0,0,0,0,0,9 R3-seed,R3,15,29,17,17,0,0,9,0,0,0,0,0,0,9 R3-seed,R3,15,30,18,18,9,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,31,19,19,0,0,9,0,0,0,0,0,0,9 R3-seed,R3,15,32,20,20,9,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,33,21,21,0,0,9,0,0,0,0,0,0,9 R3-seed,R3,15,34,22,22,0,9,0,0,0,0,0,0,0,9 R3-seed,R3,15,35,23,23,9,0,0,0,0,0,0,0,0,9 R3-seed,R3,15,36,24,24,0,0,0,9,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_insertions() self.report.write_nuc_header(self.report_file) self.report.write_nuc_counts() self.assertEqual(expected_text, self.report_file.getvalue()) def testInsertionsSortedByCount(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,0,CATGAGCGAAAATTTCAGACTGGGCCCCGAGAGCAT R3-seed,15,0,8,0,CATGAGCGAAAATTTCAGACTAAACCCCGAGAGCAT """) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R3-.* coordinates: R3-seed landmarks: # Extra 3 positions for stop codons to get dropped, one codon overlaps. - {name: R3, start: 1, end: 27, colour: lightblue} """) expected_insertions = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R3-seed,MAX,R3,12,12,21,GGG R3-seed,0.100,R3,12,12,21,RRR """ self.report.read(aligned_reads) self.report.write_insertions() self.assertEqual(expected_insertions, self.report.insert_writer.insert_file.getvalue()) def testInsertionsSortedByLeft(self): """ Two insertions within a single consensus, sorted by position. Consensus is HERKFQTGPRKEHQFKL Reference is ERKF-TGPRK-HQFKL (without the dashes) """ self.report.projects.load(StringIO("""\ { "projects": { "R3": { "max_variants": 0, "regions": [ { "coordinate_region": "R3", "seed_region_names": ["R3-seed"] } ] } }, "regions": { "R3-seed": { "is_nucleotide": true, "reference": [ "CATGAGCGAAAATTTACTGGGCCCCGAAAACATCAGTTTAAACTC" ] }, "R3": { "is_nucleotide": false, "reference": [ "ERKFTGPRKHQFKL" ] } } } """)) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R3-seed coordinates: R3-seed landmarks: # Extra 3 nucleotides at end, because stop codons will get dropped. 
- {name: R3, start: 4, end: 48, frame: 0} """) # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,0,CATGAGCGAAAATTTCAGACTGGGCCCCGAAAAGAGCATCAGTTTAAACTC """) expected_insertions = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R3-seed,MAX,R3,12,15,15,CAG R3-seed,0.100,R3,12,15,15,CAG R3-seed,MAX,R3,27,30,33,GAG R3-seed,0.100,R3,27,30,33,GAG """ self.report.read(aligned_reads) self.report.write_insertions() self.assertEqual(expected_insertions, self.report.insert_writer.insert_file.getvalue()) def testInsertionInDifferentReadingFrame(self): """ Delete part of the first codon to throw off the reading frame. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,0,AATTTCAGACTGGGCCCCGAGAGCAT """) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R3-.* coordinates: R3-seed landmarks: # Extra 3 positions for stop codons to get dropped, one codon overlaps. - {name: R3, start: 1, end: 27, colour: lightblue} """) expected_insertions = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R3-seed,MAX,R3,12,12,11,GGG R3-seed,0.100,R3,12,12,11,GGG """ self.report.read(aligned_reads) self.report.write_amino_header(StringIO()) self.report.write_amino_counts() self.report.write_insertions() self.assertEqual(expected_insertions, self.report.insert_writer.insert_file.getvalue()) def testInsertionInSomeReads(self): """ Not all reads have the insertion, some end before it. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,0,AAATTTCAGACTGGGCCCCGAGAGCAT R3-seed,15,1,5,0,AAATTTCAG R3-seed,15,2,4,0,AAATTTCAGACTG """) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R3-.* coordinates: R3-seed landmarks: # Extra 3 positions for stop codons to get dropped, one codon overlaps. - {name: R3, start: 1, end: 27, colour: lightblue} """) expected_insertions = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R3-seed,MAX,R3,12,12,12,GGG R3-seed,0.100,R3,12,12,12,Ggg """ self.report.read(aligned_reads) self.report.write_amino_header(StringIO()) self.report.write_amino_counts() self.report.write_insertions() self.assertEqual(expected_insertions, self.report.insert_writer.insert_file.getvalue()) def testMultipleCoordinateInsertionReport(self): """ Two coordinate regions map the same seed region, the consensus has an insertion relative to only one of them. """ self.report.projects.load(StringIO("""\ { "projects": { "R3": { "max_variants": 0, "regions": [ { "coordinate_region": "R3a", "seed_region_names": ["R3-seed"] }, { "coordinate_region": "R3b", "seed_region_names": ["R3-seed"] } ] } }, "regions": { "R3-seed": { "is_nucleotide": true, "reference": [ "AAATTTCAGACCGGGCCACGAGAGCAT" ] }, "R3a": { "is_nucleotide": false, "reference": [ "KFQTPREH" ] }, "R3b": { "is_nucleotide": false, "reference": [ "KFQTGPREH" ] } } } """)) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R3-seed coordinates: R3-seed landmarks: # Extra 3 nucleotides at end, because stop codons will get dropped. 
    - {name: R3a, start: 1, end: 27, frame: 0}
    - {name: R3b, start: 1, end: 27, frame: 0}
""")
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("""\
R3-seed,15,0,9,0,AAATTTCAGACTGGGCCCCGAGAGCAT
""")
        expected_insertions = """\
seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion
R3-seed,MAX,R3a,12,12,12,GGG
R3-seed,0.100,R3a,12,12,12,GGG
"""
        self.report.read(aligned_reads)
        self.report.write_insertions()
        self.assertEqual(expected_insertions,
                         self.report.insert_writer.insert_file.getvalue())

    def testInsertionsRelativeToConsensus(self):
        """ Test that insertions relative to the consensus are handled
        correctly.
        """
        aligned_reads = prepare_reads("""\
R1-seed,15,0,10,0,AAATTTAGG
""")
        conseq_ins_csv = StringIO("""\
qname,fwd_rev,refname,pos,insert,qual
Example_read_1,F,R1-seed,3,AAC,AAA
Example_read_2,F,R1-seed,3,AAC,AAA
""")
        expected_insertions = ("""\
seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion
R1-seed,0.100,R1,3,3,3,aac
""")
        self.report.read_insertions(conseq_ins_csv)
        self.report.write_amino_header(self.report_file)
        self.report.read(aligned_reads)
        self.report.write_nuc_header(StringIO())
        self.report.write_nuc_counts()  # calculates ins counts
        self.report.write_amino_counts()
        self.report.insert_writer.write(self.report.inserts,
                                        self.report.detail_seed,
                                        self.report.reports,
                                        self.report.report_nucleotides,
                                        self.report.landmarks,
                                        self.report.consensus_builder)
        self.assertEqual(expected_insertions,
                         self.report.insert_writer.insert_file.getvalue())

    def testGapBetweenForwardAndReverse(self):
        """ Lower-case n represents a gap between forward and reverse reads.

        Region R2 has sequence KFGPR, so this read has a gap at the end of G
        and beginning of P. Partial codons at the ends of a read or next to
        the gap are ignored, even though G is still unambiguous.
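        Only the K, F, and R codons get counted.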
""" # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R2-seed,15,0,5,0,AAATTTGGnnCCCGA """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R2-seed,R2,15,1,1,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5 R2-seed,R2,15,4,2,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5 R2-seed,R2,15,13,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0,0,0,0,0,0,0,5 """ self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testFailedAlignmentAminoReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,2,0,TTATCCTAC """) expected_text = "" self.report.read(aligned_reads) self.report.write_amino_counts(self.report_file) self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testFailedAlignmentFailureReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,2,0,TTATCCTAC """) expected_text = """\ seed,region,qcut,queryseq,refseq R1-seed,R1,15,LSY,KFR """ self.report.write_failure_header(self.report_file) self.report.read(aligned_reads) self.report.write_failure() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testFailedAlignmentWithHeadToTailMatch(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,2,0,TTATCCTACTTATCCTACTTATCCAAA """) expected_text = """\ seed,region,qcut,queryseq,refseq R3-seed,R3,15,LSYLSYLSK,KFQTPREH """ self.report.read(aligned_reads) self.report.write_failure_header(self.report_file) self.report.write_failure() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testGoodAlignmentWithTinyCoordinateReference(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,2,0,AAATTTCGATTATCCTACTTATCCTACTTATCCTACTTATCCTACTTATCCTACTTATCCTACTTATCCTACTTATCCTAC """) expected_text = """\ seed,region,qcut,queryseq,refseq """ self.report.read(aligned_reads) self.report.write_failure_header(self.report_file) self.report.write_failure() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testGoodAlignmentWithGiantSeed(self): """ Short consensus with long seed and long coordinate reference. Even when the consensus maps to the end of the seed, it should still only require a low alignment score. """ self.report.projects.load(StringIO("""\ { "projects": { "R3": { "max_variants": 0, "regions": [ { "coordinate_region": "R3", "seed_region_names": ["R3-seed"] } ] } }, "regions": { "R3-seed": { "is_nucleotide": true, "reference": [ "TGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGG", "TGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGTGGAAATTTAGG" ] }, "R3": { "is_nucleotide": false, "reference": [ "WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWKFR" ] } } } """)) # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R3-seed,15,0,9,123,AAATTTCGA """) # seed,region,qcut,queryseq,refseq expected_text = """\ seed,region,qcut,queryseq,refseq """ self.report.read(aligned_reads) self.report.write_failure_header(self.report_file) self.report.write_failure() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testFailedAlignmentWithOffset(self): """ Be careful that an offset from the seed reference doesn't match the dashes in the failed alignment. 
""" # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,2,3,TTATCCTAC """) expected_text = """\ seed,region,qcut,queryseq,refseq R1-seed,R1,15,-LSY,KFR """ self.report.read(aligned_reads) self.report.write_failure_header(self.report_file) self.report.write_failure() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testMultipleCoordinateRefsNoAlignment(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R7-seed,15,0,2,0,TTATCCTAC """) expected_text = """\ seed,region,qcut,queryseq,refseq R7-seed,R7a,15,LSY,KFQ R7-seed,R7b,15,LSY,REH """ self.report.read(aligned_reads) self.report.write_failure_header(self.report_file) self.report.write_failure() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testMultipleCoordinateRefsOneAlignment(self): """ If one coordinate aligns, don't complain about the others. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R7-seed,15,0,2,0,AAATTT """) expected_text = """\ seed,region,qcut,queryseq,refseq """ self.report.read(aligned_reads) self.report.write_failure_header(self.report_file) self.report.write_failure() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testNoFailureReport(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTT """) expected_text = """\ seed,region,qcut,queryseq,refseq """ self.report.read(aligned_reads) self.report.write_failure_header(self.report_file) self.report.write_failure() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testMultipleCoordinateAminoReport(self): """ Two coordinate regions map the same seed region, report both. """ self.report.projects.load(StringIO("""\ { "projects": { "R1": { "max_variants": 10, "regions": [ { "coordinate_region": "R1a", "seed_region_names": ["R1-seed"] }, { "coordinate_region": "R1b", "seed_region_names": ["R1-seed"] } ] } }, "regions": { "R1-seed": { "is_nucleotide": true, "reference": [ "TGGAAATTTAGG" ] }, "R1a": { "is_nucleotide": false, "reference": [ "KFR" ] }, "R1b": { "is_nucleotide": false, "reference": [ "WKFR" ] } } } """)) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R1-seed coordinates: R1-seed landmarks: # Extra 3 nucleotides at end, because stop codons will get dropped. - {name: R1a, start: 4, end: 15, frame: 0} - {name: R1b, start: 1, end: 15, frame: 0} """) # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTT """) expected_text = """\ seed,region,q-cutoff,query.nuc.pos,refseq.aa.pos,\ A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*,X,partial,del,ins,clip,v3_overlap,coverage R1-seed,R1a,15,1,1,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1a,15,4,2,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1b,15,1,2,0,0,0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 R1-seed,R1b,15,4,3,0,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9 """ self.report.read(aligned_reads) self.report.write_amino_header(self.report_file) self.report.write_amino_counts() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testMultipleCoordinateConsensusRegionsReport(self): """ Two coordinate regions map the same seed region, report both. 
""" self.report.projects.load(StringIO("""\ { "projects": { "R1": { "max_variants": 10, "regions": [ { "coordinate_region": "R1a", "seed_region_names": ["R1-seed"] }, { "coordinate_region": "R1b", "seed_region_names": ["R1-seed"] } ] } }, "regions": { "R1-seed": { "is_nucleotide": true, "reference": [ "TGGAAATTTAGG" ] }, "R1a": { "is_nucleotide": false, "reference": [ "KFR" ] }, "R1b": { "is_nucleotide": false, "reference": [ "WKFR" ] } } } """)) self.report.landmarks = yaml.safe_load("""\ - seed_pattern: R1-seed coordinates: R1-seed landmarks: # Extra 3 nucleotides at end, because stop codons will get dropped. - {name: R1a, start: 4, end: 15, frame: 0} - {name: R1b, start: 1, end: 15, frame: 0} """) # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("""\ R1-seed,15,0,9,0,AAATTT R1-seed,15,0,1,0,CCCGGG """) expected_text = """\ seed,region,q-cutoff,consensus-percent-cutoff,offset,sequence R1-seed,R1a,15,MAX,0,AAATTT R1-seed,R1a,15,0.100,0,MMMKKK R1-seed,R1b,15,MAX,3,AAATTT R1-seed,R1b,15,0.100,3,MMMKKK """ self.report.read(aligned_reads) self.report.write_consensus_regions_header(self.report_file) self.report.combine_reports() self.report.write_consensus_regions() self.assertMultiLineEqual(expected_text, self.report_file.getvalue()) def testReadRemapConseqs(self): remap_conseqs_csv = StringIO("""\ region,sequence R1,ACATAGCCCGGG R2,GCCATTAAA """) expected_conseqs = {'R1': 'ACATAGCCCGGG', 'R2': 'GCCATTAAA'} self.report.read_remap_conseqs(remap_conseqs_csv) self.assertEqual(expected_conseqs, self.report.remap_conseqs) def testAlignDeletionsWithoutDeletion(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("R1-seed,15,0,10,0,AAATTTAGG") self.report.remap_conseqs = {'R1-seed': 'AAATTTAGG'} expected_reads = [dict(refname='R1-seed', qcut='15', rank='0', count='10', offset='0', seq='AAATTTAGG')] reads = list(self.report.align_deletions(aligned_reads)) self.assertEqual(expected_reads, reads) def testAlignDeletionsNoChange(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("R1-seed,15,0,10,0,AAA---AGG") self.report.remap_conseqs = {'R1-seed': 'AAATTTAGG'} expected_reads = [dict(refname='R1-seed', qcut='15', rank='0', count='10', offset='0', seq='AAA---AGG')] reads = list(self.report.align_deletions(aligned_reads)) self.assertEqual(expected_reads, reads) def testAlignDeletionsShiftedRight(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("R1-seed,15,0,10,0,AAAA---GG") self.report.remap_conseqs = {'R1-seed': 'AAATTTAGG'} expected_reads = [dict(refname='R1-seed', qcut='15', rank='0', count='10', offset='0', seq='AAA---AGG')] reads = list(self.report.align_deletions(aligned_reads)) self.assertEqual(expected_reads, reads) def testAlignDeletionsShiftedLeft(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("R1-seed,15,0,10,0,AA---AAGG") self.report.remap_conseqs = {'R1-seed': 'AAATTTAGG'} expected_reads = [dict(refname='R1-seed', qcut='15', rank='0', count='10', offset='0', seq='AAA---AGG')] reads = list(self.report.align_deletions(aligned_reads)) self.assertEqual(expected_reads, reads) def testAlignDeletionsTwoCodons(self): # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("R2-seed,15,0,10,0,AA------ACCGAGA") self.report.remap_conseqs = {'R2-seed': 'AAATTTGGCCCGAGA'} expected_reads = [dict(refname='R2-seed', qcut='15', rank='0', count='10', offset='0', seq='AAA------CCGAGA')] reads = list(self.report.align_deletions(aligned_reads)) self.assertEqual(expected_reads, reads) def 
    def testAlignDeletionsUsingOffset(self):
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("R1-seed,15,0,10,1,AA---AGG")
        self.report.remap_conseqs = {'R1-seed': 'AAATTTAGG'}
        expected_reads = [dict(refname='R1-seed', qcut='15', rank='0',
                               count='10', offset='1', seq='AA---AGG')]
        reads = list(self.report.align_deletions(aligned_reads))
        self.assertEqual(expected_reads, reads)

    def testAlignDeletionsUsingReadingFrame1(self):
        self.report.projects.load(StringIO("""\
{
  "projects": {
    "R1": {
      "max_variants": 10,
      "regions": [
        {
          "coordinate_region": "R1",
          "seed_region_names": ["R1-seed"]
        }
      ]
    }
  },
  "regions": {
    "R1-seed": {
      "is_nucleotide": true,
      "reference": ["CCAAATTTAGG"]
    },
    "R1": {
      "is_nucleotide": false,
      "reference": ["KFR"]
    }
  }
}
"""))
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("R1-seed,15,0,10,2,AAA---AGG")
        self.report.remap_conseqs = {'R1-seed': 'CCAAATTTAGG'}
        expected_reads = [dict(refname='R1-seed', qcut='15', rank='0',
                               count='10', offset='2', seq='AAA---AGG')]
        reads = list(self.report.align_deletions(aligned_reads))
        self.assertEqual(expected_reads, reads)

    def testAlignDeletionsMultipleReadingFrames(self):
        self.report.projects.load(StringIO("""\
{
  "projects": {
    "Rs": {
      "max_variants": 10,
      "regions": [
        {
          "coordinate_region": "R1",
          "seed_region_names": ["R-seed"]
        },
        {
          "coordinate_region": "R2",
          "seed_region_names": ["R-seed"]
        }
      ]
    }
  },
  "regions": {
    "R-seed": {
      "is_nucleotide": true,
      "reference": ["GAAATTTCAGTTTTTTTTCGAGAGCAT"],
      "comment": " ^KkkFffQqq^^^^^^^^RrrEeeHhh (two reading frames)"
    },
    "R1": {
      "is_nucleotide": false,
      "reference": ["KFQ"]
    },
    "R2": {
      "is_nucleotide": false,
      "reference": ["REH"]
    }
  }
}
"""))
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads(
            "R-seed,15,0,10,1,AAA---CAGTTTTTTTTC---AGCAT")
        self.report.remap_conseqs = {'R-seed': 'GAAATTTCAGTTTTTTTTCGAGAGCAT'}
        expected_reads = [dict(refname='R-seed', qcut='15', rank='0',
                               count='10', offset='1',
                               seq='AAA---CAGTTTTTTTT---CAGCAT')]
        reads = list(self.report.align_deletions(aligned_reads))
        self.assertEqual(expected_reads, reads)

    def testCombineDeletions(self):
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("R2-seed,15,0,10,0,AA-TCG--CCCGAGA")
        self.report.remap_conseqs = {'R2-seed': 'AAATTTGGCCCGAGA'}
        expected_reads = [dict(refname='R2-seed', qcut='15', rank='0',
                               count='10', offset='0', seq='AATCGC---CCGAGA')]
        reads = list(self.report.align_deletions(aligned_reads))
        self.assertEqual(expected_reads, reads)

    def testCombineDeletionsTwoCodons(self):
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads("R2-seed,15,0,10,0,AA--TC----CGAGA")
        self.report.remap_conseqs = {'R2-seed': 'AAATTTGGCCCGAGA'}
        expected_reads = [dict(refname='R2-seed', qcut='15', rank='0',
                               count='10', offset='0', seq='AAT------CCGAGA')]
        reads = list(self.report.align_deletions(aligned_reads))
        self.assertEqual(expected_reads, reads)

    def testCombineDeletionsMaxSpread(self):
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads(
            "R3-seed,15,0,10,0,AA--TTCAGACCCC-CGAGAGCAT")
        self.report.remap_conseqs = {'R3-seed': 'AAATTTCAGACCCCACGAGAGCAT'}
        expected_reads = [dict(refname='R3-seed', qcut='15', rank='0',
                               count='10', offset='0',
                               seq='AAT---TCAGACCCCCGAGAGCAT')]
        reads = list(self.report.align_deletions(aligned_reads))
        self.assertEqual(expected_reads, reads)

    def testCombineDeletionsBeyondMaxSpread(self):
        # refname,qcut,rank,count,offset,seq
        aligned_reads = prepare_reads(
            "R3-seed,15,0,10,0,AA--TTCAGACCCCA-GAGAGCAT")
        self.report.remap_conseqs = {'R3-seed': 'AAATTTCAGACCCCACGAGAGCAT'}
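        # These deletions are too far apart to sweep together, so the read
        # keeps its original alignment.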
expected_reads = [dict(refname='R3-seed', qcut='15', rank='0', count='10', offset='0', seq='AA--TTCAGACCCCA-GAGAGCAT')] reads = list(self.report.align_deletions(aligned_reads)) self.assertEqual(expected_reads, reads) def testCombineDeletionsTooCrowded(self): """ There must be a buffer of 13 with no deletions. Otherwise, sweeping is not allowed. """ # refname,qcut,rank,count,offset,seq aligned_reads = prepare_reads("R3-seed,15,0,10,0,AA--TTCAGACCCC-CGA---CAT") self.report.remap_conseqs = {'R3-seed': 'AAATTTCAGACCCCACGAGAGCAT'} expected_reads = [dict(refname='R3-seed', qcut='15', rank='0', count='10', offset='0', seq='AA--TTCAGACCCC-CGA---CAT')] reads = list(self.report.align_deletions(aligned_reads)) self.assertEqual(expected_reads, reads) def testCombinedCoordinateConcordance(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,0,AAATTTAGGTAG") expected_file = """\ reference,region,pct_concordance,pct_covered R1A-seed,R1A,100.0,100.0 R1A-seed,R1A_second,0.0,0.0 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(concordance_file) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.combine_reports() self.report.write_coordinate_concordance(self.report.concordance_writer, use_combined_reports=True) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testReportCoordinateConcordance(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,0,AAATTTAGGTAG") expected_file = """\ reference,region,pct_concordance,pct_covered R1A-seed,R1A,100.0,100.0 R1A-seed,R1A_second,0.0,0.0 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(concordance_file) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testCoordinateConcordanceCoverage(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,0,AAATTT") expected_file = """\ reference,region,pct_concordance,pct_covered R1A-seed,R1A,100.0,50.0 R1A-seed,R1A_second,0.0,0.0 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(concordance_file) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testCoordinateConcordanceMismatch(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,0,AAATTTGGGTAG") # 1 different nuc here: ^ expected_file = """\ reference,region,pct_concordance,pct_covered R1A-seed,R1A,91.66666666666667,100.0 R1A-seed,R1A_second,0.0,0.0 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(concordance_file) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testCoordinateConcordanceMismatchCoverage(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,0,AAATTTGGG") # 1 different nuc here: ^ expected_file = """\ reference,region,pct_concordance,pct_covered R1A-seed,R1A,88.88888888888889,75.0 R1A-seed,R1A_second,0.0,0.0 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(concordance_file) self.report.write_nuc_header(StringIO()) 
self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testCoordinateConcordanceDeletion(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,0,AAATTT---TAG") expected_file = """\ reference,region,pct_concordance,pct_covered R1A-seed,R1A,100.0,75.0 R1A-seed,R1A_second,0.0,0.0 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(concordance_file) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testDetailedCombinedCoordinateConcordance(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,12,CCGAGACCTCAGGTCACTCTTTGGTAG") expected_file = """\ reference,region,pct_concordance,pct_covered,position R1A-seed,R1A_second,100.0,100.0,10 R1A-seed,R1A_second,100.0,100.0,11 R1A-seed,R1A_second,100.0,100.0,12 R1A-seed,R1A_second,100.0,100.0,13 R1A-seed,R1A_second,100.0,100.0,14 R1A-seed,R1A_second,100.0,100.0,15 R1A-seed,R1A_second,100.0,100.0,16 R1A-seed,R1A_second,100.0,100.0,17 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(StringIO()) self.report.write_concordance_header(concordance_file, is_detailed=True) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.combine_reports() self.report.write_coordinate_concordance(self.report.concordance_writer, self.report.detailed_concordance_writer, use_combined_reports=True) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testDetailedCoordinateConcordance(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,12,CCGAGACCTCAGGTCACTCTTTGGTAG") expected_file = """\ reference,region,pct_concordance,pct_covered,position R1A-seed,R1A_second,100.0,100.0,10 R1A-seed,R1A_second,100.0,100.0,11 R1A-seed,R1A_second,100.0,100.0,12 R1A-seed,R1A_second,100.0,100.0,13 R1A-seed,R1A_second,100.0,100.0,14 R1A-seed,R1A_second,100.0,100.0,15 R1A-seed,R1A_second,100.0,100.0,16 R1A-seed,R1A_second,100.0,100.0,17 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(StringIO()) self.report.write_concordance_header(concordance_file, is_detailed=True) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer, self.report.detailed_concordance_writer) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testDetailedCoordinateConcordanceCoverage(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,12,CCGAGACCTCAGGTCACTCTTTGG") expected_file = """\ reference,region,pct_concordance,pct_covered,position R1A-seed,R1A_second,100.0,100.0,10 R1A-seed,R1A_second,100.0,100.0,11 R1A-seed,R1A_second,100.0,100.0,12 R1A-seed,R1A_second,100.0,100.0,13 R1A-seed,R1A_second,100.0,100.0,14 R1A-seed,R1A_second,95.0,95.0,15 R1A-seed,R1A_second,90.0,90.0,16 R1A-seed,R1A_second,85.0,85.0,17 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(StringIO()) self.report.write_concordance_header(concordance_file, is_detailed=True) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer, self.report.detailed_concordance_writer) self.assertMultiLineEqual(expected_file, 
concordance_file.getvalue()) def testDetailedCoordinateConcordanceMismatch(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,12,CCGAGCCCTCTGGTCACTCTGTGGTAG") # mismatch: ^ ^ ^ expected_file = """\ reference,region,pct_concordance,pct_covered,position R1A-seed,R1A_second,90.0,100.0,10 R1A-seed,R1A_second,85.0,100.0,11 R1A-seed,R1A_second,85.0,100.0,12 R1A-seed,R1A_second,85.0,100.0,13 R1A-seed,R1A_second,85.0,100.0,14 R1A-seed,R1A_second,85.0,100.0,15 R1A-seed,R1A_second,90.0,100.0,16 R1A-seed,R1A_second,90.0,100.0,17 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(StringIO()) self.report.write_concordance_header(concordance_file, is_detailed=True) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer, self.report.detailed_concordance_writer) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) def testDetailedCoordinateConcordanceDeletion(self): aligned_reads = prepare_reads("R1A-seed,15,0,10,12,CCGAGACCTCAGGTCACTCTT---TAG") expected_file = """\ reference,region,pct_concordance,pct_covered,position R1A-seed,R1A_second,100.0,100.0,10 R1A-seed,R1A_second,100.0,100.0,11 R1A-seed,R1A_second,95.0,95.0,12 R1A-seed,R1A_second,90.0,90.0,13 R1A-seed,R1A_second,85.0,85.0,14 R1A-seed,R1A_second,85.0,85.0,15 R1A-seed,R1A_second,85.0,85.0,16 R1A-seed,R1A_second,85.0,85.0,17 """ self.report.read(aligned_reads) concordance_file = StringIO() self.report.write_concordance_header(StringIO()) self.report.write_concordance_header(concordance_file, is_detailed=True) self.report.write_nuc_header(StringIO()) self.report.write_nuc_counts() self.report.write_coordinate_concordance(self.report.concordance_writer, self.report.detailed_concordance_writer) self.assertMultiLineEqual(expected_file, concordance_file.getvalue()) class InsertionWriterTest(unittest.TestCase): def setUp(self): self.insert_file = StringIO() self.writer = InsertionWriter(self.insert_file) self.writer.start_group(seed='R1-seed', qcut=15) self.nuc_seq_acdef = 'GCTTGTGACGAGTTT' self.nuc_seq_afdef = 'GCTTTTGACGAGTTT' def testNoInserts(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion """ self.writer.add_nuc_read(offset_sequence=self.nuc_seq_acdef, count=1) self.writer.write(insertions={}, seed_name='', report_aminos_all=[], report_nucleotides_all=[], landmarks=None, consensus_builder=ConsensusBuilder([0.1, 'MAX'], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testInsert(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,6,6,6,GAC R1-seed,0.100,R1,6,6,6,GAC """ expected_counts = {('R1-seed', 'R1'): {6: 1}} report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(3), 2), ReportAmino(SeedAmino(9), 3), ReportAmino(SeedAmino(12), 4)]} self.writer.add_nuc_read(offset_sequence=self.nuc_seq_acdef, count=1) self.writer.write(insertions={'R1': [6]}, seed_name='R1-seed', report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) self.assertEqual(expected_counts, self.writer.insert_pos_counts) def testInsertNuc(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,2,2,2,T 
R1-seed,0.100,R1,2,2,2,T """ report_nucleotides = {'R1': [ReportNucleotide(1, SeedNucleotide()), ReportNucleotide(2, SeedNucleotide()), ReportNucleotide(3, SeedNucleotide()), ReportNucleotide(4, SeedNucleotide())]} report_nucleotides['R1'][0].seed_nucleotide.consensus_index = 0 report_nucleotides['R1'][1].seed_nucleotide.consensus_index = 1 report_nucleotides['R1'][2].seed_nucleotide.consensus_index = 3 report_nucleotides['R1'][3].seed_nucleotide.consensus_index = 4 self.writer.add_nuc_read(offset_sequence=self.nuc_seq_acdef, count=1) self.writer.write(insertions={'R1': [2]}, seed_name='R1-seed', report_aminos_all={'R1': []}, report_nucleotides_all=report_nucleotides, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testInsertDifferentReadingFrame(self): """ Add a partial codon at the start of the read to shift the reading frame. """ expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,6,6,7,GAC R1-seed,0.100,R1,6,6,7,GAC """ report_aminos = {'R1': [ReportAmino(SeedAmino(1), 1), ReportAmino(SeedAmino(4), 2), ReportAmino(SeedAmino(10), 3), ReportAmino(SeedAmino(13), 4)]} self.writer.add_nuc_read(offset_sequence='A' + self.nuc_seq_acdef, count=1) self.writer.write(insertions={'R1': [7]}, seed_name='R1-seed', report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testInsertWithOffset(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,6,6,6,GAC R1-seed,0.100,R1,6,6,6,GAC """ report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(3), 2), ReportAmino(SeedAmino(9), 3), ReportAmino(SeedAmino(12), 4)]} # C D E F self.writer.add_nuc_read(offset_sequence='---TGTGACGAGTTT', count=1) self.writer.write(insertions={'R1': [6]}, seed_name='R1-seed', report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testInsertWithDeletion(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion """ report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(3), 2), ReportAmino(SeedAmino(9), 3), ReportAmino(SeedAmino(12), 4)]} # C D E F self.writer.add_nuc_read(offset_sequence='TGTGAC---GAGTTT', count=1) self.writer.write(insertions={'R1': [3, 6]}, seed_name='R1-seed', report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testTwoInsertsWithOffset(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,6,6,6,GAC R1-seed,0.100,R1,6,6,6,GAC R1-seed,MAX,R1,9,9,12,TTT R1-seed,0.100,R1,9,9,12,TTT """ report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(3), 2), ReportAmino(SeedAmino(9), 3), ReportAmino(SeedAmino(15), 4)]} # C D E F G self.writer.add_nuc_read(offset_sequence='---TGTGACGAGTTTGGG', count=1) self.writer.write(insertions={'R1': [6, 12]}, seed_name='R1-seed', 
report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testInsertsWithVariants(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,6,6,6,GAC R1-seed,0.100,R1,6,6,6,GAC """ report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(3), 2), ReportAmino(SeedAmino(9), 3), ReportAmino(SeedAmino(12), 4)]} self.writer.add_nuc_read(offset_sequence=self.nuc_seq_acdef, count=1) self.writer.add_nuc_read(offset_sequence=self.nuc_seq_afdef, count=1) self.writer.write(insertions={'R1': [6]}, seed_name='R1-seed', report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testDifferentInserts(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,3,3,3,TTT R1-seed,0.100,R1,3,3,3,TKT """ report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(6), 2), ReportAmino(SeedAmino(9), 3), ReportAmino(SeedAmino(12), 4)]} self.writer.add_nuc_read(offset_sequence=self.nuc_seq_acdef, count=2) self.writer.add_nuc_read(offset_sequence=self.nuc_seq_afdef, count=3) self.writer.write(insertions={'R1': [3]}, seed_name='R1-seed', report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testMulticharacterInsert(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,6,6,6,GACGAG R1-seed,0.100,R1,6,6,6,GACGAG """ report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(3), 2), ReportAmino(SeedAmino(12), 3)]} self.writer.add_nuc_read(offset_sequence=self.nuc_seq_acdef, count=1) self.writer.write(insertions={'R1': [6, 9]}, seed_name='R1-seed', report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testReadGapInInsert(self): nuc_seq = 'GCTCTnGACGAGTTT' expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion """ report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(6), 2), ReportAmino(SeedAmino(9), 3)]} self.writer.add_nuc_read(nuc_seq, count=1) self.writer.write(insertions={'R1': [3]}, seed_name='R1-seed', report_aminos_all=report_aminos, report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue()) def testUnsortedInserts(self): expected_text = """\ seed,mixture_cutoff,region,ref_region_pos,ref_genome_pos,query_pos,insertion R1-seed,MAX,R1,6,6,6,GACGAG R1-seed,0.100,R1,6,6,6,GACGAG """ report_aminos = {'R1': [ReportAmino(SeedAmino(0), 1), ReportAmino(SeedAmino(3), 2), ReportAmino(SeedAmino(12), 3)]} self.writer.add_nuc_read(offset_sequence=self.nuc_seq_acdef, count=1) self.writer.write(insertions={'R1': [9, 6]}, seed_name='R1-seed', report_aminos_all=report_aminos, 
report_nucleotides_all={'R1': []}, landmarks=yaml.safe_load(LANDMARKS_YAML), consensus_builder=ConsensusBuilder(['MAX', 0.1], 0)) self.assertMultiLineEqual(expected_text, self.insert_file.getvalue())
agpl-3.0
andre-senna/opencog
opencog/python/utility/evolutionary.py
33
9725
__author__ = 'keyvan'

from random import randrange, random as rand


def new_offspring(parent):
    return type(parent)(parent=parent)


def new_individual(population):
    return population.type_of_individuals(population=population)


class IndividualBase(object):
    """
    An individual object should be a collection of Genes,
    each gene should be accessible via a 'locus', so if
    individual is implemented in terms of dic, locus is key and
    """
    #############################################################
    ## User of this code should implement the following.
    ## Refer to examples/genetic_algorithm for demo

    loci = None
    _out_of_date_fitness_value = True

    def __init__(self, **kwargs):
        """
        Do not override __init___
        instead, override __init_normal___ and/or __init_by_parent__
        """
        if 'parent' in kwargs:
            parent = kwargs['parent']
            self.__dict__.update(parent.population.common_attributes)
            self.population = parent.population
            self.__init_by_parent__(parent)
        elif 'population' in kwargs:
            self.population = kwargs['population']
            self.__dict__.update(self.population.common_attributes)
            self.__init_normal__()

    def __fitness__(self):
        pass

    def __mutate__(self):
        """
        return an offspring with a mutated gene
        """
        pass

    def __crossover__(self, other):
        """
        return an offspring
        """
        return self.fitness_proportionate_crossover(other)

    # implement getitem and setitem if you're not using a
    # standard structure. If list, set or dict satisfy your
    # needs, you can subclass from IndividualListBase,
    # IndividualSetBase or IndividualDictBase, respectively.
    #
    # def __getitem__(self, key):
    #     pass
    #
    # def __setitem__(self, key, value):
    #     pass

    def __init_by_parent__(self, parent):
        pass

    def __init_normal__(self):
        pass

    #############################################################

    @property
    def fitness(self):
        if self._out_of_date_fitness_value:
            self._fitness = self.__fitness__()
            self._out_of_date_fitness_value = False
        return self._fitness

    def mutate(self):
        self._out_of_date_fitness_value = True
        return self.__mutate__()

    def __add__(self, other):  # + operator does crossover
        return self.__crossover__(other)

    def __cmp__(self, other):
        return other.fitness - self.fitness
        # return self.fitness - other.fitness

    #############################################################
    ## Predefined crossover methods

    def proportionate_crossover(self, other, self_share):
        offspring = new_offspring(self)
        for locus in set(self.loci) | set(other.loci):
            if rand() < self_share:
                if locus not in self.loci:
                    continue
                offspring[locus] = self[locus]
            else:
                if locus not in other.loci:
                    continue
                offspring[locus] = other[locus]
        return offspring

    def uniform_crossover(self, other):
        return self.proportionate_crossover(other, 0.5)

    def fitness_proportionate_crossover(self, other):
        if self.fitness == 0 and other.fitness == 0:
            self_share = 0.5
        else:
            self_share = float(self.fitness) / (self.fitness + other.fitness)
        return self.proportionate_crossover(other, self_share)

    def one_point_crossover(self, other, point_index):
        pass  # TODO

    def two_point_crossover(self, other, first_point_index=None,
                            second_point_index=None):
        pass  # TODO

    #############################################################


class Population(object):
    def __init__(self, type_of_individuals, number_of_individuals=0,
                 **common_attributes_between_individuals):
        self.common_attributes = common_attributes_between_individuals
        self.current_generation = []
        self.next_generation = []
        self.type_of_individuals = type_of_individuals
        self.generation_count = 0
        self.add_many(number_of_individuals)

    def __selection__(self):
        """
        Override this method to control selection behaviour.
        Select method should return one individual. The default
        implementation returns an individual from the fitter half of
        the population. Population is sorted in the entry point of
        this method.
        """
        return self.current_generation[
            randrange(0, len(self.current_generation) / 2)]

    def __crossover_selection__(self):
        """
        Override if your crossover selection method is
        different from your mutation selection.
        """
        return self.__selection__(), self.__selection__()

    def select_for_mutation(self):
        return self.__selection__()

    def select_for_crossover(self):
        return self.__crossover_selection__()

    def add_many(self, quantity):
        for _ in range(quantity):
            individual = new_individual(self)
            self.add_to_current_generation(individual)

    def add_to_current_generation(self, individual):
        self.current_generation.append(individual)

    def add_to_next_generation(self, offspring):
        self.next_generation.append(offspring)

    def switch_to_next_generation(self):
        self.current_generation = self.next_generation
        self.next_generation = []
        self._sorted = False
        self.generation_count += 1

    def sort(self):
        self.current_generation.sort()

    def __len__(self):
        return len(self.current_generation)

    def __getitem__(self, index):
        return self.current_generation[index]


class GeneticAlgorithm(object):
    sort_population_each_step = True

    def __init__(self, **kwargs):
        """
        Two ways for initialising:
        1) giving the population by passing 'population' parameter
        2) specifying 'type_of_individuals' and 'number_of_individuals'
        """
        self.__dict__.update(kwargs)
        if not self.__dict__.has_key('population'):
            if not self.__dict__.has_key('type_of_individuals')\
               or not self.__dict__.has_key('number_of_individuals'):
                raise ValueError('since population is not specified,'
                                 ' type_of_individuals and number_of_individuals'
                                 ' should be present')
            self.population = Population(self.type_of_individuals,
                                         self.number_of_individuals)
        self.highest_fitness_found = 0

    def step(self, mutation_rate=1, crossover_rate=1,
             number_of_individuals=0):
        highest_fitness_this_generation = 0
        fittest_individual_this_generation = None
        if number_of_individuals <= 0:
            number_of_individuals = len(self.population)
        while len(self.population.next_generation) < number_of_individuals:
            if self.sort_population_each_step:
                self.population.sort()
            if 0 < crossover_rate > rand():
                parent, other_parent = self.population.select_for_crossover()
                offspring = parent + other_parent  # crossover
            else:
                offspring = self.population.select_for_mutation()
            if 0 < mutation_rate > rand():
                offspring = offspring.mutate()
            self.population.add_to_next_generation(offspring)
            if offspring.fitness > highest_fitness_this_generation:
                fittest_individual_this_generation = offspring
                highest_fitness_this_generation = offspring.fitness
        if highest_fitness_this_generation > self.highest_fitness_found:
            self.highest_fitness_found = fittest_individual_this_generation.fitness
            self.fittest_individual_found = fittest_individual_this_generation
        self.population.switch_to_next_generation()
        return fittest_individual_this_generation

    def run(self, show_population_each_generation=True):
        while True:
            if show_population_each_generation:
                print '#################### Generation ' +\
                      str(self.population.generation_count) +\
                      ' ####################'
                for individual in self.population:
                    print individual
            print 'Fittest:', str(self.step())


class IndividualListBase(IndividualBase, list):
    @property
    def loci(self):
        return range(len(self))


class IndividualDictBase(IndividualBase, dict):
    @property
    def loci(self):
        return self


class IndividualSetBase(IndividualBase, set):
    @property
    def loci(self):
        return self

    def __getitem__(self, item):
        return item

    def __setitem__(self, key, value):
        self.remove(key)
        self.add(value)


#class NoneEpistaticGeneticAlgorithm(GeneticAlgorithm):
#
#    fitness_unit = 1
#
#    class _contribution_dict(dict):
#        def __getitem__(self, item):
#            if item not in self:
#                return NoneEpistaticGeneticAlgorithm.fitness_unit
#            return dict.__getitem__(self, item)
#
#    fitness_contribution_by_locus = _contribution_dict()
#
#    def __init__(self, type_of_individuals, number_of_individuals):
#        self.population = _none_epistatic_population(type_of_individuals, number_of_individuals)
#        self.population.a
#
#    def step(self, mutation_rate=1, crossover_rate = 1,
#             number_of_individuals=0):
#        pass
agpl-3.0
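A hypothetical usage sketch for the module above (not part of the original file): the intended subclassing pattern is to override __init_normal__, __init_by_parent__, __fitness__ and __mutate__ on one of the Individual*Base classes, then drive evolution through GeneticAlgorithm. The BitString class and all literals below are illustrative assumptions, not code from the repository.

from random import randrange

class BitString(IndividualListBase):
    length = 8

    def __init_normal__(self):
        # fresh individual: random bits
        self.extend(randrange(2) for _ in range(self.length))

    def __init_by_parent__(self, parent):
        # offspring starts as a copy of its parent's genes
        self.extend(parent)

    def __fitness__(self):
        return sum(self)  # count of 1-bits

    def __mutate__(self):
        offspring = new_offspring(self)
        index = randrange(len(offspring))
        offspring[index] = 1 - offspring[index]  # flip one gene
        return offspring

ga = GeneticAlgorithm(type_of_individuals=BitString, number_of_individuals=20)
fittest = ga.step()  # one generation of selection, crossover and mutation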
amyshi188/osf.io
website/addons/github/routes.py
21
2795
# -*- coding: utf-8 -*-

from framework.routing import Rule, json_renderer

from website.addons.github import views

api_routes = {
    'rules': [
        Rule(['/settings/github/accounts/'],
             'get', views.github_account_list, json_renderer),
        Rule(['/project/<pid>/github/settings/',
              '/project/<pid>/node/<nid>/github/settings/'],
             'get', views.github_get_config, json_renderer),
        Rule(['/project/<pid>/github/settings/',
              '/project/<pid>/node/<nid>/github/settings/'],
             'post', views.github_set_config, json_renderer),
        Rule(['/project/<pid>/github/user_auth/',
              '/project/<pid>/node/<nid>/github/user_auth/'],
             'put', views.github_import_auth, json_renderer),
        Rule(['/project/<pid>/github/user_auth/',
              '/project/<pid>/node/<nid>/github/user_auth/'],
             'delete', views.github_deauthorize_node, json_renderer),
        Rule(['/project/<pid>/github/tarball/',
              '/project/<pid>/node/<nid>/github/tarball/'],
             'get', views.github_download_starball, json_renderer,
             {'archive': 'tar'}, endpoint_suffix='__tar'),
        Rule(['/project/<pid>/github/zipball/',
              '/project/<pid>/node/<nid>/github/zipball/'],
             'get', views.github_download_starball, json_renderer,
             {'archive': 'zip'}, endpoint_suffix='__zip'),
        Rule(['/project/<pid>/github/hook/',
              '/project/<pid>/node/<nid>/github/hook/'],
             'post', views.github_hook_callback, json_renderer),
        Rule(['/project/<pid>/github/repo/create/',
              '/project/<pid>/node/<nid>/github/repo/create/'],
             'post', views.github_create_repo, json_renderer),
        Rule(['/project/<pid>/github/hgrid/root/',
              '/project/<pid>/node/<nid>/github/hgrid/root/'],
             'get', views.github_root_folder, json_renderer),
    ],
    'prefix': '/api/v1',
}
apache-2.0
chrz89/upb-son-editor-backend
src/son_editor/tests/project_test.py
3
4973
import unittest
import json

from son_editor.tests.utils import *
from son_editor.util.constants import WORKSPACES, PROJECTS
from son_editor.util.context import init_test_context


class ProjectTest(unittest.TestCase):
    def setUp(self):
        # Initializes test context
        self.app = init_test_context()
        self.user = create_logged_in_user(self.app, "user_a")
        # Create real workspace by request
        self.wsid = create_workspace(self.user, 'ProjectTest')

    def test_create_project(self):
        # Setup request dict
        request_dict = {"name": "projectName"}

        # Post request on projects
        rv = self.app.post('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/',
                           data=json.dumps(request_dict),
                           content_type='application/json')
        # Expect project gets created
        self.assertEqual(201, rv.status_code)
        result_dict = json.loads(rv.data.decode())
        self.assertEqual(request_dict['name'], result_dict['name'])
        self.assertEqual(["personal"], result_dict['publish_to'])

        # Post same request on projects again, should fail
        rv = self.app.post('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/',
                           data=json.dumps(request_dict),
                           content_type='application/json')
        # Expect project creation fails
        self.assertEqual(409, rv.status_code)

    def test_get_projects(self):
        request_dict1 = {"name": "projectsGet1"}
        request_dict2 = {"name": "projectsGet2"}

        # Post requests to create two projects
        self.app.post('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/',
                      data=json.dumps(request_dict1),
                      content_type='application/json')
        self.app.post('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/',
                      data=json.dumps(request_dict2),
                      content_type='application/json')

        # Get request on projects
        rv = self.app.get('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/')
        result = json.loads(rv.data.decode())
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['name'], request_dict1['name'])
        self.assertEqual(result[1]['name'], request_dict2['name'])

    def test_get_project(self):
        request_dict = {"name": "projectGet"}

        # Post request to create the project
        rv = self.app.post('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/',
                           data=json.dumps(request_dict),
                           content_type='application/json')
        project_id = json.loads(rv.data.decode())['id']

        # Get request on the single project
        rv = self.app.get('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/' + str(project_id))
        result = json.loads(rv.data.decode())
        self.assertEqual(200, rv.status_code)
        self.assertEqual(result['name'], request_dict['name'])

    def test_delete_project(self):
        request_dict = {"name": "projectDelete"}

        # Post request to create the project
        rv = self.app.post('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/',
                           data=json.dumps(request_dict),
                           content_type='application/json')
        project_id = json.loads(rv.data.decode())['id']

        # Delete request on the project
        rv = self.app.delete('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/' + str(project_id))
        result = json.loads(rv.data.decode())
        self.assertEqual(200, rv.status_code)
        self.assertEqual(result['name'], request_dict['name'])

        # check if its really been deleted
        rv = self.app.get('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/' + str(project_id))
        self.assertEqual(404, rv.status_code)

    def test_update_project(self):
        # Setup request dict
        pj_id = create_project(self.wsid, "update_project_name")
        create_project(self.wsid, "another_project_name")
        request_dict = {"name": "new_project_name", "publish_to": ["cat2"]}

        # Put request on the project
        rv = self.app.put('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/' + str(pj_id),
                          data=json.dumps(request_dict),
                          content_type='application/json')
        # Expect project gets updated
        self.assertEqual(200, rv.status_code)
        result_dict = json.loads(rv.data.decode())
        self.assertEqual(request_dict['name'], result_dict['name'])
        self.assertEqual(["cat2"], result_dict['publish_to'])

        request_dict = {"name": "another_project_name", "publish_to": ["cat2"]}
        # update to existing name, should fail
        rv = self.app.put('/' + WORKSPACES + '/' + str(self.wsid) + '/' + PROJECTS + '/' + str(pj_id),
                          data=json.dumps(request_dict),
                          content_type='application/json')
        # Expect project update fails
        self.assertEqual(409, rv.status_code)
apache-2.0
noobcoderT/ryu-3.21
ryu/tests/integrated/vrrp_common.py
17
8841
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import random

from ryu.base import app_manager
from ryu.lib import hub
from ryu.lib import mac as lib_mac
from ryu.lib.packet import vrrp
from ryu.services.protocols.vrrp import api as vrrp_api
from ryu.services.protocols.vrrp import event as vrrp_event


_VRID = 7
_PRIMARY_IP_ADDRESS0 = '10.0.0.2'
_PRIMARY_IP_ADDRESS1 = '10.0.0.3'


class VRRPCommon(app_manager.RyuApp):
    _IFNAME0 = None
    _IFNAME1 = None

    def __init__(self, *args, **kwargs):
        super(VRRPCommon, self).__init__(*args, **kwargs)

    def _main(self):
        self._main_version(vrrp.VRRP_VERSION_V3)
        self._main_version(vrrp.VRRP_VERSION_V2)
        print("done!")

    def _main_version(self, vrrp_version):
        self._main_version_priority(vrrp_version,
                                    vrrp.VRRP_PRIORITY_ADDRESS_OWNER)
        self._main_version_priority(vrrp_version,
                                    vrrp.VRRP_PRIORITY_BACKUP_MAX)
        self._main_version_priority(vrrp_version,
                                    vrrp.VRRP_PRIORITY_BACKUP_DEFAULT)
        self._main_version_priority(vrrp_version,
                                    vrrp.VRRP_PRIORITY_BACKUP_MIN)

    def _main_version_priority(self, vrrp_version, priority):
        self._main_version_priority_sleep(vrrp_version, priority, False)
        self._main_version_priority_sleep(vrrp_version, priority, True)

    def _check(self, vrrp_api, instances):
        while True:
            while True:
                rep = vrrp_api.vrrp_list(self)
                if len(rep.instance_list) >= len(instances) * 2:
                    if any(i.state == vrrp_event.VRRP_STATE_INITIALIZE
                           for i in rep.instance_list):
                        continue
                    break
                print('%s / %s' % (len(rep.instance_list),
                                   len(instances) * 2))
                time.sleep(1)
            # for i in rep.instance_list:
            #     print('%s %s %s %s %s' % (i.instance_name,
            #                               i.monitor_name,
            #                               i.config,
            #                               i.interface,
            #                               i.state))
            assert len(rep.instance_list) == len(instances) * 2

            num_of_master = 0
            d = dict(((i.instance_name, i) for i in rep.instance_list))
            bad = 0
            for i in rep.instance_list:
                assert i.state in (vrrp_event.VRRP_STATE_MASTER,
                                   vrrp_event.VRRP_STATE_BACKUP)
                if i.state == vrrp_event.VRRP_STATE_MASTER:
                    num_of_master += 1

                vr = instances[i.config.vrid]
                if (vr[0].config.priority > vr[1].config.priority and
                    i.instance_name == vr[1].instance_name) or \
                   (vr[0].config.priority < vr[1].config.priority and
                    i.instance_name == vr[0].instance_name):
                    if i.state == vrrp_event.VRRP_STATE_MASTER:
                        print("bad master:")
                        print('%s %s' % (d[vr[0].instance_name].state,
                                         d[vr[0].instance_name].config.priority))
                        print('%s %s' % (d[vr[1].instance_name].state,
                                         d[vr[1].instance_name].config.priority))
                        bad += 1
                        # assert i.state != vrrp_event.VRRP_STATE_MASTER
            if bad > 0:
                # this could be a transient state
                print("%s bad masters" % bad)
                time.sleep(1)
                continue

            if num_of_master >= len(instances):
                assert num_of_master == len(instances)
                break
            print('%s / %s' % (num_of_master, len(instances)))
            time.sleep(1)
            continue

    def _main_version_priority_sleep(self, vrrp_version, priority, do_sleep):
        app_mgr = app_manager.AppManager.get_instance()
        self.logger.debug('%s', app_mgr.applications)
        vrrp_mgr = app_mgr.applications['VRRPManager']

        step = 5
        instances = {}
        for vrid in range(1, 256, step):
            if vrid == _VRID:
                continue
            print("vrid %s" % vrid)
            l = {}
            prio = max(vrrp.VRRP_PRIORITY_BACKUP_MIN,
                       min(vrrp.VRRP_PRIORITY_BACKUP_MAX, vrid))
            rep0 = self._configure_vrrp_router(vrrp_version, prio,
                                               _PRIMARY_IP_ADDRESS0,
                                               self._IFNAME0, vrid)
            assert rep0.instance_name is not None
            l[0] = rep0
            prio = max(vrrp.VRRP_PRIORITY_BACKUP_MIN,
                       min(vrrp.VRRP_PRIORITY_BACKUP_MAX, 256 - vrid))
            rep1 = self._configure_vrrp_router(vrrp_version, prio,
                                               _PRIMARY_IP_ADDRESS1,
                                               self._IFNAME1, vrid)
            assert rep1.instance_name is not None
            l[1] = rep1
            instances[vrid] = l

        print("vrid %s" % _VRID)
        l = {}
        rep0 = self._configure_vrrp_router(vrrp_version, priority,
                                           _PRIMARY_IP_ADDRESS0,
                                           self._IFNAME0, _VRID)
        assert rep0.instance_name is not None
        l[0] = rep0
        rep1 = self._configure_vrrp_router(vrrp_version,
                                           vrrp.VRRP_PRIORITY_BACKUP_DEFAULT,
                                           _PRIMARY_IP_ADDRESS1,
                                           self._IFNAME1, _VRID)
        assert rep1.instance_name is not None
        l[1] = rep1
        instances[_VRID] = l

        self.logger.debug('%s', vrrp_mgr._instances)

        if do_sleep:
            print("priority %s" % priority)
            print("waiting for instances starting")
            self._check(vrrp_api, instances)

        for vrid in instances.keys():
            if vrid == _VRID:
                continue
            which = vrid & 1
            new_priority = int(random.uniform(vrrp.VRRP_PRIORITY_BACKUP_MIN,
                                              vrrp.VRRP_PRIORITY_BACKUP_MAX))
            i = instances[vrid][which]
            vrrp_api.vrrp_config_change(self, i.instance_name,
                                        priority=new_priority)
            i.config.priority = new_priority

        if do_sleep:
            print("priority shuffled")
            self._check(vrrp_api, instances)

        for vrid in instances.keys():
            if vrid == _VRID:
                continue
            which = vrid & 1
            vrrp_api.vrrp_shutdown(self, instances[vrid][which].instance_name)
        vrrp_api.vrrp_shutdown(self, instances[_VRID][0].instance_name)

        if do_sleep:
            print("shutting down instances")
            while True:
                rep = vrrp_api.vrrp_list(self)
                if len(rep.instance_list) <= len(instances):
                    break
                print("left %s" % len(rep.instance_list))
                time.sleep(1)
            assert len(rep.instance_list) == len(instances)

            print("waiting for the rest becoming master")
            while True:
                rep = vrrp_api.vrrp_list(self)
                if all(i.state == vrrp_event.VRRP_STATE_MASTER
                       for i in rep.instance_list):
                    break
                time.sleep(1)

        vrrp_api.vrrp_shutdown(self, instances[_VRID][1].instance_name)
        for vrid in instances.keys():
            if vrid == _VRID:
                continue
            which = 1 - (vrid & 1)
            vrrp_api.vrrp_shutdown(self, instances[vrid][which].instance_name)

        print("waiting for instances shutting down")
        while True:
            rep = vrrp_api.vrrp_list(self)
            if not rep.instance_list:
                break
            print("left %s" % len(rep.instance_list))
            time.sleep(1)
apache-2.0
piger/dulbecco
utils/sputa.py
1
1103
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import cPickle as pickle
import shutil
import os
import json
import sys


class PersistentDict(dict):
    def __init__(self, filename, *args, **kwargs):
        self.filename = filename
        dict.__init__(self, *args, **kwargs)

    def save(self):
        tmpfile = self.filename + ".tmp"
        try:
            with open(tmpfile, "wb") as fd:
                pickle.dump(dict(self), fd, 2)
        except (OSError, pickle.PickleError):
            os.remove(tmpfile)
            raise
        shutil.move(tmpfile, self.filename)

    def load(self):
        if not os.path.exists(self.filename):
            return
        with open(self.filename, "rb") as fd:
            data = pickle.load(fd)
        self.update(data)


if __name__ == '__main__':
    filename = "markov.pickle"
    pd = PersistentDict(filename)
    pd.load()

    i = 0
    for key in pd:
        jkey = json.dumps(key, separators=(',', ':'))
        for subkey in pd[key]:
            line = u"%s\n%s\n" % (jkey, subkey)
            sys.stdout.write(line.encode('utf-8'))
bsd-2-clause
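A short usage sketch for PersistentDict above (illustrative, not from the repository; the file name and key are made up): it behaves like a plain dict, with explicit load()/save() calls that round-trip the contents through the pickle file named at construction.

counts = PersistentDict("counts.pickle")   # hypothetical file name
counts.load()                              # silently does nothing if the file is missing
counts["spam"] = counts.get("spam", 0) + 1
counts.save()                              # pickles to counts.pickle.tmp, then moves it into place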
balister/GNU-Radio
gr-blocks/python/blocks/qa_max.py
11
1954
#!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gr_unittest, blocks
import math


class test_max(gr_unittest.TestCase):

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        src_data = (0, 0.2, -0.3, 0, 12, 0)
        expected_result = (float(max(src_data)),)

        src = blocks.vector_source_f(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
        op = blocks.max_ff(len(src_data))
        dst = blocks.vector_sink_f()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def test_002(self):
        src_data = (-100, -99, -98, -97, -96, -1)
        expected_result = (float(max(src_data)),)

        src = blocks.vector_source_f(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
        op = blocks.max_ff(len(src_data))
        dst = blocks.vector_sink_f()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)


if __name__ == '__main__':
    gr_unittest.run(test_max, "test_max.xml")
gpl-3.0
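A minimal sketch (not part of the test file above) of the same flowgraph pattern outside gr_unittest, using only blocks the tests themselves exercise: a float vector source feeds stream_to_vector, max_ff emits the maximum of each input vector, and a vector sink collects the result. The sample values are arbitrary.

from gnuradio import gr, blocks

samples = (1.5, -2.0, 7.25, 0.0)  # arbitrary example data
tb = gr.top_block()
src = blocks.vector_source_f(samples)
s2v = blocks.stream_to_vector(gr.sizeof_float, len(samples))
op = blocks.max_ff(len(samples))  # one output item: the max of each vector
dst = blocks.vector_sink_f()
tb.connect(src, s2v, op, dst)
tb.run()
print(dst.data())  # expected: (7.25,)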
aioue/ansible
lib/ansible/modules/network/panos/panos_service.py
76
5072
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: panos_service
short_description: create a service object
description:
    - Create a service object. Service objects are fundamental representation of the applications given src/dst ports and protocol
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
    - pan-python
options:
    ip_address:
        description:
            - IP address (or hostname) of PAN-OS device
        required: true
    password:
        description:
            - password for authentication
        required: true
    username:
        description:
            - username for authentication
        required: false
        default: "admin"
    service_name:
        description:
            - name of the service
        required: true
    protocol:
        description:
            - protocol for the service, should be tcp or udp
        required: true
    port:
        description:
            - destination port
        required: true
    source_port:
        description:
            - source port
        required: false
        default: None
    commit:
        description:
            - commit if changed
        required: false
        default: true
'''

EXAMPLES = '''
# Creates service for port 22
  - name: create SSH service
    panos_service:
      ip_address: "192.168.1.1"
      password: "admin"
      service_name: "service-tcp-22"
      protocol: "tcp"
      port: "22"
'''

RETURN = '''
# Default return values
'''

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception

try:
    import pan.xapi
    from pan.xapi import PanXapiError
    HAS_LIB = True
except ImportError:
    HAS_LIB = False

_SERVICE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
                 "/vsys/entry[@name='vsys1']" +\
                 "/service/entry[@name='%s']"


def service_exists(xapi, service_name):
    xapi.get(_SERVICE_XPATH % service_name)
    e = xapi.element_root.find('.//entry')
    if e is None:
        return False
    return True


def add_service(xapi, module, service_name, protocol, port, source_port):
    if service_exists(xapi, service_name):
        return False

    exml = ['<protocol>']
    exml.append('<%s>' % protocol)
    exml.append('<port>%s</port>' % port)
    if source_port:
        exml.append('<source-port>%s</source-port>' % source_port)
    exml.append('</%s>' % protocol)
    exml.append('</protocol>')

    exml = ''.join(exml)

    xapi.set(xpath=_SERVICE_XPATH % service_name, element=exml)

    return True


def main():
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        service_name=dict(required=True),
        protocol=dict(required=True, choices=['tcp', 'udp']),
        port=dict(required=True),
        source_port=dict(),
        commit=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    service_name = module.params['service_name']
    protocol = module.params['protocol']
    port = module.params['port']
    source_port = module.params['source_port']
    commit = module.params['commit']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    try:
        changed = add_service(xapi, module, service_name,
                              protocol, port, source_port)
        if changed and commit:
            xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
    except PanXapiError:
        exc = get_exception()
        module.fail_json(msg=exc.message)

    module.exit_json(changed=changed, msg="okey dokey")


if __name__ == '__main__':
    main()
gpl-3.0
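For illustration (derived from add_service() above, not additional module code): the XML element the module assembles for a TCP service on port 22 with no source port, before pushing it to the firewall with xapi.set() under _SERVICE_XPATH.

exml = ['<protocol>']
exml.append('<tcp>')
exml.append('<port>22</port>')  # a <source-port> element would be inserted here if one were given
exml.append('</tcp>')
exml.append('</protocol>')
print(''.join(exml))  # -> <protocol><tcp><port>22</port></tcp></protocol>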
aslamplr/shorts
lib/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py
12
5292
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import

import json

from oauthlib.common import log

from .base import GrantTypeBase
from .. import errors, utils
from ..request_validator import RequestValidator


class RefreshTokenGrant(GrantTypeBase):

    """`Refresh token grant`_

    .. _`Refresh token grant`: http://tools.ietf.org/html/rfc6749#section-6
    """

    @property
    def issue_new_refresh_tokens(self):
        return True

    def __init__(self, request_validator=None,
                 issue_new_refresh_tokens=True):
        self.request_validator = request_validator or RequestValidator()

    def create_token_response(self, request, token_handler):
        """Create a new access token from a refresh_token.

        If valid and authorized, the authorization server issues an access
        token as described in `Section 5.1`_. If the request failed
        verification or is invalid, the authorization server returns an error
        response as described in `Section 5.2`_.

        The authorization server MAY issue a new refresh token, in which case
        the client MUST discard the old refresh token and replace it with the
        new refresh token. The authorization server MAY revoke the old
        refresh token after issuing a new refresh token to the client. If a
        new refresh token is issued, the refresh token scope MUST be
        identical to that of the refresh token included by the client in the
        request.

        .. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
        .. _`Section 5.2`: http://tools.ietf.org/html/rfc6749#section-5.2
        """
        headers = {
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache',
        }
        try:
            log.debug('Validating refresh token request, %r.', request)
            self.validate_token_request(request)
        except errors.OAuth2Error as e:
            return headers, e.json, e.status_code

        token = token_handler.create_token(
            request, refresh_token=self.issue_new_refresh_tokens)
        log.debug('Issuing new token to client id %r (%r), %r.',
                  request.client_id, request.client, token)
        return headers, json.dumps(token), 200

    def validate_token_request(self, request):
        # REQUIRED. Value MUST be set to "refresh_token".
        if request.grant_type != 'refresh_token':
            raise errors.UnsupportedGrantTypeError(request=request)

        if request.refresh_token is None:
            raise errors.InvalidRequestError(
                description='Missing refresh token parameter.',
                request=request)

        # Because refresh tokens are typically long-lasting credentials used to
        # request additional access tokens, the refresh token is bound to the
        # client to which it was issued. If the client type is confidential or
        # the client was issued client credentials (or assigned other
        # authentication requirements), the client MUST authenticate with the
        # authorization server as described in Section 3.2.1.
        # http://tools.ietf.org/html/rfc6749#section-3.2.1
        if self.request_validator.client_authentication_required(request):
            log.debug('Authenticating client, %r.', request)
            if not self.request_validator.authenticate_client(request):
                log.debug('Invalid client (%r), denying access.', request)
                raise errors.InvalidClientError(request=request,
                                                status_code=401)
        elif not self.request_validator.authenticate_client_id(
                request.client_id, request):
            log.debug('Client authentication failed, %r.', request)
            raise errors.InvalidClientError(request=request)

        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)

        # REQUIRED. The refresh token issued to the client.
        log.debug('Validating refresh token %s for client %r.',
                  request.refresh_token, request.client)
        if not self.request_validator.validate_refresh_token(
                request.refresh_token, request.client, request):
            log.debug('Invalid refresh token, %s, for client %r.',
                      request.refresh_token, request.client)
            raise errors.InvalidGrantError(request=request)

        original_scopes = utils.scope_to_list(
            self.request_validator.get_original_scopes(
                request.refresh_token, request))

        if request.scope:
            request.scopes = utils.scope_to_list(request.scope)
            if (not all((s in original_scopes for s in request.scopes))
                    and not self.request_validator.is_within_original_scope(
                        request.scopes, request.refresh_token, request)):
                log.debug('Refresh token %s lack requested scopes, %r.',
                          request.refresh_token, request.scopes)
                raise errors.InvalidScopeError(
                    state=request.state, request=request, status_code=401)
        else:
            request.scopes = original_scopes
mit
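A hedged wiring sketch (not from the file above): how RefreshTokenGrant is typically combined with oauthlib's BearerToken helper, assuming the standard oauthlib package layout this vendored copy mirrors. The base RequestValidator raises NotImplementedError for its hooks, so a real deployment must subclass it with storage-backed lookups; the URL and token value below are placeholders.

from oauthlib.common import Request
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from oauthlib.oauth2.rfc6749.request_validator import RequestValidator

class MyValidator(RequestValidator):
    pass  # override authenticate_client, validate_refresh_token, etc.

validator = MyValidator()
grant = RefreshTokenGrant(request_validator=validator)
token_handler = BearerToken(request_validator=validator)

# placeholder endpoint and refresh token, for illustration only
request = Request('https://provider.example/token',
                  http_method='POST',
                  body='grant_type=refresh_token&refresh_token=abc123')
headers, body, status = grant.create_token_response(request, token_handler)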
st-tech/zr-obp
tests/ope/test_importance_weight_estimator.py
1
23554
from pathlib import Path
from typing import Dict

from conftest import generate_action_dist
import numpy as np
import pytest
from sklearn.base import BaseEstimator
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
import yaml

from obp.ope import ImportanceWeightEstimator
from obp.types import BanditFeedback


np.random.seed(1)

binary_model_dict = dict(
    logistic_regression=LogisticRegression,
    lightgbm=GradientBoostingClassifier,
    random_forest=RandomForestClassifier,
)

# hyperparameter settings for the base ML model in importance weight estimator
cd_path = Path(__file__).parent.resolve()
with open(cd_path / "hyperparams.yaml", "rb") as f:
    hyperparams = yaml.safe_load(f)


# action_context, n_actions, len_list, fitting_method, base_model, calibration_cv, err, description
n_rounds = 1000
n_actions = 3
len_list = 3

invalid_input_of_initializing_importance_weight_estimator = [
    (np.random.uniform(size=(n_actions, 8)),
     "a",  #
     len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]), 2,
     TypeError,
     "n_actions must be an instance of <class 'int'>, not <class 'str'>."),
    (np.random.uniform(size=(n_actions, 8)),
     1,  #
     len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]), 2,
     ValueError, "n_actions == 1, must be >= 2"),
    (np.random.uniform(size=(n_actions, 8)), n_actions,
     "a",  #
     "sample",
     RandomForestClassifier(**hyperparams["random_forest"]), 2,
     TypeError,
     "len_list must be an instance of <class 'int'>, not <class 'str'>."),
    (np.random.uniform(size=(n_actions, 8)), n_actions,
     0,  #
     "sample",
     RandomForestClassifier(**hyperparams["random_forest"]), 2,
     ValueError, "len_list == 0, must be >= 1"),
    (np.random.uniform(size=(n_actions, 8)), n_actions, len_list,
     1,  #
     RandomForestClassifier(**hyperparams["random_forest"]), 2,
     ValueError,
     "`fitting_method` must be either 'sample' or 'raw', but 1 is given"),
    (np.random.uniform(size=(n_actions, 8)), n_actions, len_list,
     "awesome",  #
     RandomForestClassifier(**hyperparams["random_forest"]), 2,
     ValueError,
     "`fitting_method` must be either 'sample' or 'raw', but awesome is given"),
    (np.random.uniform(size=(n_actions, 8)), n_actions, len_list, "sample",
     "RandomForest",  #
     2,
     ValueError,
     "`base_model` must be BaseEstimator or a child class of BaseEstimator"),
    (np.random.uniform(size=(n_actions, 8)), n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     1.5,  #
     TypeError,
     "calibration_cv must be an instance of <class 'int'>, not <class 'float'>."),
]

# context, action, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, n_folds, random_state, calibration_cv, err, description
invalid_input_of_fitting_importance_weight_estimator = [
    (None,  #
     np.random.choice(n_actions, size=n_rounds),
     np.random.choice(len_list, size=n_rounds),
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`context` must be 2D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     None,  #
     np.random.choice(len_list, size=n_rounds),
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`action` must be 1D array"),
    (np.random.uniform(size=(n_rounds, 7, 3)),  #
     np.random.choice(n_actions, size=n_rounds),
     np.random.choice(len_list, size=n_rounds),
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`context` must be 2D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(n_actions, size=(n_rounds, 3)),  #
     np.random.choice(len_list, size=n_rounds),
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`action` must be 1D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(["1", "a"], size=n_rounds),  #
     np.random.choice(len_list, size=n_rounds),
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`action` elements must be non-negative integers"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice([-1, -3], size=n_rounds),  #
     np.random.choice(len_list, size=n_rounds),
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`action` elements must be non-negative integers"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(n_actions, size=n_rounds),
     "3",  #
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`position` must be 1D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(n_actions, size=n_rounds),
     np.random.choice(len_list, size=(n_rounds, 3)),  #
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`position` must be 1D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(n_actions, size=n_rounds),
     np.random.choice(len_list, size=n_rounds - 1),  #
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "Expected `context.shape[0]"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(n_actions, size=n_rounds),
     np.random.choice(["a", "1"], size=n_rounds),  #
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`position` elements must be non-negative integers"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(n_actions, size=n_rounds),
     np.random.choice([-1, -3], size=n_rounds),  #
     None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`position` elements must be non-negative integers"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(n_actions, size=n_rounds - 1),  #
     None, None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "Expected `context.shape[0]"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.random.choice(n_actions, size=n_rounds - 1),  #
     None, None, n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "Expected `context.shape[0]"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     "3",  #
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`action_context` must be 2D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8, 3)),  #
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`action_context` must be 2D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     (np.arange(n_rounds) % n_actions) + 1,  #
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError,
     r"`action` elements must be integers in the range of"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.ones((n_rounds, 2)),  #
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "`position` must be 1D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.ones(n_rounds, dtype=int) * len_list,  #
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError,
     "`position` elements must be smaller than `len_list`"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     None,  #
     3, 1, 2, ValueError, "`action_dist` must be 3D array"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     np.zeros((n_rounds, n_actions, len_list - 1)),  #
     3, 1, 2, ValueError,
     "shape of `action_dist` must be (n_rounds, n_actions, len_list)"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     np.zeros((n_rounds, n_actions, len_list)),  #
     3, 1, 2, ValueError, "`action_dist` must be a probability distribution"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     "a",  #
     None, 2, TypeError,
     "n_folds must be an instance of <class 'int'>, not <class 'str'>"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     0,  #
     None, 2, ValueError, "n_folds == 0, must be >= 1."),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3,
     "a",  #
     2, ValueError,
     "'a' cannot be used to seed a numpy.random.RandomState instance"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.zeros(n_rounds, dtype=int),  #
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     3, 1, 2, ValueError, "No training data at position"),
]

valid_input_of_importance_weight_estimator = [
    (np.random.uniform(size=(n_rounds * 100, 7)),
     np.arange(n_rounds * 100) % n_actions,
     np.random.choice(len_list, size=n_rounds * 100),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds * 100, n_actions, len_list),
     3, 1, 2, "valid input with cross fitting"),
    (np.random.uniform(size=(n_rounds * 100, 7)),
     np.arange(n_rounds * 100) % n_actions,
     np.random.choice(len_list, size=n_rounds * 100),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds * 100, n_actions, len_list),
     3, 2, 1, "valid input with cross fitting"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     np.random.uniform(size=(n_actions, 8)),
     n_actions, len_list, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     1, 1, 2, "valid input without cross fitting"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     None,
     np.random.uniform(size=(n_actions, 8)),
     n_actions, 1, "sample",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, 1),
     1, 1, 2, "valid input without position"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     None, None,
     n_actions, 1, "raw",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, 1),
     1, 1, 2, "valid input without position when fitting_method is `raw`"),
    (np.random.uniform(size=(n_rounds, 7)),
     np.arange(n_rounds) % n_actions,
     np.random.choice(len_list, size=n_rounds),
     None,
     n_actions, len_list, "raw",
     RandomForestClassifier(**hyperparams["random_forest"]),
     generate_action_dist(n_rounds, n_actions, len_list),
     1, 1, 2, "valid input when fitting_method is `raw`"),
]


@pytest.mark.parametrize(
    "action_context, n_actions, len_list, fitting_method, base_model, calibration_cv, err, description",
    invalid_input_of_initializing_importance_weight_estimator,
)
def test_initializing_importance_weight_estimator_using_invalid_input_data(
    action_context: np.ndarray,
    n_actions: int,
    len_list: int,
    fitting_method: str,
    base_model: BaseEstimator,
    calibration_cv: int,
    err,
    description: str,
) -> None:
    # initialization raises ValueError
    with pytest.raises(err, match=f"{description}*"):
        _ = ImportanceWeightEstimator(
            n_actions=n_actions,
            len_list=len_list,
            action_context=action_context,
            base_model=base_model,
            fitting_method=fitting_method,
            calibration_cv=calibration_cv,
        )


@pytest.mark.parametrize(
    "context, action, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, n_folds, random_state, calibration_cv, err, description",
    invalid_input_of_fitting_importance_weight_estimator,
)
def test_fitting_importance_weight_estimator_using_invalid_input_data(
    context: np.ndarray,
    action: np.ndarray,
    position: np.ndarray,
    action_context: np.ndarray,
    n_actions: int,
    len_list: int,
    fitting_method: str,
    base_model: BaseEstimator,
    action_dist: np.ndarray,
    n_folds: int,
    random_state: int,
    calibration_cv: int,
    err,
    description: str,
) -> None:
    # fit_predict function raises ValueError
    with pytest.raises(err, match=f"{description}*"):
        importance_weight_estimator = ImportanceWeightEstimator(
            n_actions=n_actions,
            len_list=len_list,
            action_context=action_context,
            base_model=base_model,
            fitting_method=fitting_method,
            calibration_cv=calibration_cv,
        )
        # train importance weight estimator on logged bandit feedback data
        _ = importance_weight_estimator.fit_predict(
            context=context,
            action=action,
            position=position,
            n_folds=n_folds,
            random_state=random_state,
            action_dist=action_dist,
        )


@pytest.mark.parametrize(
    "context, action, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, n_folds, random_state, calibration_cv, description",
    valid_input_of_importance_weight_estimator,
)
def test_importance_weight_estimator_using_valid_input_data(
    context: np.ndarray,
    action: np.ndarray,
    position: np.ndarray,
    action_context: np.ndarray,
    n_actions: int,
    len_list: int,
    fitting_method: str,
    base_model: BaseEstimator,
    action_dist: np.ndarray,
    n_folds: int,
    random_state: int,
    calibration_cv: int,
    description: str,
) -> None:
    # fit_predict
    importance_weight_estimator = ImportanceWeightEstimator(
        n_actions=n_actions,
        len_list=len_list,
        action_context=action_context,
        base_model=base_model,
        fitting_method=fitting_method,
        calibration_cv=calibration_cv,
    )
    # train importance weight estimator on logged bandit feedback data
    _ = importance_weight_estimator.fit_predict(
        context=context,
        action=action,
        action_dist=action_dist,
        position=position,
        n_folds=n_folds,
        random_state=random_state,
    )


def test_performance_of_binary_outcome_models(
    fixed_synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray
) -> None:
    """
    Test the performance of ope estimators using synthetic bandit data and random evaluation policy
    when the importance weight estimator is estimated by a logistic regression
    """
    bandit_feedback = fixed_synthetic_bandit_feedback.copy()
    action_dist = random_action_dist
    random_state = 12345
    auc_scores: Dict[str, float] = {}
    fit_methods = ["sample", "raw"]
    for fit_method in fit_methods:
        for model_name, model in binary_model_dict.items():
            importance_weight_estimator = ImportanceWeightEstimator(
                n_actions=bandit_feedback["n_actions"],
                action_context=bandit_feedback["action_context"],
                base_model=model(**hyperparams[model_name]),
                fitting_method=fit_method,
                len_list=1,
            )
            # train importance weight estimator on logged bandit feedback data
            estimated_importance_weight = importance_weight_estimator.fit_predict(
                context=bandit_feedback["context"],
                action=bandit_feedback["action"],
                action_dist=action_dist,
                n_folds=2,  # 2-fold cross-fitting
                random_state=random_state,
                evaluate_model_performance=True,
            )
            assert np.all(
                estimated_importance_weight >= 0
            ), "estimated_importance_weight must be non-negative"
            # extract predictions
            tmp_y = []
            tmp_pred = []
            for i in range(len(importance_weight_estimator.eval_result["y"])):
                tmp_y.append(importance_weight_estimator.eval_result["y"][i])
tmp_pred.append(importance_weight_estimator.eval_result["proba"][i]) y_test = np.array(tmp_y).flatten() y_pred = np.array(tmp_pred).flatten() auc_scores[model_name + "_" + fit_method] = roc_auc_score( y_true=y_test, y_score=y_pred, ) for model_name in auc_scores: print(f"AUC of {model_name} is {auc_scores[model_name]}") assert ( auc_scores[model_name] > 0.5 ), f"AUC of {model_name} should be greater than 0.5"
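
# --- Illustrative appendix (not part of the original test module) ---
# A minimal happy-path sketch of the API the valid-input cases above exercise.
# All names (ImportanceWeightEstimator, generate_action_dist, hyperparams,
# n_rounds, n_actions, len_list) are the module-level fixtures these tests
# already use; the underscore prefix keeps pytest from collecting it.
def _example_fit_predict():
    estimator = ImportanceWeightEstimator(
        n_actions=n_actions,
        len_list=len_list,
        action_context=np.random.uniform(size=(n_actions, 8)),
        base_model=RandomForestClassifier(**hyperparams["random_forest"]),
        fitting_method="sample",
        calibration_cv=2,
    )
    importance_weights = estimator.fit_predict(
        context=np.random.uniform(size=(n_rounds, 7)),
        action=np.arange(n_rounds) % n_actions,
        position=np.random.choice(len_list, size=n_rounds),
        action_dist=generate_action_dist(n_rounds, n_actions, len_list),
        n_folds=2,  # 2-fold cross-fitting, as in the performance test above
        random_state=12345,
    )
    assert np.all(importance_weights >= 0)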
apache-2.0
espadrine/opera
chromium/src/third_party/trace-viewer/third_party/closure_linter/closure_linter/common/tokens.py
139
4407
#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes to represent tokens and positions within them.""" __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') class TokenType(object): """Token types common to all languages.""" NORMAL = 'normal' WHITESPACE = 'whitespace' BLANK_LINE = 'blank line' class Token(object): """Token class for intelligent text splitting. The token class represents a string of characters and an identifying type. Attributes: type: The type of token. string: The characters the token comprises. length: The length of the token. line: The text of the line the token is found in. line_number: The number of the line the token is found in. values: Dictionary of values returned from the tokens regex match. previous: The token before this one. next: The token after this one. start_index: The character index in the line where this token starts. attached_object: Object containing more information about this token. metadata: Object containing metadata about this token. Must be added by a separate metadata pass. """ def __init__(self, string, token_type, line, line_number, values=None): """Creates a new Token object. Args: string: The string of input the token contains. token_type: The type of token. line: The text of the line this token is in. line_number: The line number of the token. values: A dict of named values within the token. For instance, a function declaration may have a value called 'name' which captures the name of the function. """ self.type = token_type self.string = string self.length = len(string) self.line = line self.line_number = line_number self.values = values # These parts can only be computed when the file is fully tokenized self.previous = None self.next = None self.start_index = None # This part is set in statetracker.py # TODO(robbyw): Wrap this in to metadata self.attached_object = None # This part is set in *metadatapass.py self.metadata = None def IsFirstInLine(self): """Tests if this token is the first token in its line. Returns: Whether the token is the first token in its line. """ return not self.previous or self.previous.line_number != self.line_number def IsLastInLine(self): """Tests if this token is the last token in its line. Returns: Whether the token is the last token in its line. """ return not self.next or self.next.line_number != self.line_number def IsType(self, token_type): """Tests if this token is of the given type. Args: token_type: The type to test for. Returns: True if the type of this token matches the type passed in. """ return self.type == token_type def IsAnyType(self, *token_types): """Tests if this token is any of the given types. Args: token_types: The types to check. Also accepts a single array. Returns: True if the type of this token is any of the types passed in. 
""" if not isinstance(token_types[0], basestring): return self.type in token_types[0] else: return self.type in token_types def __repr__(self): return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string, self.values, self.line_number, self.metadata) def __iter__(self): """Returns a token iterator.""" node = self while node: yield node node = node.next def __reversed__(self): """Returns a reverse-direction token iterator.""" node = self while node: yield node node = node.previous
bsd-3-clause
SnappleCap/oh-mainline
vendor/packages/twisted/twisted/python/zshcomp.py
18
27574
# -*- test-case-name: twisted.test.test_zshcomp -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Rebuild the completion functions for the currently active version of Twisted:: $ python zshcomp.py -i This module implements a zsh code generator which generates completion code for commands that use twisted.python.usage. This is the stuff that makes pressing Tab at the command line work. Maintainer: Eric Mangold To build completion functions for your own commands, and not Twisted commands, then just do something like this:: o = mymodule.MyOptions() f = file('_mycommand', 'w') Builder("mycommand", o, f).write() Then all you have to do is place the generated file somewhere in your C{$fpath}, and restart zsh. Note the "site-functions" directory in your C{$fpath} where you may install 3rd-party completion functions (like the one you're building). Call C{siteFunctionsPath} to locate this directory programmatically. SPECIAL CLASS VARIABLES. You may set these on your usage.Options subclass:: zsh_altArgDescr zsh_multiUse zsh_mutuallyExclusive zsh_actions zsh_actionDescr zsh_extras Here is what they mean (with examples):: zsh_altArgDescr = {"foo":"use this description for foo instead"} A dict mapping long option names to alternate descriptions. When this variable is present, the descriptions contained here will override those descriptions provided in the optFlags and optParameters variables. zsh_multiUse = ["foo", "bar"] A sequence containing those long option names which may appear on the command line more than once. By default, options will only be completed one time. zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")] A sequence of sequences, with each sub-sequence containing those long option names that are mutually exclusive. That is, those options that cannot appear on the command line together. zsh_actions = {"foo":'_files -g "*.foo"', "bar":"(one two three)", "colors":"_values -s , 'colors to use' red green blue"} A dict mapping long option names to Zsh "actions". These actions define what will be completed as the argument to the given option. By default, all files/dirs will be completed if no action is given. Callables may instead be given for the values in this dict. The callable should accept no arguments, and return a string that will be used as the zsh "action" in the same way as the literal strings in the examples above. As you can see in the example above. The "foo" option will have files that end in .foo completed when the user presses Tab. The "bar" option will have either of the strings "one", "two", or "three" completed when the user presses Tab. "colors" will allow multiple arguments to be completed, seperated by commas. The possible arguments are red, green, and blue. Examples:: my_command --foo some-file.foo --colors=red,green my_command --colors=green my_command --colors=green,blue Actions may take many forms, and it is beyond the scope of this document to illustrate them all. Please refer to the documention for the Zsh _arguments function. zshcomp is basically a front-end to Zsh's _arguments completion function. That documentation is available on the zsh web site at this URL: U{http://zsh.sunsite.dk/Doc/Release/zsh_19.html#SEC124} zsh_actionDescr = {"logfile":"log file name", "random":"random seed"} A dict mapping long option names to a description for the corresponding zsh "action". These descriptions are show above the generated matches when the user is doing completions for this option. 
Normally Zsh does not show these descriptions unless you have "verbose" completion turned on. Turn on verbosity with this in your ~/.zshrc:: zstyle ':completion:*' verbose yes zstyle ':completion:*:descriptions' format '%B%d%b' zsh_extras = [":file to read from:action", ":file to write to:action"] A sequence of extra arguments that will be passed verbatim to Zsh's _arguments completion function. The _arguments function does all the hard work of doing command line completions. You can see how zshcomp invokes the _arguments call by looking at the generated completion files that this module creates. *** NOTE *** You will need to use this variable to describe completions for normal command line arguments. That is, those arguments that are not associated with an option. That is, the arguments that are given to the parseArgs method of your usage.Options subclass. In the example above, the 1st non-option argument will be described as "file to read from" and completion options will be generated in accordance with the "action". (See above about zsh "actions") The 2nd non-option argument will be described as "file to write to" and the action will be interpreted likewise. Things you can put here are all documented under the _arguments function here: U{http://zsh.sunsite.dk/Doc/Release/zsh_19.html#SEC124} Zsh Notes: To enable advanced completion add something like this to your ~/.zshrc:: autoload -U compinit compinit For some extra verbosity, and general niceness add these lines too:: zstyle ':completion:*' verbose yes zstyle ':completion:*:descriptions' format '%B%d%b' zstyle ':completion:*:messages' format '%d' zstyle ':completion:*:warnings' format 'No matches for: %d' Have fun! """ import itertools, sys, commands, os.path from twisted.python import reflect, util, usage from twisted.scripts.mktap import IServiceMaker class MyOptions(usage.Options): """ Options for this file """ longdesc = "" synopsis = "Usage: python zshcomp.py [--install | -i] | <output directory>" optFlags = [["install", "i", 'Output files to the "installation" directory ' \ '(twisted/python/zsh in the currently active ' \ 'Twisted package)']] optParameters = [["directory", "d", None, "Output files to this directory"]] def postOptions(self): if self['install'] and self['directory']: raise usage.UsageError, "Can't have --install and " \ "--directory at the same time" if not self['install'] and not self['directory']: raise usage.UsageError, "Not enough arguments" if self['directory'] and not os.path.isdir(self['directory']): raise usage.UsageError, "%s is not a directory" % self['directory'] class Builder: def __init__(self, cmd_name, options, file): """ @type cmd_name: C{str} @param cmd_name: The name of the command @type options: C{twisted.usage.Options} @param options: The C{twisted.usage.Options} instance defined for this command @type file: C{file} @param file: The C{file} to write the completion function to """ self.cmd_name = cmd_name self.options = options self.file = file def write(self): """ Write the completion function to the file given to __init__ @return: C{None} """ # by default, we just write out a single call to _arguments self.file.write('#compdef %s\n' % (self.cmd_name,)) gen = ArgumentsGenerator(self.cmd_name, self.options, self.file) gen.write() class SubcommandBuilder(Builder): """ Use this builder for commands that have sub-commands. twisted.python.usage has the notion of sub-commands that are defined using an entirely seperate Options class. 
""" interface = None subcmdLabel = None def write(self): """ Write the completion function to the file given to __init__ @return: C{None} """ self.file.write('#compdef %s\n' % (self.cmd_name,)) self.file.write('local _zsh_subcmds_array\n_zsh_subcmds_array=(\n') from twisted import plugin as newplugin plugins = newplugin.getPlugins(self.interface) for p in plugins: self.file.write('"%s:%s"\n' % (p.tapname, p.description)) self.file.write(")\n\n") self.options.__class__.zsh_extras = ['*::subcmd:->subcmd'] gen = ArgumentsGenerator(self.cmd_name, self.options, self.file) gen.write() self.file.write("""if (( CURRENT == 1 )); then _describe "%s" _zsh_subcmds_array && ret=0 fi (( ret )) || return 0 service="$words[1]" case $service in\n""" % (self.subcmdLabel,)) plugins = newplugin.getPlugins(self.interface) for p in plugins: self.file.write(p.tapname + ")\n") gen = ArgumentsGenerator(p.tapname, p.options(), self.file) gen.write() self.file.write(";;\n") self.file.write("*) _message \"don't know how to" \ " complete $service\";;\nesac") class MktapBuilder(SubcommandBuilder): """ Builder for the mktap command """ interface = IServiceMaker subcmdLabel = 'tap to build' class TwistdBuilder(SubcommandBuilder): """ Builder for the twistd command """ interface = IServiceMaker subcmdLabel = 'service to run' class ArgumentsGenerator: """ Generate a call to the zsh _arguments completion function based on data in a usage.Options subclass """ def __init__(self, cmd_name, options, file): """ @type cmd_name: C{str} @param cmd_name: The name of the command @type options: C{twisted.usage.Options} @param options: The C{twisted.usage.Options} instance defined for this command @type file: C{file} @param file: The C{file} to write the completion function to """ self.cmd_name = cmd_name self.options = options self.file = file self.altArgDescr = {} self.actionDescr = {} self.multiUse = [] self.mutuallyExclusive = [] self.actions = {} self.extras = [] aCL = reflect.accumulateClassList aCD = reflect.accumulateClassDict aCD(options.__class__, 'zsh_altArgDescr', self.altArgDescr) aCD(options.__class__, 'zsh_actionDescr', self.actionDescr) aCL(options.__class__, 'zsh_multiUse', self.multiUse) aCL(options.__class__, 'zsh_mutuallyExclusive', self.mutuallyExclusive) aCD(options.__class__, 'zsh_actions', self.actions) aCL(options.__class__, 'zsh_extras', self.extras) optFlags = [] optParams = [] aCL(options.__class__, 'optFlags', optFlags) aCL(options.__class__, 'optParameters', optParams) for i, optList in enumerate(optFlags): if len(optList) != 3: optFlags[i] = util.padTo(3, optList) for i, optList in enumerate(optParams): if len(optList) != 4: optParams[i] = util.padTo(4, optList) self.optFlags = optFlags self.optParams = optParams optParams_d = {} for optList in optParams: optParams_d[optList[0]] = optList[1:] self.optParams_d = optParams_d optFlags_d = {} for optList in optFlags: optFlags_d[optList[0]] = optList[1:] self.optFlags_d = optFlags_d optAll_d = {} optAll_d.update(optParams_d) optAll_d.update(optFlags_d) self.optAll_d = optAll_d self.addAdditionalOptions() # makes sure none of the zsh_ data structures reference option # names that don't exist. 
(great for catching typos) self.verifyZshNames() self.excludes = self.makeExcludesDict() def write(self): """ Write the zsh completion code to the file given to __init__ @return: C{None} """ self.writeHeader() self.writeExtras() self.writeOptions() self.writeFooter() def writeHeader(self): """ This is the start of the code that calls _arguments @return: C{None} """ self.file.write('_arguments -s -A "-*" \\\n') def writeOptions(self): """ Write out zsh code for each option in this command @return: C{None} """ optNames = self.optAll_d.keys() optNames.sort() for long in optNames: self.writeOpt(long) def writeExtras(self): """ Write out the "extras" list. These are just passed verbatim to the _arguments call @return: C{None} """ for s in self.extras: self.file.write(escape(s)) self.file.write(' \\\n') def writeFooter(self): """ Write the last bit of code that finishes the call to _arguments @return: C{None} """ self.file.write('&& return 0\n') def verifyZshNames(self): """ Ensure that none of the names given in zsh_* variables are typoed @return: C{None} @raise ValueError: Raised if unknown option names have been given in zsh_* variables """ def err(name): raise ValueError, "Unknown option name \"%s\" found while\n" \ "examining zsh_ attributes for the %s command" % ( name, self.cmd_name) for name in itertools.chain(self.altArgDescr, self.actionDescr, self.actions, self.multiUse): if name not in self.optAll_d: err(name) for seq in self.mutuallyExclusive: for name in seq: if name not in self.optAll_d: err(name) def excludeStr(self, long, buildShort=False): """ Generate an "exclusion string" for the given option @type long: C{str} @param long: The long name of the option (i.e. "verbose" instead of "v") @type buildShort: C{bool} @param buildShort: May be True to indicate we're building an excludes string for the short option that correspondes to the given long opt @return: The generated C{str} """ if long in self.excludes: exclusions = self.excludes[long][:] else: exclusions = [] # if long isn't a multiUse option (can't appear on the cmd line more # than once), then we have to exclude the short option if we're # building for the long option, and vice versa. if long not in self.multiUse: if buildShort is False: short = self.getShortOption(long) if short is not None: exclusions.append(short) else: exclusions.append(long) if not exclusions: return '' strings = [] for optName in exclusions: if len(optName) == 1: # short option strings.append("-" + optName) else: strings.append("--" + optName) return "(%s)" % " ".join(strings) def makeExcludesDict(self): """ @return: A C{dict} that maps each option name appearing in self.mutuallyExclusive to a list of those option names that is it mutually exclusive with (can't appear on the cmd line with) """ #create a mapping of long option name -> single character name longToShort = {} for optList in itertools.chain(self.optParams, self.optFlags): try: if optList[1] != None: longToShort[optList[0]] = optList[1] except IndexError: pass excludes = {} for lst in self.mutuallyExclusive: for i, long in enumerate(lst): tmp = [] tmp.extend(lst[:i]) tmp.extend(lst[i+1:]) for name in tmp[:]: if name in longToShort: tmp.append(longToShort[name]) if long in excludes: excludes[long].extend(tmp) else: excludes[long] = tmp return excludes def writeOpt(self, long): """ Write out the zsh code for the given argument. This is just part of the one big call to _arguments @type long: C{str} @param long: The long name of the option (i.e. 
"verbose" instead of "v") @return: C{None} """ if long in self.optFlags_d: # It's a flag option. Not one that takes a parameter. long_field = "--%s" % long else: long_field = "--%s=" % long short = self.getShortOption(long) if short != None: short_field = "-" + short else: short_field = '' descr = self.getDescription(long) descr_field = descr.replace("[", "\[") descr_field = descr_field.replace("]", "\]") descr_field = '[%s]' % descr_field if long in self.actionDescr: actionDescr_field = self.actionDescr[long] else: actionDescr_field = descr action_field = self.getAction(long) if long in self.multiUse: multi_field = '*' else: multi_field = '' longExclusions_field = self.excludeStr(long) if short: #we have to write an extra line for the short option if we have one shortExclusions_field = self.excludeStr(long, buildShort=True) self.file.write(escape('%s%s%s%s%s' % (shortExclusions_field, multi_field, short_field, descr_field, action_field))) self.file.write(' \\\n') self.file.write(escape('%s%s%s%s%s' % (longExclusions_field, multi_field, long_field, descr_field, action_field))) self.file.write(' \\\n') def getAction(self, long): """ Return a zsh "action" string for the given argument @return: C{str} """ if long in self.actions: if callable(self.actions[long]): action = self.actions[long]() else: action = self.actions[long] return ":%s:%s" % (self.getActionDescr(long), action) if long in self.optParams_d: return ':%s:_files' % self.getActionDescr(long) return '' def getActionDescr(self, long): """ Return the description to be used when this argument is completed @return: C{str} """ if long in self.actionDescr: return self.actionDescr[long] else: return long def getDescription(self, long): """ Return the description to be used for this argument @return: C{str} """ #check if we have an alternate descr for this arg, and if so use it if long in self.altArgDescr: return self.altArgDescr[long] #otherwise we have to get it from the optFlags or optParams try: descr = self.optFlags_d[long][1] except KeyError: try: descr = self.optParams_d[long][2] except KeyError: descr = None if descr is not None: return descr # lets try to get it from the opt_foo method doc string if there is one longMangled = long.replace('-', '_') # this is what t.p.usage does obj = getattr(self.options, 'opt_%s' % longMangled, None) if obj: descr = descrFromDoc(obj) if descr is not None: return descr return long # we really ought to have a good description to use def getShortOption(self, long): """ Return the short option letter or None @return: C{str} or C{None} """ optList = self.optAll_d[long] try: return optList[0] or None except IndexError: pass def addAdditionalOptions(self): """ Add additional options to the optFlags and optParams lists. 
These will be defined by 'opt_foo' methods of the Options subclass @return: C{None} """ methodsDict = {} reflect.accumulateMethods(self.options, methodsDict, 'opt_') methodToShort = {} for name in methodsDict.copy(): if len(name) == 1: methodToShort[methodsDict[name]] = name del methodsDict[name] for methodName, methodObj in methodsDict.items(): long = methodName.replace('_', '-') # t.p.usage does this # if this option is already defined by the optFlags or # optParameters then we don't want to override that data if long in self.optAll_d: continue descr = self.getDescription(long) short = None if methodObj in methodToShort: short = methodToShort[methodObj] reqArgs = methodObj.im_func.func_code.co_argcount if reqArgs == 2: self.optParams.append([long, short, None, descr]) self.optParams_d[long] = [short, None, descr] self.optAll_d[long] = [short, None, descr] elif reqArgs == 1: self.optFlags.append([long, short, descr]) self.optFlags_d[long] = [short, descr] self.optAll_d[long] = [short, None, descr] else: raise TypeError, '%r has wrong number ' \ 'of arguments' % (methodObj,) def descrFromDoc(obj): """ Generate an appropriate description from docstring of the given object """ if obj.__doc__ is None: return None lines = obj.__doc__.split("\n") descr = None try: if lines[0] != "" and not lines[0].isspace(): descr = lines[0].lstrip() # skip first line if it's blank elif lines[1] != "" and not lines[1].isspace(): descr = lines[1].lstrip() except IndexError: pass return descr def firstLine(s): """ Return the first line of the given string """ try: i = s.index('\n') return s[:i] except ValueError: return s def escape(str): """ Shell escape the given string """ return commands.mkarg(str)[1:] def siteFunctionsPath(): """ Return the path to the system-wide site-functions directory or C{None} if it cannot be determined """ try: cmd = "zsh -f -c 'echo ${(M)fpath:#/*/site-functions}'" output = commands.getoutput(cmd) if os.path.isdir(output): return output except: pass generateFor = [('conch', 'twisted.conch.scripts.conch', 'ClientOptions'), ('mktap', 'twisted.scripts.mktap', 'FirstPassOptions'), ('trial', 'twisted.scripts.trial', 'Options'), ('cftp', 'twisted.conch.scripts.cftp', 'ClientOptions'), ('tapconvert', 'twisted.scripts.tapconvert', 'ConvertOptions'), ('twistd', 'twisted.scripts.twistd', 'ServerOptions'), ('ckeygen', 'twisted.conch.scripts.ckeygen', 'GeneralOptions'), ('lore', 'twisted.lore.scripts.lore', 'Options'), ('pyhtmlizer', 'twisted.scripts.htmlizer', 'Options'), ('tap2deb', 'twisted.scripts.tap2deb', 'MyOptions'), ('tkconch', 'twisted.conch.scripts.tkconch', 'GeneralOptions'), ('manhole', 'twisted.scripts.manhole', 'MyOptions'), ('tap2rpm', 'twisted.scripts.tap2rpm', 'MyOptions'), ('websetroot', None, None), ('tkmktap', None, None), ] # NOTE: the commands using None above are no longer included in Twisted. # However due to limitations in zsh's completion system the version of # _twisted_zsh_stub shipped with zsh contains a static list of Twisted's # commands. 
It will display errors if completion functions for these missing # commands are not found :( So we just include dummy (empty) completion # function files specialBuilders = {'mktap' : MktapBuilder, 'twistd' : TwistdBuilder} def makeCompFunctionFiles(out_path, generateFor=generateFor, specialBuilders=specialBuilders): """ Generate completion function files in the given directory for all twisted commands @type out_path: C{str} @param out_path: The path to the directory to generate completion function fils in @param generateFor: Sequence in the form of the 'generateFor' top-level variable as defined in this module. Indicates what commands to build completion files for. @param specialBuilders: Sequence in the form of the 'specialBuilders' top-level variable as defined in this module. Indicates what commands require a special Builder class. @return: C{list} of 2-tuples of the form (cmd_name, error) indicating commands that we skipped building completions for. cmd_name is the name of the skipped command, and error is the Exception that was raised when trying to import the script module. Commands are usually skipped due to a missing dependency, e.g. Tkinter. """ skips = [] for cmd_name, module_name, class_name in generateFor: if module_name is None: # create empty file f = _openCmdFile(out_path, cmd_name) f.close() continue try: m = __import__('%s' % (module_name,), None, None, (class_name)) f = _openCmdFile(out_path, cmd_name) o = getattr(m, class_name)() # instantiate Options class if cmd_name in specialBuilders: b = specialBuilders[cmd_name](cmd_name, o, f) b.write() else: b = Builder(cmd_name, o, f) b.write() except Exception, e: skips.append( (cmd_name, e) ) continue return skips def _openCmdFile(out_path, cmd_name): return file(os.path.join(out_path, '_'+cmd_name), 'w') def run(): options = MyOptions() try: options.parseOptions(sys.argv[1:]) except usage.UsageError, e: print e print options.getUsage() sys.exit(2) if options['install']: import twisted dir = os.path.join(os.path.dirname(twisted.__file__), "python", "zsh") skips = makeCompFunctionFiles(dir) else: skips = makeCompFunctionFiles(options['directory']) for cmd_name, error in skips: sys.stderr.write("zshcomp: Skipped building for %s. Script module " \ "could not be imported:\n" % (cmd_name,)) sys.stderr.write(str(error)+'\n') if skips: sys.exit(3) if __name__ == '__main__': run()
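
# --- Illustrative appendix (not part of the original module) ---
# The smallest possible use of Builder, mirroring the recipe in the module
# docstring. FooOptions, 'foocmd', and '_foocmd' are made-up names for this
# sketch, not real Twisted commands.
def _example_build_completion():
    class FooOptions(usage.Options):
        optFlags = [["verbose", "v", "Be noisy"]]
        optParameters = [["logfile", "l", None, "Log file name"]]
        zsh_actions = {"logfile": '_files -g "*.log"'}
    f = file('_foocmd', 'w')  # place the result somewhere in your $fpath
    Builder('foocmd', FooOptions(), f).write()
    f.close()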
agpl-3.0
emergence/suds-philpem
suds/soaparray.py
1
2250
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )

"""
The I{soaparray} module provides XSD extensions for handling
soap (section 5) encoded arrays.
"""

from suds import *
from logging import getLogger
from suds.xsd.sxbasic import Factory as SXFactory
from suds.xsd.sxbasic import Attribute as SXAttribute


class Attribute(SXAttribute):
    """
    Represents an XSD <attribute/> that handles special
    attributes that are extensions for WSDLs.
    @ivar aty: Array type information.
    @type aty: The value of wsdl:arrayType.
    """

    def __init__(self, schema, root, aty):
        """
        @param aty: Array type information.
        @type aty: The value of wsdl:arrayType.
        """
        SXAttribute.__init__(self, schema, root)
        if aty.endswith('[]'):
            self.aty = aty[:-2]
        else:
            self.aty = aty

    def autoqualified(self):
        aqs = SXAttribute.autoqualified(self)
        aqs.append('aty')
        return aqs

    def description(self):
        d = SXAttribute.description(self)
        d = d+('aty',)
        return d

#
# Builder function, only builds Attribute when arrayType
# attribute is defined on root.
#
def __fn(x, y):
    ns = (None, "http://schemas.xmlsoap.org/wsdl/")
    aty = y.get('arrayType', ns=ns)
    if aty is None:
        return SXAttribute(x, y)
    else:
        return Attribute(x, y, aty)

#
# Remap <xs:attribute/> tags to __fn() builder.
#
SXFactory.maptag('attribute', __fn)
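
# --- Illustrative appendix (not part of the original module) ---
# Restates the '[]' handling in Attribute.__init__ above as a standalone
# helper, since building a real schema/root pair is out of scope here.
def _strip_array_suffix(aty):
    # mirrors the branch in Attribute.__init__
    if aty.endswith('[]'):
        return aty[:-2]
    return aty

assert _strip_array_suffix('xsd:string[]') == 'xsd:string'
assert _strip_array_suffix('xsd:int') == 'xsd:int'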
lgpl-3.0
emacsway/ascetic
ascetic/models.py
1
3121
from ascetic.mappers import Mapper, thread_safe
from ascetic.signals import pre_init, post_init
from ascetic.utils import classproperty, to_tuple


class ModelBase(type):
    """Metaclass for Model"""
    mapper_class = Mapper

    @thread_safe
    def __new__(mcs, name, bases, attrs):
        new_cls = type.__new__(mcs, name, bases, attrs)

        if name in ('Model', 'NewBase', ):
            return new_cls

        mapper_class = getattr(new_cls, 'Mapper', None) or getattr(new_cls, 'Meta', None)
        bases = []
        if mapper_class is not None:
            bases.append(mapper_class)
        if not isinstance(mapper_class, new_cls.mapper_class):
            bases.append(new_cls.mapper_class)

        mapper_factory = type("{}Mapper".format(new_cls.__name__), tuple(bases), {})
        new_cls._mapper = mapper_factory(new_cls)
        for k in to_tuple(new_cls._mapper.pk):
            setattr(new_cls, k, None)
        return new_cls


class Model(ModelBase("NewBase", (object, ), {})):
    _new_record = True
    _s = None

    def __init__(self, *args, **kwargs):
        pre_init.send(sender=self.__class__, instance=self, args=args, kwargs=kwargs)
        if args:
            self.__dict__.update(zip(self._mapper.fields.keys(), args))
        if kwargs:
            self.__dict__.update(kwargs)
        post_init.send(sender=self.__class__, instance=self)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self._get_pk() == other._get_pk()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        if not all(to_tuple(self._get_pk())):
            raise TypeError("Model instances without primary key value are unhashable")
        return hash(self._get_pk())

    def __dir__(self):
        return dir(super(Model, self)) + list(self._mapper.fields)

    def _get_pk(self):
        return self._mapper.get_pk(self)

    def _set_pk(self, value):
        self._mapper.set_pk(self, value)

    pk = property(_get_pk, _set_pk)

    def validate(self, fields=frozenset(), exclude=frozenset()):
        return self._mapper.validate(self, fields=fields, exclude=exclude)

    def save(self, *args, **kwargs):
        return self._mapper.save(self, *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self._mapper.delete(self, *args, **kwargs)

    @classproperty
    def s(cls):
        # TODO: Use Model class descriptor without __set__().
        return cls._mapper.sql_table

    @classproperty
    def q(cls):
        return cls._mapper.query

    @classmethod
    def get(cls, *args, **kwargs):
        return cls._mapper.get(*args, **kwargs)

    def __repr__(self):
        return "<{0}.{1}: {2}>".format(type(self).__module__, type(self).__name__, self.pk)


class CompositeModel(object):
    """Composite model.

    Example of usage:
    >>> rows = CompositeModel(Model1, Model2).q...filter(...)
    >>> type(rows[0]): CompositeModel
    >>> list(rows[0])
    [<Model1: 1>, <Model2: 2>]
    """
    def __init__(self, *models):
        self.models = models

    # TODO: build me.
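
# --- Illustrative appendix (not part of the original module) ---
# Sketch of a concrete model. Creating the class triggers ModelBase.__new__,
# which builds a mapper immediately, so the example is left commented out:
# running it needs a configured database, and the `db_table` option is an
# assumption about the Mapper config surface, not something this module
# defines.
#
# class Note(Model):
#     class Mapper(object):
#         db_table = 'notes'        # assumed mapper option
#
# note = Note(text='hello')         # kwargs land in the instance __dict__
# note.save()                       # delegates to Note._mapper.save
# Note.get(note.pk)                 # classmethod lookup via the mapper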
mit
ggenikus/cld
src/libs/bs4/builder/_htmlparser.py
21
11609
"""Use the HTMLParser library to parse HTML files that aren't too bad.""" # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __all__ = [ 'HTMLParserTreeBuilder', ] from HTMLParser import HTMLParser try: from HTMLParser import HTMLParseError except ImportError, e: # HTMLParseError is removed in Python 3.5. Since it can never be # thrown in 3.5, we can just define our own class as a placeholder. class HTMLParseError(Exception): pass import sys import warnings # Starting in Python 3.2, the HTMLParser constructor takes a 'strict' # argument, which we'd like to set to False. Unfortunately, # http://bugs.python.org/issue13273 makes strict=True a better bet # before Python 3.2.3. # # At the end of this file, we monkeypatch HTMLParser so that # strict=True works well on Python 3.2.2. major, minor, release = sys.version_info[:3] CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3 CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3 CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4 from bs4.element import ( CData, Comment, Declaration, Doctype, ProcessingInstruction, ) from bs4.dammit import EntitySubstitution, UnicodeDammit from bs4.builder import ( HTML, HTMLTreeBuilder, STRICT, ) HTMLPARSER = 'html.parser' class BeautifulSoupHTMLParser(HTMLParser): def __init__(self, *args, **kwargs): HTMLParser.__init__(self, *args, **kwargs) # Keep a list of empty-element tags that were encountered # without an explicit closing tag. If we encounter a closing tag # of this type, we'll associate it with one of those entries. # # This isn't a stack because we don't care about the # order. It's a list of closing tags we've already handled and # will ignore, assuming they ever show up. self.already_closed_empty_element = [] def handle_startendtag(self, name, attrs): # This is only called when the markup looks like # <tag/>. # is_startend() tells handle_starttag not to close the tag # just because its name matches a known empty-element tag. We # know that this is an empty-element tag and we want to call # handle_endtag ourselves. tag = self.handle_starttag(name, attrs, handle_empty_element=False) self.handle_endtag(name) def handle_starttag(self, name, attrs, handle_empty_element=True): # XXX namespace attr_dict = {} for key, value in attrs: # Change None attribute values to the empty string # for consistency with the other tree builders. if value is None: value = '' attr_dict[key] = value attrvalue = '""' #print "START", name tag = self.soup.handle_starttag(name, None, None, attr_dict) if tag and tag.is_empty_element and handle_empty_element: # Unlike other parsers, html.parser doesn't send separate end tag # events for empty-element tags. (It's handled in # handle_startendtag, but only if the original markup looked like # <tag/>.) # # So we need to call handle_endtag() ourselves. Since we # know the start event is identical to the end event, we # don't want handle_endtag() to cross off any previous end # events for tags of this name. self.handle_endtag(name, check_already_closed=False) # But we might encounter an explicit closing tag for this tag # later on. If so, we want to ignore it. self.already_closed_empty_element.append(name) def handle_endtag(self, name, check_already_closed=True): #print "END", name if check_already_closed and name in self.already_closed_empty_element: # This is a redundant end tag for an empty-element tag. # We've already called handle_endtag() for it, so just # check it off the list. 
# print "ALREADY CLOSED", name self.already_closed_empty_element.remove(name) else: self.soup.handle_endtag(name) def handle_data(self, data): self.soup.handle_data(data) def handle_charref(self, name): # XXX workaround for a bug in HTMLParser. Remove this once # it's fixed in all supported versions. # http://bugs.python.org/issue13633 if name.startswith('x'): real_name = int(name.lstrip('x'), 16) elif name.startswith('X'): real_name = int(name.lstrip('X'), 16) else: real_name = int(name) try: data = unichr(real_name) except (ValueError, OverflowError), e: data = u"\N{REPLACEMENT CHARACTER}" self.handle_data(data) def handle_entityref(self, name): character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name) if character is not None: data = character else: data = "&%s;" % name self.handle_data(data) def handle_comment(self, data): self.soup.endData() self.soup.handle_data(data) self.soup.endData(Comment) def handle_decl(self, data): self.soup.endData() if data.startswith("DOCTYPE "): data = data[len("DOCTYPE "):] elif data == 'DOCTYPE': # i.e. "<!DOCTYPE>" data = '' self.soup.handle_data(data) self.soup.endData(Doctype) def unknown_decl(self, data): if data.upper().startswith('CDATA['): cls = CData data = data[len('CDATA['):] else: cls = Declaration self.soup.endData() self.soup.handle_data(data) self.soup.endData(cls) def handle_pi(self, data): self.soup.endData() self.soup.handle_data(data) self.soup.endData(ProcessingInstruction) class HTMLParserTreeBuilder(HTMLTreeBuilder): is_xml = False picklable = True NAME = HTMLPARSER features = [NAME, HTML, STRICT] def __init__(self, *args, **kwargs): if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED: kwargs['strict'] = False if CONSTRUCTOR_TAKES_CONVERT_CHARREFS: kwargs['convert_charrefs'] = False self.parser_args = (args, kwargs) def prepare_markup(self, markup, user_specified_encoding=None, document_declared_encoding=None, exclude_encodings=None): """ :return: A 4-tuple (markup, original encoding, encoding declared within markup, whether any characters had to be replaced with REPLACEMENT CHARACTER). """ if isinstance(markup, unicode): yield (markup, None, None, False) return try_encodings = [user_specified_encoding, document_declared_encoding] dammit = UnicodeDammit(markup, try_encodings, is_html=True, exclude_encodings=exclude_encodings) yield (dammit.markup, dammit.original_encoding, dammit.declared_html_encoding, dammit.contains_replacement_characters) def feed(self, markup): args, kwargs = self.parser_args parser = BeautifulSoupHTMLParser(*args, **kwargs) parser.soup = self.soup try: parser.feed(markup) except HTMLParseError, e: warnings.warn(RuntimeWarning( "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) raise e parser.already_closed_empty_element = [] # Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some # 3.2.3 code. This ensures they don't treat markup like <p></p> as a # string. # # XXX This code can be removed once most Python 3 users are on 3.2.3. 
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT: import re attrfind_tolerant = re.compile( r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?') HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant locatestarttagend = re.compile(r""" <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name (?:\s+ # whitespace before attribute name (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name (?:\s*=\s* # value indicator (?:'[^']*' # LITA-enclosed value |\"[^\"]*\" # LIT-enclosed value |[^'\">\s]+ # bare value ) )? ) )* \s* # trailing whitespace """, re.VERBOSE) BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend from html.parser import tagfind, attrfind def parse_starttag(self, i): self.__starttag_text = None endpos = self.check_for_whole_start_tag(i) if endpos < 0: return endpos rawdata = self.rawdata self.__starttag_text = rawdata[i:endpos] # Now parse the data between i+1 and j into a tag and attrs attrs = [] match = tagfind.match(rawdata, i+1) assert match, 'unexpected call to parse_starttag()' k = match.end() self.lasttag = tag = rawdata[i+1:k].lower() while k < endpos: if self.strict: m = attrfind.match(rawdata, k) else: m = attrfind_tolerant.match(rawdata, k) if not m: break attrname, rest, attrvalue = m.group(1, 2, 3) if not rest: attrvalue = None elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ attrvalue[:1] == '"' == attrvalue[-1:]: attrvalue = attrvalue[1:-1] if attrvalue: attrvalue = self.unescape(attrvalue) attrs.append((attrname.lower(), attrvalue)) k = m.end() end = rawdata[k:endpos].strip() if end not in (">", "/>"): lineno, offset = self.getpos() if "\n" in self.__starttag_text: lineno = lineno + self.__starttag_text.count("\n") offset = len(self.__starttag_text) \ - self.__starttag_text.rfind("\n") else: offset = offset + len(self.__starttag_text) if self.strict: self.error("junk characters in start tag: %r" % (rawdata[k:endpos][:20],)) self.handle_data(rawdata[i:endpos]) return endpos if end.endswith('/>'): # XHTML-style empty tag: <span attr="value" /> self.handle_startendtag(tag, attrs) else: self.handle_starttag(tag, attrs) if tag in self.CDATA_CONTENT_ELEMENTS: self.set_cdata_mode(tag) return endpos def set_cdata_mode(self, elem): self.cdata_elem = elem.lower() self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I) BeautifulSoupHTMLParser.parse_starttag = parse_starttag BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode CONSTRUCTOR_TAKES_STRICT = True
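
# --- Illustrative appendix (not part of the original module) ---
# This builder is what BeautifulSoup selects for the 'html.parser' feature
# string (HTMLPARSER above); a quick smoke test of that path:
if __name__ == '__main__':
    from bs4 import BeautifulSoup
    soup = BeautifulSoup('<p>Hello <b>world</b></p>', 'html.parser')
    print soup.b.string  # world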
mit
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/celery/tests/worker/test_loops.py
6
14163
from __future__ import absolute_import import socket from kombu.async import Hub, READ, WRITE, ERR from celery.bootsteps import CLOSE, RUN from celery.exceptions import InvalidTaskError, WorkerShutdown, WorkerTerminate from celery.five import Empty from celery.worker import state from celery.worker.consumer import Consumer from celery.worker.loops import asynloop, synloop from celery.tests.case import AppCase, Mock, body_from_sig class X(object): def __init__(self, app, heartbeat=None, on_task_message=None): hub = Hub() ( self.obj, self.connection, self.consumer, self.blueprint, self.hub, self.qos, self.heartbeat, self.clock, ) = self.args = [Mock(name='obj'), Mock(name='connection'), Mock(name='consumer'), Mock(name='blueprint'), hub, Mock(name='qos'), heartbeat, Mock(name='clock')] self.connection.supports_heartbeats = True self.connection.get_heartbeat_interval.side_effect = ( lambda: self.heartbeat ) self.consumer.callbacks = [] self.obj.strategies = {} self.connection.connection_errors = (socket.error, ) self.hub.readers = {} self.hub.writers = {} self.hub.consolidate = set() self.hub.timer = Mock(name='hub.timer') self.hub.timer._queue = [Mock()] self.hub.fire_timers = Mock(name='hub.fire_timers') self.hub.fire_timers.return_value = 1.7 self.hub.poller = Mock(name='hub.poller') self.hub.close = Mock(name='hub.close()') # asynloop calls hub.close self.Hub = self.hub self.blueprint.state = RUN # need this for create_task_handler _consumer = Consumer(Mock(), timer=Mock(), app=app) _consumer.on_task_message = on_task_message or [] self.obj.create_task_handler = _consumer.create_task_handler self.on_unknown_message = self.obj.on_unknown_message = Mock( name='on_unknown_message', ) _consumer.on_unknown_message = self.on_unknown_message self.on_unknown_task = self.obj.on_unknown_task = Mock( name='on_unknown_task', ) _consumer.on_unknown_task = self.on_unknown_task self.on_invalid_task = self.obj.on_invalid_task = Mock( name='on_invalid_task', ) _consumer.on_invalid_task = self.on_invalid_task _consumer.strategies = self.obj.strategies def timeout_then_error(self, mock): def first(*args, **kwargs): mock.side_effect = socket.error() self.connection.more_to_read = False raise socket.timeout() mock.side_effect = first def close_then_error(self, mock=None, mod=0, exc=None): mock = Mock() if mock is None else mock def first(*args, **kwargs): if not mod or mock.call_count > mod: self.close() self.connection.more_to_read = False raise (socket.error() if exc is None else exc) mock.side_effect = first return mock def close(self, *args, **kwargs): self.blueprint.state = CLOSE def closer(self, mock=None, mod=0): mock = Mock() if mock is None else mock def closing(*args, **kwargs): if not mod or mock.call_count >= mod: self.close() mock.side_effect = closing return mock def get_task_callback(*args, **kwargs): x = X(*args, **kwargs) x.blueprint.state = CLOSE asynloop(*x.args) return x, x.consumer.callbacks[0] class test_asynloop(AppCase): def setup(self): @self.app.task(shared=False) def add(x, y): return x + y self.add = add def test_setup_heartbeat(self): x = X(self.app, heartbeat=10) x.hub.call_repeatedly = Mock(name='x.hub.call_repeatedly()') x.blueprint.state = CLOSE asynloop(*x.args) x.consumer.consume.assert_called_with() x.obj.on_ready.assert_called_with() x.hub.call_repeatedly.assert_called_with( 10 / 2.0, x.connection.heartbeat_check, 2.0, ) def task_context(self, sig, **kwargs): x, on_task = get_task_callback(self.app, **kwargs) body = body_from_sig(self.app, sig) message = Mock() strategy = 
        x.obj.strategies[sig.task] = Mock()
        return x, on_task, body, message, strategy

    def test_on_task_received(self):
        _, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
        on_task(body, msg)
        strategy.assert_called_with(
            msg, body, msg.ack_log_error, msg.reject_log_error, [],
        )

    def test_on_task_received_executes_on_task_message(self):
        cbs = [Mock(), Mock(), Mock()]
        _, on_task, body, msg, strategy = self.task_context(
            self.add.s(2, 2), on_task_message=cbs,
        )
        on_task(body, msg)
        strategy.assert_called_with(
            msg, body, msg.ack_log_error, msg.reject_log_error, cbs,
        )

    def test_on_task_message_missing_name(self):
        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
        body.pop('task')
        on_task(body, msg)
        x.on_unknown_message.assert_called_with(body, msg)

    def test_on_task_not_registered(self):
        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
        exc = strategy.side_effect = KeyError(self.add.name)
        on_task(body, msg)
        x.on_unknown_task.assert_called_with(body, msg, exc)

    def test_on_task_InvalidTaskError(self):
        x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
        exc = strategy.side_effect = InvalidTaskError()
        on_task(body, msg)
        x.on_invalid_task.assert_called_with(body, msg, exc)

    def test_should_terminate(self):
        x = X(self.app)
        # XXX why aren't the errors propagated?!?
        state.should_terminate = True
        try:
            with self.assertRaises(WorkerTerminate):
                asynloop(*x.args)
        finally:
            state.should_terminate = False

    def test_should_terminate_hub_close_raises(self):
        x = X(self.app)
        # XXX why aren't the errors propagated?!?
        state.should_terminate = True
        x.hub.close.side_effect = MemoryError()
        try:
            with self.assertRaises(WorkerTerminate):
                asynloop(*x.args)
        finally:
            state.should_terminate = False

    def test_should_stop(self):
        x = X(self.app)
        state.should_stop = True
        try:
            with self.assertRaises(WorkerShutdown):
                asynloop(*x.args)
        finally:
            state.should_stop = False

    def test_updates_qos(self):
        x = X(self.app)
        x.qos.prev = 3
        x.qos.value = 3
        x.hub.on_tick.add(x.closer(mod=2))
        x.hub.timer._queue = [1]
        asynloop(*x.args)
        self.assertFalse(x.qos.update.called)

        x = X(self.app)
        x.qos.prev = 1
        x.qos.value = 6
        x.hub.on_tick.add(x.closer(mod=2))
        asynloop(*x.args)
        x.qos.update.assert_called_with()
        x.hub.fire_timers.assert_called_with(propagate=(socket.error, ))

    def test_poll_empty(self):
        x = X(self.app)
        x.hub.readers = {6: Mock()}
        x.hub.timer._queue = [1]
        x.close_then_error(x.hub.poller.poll)
        x.hub.fire_timers.return_value = 33.37
        poller = x.hub.poller
        poller.poll.return_value = []
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        poller.poll.assert_called_with(33.37)

    def test_poll_readable(self):
        x = X(self.app)
        reader = Mock(name='reader')
        x.hub.add_reader(6, reader, 6)
        x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4))
        poller = x.hub.poller
        poller.poll.return_value = [(6, READ)]
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        reader.assert_called_with(6)
        self.assertTrue(poller.poll.called)

    def test_poll_readable_raises_Empty(self):
        x = X(self.app)
        reader = Mock(name='reader')
        x.hub.add_reader(6, reader, 6)
        x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
        poller = x.hub.poller
        poller.poll.return_value = [(6, READ)]
        reader.side_effect = Empty()
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        reader.assert_called_with(6)
        self.assertTrue(poller.poll.called)

    def test_poll_writable(self):
        x = X(self.app)
        writer = Mock(name='writer')
        x.hub.add_writer(6, writer, 6)
        x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
        poller = x.hub.poller
        poller.poll.return_value = [(6, WRITE)]
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        writer.assert_called_with(6)
        self.assertTrue(poller.poll.called)

    def test_poll_writable_none_registered(self):
        x = X(self.app)
        writer = Mock(name='writer')
        x.hub.add_writer(6, writer, 6)
        x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
        poller = x.hub.poller
        poller.poll.return_value = [(7, WRITE)]
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        self.assertTrue(poller.poll.called)

    def test_poll_unknown_event(self):
        x = X(self.app)
        writer = Mock(name='reader')
        x.hub.add_writer(6, writer, 6)
        x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
        poller = x.hub.poller
        poller.poll.return_value = [(6, 0)]
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        self.assertTrue(poller.poll.called)

    def test_poll_keep_draining_disabled(self):
        x = X(self.app)
        x.hub.writers = {6: Mock()}

        poll = x.hub.poller.poll

        def se(*args, **kwargs):
            poll.side_effect = socket.error()
        poll.side_effect = se

        poller = x.hub.poller
        poll.return_value = [(6, 0)]
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        self.assertTrue(poller.poll.called)

    def test_poll_err_writable(self):
        x = X(self.app)
        writer = Mock(name='writer')
        x.hub.add_writer(6, writer, 6, 48)
        x.hub.on_tick.add(x.close_then_error(Mock(), 2))
        poller = x.hub.poller
        poller.poll.return_value = [(6, ERR)]
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        writer.assert_called_with(6, 48)
        self.assertTrue(poller.poll.called)

    def test_poll_write_generator(self):
        x = X(self.app)
        x.hub.remove = Mock(name='hub.remove()')

        def Gen():
            yield 1
            yield 2
        gen = Gen()
        x.hub.add_writer(6, gen)
        x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
        x.hub.poller.poll.return_value = [(6, WRITE)]
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        self.assertTrue(gen.gi_frame.f_lasti != -1)
        self.assertFalse(x.hub.remove.called)

    def test_poll_write_generator_stopped(self):
        x = X(self.app)

        def Gen():
            raise StopIteration()
            yield
        gen = Gen()
        x.hub.add_writer(6, gen)
        x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
        x.hub.poller.poll.return_value = [(6, WRITE)]
        x.hub.remove = Mock(name='hub.remove()')
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        self.assertIsNone(gen.gi_frame)

    def test_poll_write_generator_raises(self):
        x = X(self.app)

        def Gen():
            raise ValueError('foo')
            yield
        gen = Gen()
        x.hub.add_writer(6, gen)
        x.hub.remove = Mock(name='hub.remove()')
        x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
        x.hub.poller.poll.return_value = [(6, WRITE)]
        with self.assertRaises(ValueError):
            asynloop(*x.args)
        self.assertIsNone(gen.gi_frame)
        x.hub.remove.assert_called_with(6)

    def test_poll_err_readable(self):
        x = X(self.app)
        reader = Mock(name='reader')
        x.hub.add_reader(6, reader, 6, 24)
        x.hub.on_tick.add(x.close_then_error(Mock(), 2))
        poller = x.hub.poller
        poller.poll.return_value = [(6, ERR)]
        with self.assertRaises(socket.error):
            asynloop(*x.args)
        reader.assert_called_with(6, 24)
        self.assertTrue(poller.poll.called)

    def test_poll_raises_ValueError(self):
        x = X(self.app)
        x.hub.readers = {6: Mock()}
        poller = x.hub.poller
        x.close_then_error(poller.poll, exc=ValueError)
        asynloop(*x.args)
        self.assertTrue(poller.poll.called)


class test_synloop(AppCase):

    def test_timeout_ignored(self):
        x = X(self.app)
        x.timeout_then_error(x.connection.drain_events)
        with self.assertRaises(socket.error):
            synloop(*x.args)
        self.assertEqual(x.connection.drain_events.call_count, 2)

    def test_updates_qos_when_changed(self):
        x = X(self.app)
        x.qos.prev = 2
        x.qos.value = 2
        x.timeout_then_error(x.connection.drain_events)
        with self.assertRaises(socket.error):
            synloop(*x.args)
        self.assertFalse(x.qos.update.called)

        x.qos.value = 4
        x.timeout_then_error(x.connection.drain_events)
        with self.assertRaises(socket.error):
            synloop(*x.args)
        x.qos.update.assert_called_with()

    def test_ignores_socket_errors_when_closed(self):
        x = X(self.app)
        x.close_then_error(x.connection.drain_events)
        self.assertIsNone(synloop(*x.args))
agpl-3.0
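The tests above drive celery's asynloop by stubbing hub.poller.poll to return (fd, event) pairs and asserting which registered callback fires. Below is a minimal sketch of that dispatch pattern, not celery's actual hub implementation: the READ/WRITE/ERR flag values and the MiniHub/dispatch names are hypothetical, chosen only to mirror what the assertions check (a reader is called with its registered args, a generator writer is advanced and unregistered once exhausted, ERR falls back to whichever callback is registered).

READ, WRITE, ERR = 1, 2, 4  # hypothetical event flags for this sketch


class MiniHub(object):
    """Toy registry mapping file descriptors to callbacks."""

    def __init__(self):
        self.readers = {}  # fd -> (callback, args)
        self.writers = {}  # fd -> (callback, args)

    def add_reader(self, fd, callback, *args):
        self.readers[fd] = (callback, args)

    def add_writer(self, fd, callback, *args):
        self.writers[fd] = (callback, args)

    def remove(self, fd):
        self.readers.pop(fd, None)
        self.writers.pop(fd, None)


def dispatch(hub, events):
    """Route (fd, event) pairs to the registered callbacks."""
    for fd, event in events:
        if event & READ and fd in hub.readers:
            callback, args = hub.readers[fd]
            callback(*args)
        elif event & WRITE and fd in hub.writers:
            callback, args = hub.writers[fd]
            if hasattr(callback, 'send'):  # generator-based writer
                try:
                    next(callback)
                except StopIteration:
                    hub.remove(fd)  # exhausted generators are unregistered
            else:
                callback(*args)
        elif event & ERR:
            # error events go to whichever callback is registered for fd
            for registry in (hub.readers, hub.writers):
                if fd in registry:
                    callback, args = registry[fd]
                    callback(*args)


if __name__ == '__main__':
    def reader(fd):
        print('read event on fd %r' % (fd,))

    hub = MiniHub()
    hub.add_reader(6, reader, 6)
    dispatch(hub, [(6, READ)])  # prints fd 6, like reader.assert_called_with(6)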
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/sklearn/pipeline.py
2
7661
""" The :mod:`sklearn.pipeline` module implements utilites to build a composite estimator, as a chain of transforms and estimators. """ # Author: Edouard Duchesnay # Gael Varoquaux # Virgile Fritsch # Alexandre Gramfort # Licence: BSD from .base import BaseEstimator __all__ = ['Pipeline'] # One round of beers on me if someone finds out why the backslash # is needed in the Attributes section so as not to upset sphinx. class Pipeline(BaseEstimator): """Pipeline of transforms with a final estimator. Sequentially apply a list of transforms and a final estimator. Intermediate steps of the pipeline must be 'transforms', that is, they must implements fit and transform methods. The final estimator needs only implements fit. The purpose of the pipeline is to assemble several steps that can be cross-validated together while setting different parameters. For this, it enables setting parameters of the various steps using their names and the parameter name separated by a '__', as in the example below. Parameters ---------- steps: list List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained, with the last object an estimator. Attributes ---------- `steps` : list of (name, object) List of the named object that compose the pipeline, in the \ order that they are applied on the data. Examples -------- >>> from sklearn import svm >>> from sklearn.datasets import samples_generator >>> from sklearn.feature_selection import SelectKBest >>> from sklearn.feature_selection import f_regression >>> from sklearn.pipeline import Pipeline >>> # generate some data to play with >>> X, y = samples_generator.make_classification( ... n_informative=5, n_redundant=0, random_state=42) >>> # ANOVA SVM-C >>> anova_filter = SelectKBest(f_regression, k=5) >>> clf = svm.SVC(kernel='linear') >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)]) >>> # You can set the parameters using the names issued >>> # For instance, fit using a k of 10 in the SelectKBest >>> # and a parameter 'C' of the svn >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y) ... 
# doctest: +ELLIPSIS Pipeline(steps=[...]) >>> prediction = anova_svm.predict(X) >>> anova_svm.score(X, y) 0.75 """ # BaseEstimator interface def __init__(self, steps): self.named_steps = dict(steps) names, estimators = zip(*steps) if len(self.named_steps) != len(steps): raise ValueError("Names provided are not unique: %s" % names) self.steps = zip(names, estimators) # shallow copy of steps transforms = estimators[:-1] estimator = estimators[-1] for t in transforms: if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) \ or not hasattr(t, "transform"): raise TypeError("All intermediate steps a the chain should " "be transforms and implement fit and transform" "'%s' (type %s) doesn't)" % (t, type(t))) if not hasattr(estimator, "fit"): raise TypeError("Last step of chain should implement fit " "'%s' (type %s) doesn't)" % (estimator, type(estimator))) def get_params(self, deep=True): if not deep: return super(Pipeline, self).get_params(deep=False) else: out = self.named_steps.copy() for name, step in self.named_steps.iteritems(): for key, value in step.get_params(deep=True).iteritems(): out['%s__%s' % (name, key)] = value return out # Estimator interface def _pre_transform(self, X, y=None, **fit_params): fit_params_steps = dict((step, {}) for step, _ in self.steps) for pname, pval in fit_params.iteritems(): step, param = pname.split('__', 1) fit_params_steps[step][param] = pval Xt = X for name, transform in self.steps[:-1]: if hasattr(transform, "fit_transform"): Xt = transform.fit_transform(Xt, y, **fit_params_steps[name]) else: Xt = transform.fit(Xt, y, **fit_params_steps[name]) \ .transform(Xt) return Xt, fit_params_steps[self.steps[-1][0]] def fit(self, X, y=None, **fit_params): """Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator. """ Xt, fit_params = self._pre_transform(X, y, **fit_params) self.steps[-1][-1].fit(Xt, y, **fit_params) return self def fit_transform(self, X, y=None, **fit_params): """Fit all the transforms one after the other and transform the data, then use fit_transform on transformed data using the final estimator. Valid only if the final estimator implements fit_transform.""" Xt, fit_params = self._pre_transform(X, y, **fit_params) return self.steps[-1][-1].fit_transform(Xt, y, **fit_params) def predict(self, X): """Applies transforms to the data, and the predict method of the final estimator. Valid only if the final estimator implements predict.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict(Xt) def predict_proba(self, X): """Applies transforms to the data, and the predict_proba method of the final estimator. Valid only if the final estimator implements predict_proba.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_proba(Xt) def decision_function(self, X): """Applies transforms to the data, and the decision_function method of the final estimator. Valid only if the final estimator implements decision_function.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].decision_function(Xt) def predict_log_proba(self, X): Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_log_proba(Xt) def transform(self, X): """Applies transforms to the data, and the transform method of the final estimator. 
Valid only if the final estimator implements transform.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].transform(Xt) def inverse_transform(self, X): if X.ndim == 1: X = X[None, :] Xt = X for name, step in self.steps[:-1][::-1]: Xt = step.inverse_transform(Xt) return Xt def score(self, X, y=None): """Applies transforms to the data, and the score method of the final estimator. Valid only if the final estimator implements score.""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].score(Xt, y) @property def _pairwise(self): # check if first estimator expects pairwise input return getattr(self.steps[0][1], '_pairwise', False)
agpl-3.0
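As a short usage sketch of the '__'-separated parameter routing that the get_params/set_params and _pre_transform code above implements (the estimator choices here are illustrative, not part of the record):

from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

pipe = Pipeline([('anova', SelectKBest(f_regression, k=5)),
                 ('svc', SVC(kernel='linear'))])

# 'anova__k' splits on the first '__' into step name 'anova' and
# parameter 'k', the same split _pre_transform applies to **fit_params
# via pname.split('__', 1).
pipe.set_params(anova__k=10, svc__C=0.1)
print(pipe.get_params()['svc__C'])  # -> 0.1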
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/astroid/tests/testdata/python2/data/joined_strings.py
20
72168
x = ('R0lGODlhigJnAef/AAABAAEEAAkCAAMGAg0GBAYJBQoMCBMODQ4QDRITEBkS' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' 
+'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' 
+'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' 
+'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' 
+'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' 
+'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' 
+'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' 
+'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' 
+'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' 
+'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' 
+'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' 
+'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' 
+'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' +'Ii0uLDAxLzI0Mh44U0gxMDI5JkM0JjU3NDY6Kjc5Njo7OUE8Ozw+Oz89QTxA' +'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7' +'CxsSEhkWDhYYFQ0aJhkaGBweGyccGh8hHiIkIiMmGTEiHhQoPSYoJSkqKDcp' 
waelrash1/or-tools
data/nonogram_regular/nonogram_gondola.py
74
2187
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Gondola # From http://www.conceptispuzzles.com # rows = 30 row_rule_len = 8 row_rules = [ [0,0,0,0,0,0,5,6], [0,0,0,0,6,1,1,1], [0,0,0,0,0,3,11,3], [0,0,6,1,1,1,1,1], [0,7,1,1,1,2,1,3], [0,0,4,1,1,2,1,4], [0,7,1,1,1,2,3,1], [0,0,7,1,1,3,1,1], [0,0,4,1,1,1,1,9], [0,0,0,0,4,8,1,1], [0,0,0,4,1,4,1,3], [0,0,0,4,1,7,1,5], [4,1,1,2,1,4,1,1], [0,0,0,4,9,2,1,2], [0,0,4,1,3,1,2,1], [0,0,4,1,6,1,1,1], [0,0,0,0,4,8,3,1], [0,0,0,0,10,3,5,3], [0,0,4,1,2,3,5,2], [0,0,0,0,3,5,2,8], [0,0,0,2,6,3,1,1], [0,0,0,0,0,1,12,1], [0,0,0,0,0,20,1,1], [0,0,0,0,0,0,2,25], [0,0,0,0,0,2,3,20], [2,5,3,2,2,2,2,1], [0,0,0,0,0,1,2,22], [0,0,0,0,0,0,0,20], [0,0,0,0,0,0,3,18], [0,0,0,0,0,0,1,2] ] cols = 30 col_rule_len = 8 col_rules = [ [0,0,2,2,2,1,2,1], [0,0,0,2,2,2,1,2], [0,0,0,2,2,2,3,1], [0,0,0,0,0,18,2,1], [0,0,0,0,0,23,1,1], [0,0,0,0,0,20,2,1], [0,0,0,0,0,0,16,4], [0,0,0,0,0,0,2,6], [0,0,0,0,0,1,7,8], [0,0,3,1,1,8,2,1], [0,0,0,1,1,7,9,1], [0,0,0,0,7,1,1,15], [0,0,1,1,3,1,12,3], [0,1,1,1,1,3,2,8], [0,1,1,1,2,3,4,8], [0,1,1,1,1,3,1,14], [0,0,0,0,7,6,8,3], [0,0,0,0,0,1,4,9], [0,0,0,1,2,1,1,7], [0,0,0,0,5,1,3,3], [0,0,0,0,0,2,1,6], [0,0,0,0,0,5,2,6], [0,0,0,0,1,4,2,3], [0,0,0,0,0,1,7,8], [0,0,0,0,7,4,5,6], [2,1,1,1,2,3,3,3], [0,0,0,7,2,1,1,6], [0,1,1,2,1,1,1,6], [0,2,1,1,1,3,2,3], [0,0,0,0,1,1,9,6] ]
apache-2.0
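The gondola record above stores the puzzle only as zero-padded clue tables; the solver half of the script is not shown. Purely as a hypothetical reading aid (the helper names clue and matches are made up, not part of the original or-tools file), this pure-Python sketch turns a candidate 0/1 line back into clue form so a solution grid can be checked against row_rules and col_rules:

from itertools import groupby

def clue(line):
    """Nonogram clue for a 0/1 line, e.g. [1, 1, 0, 1] -> [2, 1]."""
    return [sum(run) for bit, run in groupby(line) if bit == 1]

def matches(line, rule):
    """True if the line's block pattern equals the rule, ignoring zero padding."""
    return clue(line) == [r for r in rule if r != 0]

row = [1] * 5 + [0] * 2 + [1] * 6 + [0] * 17   # 30 cells, blocks of 5 and 6
print(matches(row, [0, 0, 0, 0, 0, 0, 5, 6]))  # True, the first entry of row_rules

The leading zeros in each rule are padding up to the fixed rule length (row_rule_len = 8), which is why the check strips them before comparing.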
a10networks/a10sdk-python
a10sdk/core/authentication/authentication.py
2
4505
from a10sdk.common.A10BaseClass import A10BaseClass class ModeCfg(A10BaseClass): """This class does not support CRUD Operations please use parent. :param mode: {"default": 0, "type": "number", "description": "Configure authentication mode", "format": "flag"} :param mode_type: {"default": "\"single\"", "enum": ["multiple", "single"], "type": "string", "description": "'multiple': Multiple authentication mode. If an authentication method rejected, try next one; 'single': Single authentication mode. If an authentication method rejected, don't try next one; ", "format": "enum"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.b_key = "mode-cfg" self.DeviceProxy = "" self.mode = "" self.mode_type = "" for keys, value in kwargs.items(): setattr(self,keys, value) class TypeCfg(A10BaseClass): """This class does not support CRUD Operations please use parent. :param authen_type: {"enum": ["ldap", "local", "radius", "tacplus"], "type": "string", "format": "enum-list"} :param type: {"default": 0, "type": "number", "description": "The login authentication type", "format": "flag"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.b_key = "type-cfg" self.DeviceProxy = "" self.authen_type = "" self.A10WW_type = "" for keys, value in kwargs.items(): setattr(self,keys, value) class LoginCfg(A10BaseClass): """This class does not support CRUD Operations please use parent. :param local: {"default": 0, "type": "number", "description": "Configure local user to enter privilege-mode", "format": "flag"} :param privilege_mode: {"default": 0, "type": "number", "description": "Configure to enter privilege-mode", "format": "flag"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.b_key = "login-cfg" self.DeviceProxy = "" self.local = "" self.privilege_mode = "" for keys, value in kwargs.items(): setattr(self,keys, value) class EnableCfg(A10BaseClass): """This class does not support CRUD Operations please use parent. :param enable_auth_type: {"default": "local", "enum": ["local", "tacplus"], "type": "string", "description": "The enable-password authentication type", "format": "enum-list"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.b_key = "enable-cfg" self.DeviceProxy = "" self.enable_auth_type = "" for keys, value in kwargs.items(): setattr(self,keys, value) class Authentication(A10BaseClass): """Class Description:: Configure authentication feature. Class authentication supports CRUD Operations and inherits from `common/A10BaseClass`. This class is the `"PARENT"` class for this module.` :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"} :param multiple_auth_reject: {"default": 0, "optional": true, "type": "number", "description": "Multiple same user login reject", "format": "flag"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` URL for this object:: `https://<Hostname|Ip address>//axapi/v3/authentication`. 
""" def __init__(self, **kwargs): self.ERROR_MSG = "" self.required=[] self.b_key = "authentication" self.a10_url="/axapi/v3/authentication" self.DeviceProxy = "" self.console = {} self.uuid = "" self.mode_cfg = {} self.type_cfg = {} self.multiple_auth_reject = "" self.login_cfg = {} self.enable_cfg = {} for keys, value in kwargs.items(): setattr(self,keys, value)
apache-2.0
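All four config classes above share one constructor idiom: every keyword argument is copied onto the instance with setattr. A stripped-down illustration of that pattern (a stand-in, not the real A10BaseClass, which also wires up a DeviceProxy for REST calls):

class Base(object):
    """Stand-in for the kwargs -> attribute pattern used by the a10sdk classes."""
    def __init__(self, **kwargs):
        self.b_key = None
        for key, value in kwargs.items():
            setattr(self, key, value)   # mirrors the loops in ModeCfg/TypeCfg above

m = Base(b_key='mode-cfg', mode_type='multiple')
print(m.b_key, m.mode_type)  # -> mode-cfg multiple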
commaai/panda
examples/query_fw_versions.py
1
2951
#!/usr/bin/env python3 import argparse from tqdm import tqdm from panda import Panda from panda.python.uds import UdsClient, MessageTimeoutError, NegativeResponseError, SESSION_TYPE, DATA_IDENTIFIER_TYPE if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--rxoffset', default="") parser.add_argument('--nonstandard', action='store_true') parser.add_argument('--debug', action='store_true') parser.add_argument('--addr') parser.add_argument('--bus') args = parser.parse_args() if args.addr: addrs = [int(args.addr, base=16)] else: addrs = [0x700 + i for i in range(256)] addrs += [0x18da0000 + (i << 8) + 0xf1 for i in range(256)] results = {} uds_data_ids = {} for std_id in DATA_IDENTIFIER_TYPE: uds_data_ids[std_id.value] = std_id.name if args.nonstandard: for uds_id in range(0xf100,0xf180): uds_data_ids[uds_id] = "IDENTIFICATION_OPTION_VEHICLE_MANUFACTURER_SPECIFIC_DATA_IDENTIFIER" for uds_id in range(0xf1a0,0xf1f0): uds_data_ids[uds_id] = "IDENTIFICATION_OPTION_VEHICLE_MANUFACTURER_SPECIFIC" for uds_id in range(0xf1f0,0xf200): uds_data_ids[uds_id] = "IDENTIFICATION_OPTION_SYSTEM_SUPPLIER_SPECIFIC" panda = Panda() panda.set_safety_mode(Panda.SAFETY_ELM327) print("querying addresses ...") with tqdm(addrs) as t: for addr in t: # skip functional broadcast addrs if addr == 0x7df or addr == 0x18db33f1: continue t.set_description(hex(addr)) if args.bus: bus = int(args.bus) else: bus = 1 if panda.has_obd() else 0 rx_addr = addr + int(args.rxoffset, base=16) if args.rxoffset else None uds_client = UdsClient(panda, addr, rx_addr, bus, timeout=0.2, debug=args.debug) # Check for anything alive at this address, and switch to the highest # available diagnostic session without security access try: uds_client.tester_present() uds_client.diagnostic_session_control(SESSION_TYPE.DEFAULT) uds_client.diagnostic_session_control(SESSION_TYPE.EXTENDED_DIAGNOSTIC) except NegativeResponseError: pass except MessageTimeoutError: continue # Run queries against all standard UDS data identifiers, plus selected # non-standardized identifier ranges if requested resp = {} for uds_data_id in sorted(uds_data_ids): try: data = uds_client.read_data_by_identifier(uds_data_id) # type: ignore if data: resp[uds_data_id] = data except (NegativeResponseError, MessageTimeoutError): pass if resp.keys(): results[addr] = resp if len(results.items()): for addr, resp in results.items(): print(f"\n\n*** Results for address 0x{addr:X} ***\n\n") for rid, dat in resp.items(): print(f"0x{rid:02X} {uds_data_ids[rid]}: {dat}") else: print("no fw versions found!")
mit
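The address list above mixes 11-bit IDs (0x700 through 0x7FF) with 29-bit extended-diagnostic IDs. A small sketch of the arithmetic, showing how the 0x18DAxxF1 form packs a target ECU and the conventional tester source 0xF1 (pure Python, no Panda hardware needed; split_29bit is an illustrative name, not part of the script):

def split_29bit(addr):
    """Decode a 29-bit UDS physical address of the form 0x18DAttss."""
    target = (addr >> 8) & 0xFF   # ECU being addressed
    source = addr & 0xFF          # tester, conventionally 0xF1
    return target, source

addrs = [0x18da0000 + (i << 8) + 0xf1 for i in range(256)]  # same construction as above
print(hex(addrs[0]), split_29bit(addrs[0]))      # 0x18da00f1 (0, 241)
print(hex(addrs[255]), split_29bit(addrs[255]))  # 0x18dafff1 (255, 241)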
darthbhyrava/pywikibot-local
scripts/panoramiopicker.py
3
13036
#!/usr/bin/python # -*- coding: utf-8 -*- """Tool to copy a Panoramio set to Commons.""" # # (C) Multichill, 2010 # (C) Pywikibot team, 2010-2015 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, unicode_literals __version__ = '$Id$' import base64 import hashlib import json import re import socket import StringIO from BeautifulSoup import BeautifulSoup import pywikibot from pywikibot import config from pywikibot.tools import PY2 from scripts import imagerecat, upload if not PY2: from urllib.request import urlopen else: from urllib import urlopen try: from pywikibot.userinterfaces.gui import Tkdialog except ImportError as _tk_error: Tkdialog = None def isAllowedLicense(photoInfo): """ Check if the image contains the right license. TODO: Maybe add more licenses """ allowed = [u'by-sa'] return photoInfo[u'license'] in allowed def downloadPhoto(photoUrl): """ Download the photo and store it in a StringIO.StringIO object. TODO: Add exception handling """ imageFile = urlopen(photoUrl).read() return StringIO.StringIO(imageFile) def findDuplicateImages(photo, site=None): """Return list of duplicate images. Takes the photo, calculates the SHA1 hash and asks the mediawiki api for a list of duplicates. TODO: Add exception handling, fix site thing """ if not site: site = pywikibot.Site('commons', 'commons') hashObject = hashlib.sha1() hashObject.update(photo.getvalue()) return site.allimages(sha1=base64.b16encode(hashObject.digest())) def getLicense(photoInfo): """Adding license to the Panoramio API with a beautiful soup hack.""" photoInfo['license'] = u'c' page = urlopen(photoInfo.get(u'photo_url')) data = page.read() soup = BeautifulSoup(data) if soup.find("div", {'id': 'photo-info'}): pointer = soup.find("div", {'id': 'photo-info'}) if pointer.find("div", {'id': 'photo-details'}): pointer = pointer.find("div", {'id': 'photo-details'}) if pointer.find("ul", {'id': 'details'}): pointer = pointer.find("ul", {'id': 'details'}) if pointer.find("li", {'class': 'license by-sa'}): photoInfo['license'] = u'by-sa' # Does Panoramio have more license options? return photoInfo def getFilename(photoInfo, site=None, project=u'Panoramio'): """Build a good filename for the upload. The name is based on the username and the title. Prevents naming collisions. """ if not site: site = pywikibot.Site('commons', 'commons') username = photoInfo.get(u'owner_name') title = photoInfo.get(u'photo_title') if title: title = cleanUpTitle(title) else: title = u'' if pywikibot.Page(site, u'File:%s - %s - %s.jpg' % (project, username, title)).exists(): i = 1 while True: if (pywikibot.Page(site, u'File:%s - %s - %s (%s).jpg' % (project, username, title, str(i))).exists()): i += 1 else: return u'%s - %s - %s (%s).jpg' % (project, username, title, str(i)) else: return u'%s - %s - %s.jpg' % (project, username, title) def cleanUpTitle(title): """Clean up the title of a potential mediawiki page. Otherwise the title of the page might not be allowed by the software. 
""" title = title.strip() title = re.sub(u"[<{\\[]", u"(", title) title = re.sub(u"[>}\\]]", u")", title) title = re.sub(u"[ _]?\\(!\\)", u"", title) title = re.sub(u",:[ _]", u", ", title) title = re.sub(u"[;:][ _]", u", ", title) title = re.sub(u"[\t\n ]+", u" ", title) title = re.sub(u"[\r\n ]+", u" ", title) title = re.sub(u"[\n]+", u"", title) title = re.sub(u"[?!]([.\"]|$)", u"\\1", title) title = re.sub(u"[&#%?!]", u"^", title) title = re.sub(u"[;]", u",", title) title = re.sub(u"[/+\\\\:]", u"-", title) title = re.sub(u"--+", u"-", title) title = re.sub(u",,+", u",", title) title = re.sub(u"[-,^]([.]|$)", u"\\1", title) title = title.replace(u" ", u"_") return title def getDescription(photoInfo, panoramioreview=False, reviewer='', override=u'', addCategory=u''): """Build description for the image.""" desc = u'' desc += u'{{Information\n' desc += u'|description=%(photo_title)s\n' desc += u'|date=%(upload_date)s (upload date)\n' desc += u'|source=[%(photo_url)s Panoramio]\n' desc += u'|author=[%(owner_url)s?with_photo_id=%(photo_id)s %(owner_name)s] \n' desc += u'|permission=\n' desc += u'|other_versions=\n' desc += u'|other_fields=\n' desc += u'}}\n' if photoInfo.get(u'latitude') and photoInfo.get(u'longitude'): desc += u'{{Location dec|%(latitude)s|%(longitude)s|source:Panoramio}}\n' desc += u'\n' desc += u'=={{int:license-header}}==\n' if override: desc += override else: if photoInfo.get(u'license') == u'by-sa': desc += u'{{Cc-by-sa-3.0}}\n' if panoramioreview: desc += ('{{Panoramioreview|%s|{{subst:CURRENTYEAR}}-' '{{subst:CURRENTMONTH}}-{{subst:CURRENTDAY2}}}}\n' % reviewer) else: desc += u'{{Panoramioreview}}\n' desc += u'\n' cats = u'' if addCategory: desc += u'\n[[Category:%s]]\n' % (addCategory,) cats = True # Get categories based on location if photoInfo.get(u'latitude') and photoInfo.get(u'longitude'): cats = imagerecat.getOpenStreetMapCats(photoInfo.get(u'latitude'), photoInfo.get(u'longitude')) cats = imagerecat.applyAllFilters(cats) for cat in cats: desc += u'[[Category:%s]]\n' % (cat,) if not cats: desc += u'{{subst:Unc}}\n' return desc % photoInfo def processPhoto(photoInfo, panoramioreview=False, reviewer='', override=u'', addCategory=u'', autonomous=False, site=None): """Process a single Panoramio photo.""" if not site: site = pywikibot.Site('commons', 'commons') if isAllowedLicense(photoInfo) or override: # Should download the photo only once photo = downloadPhoto(photoInfo.get(u'photo_file_url')) # Don't upload duplicate images, should add override option duplicates = findDuplicateImages(photo, site=site) if duplicates: pywikibot.output(u'Found duplicate image at %s' % duplicates.pop()) else: filename = getFilename(photoInfo, site=site) pywikibot.output(filename) description = getDescription(photoInfo, panoramioreview, reviewer, override, addCategory) pywikibot.output(description) if not autonomous: (newDescription, newFilename, skip) = Tkdialog( description, photo, filename).show_dialog() else: newDescription = description newFilename = filename skip = False # pywikibot.output(newPhotoDescription) # if (pywikibot.Page(title=u'File:'+ filename, # site=pywikibot.Site()).exists()): # # I should probably check if the hash is the same and if not upload # # it under a different name # pywikibot.output(u'File:' + filename + u' already exists!') # else: # Do the actual upload # Would be nice to check before I upload if the file is already at # Commons # Not that important for this program, but maybe for derived # programs if not skip: bot = 
upload.UploadRobot(photoInfo.get(u'photo_file_url'), description=newDescription, useFilename=newFilename, keepFilename=True, verifyDescription=False, site=site) bot.upload_image(debug=False) return 1 return 0 def getPhotos(photoset=u'', start_id='', end_id='', interval=100): """Loop over a set of Panoramio photos.""" i = 0 has_more = True url = ('http://www.panoramio.com/map/get_panoramas.php?' 'set=%s&from=%s&to=%s&size=original') while has_more: gotInfo = False maxtries = 10 tries = 0 while not gotInfo: try: if tries < maxtries: tries += 1 panoramioApiPage = urlopen(url % (photoset, i, i + interval)) contents = panoramioApiPage.read().decode('utf-8') gotInfo = True i += interval else: break except IOError: pywikibot.output(u'Got an IOError, let\'s try again') except socket.timeout: pywikibot.output(u'Got a timeout, let\'s try again') metadata = json.loads(contents) photos = metadata.get(u'photos') for photo in photos: yield photo has_more = metadata.get(u'has_more') return def usage(): """Print usage information. TODO : Need more. """ pywikibot.output( u"Panoramiopicker is a tool to transfer Panoramio photos to Wikimedia " u"Commons") pywikibot.output(u"-set:<set_id>\n") return def main(*args): """Process command line arguments and perform task.""" # imagerecat.initLists() photoset = u'' # public (popular photos), full (all photos), user ID number start_id = u'' end_id = u'' addCategory = u'' autonomous = False totalPhotos = 0 uploadedPhotos = 0 # Do we mark the images as reviewed right away? if config.panoramio['review']: panoramioreview = config.panoramio['review'] else: panoramioreview = False # Set the Panoramio reviewer if config.panoramio['reviewer']: reviewer = config.panoramio['reviewer'] elif 'commons' in config.sysopnames['commons']: reviewer = config.sysopnames['commons']['commons'] elif 'commons' in config.usernames['commons']: reviewer = config.usernames['commons']['commons'] else: reviewer = u'' # Should be renamed to overrideLicense or something like that override = u'' local_args = pywikibot.handle_args(args) for arg in local_args: if arg.startswith('-set'): if len(arg) == 4: photoset = pywikibot.input(u'What is the set?') else: photoset = arg[5:] elif arg.startswith('-start_id'): if len(arg) == 9: start_id = pywikibot.input( u'What is the id of the photo you want to start at?') else: start_id = arg[10:] elif arg.startswith('-end_id'): if len(arg) == 7: end_id = pywikibot.input( u'What is the id of the photo you want to end at?') else: end_id = arg[8:] elif arg == '-panoramioreview': panoramioreview = True elif arg.startswith('-reviewer'): if len(arg) == 9: reviewer = pywikibot.input(u'Who is the reviewer?') else: reviewer = arg[10:] elif arg.startswith('-override'): if len(arg) == 9: override = pywikibot.input(u'What is the override text?') else: override = arg[10:] elif arg.startswith('-addcategory'): if len(arg) == 12: addCategory = pywikibot.input( u'What category do you want to add?') else: addCategory = arg[13:] elif arg == '-autonomous': autonomous = True if photoset: site = pywikibot.Site() if site != pywikibot.Site('commons', 'commons'): pywikibot.warning( 'Using {0} instead of Wikimedia Commons'.format(site)) for photoInfo in getPhotos(photoset, start_id, end_id): photoInfo = getLicense(photoInfo) # time.sleep(10) uploadedPhotos += processPhoto(photoInfo, panoramioreview, reviewer, override, addCategory, autonomous, site=site) totalPhotos += 1 else: usage() pywikibot.output(u'Finished running') pywikibot.output(u'Total photos: ' + str(totalPhotos)) 
pywikibot.output(u'Uploaded photos: ' + str(uploadedPhotos)) if __name__ == "__main__": main()
mit
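cleanUpTitle above chains a dozen regex substitutions. A trimmed, standalone cut of just a few of them (same intent, not the full rule set; clean_title is an illustrative name) shows the kind of normalisation it performs:

import re

def clean_title(title):
    """Minimal cut of cleanUpTitle above: brackets, whitespace, separators."""
    title = title.strip()
    title = re.sub(r"[<{\[]", "(", title)      # open brackets -> (
    title = re.sub(r"[>}\]]", ")", title)      # close brackets -> )
    title = re.sub(r"[\t\n ]+", " ", title)    # collapse whitespace
    title = re.sub(r"[/+\\:]", "-", title)     # path-like characters -> -
    return title.replace(" ", "_")

print(clean_title("A <test>  photo: one/two"))  # A_(test)_photo-_one-two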
sasukeh/neutron
neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py
8
6780
# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.agent.common import config from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants DEFAULT_BRIDGE_MAPPINGS = [] DEFAULT_VLAN_RANGES = [] DEFAULT_TUNNEL_RANGES = [] DEFAULT_TUNNEL_TYPES = [] ovs_opts = [ cfg.StrOpt('integration_bridge', default='br-int', help=_("Integration bridge to use.")), cfg.StrOpt('tunnel_bridge', default='br-tun', help=_("Tunnel bridge to use.")), cfg.StrOpt('int_peer_patch_port', default='patch-tun', help=_("Peer patch port in integration bridge for tunnel " "bridge.")), cfg.StrOpt('tun_peer_patch_port', default='patch-int', help=_("Peer patch port in tunnel bridge for integration " "bridge.")), cfg.IPOpt('local_ip', version=4, help=_("Local IP address of tunnel endpoint.")), cfg.ListOpt('bridge_mappings', default=DEFAULT_BRIDGE_MAPPINGS, help=_("List of <physical_network>:<bridge>. " "Deprecated for ofagent.")), cfg.BoolOpt('use_veth_interconnection', default=False, help=_("Use veths instead of patch ports to interconnect the " "integration bridge to physical bridges.")), cfg.StrOpt('of_interface', default='ovs-ofctl', choices=['ovs-ofctl', 'native'], help=_("OpenFlow interface to use.")), cfg.StrOpt('datapath_type', default=constants.OVS_DATAPATH_SYSTEM, choices=[constants.OVS_DATAPATH_SYSTEM, constants.OVS_DATAPATH_NETDEV], help=_("OVS datapath to use.")), cfg.IPOpt('of_listen_address', default='127.0.0.1', help=_("Address to listen on for OpenFlow connections. " "Used only for 'native' driver.")), cfg.IntOpt('of_listen_port', default=6633, help=_("Port to listen on for OpenFlow connections. " "Used only for 'native' driver.")), cfg.IntOpt('of_connect_timeout', default=30, help=_("Timeout in seconds to wait for " "the local switch connecting the controller. " "Used only for 'native' driver.")), cfg.IntOpt('of_request_timeout', default=10, help=_("Timeout in seconds to wait for a single " "OpenFlow request. 
" "Used only for 'native' driver.")), ] agent_opts = [ cfg.IntOpt('polling_interval', default=2, help=_("The number of seconds the agent will wait between " "polling for local device changes.")), cfg.BoolOpt('minimize_polling', default=True, help=_("Minimize polling by monitoring ovsdb for interface " "changes.")), cfg.IntOpt('ovsdb_monitor_respawn_interval', default=constants.DEFAULT_OVSDBMON_RESPAWN, help=_("The number of seconds to wait before respawning the " "ovsdb monitor after losing communication with it.")), cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES, help=_("Network types supported by the agent " "(gre and/or vxlan).")), cfg.IntOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT, help=_("The UDP port to use for VXLAN tunnels.")), cfg.IntOpt('veth_mtu', help=_("MTU size of veth interfaces")), cfg.BoolOpt('l2_population', default=False, help=_("Use ML2 l2population mechanism driver to learn " "remote MAC and IPs and improve tunnel scalability.")), cfg.BoolOpt('arp_responder', default=False, help=_("Enable local ARP responder if it is supported. " "Requires OVS 2.1 and ML2 l2population driver. " "Allows the switch (when supporting an overlay) " "to respond to an ARP request locally without " "performing a costly ARP broadcast into the overlay.")), cfg.BoolOpt('prevent_arp_spoofing', default=True, help=_("Enable suppression of ARP responses that don't match " "an IP address that belongs to the port from which " "they originate. Note: This prevents the VMs attached " "to this agent from spoofing, it doesn't protect them " "from other devices which have the capability to spoof " "(e.g. bare metal or VMs attached to agents without " "this flag set to True). Spoofing rules will not be " "added to any ports that have port security disabled. " "For LinuxBridge, this requires ebtables. For OVS, it " "requires a version that supports matching ARP " "headers.")), cfg.BoolOpt('dont_fragment', default=True, help=_("Set or un-set the don't fragment (DF) bit on " "outgoing IP packet carrying GRE/VXLAN tunnel.")), cfg.BoolOpt('enable_distributed_routing', default=False, help=_("Make the l2 agent run in DVR mode.")), cfg.IntOpt('quitting_rpc_timeout', default=10, help=_("Set new timeout in seconds for new rpc calls after " "agent receives SIGTERM. If value is set to 0, rpc " "timeout won't be changed")), cfg.BoolOpt('drop_flows_on_start', default=False, help=_("Reset flow table on start. Setting this to True will " "cause brief traffic interruption.")), cfg.BoolOpt('tunnel_csum', default=False, help=_("Set or un-set the tunnel header checksum on " "outgoing IP packet carrying GRE/VXLAN tunnel.")) ] cfg.CONF.register_opts(ovs_opts, "OVS") cfg.CONF.register_opts(agent_opts, "AGENT") config.register_agent_state_opts_helper(cfg.CONF)
apache-2.0
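The options above only take effect once registered against a config object and parsed. A minimal usage sketch, assuming oslo.config is installed (the option is a self-contained copy mirroring one entry of ovs_opts, not an import of the module above):

from oslo_config import cfg

conf = cfg.ConfigOpts()
opts = [cfg.StrOpt('integration_bridge', default='br-int',
                   help='Integration bridge to use.')]
conf.register_opts(opts, group='OVS')   # same group name the agent registers under
conf(args=[])                           # parse an empty command line
print(conf.OVS.integration_bridge)      # -> br-int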
dotcs/doimgr
tests/tests_filters.py
1
4995
import unittest from lib.filter import Filters class TestDOI(unittest.TestCase): def setUp(self): self.valid_doi = "10.1063/1.3458497" self.valid_doi_with_protocol = "http://dx.doi.org/10.1063/1.3458497" self.invalid_doi = "a10.1063/1.3458497" self.valid_doi_organization = "10.1000" #### DOI TESTS def test_add_valid_doi_filter(self): f = Filters() try: f.add('doi', self.valid_doi) except Exception: self.fail("Valid DOI could not be added") self.assertEqual(f.get_formatted_filters(), "doi:10.1063/1.3458497") def test_add_invalid_doi_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'doi', self.invalid_doi) #### DATE TESTS def _test_valid_date_filter(self, filtername, date, awaited_result): f = Filters() try: f.add(filtername, date) except Exception: self.fail("Valid date could not be added: {}".format(filtername)) self.assertEqual(f.get_formatted_filters(), awaited_result) def test_add_valid_from_pub_date_filter(self): for date in ('2013', '2013-02', '2013-02-10'): self._test_valid_date_filter('from-pub-date', date, "{}:{}".format('from-pub-date', date)) def test_add_invalid_from_pub_date_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'from-pub-date', "20132") def test_add_valid_from_index_date_filter(self): for date in ('2013', '2013-02', '2013-02-10'): self._test_valid_date_filter('from-index-date', date, "{}:{}".format('from-index-date', date)) def test_add_invalid_from_index_date_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'from-index-date', "20132") def test_add_valid_until_index_date_filter(self): for date in ('2013', '2013-02', '2013-02-10'): self._test_valid_date_filter('until-index-date', date, "{}:{}".format('until-index-date', date)) def test_add_invalid_until_index_date_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'until-index-date', "20132") def test_add_valid_from_deposition_date_filter(self): for date in ('2013', '2013-02', '2013-02-10'): self._test_valid_date_filter('from-deposition-date', date, "{}:{}".format('from-deposition-date', date)) def test_add_invalid_from_deposition_date_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'from-deposition-date', "20132") def test_add_valid_until_deposition_date_filter(self): for date in ('2013', '2013-02', '2013-02-10'): self._test_valid_date_filter('until-deposition-date', date, "{}:{}".format('until-deposition-date', date)) def test_add_invalid_until_deposition_date_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'until-deposition-date', "20132") ### BOOLEAN FILTER TESTS def test_add_valid_has_funder_filter(self): f = Filters() try: f.add('has-funder', True) except Exception: self.fail("Valid has-funder could not be added") # use lower() since "True" and "False" are written with capital first # letters self.assertEqual(f.get_formatted_filters().lower(), "has-funder:true".lower()) def test_add_invalid_has_funder_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'has-funder', "true") ### URL TESTS def test_add_valid_license_url_filter(self): f = Filters() try: f.add('license.url', 'http://example.com/this/is/a/test.html') except Exception: self.fail("Valid license.url could not be added") def test_add_invalid_license_url_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'license.url', 'httpf://example.com/this/is/a/invalid/url.html') ### STRING TESTS def test_add_valid_full_text_version_filter(self): f = Filters() try: f.add('full-text.version', '1.01.beta') except Exception: self.fail("Valid 
full-text.version could not be added") def test_add_invalid_full_text_version_filter(self): f = Filters() self.assertRaises(ValueError, f.add, 'full-text.version', 4) ### OTHER TESTS def test_empty_filter(self): f = Filters() self.assertEqual(f.get_formatted_filters(), "") def test_multiple_filter_entries(self): f = Filters() f.add('doi', self.valid_doi) f.add('from-pub-date', '2013') for entry in f.get_formatted_filters().split(','): self.assertIn(entry, ["doi:10.1063/1.3458497", "from-pub-date:2013"]) if __name__ == "__main__": unittest.main()
mit
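The Filters implementation under test is not included in this record. Purely as a reading aid, here is a hypothetical minimal class that satisfies the DOI, date and boolean assertions above; the real lib.filter.Filters presumably validates more fields (license.url, full-text.version), which this sketch omits:

import re

class Filters(object):
    """Hypothetical stand-in inferred from the tests, not the real lib.filter.Filters."""
    _DOI = re.compile(r"^10\.\d+(/\S+)?$")
    _DATE = re.compile(r"^\d{4}(-\d{2}(-\d{2})?)?$")

    def __init__(self):
        self._filters = []

    def add(self, name, value):
        if name == 'doi' and not self._DOI.match(value):
            raise ValueError("invalid DOI: %r" % value)
        if name.startswith(('from-', 'until-')) and not self._DATE.match(value):
            raise ValueError("invalid date: %r" % value)
        if name.startswith('has-') and not isinstance(value, bool):
            raise ValueError("boolean filter needs a bool")
        self._filters.append("%s:%s" % (name, value))

    def get_formatted_filters(self):
        return ",".join(self._filters)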
rkizen/ice_cycle
ice_cycle_recover.py
1
4570
import re import sys import time from Bio import SeqIO from Bio.Align.Applications import MuscleCommandline from StringIO import StringIO from Bio import AlignIO import os import argparse # # This script examines the outputs from each split cluster run to find clusters which may have been split by the length cutoffs. # It then generates a new fasta file containing the fasta sequences for the flnc sequences that were involved in these split cluster scenarios. # Run the output fasta through cluster again and then merge. # ap = argparse.ArgumentParser(description='This script examines the outputs from each split cluster run to find clusters which \ may have been split by the length cutoffs. It then generates a new fasta file containing the fasta sequences for the flnc sequences \ that were involved in these split cluster scenarios. Run the output fasta through cluster again and then merge.') ap.add_argument('-r', type=str, nargs=1, help='File with list of cluster report csv files') ap.add_argument('-f', type=str, nargs=1, help='File with list of split fasta files used for clustering') ap.add_argument('-o', type=str, nargs=1, help='Output fasta file name') opts = ap.parse_args() #check for missing args missing_arg_flag = 0 if not opts.r: print("Cluster report file list missing") missing_arg_flag = 1 if not opts.f: print("Fasta file list missing") missing_arg_flag = 1 if not opts.o: print("Output file name missing") missing_arg_flag = 1 if missing_arg_flag == 1: print("Please try again with complete arguments") filelist_file = opts.r[0] fastalist_file = opts.f[0] outfile_name = opts.o[0] ######################################## filelist_file_contents = open(filelist_file).read().rstrip("\n").split("\n") fastalist_file_contents = open(fastalist_file).read().rstrip("\n").split("\n") outfile = open(outfile_name,"w") fasta_dict = {} # fasta_dict[fasta id] = fasta seq for filepath in fastalist_file_contents: print("going through fasta") for seq_record in SeqIO.parse(filepath, "fasta"): seq_name = str(seq_record.id) seq_string = str(seq_record.seq) seq_string = seq_string.upper() seq_length = len(seq_string) fasta_dict[seq_name] = seq_string cluster_dict = {} # cluster_dict[file num][cluster id][flnc id] = 1 flnc_cluster_dict = {} # flnc_cluster_dict[flnc id][file cluster id] = 1 multi_cluster_flnc_dict = {} # multi_cluster_flnc_dict[flnc id] = number of occurrences file_count = 0 flnc_list = [] for filepath in filelist_file_contents: report_file_content = open(filepath).read().rstrip("\n").split("\n") file_count += 1 file_id = str(file_count) cluster_dict[file_id] = {} for line in report_file_content: if line.startswith("cluster_id"): continue line_split = line.split(",") cluster_id = line_split[0] flnc_id = line_split[1] if cluster_id not in cluster_dict[file_id]: cluster_dict[file_id][cluster_id] = {} cluster_dict[file_id][cluster_id][flnc_id] = 1 if flnc_id not in flnc_cluster_dict: flnc_cluster_dict[flnc_id] = {} file_cluster_id = file_id + "_" + cluster_id flnc_cluster_dict[flnc_id][file_cluster_id] = 1 if flnc_id not in multi_cluster_flnc_dict: multi_cluster_flnc_dict[flnc_id] = 0 flnc_list.append(flnc_id) multi_cluster_flnc_dict[flnc_id] += 1 uniq_flnc_dict = {} # uniq_flnc_dict[flnc] = 1 for flnc_id in flnc_list: if multi_cluster_flnc_dict[flnc_id] > 1: file_cluster_id_list = flnc_cluster_dict[flnc_id].keys() #collect all clusters that flnc belongs to for file_cluster_id in file_cluster_id_list: file_id = file_cluster_id.split("_")[0] cluster_id = file_cluster_id.split("_")[1] #collect all 
flnc in cluster for this_flnc_id in cluster_dict[file_id][cluster_id]: uniq_flnc_dict[this_flnc_id] = 1 uniq_flnc_list = uniq_flnc_dict.keys() uniq_flnc_list.sort() for flnc_id in uniq_flnc_list: flnc_seq = fasta_dict[flnc_id] outline = ">" + flnc_id outfile.write(outline) outfile.write("\n") outline = flnc_seq outfile.write(outline) outfile.write("\n")
gpl-3.0
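The core of the recovery logic above is: any flnc read assigned to clusters in more than one split run marks all of those clusters as possibly split, and every member of those clusters is collected for re-clustering. A toy walk-through with made-up ids (the real script additionally writes out the matching fasta sequences):

# Two cluster reports from different length bins (toy data, not real ids):
reports = {'1': {'c1': {'flnc/1', 'flnc/2'}},
           '2': {'c9': {'flnc/2', 'flnc/3'}}}

seen = {}  # flnc id -> list of (file id, cluster id) it appears in
for file_id, clusters in reports.items():
    for cluster_id, members in clusters.items():
        for flnc in members:
            seen.setdefault(flnc, []).append((file_id, cluster_id))

recover = set()
for flnc, places in seen.items():
    if len(places) > 1:            # flnc/2 clustered in both bins -> split cluster
        for file_id, cluster_id in places:
            recover |= reports[file_id][cluster_id]
print(sorted(recover))  # ['flnc/1', 'flnc/2', 'flnc/3'] get re-clustered together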
ArturGaspar/scrapy
scrapy/core/scraper.py
5
10149
"""This module implements the Scraper component which parses responses and extracts information from them""" import logging from collections import deque from twisted.python.failure import Failure from twisted.internet import defer from scrapy.utils.defer import defer_result, defer_succeed, parallel, iter_errback from scrapy.utils.spider import iterate_spider_output from scrapy.utils.misc import load_object from scrapy.utils.log import logformatter_adapter, failure_to_exc_info from scrapy.exceptions import CloseSpider, DropItem, IgnoreRequest from scrapy import signals from scrapy.http import Request, Response from scrapy.item import BaseItem from scrapy.core.spidermw import SpiderMiddlewareManager from scrapy.utils.request import referer_str logger = logging.getLogger(__name__) class Slot(object): """Scraper slot (one per running spider)""" MIN_RESPONSE_SIZE = 1024 def __init__(self, max_active_size=5000000): self.max_active_size = max_active_size self.queue = deque() self.active = set() self.active_size = 0 self.itemproc_size = 0 self.closing = None def add_response_request(self, response, request): deferred = defer.Deferred() self.queue.append((response, request, deferred)) if isinstance(response, Response): self.active_size += max(len(response.body), self.MIN_RESPONSE_SIZE) else: self.active_size += self.MIN_RESPONSE_SIZE return deferred def next_response_request_deferred(self): response, request, deferred = self.queue.popleft() self.active.add(request) return response, request, deferred def finish_response(self, response, request): self.active.remove(request) if isinstance(response, Response): self.active_size -= max(len(response.body), self.MIN_RESPONSE_SIZE) else: self.active_size -= self.MIN_RESPONSE_SIZE def is_idle(self): return not (self.queue or self.active) def needs_backout(self): return self.active_size > self.max_active_size class Scraper(object): def __init__(self, crawler): self.slot = None self.spidermw = SpiderMiddlewareManager.from_crawler(crawler) itemproc_cls = load_object(crawler.settings['ITEM_PROCESSOR']) self.itemproc = itemproc_cls.from_crawler(crawler) self.concurrent_items = crawler.settings.getint('CONCURRENT_ITEMS') self.crawler = crawler self.signals = crawler.signals self.logformatter = crawler.logformatter @defer.inlineCallbacks def open_spider(self, spider): """Open the given spider for scraping and allocate resources for it""" self.slot = Slot() yield self.itemproc.open_spider(spider) def close_spider(self, spider): """Close a spider being scraped and release its resources""" slot = self.slot slot.closing = defer.Deferred() slot.closing.addCallback(self.itemproc.close_spider) self._check_if_closing(spider, slot) return slot.closing def is_idle(self): """Return True if there isn't any more spiders to process""" return not self.slot def _check_if_closing(self, spider, slot): if slot.closing and slot.is_idle(): slot.closing.callback(spider) def enqueue_scrape(self, response, request, spider): slot = self.slot dfd = slot.add_response_request(response, request) def finish_scraping(_): slot.finish_response(response, request) self._check_if_closing(spider, slot) self._scrape_next(spider, slot) return _ dfd.addBoth(finish_scraping) dfd.addErrback( lambda f: logger.error('Scraper bug processing %(request)s', {'request': request}, exc_info=failure_to_exc_info(f), extra={'spider': spider})) self._scrape_next(spider, slot) return dfd def _scrape_next(self, spider, slot): while slot.queue: response, request, deferred = slot.next_response_request_deferred() 
self._scrape(response, request, spider).chainDeferred(deferred) def _scrape(self, response, request, spider): """Handle the downloaded response or failure through the spider callback/errback""" assert isinstance(response, (Response, Failure)) dfd = self._scrape2(response, request, spider) # returns the spider's processed output dfd.addErrback(self.handle_spider_error, request, response, spider) dfd.addCallback(self.handle_spider_output, request, response, spider) return dfd def _scrape2(self, request_result, request, spider): """Handle the different cases of request's result being a Response or a Failure""" if not isinstance(request_result, Failure): return self.spidermw.scrape_response( self.call_spider, request_result, request, spider) else: # FIXME: don't ignore errors in spider middleware dfd = self.call_spider(request_result, request, spider) return dfd.addErrback( self._log_download_errors, request_result, request, spider) def call_spider(self, result, request, spider): result.request = request dfd = defer_result(result) dfd.addCallbacks(request.callback or spider.parse, request.errback) return dfd.addCallback(iterate_spider_output) def handle_spider_error(self, _failure, request, response, spider): exc = _failure.value if isinstance(exc, CloseSpider): self.crawler.engine.close_spider(spider, exc.reason or 'cancelled') return logger.error( "Spider error processing %(request)s (referer: %(referer)s)", {'request': request, 'referer': referer_str(request)}, exc_info=failure_to_exc_info(_failure), extra={'spider': spider} ) self.signals.send_catch_log( signal=signals.spider_error, failure=_failure, response=response, spider=spider ) self.crawler.stats.inc_value( "spider_exceptions/%s" % _failure.value.__class__.__name__, spider=spider ) def handle_spider_output(self, result, request, response, spider): if not result: return defer_succeed(None) it = iter_errback(result, self.handle_spider_error, request, response, spider) dfd = parallel(it, self.concurrent_items, self._process_spidermw_output, request, response, spider) return dfd def _process_spidermw_output(self, output, request, response, spider): """Process each Request/Item (given in the output parameter) returned from the given spider """ if isinstance(output, Request): self.crawler.engine.crawl(request=output, spider=spider) elif isinstance(output, (BaseItem, dict)): self.slot.itemproc_size += 1 dfd = self.itemproc.process_item(output, spider) dfd.addBoth(self._itemproc_finished, output, response, spider) return dfd elif output is None: pass else: typename = type(output).__name__ logger.error('Spider must return Request, BaseItem, dict or None, ' 'got %(typename)r in %(request)s', {'request': request, 'typename': typename}, extra={'spider': spider}) def _log_download_errors(self, spider_failure, download_failure, request, spider): """Log and silence errors that come from the engine (typically download errors that got propagated through here) """ if (isinstance(download_failure, Failure) and not download_failure.check(IgnoreRequest)): if download_failure.frames: logger.error('Error downloading %(request)s', {'request': request}, exc_info=failure_to_exc_info(download_failure), extra={'spider': spider}) else: errmsg = download_failure.getErrorMessage() if errmsg: logger.error('Error downloading %(request)s: %(errmsg)s', {'request': request, 'errmsg': errmsg}, extra={'spider': spider}) if spider_failure is not download_failure: return spider_failure def _itemproc_finished(self, output, item, response, spider): """ItemProcessor finished for the 
given ``item`` and returned ``output`` """ self.slot.itemproc_size -= 1 if isinstance(output, Failure): ex = output.value if isinstance(ex, DropItem): logkws = self.logformatter.dropped(item, ex, response, spider) logger.log(*logformatter_adapter(logkws), extra={'spider': spider}) return self.signals.send_catch_log_deferred( signal=signals.item_dropped, item=item, response=response, spider=spider, exception=output.value) else: logger.error('Error processing %(item)s', {'item': item}, exc_info=failure_to_exc_info(output), extra={'spider': spider}) return self.signals.send_catch_log_deferred( signal=signals.item_error, item=item, response=response, spider=spider, failure=output) else: logkws = self.logformatter.scraped(output, response, spider) logger.log(*logformatter_adapter(logkws), extra={'spider': spider}) return self.signals.send_catch_log_deferred( signal=signals.item_scraped, item=output, response=response, spider=spider)
bsd-3-clause
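The Slot above throttles the engine by charging every queued response at least MIN_RESPONSE_SIZE bytes against max_active_size; needs_backout fires once the running total exceeds the cap. A standalone sketch of that accounting, no Twisted required:

# Minimal illustration of Slot's backpressure accounting above:
MIN_RESPONSE_SIZE = 1024
max_active_size = 5000000

active_size = 0
for body_len in (200, 4096, 3000000, 2500000):
    active_size += max(body_len, MIN_RESPONSE_SIZE)  # tiny bodies still count 1 KiB
    print(body_len, active_size, active_size > max_active_size)  # needs_backout?
# The last response pushes the total to 5505120 > 5000000, so backout triggers.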
spacetelescope/asv
asv/commands/setup.py
2
2829
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import logging import traceback from collections import defaultdict from . import Command from ..console import log from .. import environment from .. import util from . import common_args def _create(env): with log.set_level(logging.WARN): env.create() def _create_parallel(envs): try: for env in envs: _create(env) except BaseException as exc: raise util.ParallelFailure(str(exc), exc.__class__, traceback.format_exc()) class Setup(Command): @classmethod def setup_arguments(cls, subparsers): parser = subparsers.add_parser( "setup", help="Setup virtual environments", description="""Setup virtual environments for each combination of Python version and third-party requirement. This is called by the ``run`` command implicitly, and isn't generally required to be run on its own.""" ) common_args.add_parallel(parser) common_args.add_environment(parser) parser.set_defaults(func=cls.run_from_args) return parser @classmethod def run_from_conf_args(cls, conf, args): return cls.run(conf=conf, parallel=args.parallel, env_spec=args.env_spec) @classmethod def run(cls, conf, parallel=-1, env_spec=None): environments = list(environment.get_environments(conf, env_spec)) cls.perform_setup(environments, parallel=parallel) return environments @classmethod def perform_setup(cls, environments, parallel=-1): if environment.is_existing_only(environments): # Nothing to do, so don't print anything return environments parallel, multiprocessing = util.get_multiprocessing(parallel) log.info("Creating environments") with log.indent(): if parallel != 1: try: # Run creation in parallel only for environments with # different dir_names environment_groups = defaultdict(list) for env in environments: environment_groups[env.dir_name].append(env) pool = util.get_multiprocessing_pool(parallel) try: pool.map(_create_parallel, environment_groups.values()) pool.close() pool.join() finally: pool.terminate() except util.ParallelFailure as exc: exc.reraise() else: list(map(_create, environments))
bsd-3-clause
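perform_setup above groups environments by dir_name before handing them to the pool, so environments sharing a directory are created serially while different directories run in parallel. The grouping step on its own, with placeholder names standing in for real environment objects:

from collections import defaultdict

# Environments sharing a dir_name must be created serially; different
# dir_names can go to separate pool workers.
envs = [('py2.7-numpy1.8', 'a1b2'), ('py3.5-numpy1.8', 'c3d4'),
        ('py2.7-numpy1.9', 'a1b2')]  # (name, dir_name) stand-ins

groups = defaultdict(list)
for name, dir_name in envs:
    groups[dir_name].append(name)

print(dict(groups))
# {'a1b2': ['py2.7-numpy1.8', 'py2.7-numpy1.9'], 'c3d4': ['py3.5-numpy1.8']}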
darthbhyrava/pywikibot-local
pywikibot/comms/threadedhttp.py
3
5526
# -*- coding: utf-8 -*- """Http backend layer, formerly providing a httplib2 wrapper.""" from __future__ import absolute_import, unicode_literals # (C) Pywikibot team, 2007-2015 __version__ = '$Id$' __docformat__ = 'epytext' # standard python libraries import codecs import sys if sys.version_info[0] > 2: from urllib.parse import urlparse else: from urlparse import urlparse import pywikibot from pywikibot.tools import UnicodeMixin _logger = "comm.threadedhttp" class HttpRequest(UnicodeMixin): """Object wrapper for HTTP requests that need to block origin thread. self.data will be either: * a tuple of (dict, unicode) if the request was successful * an exception """ def __init__(self, uri, method="GET", body=None, headers=None, callbacks=None, charset=None, **kwargs): """ Constructor. See C{Http.request} for parameters. """ self.uri = uri self.method = method self.body = body self.headers = headers if isinstance(charset, codecs.CodecInfo): self.charset = charset.name elif charset: self.charset = charset elif headers and 'accept-charset' in headers: self.charset = headers['accept-charset'] else: self.charset = None self.callbacks = callbacks self.args = [uri, method, body, headers] self.kwargs = kwargs self._parsed_uri = None self._data = None @property def data(self): """Return the requests response tuple.""" assert(self._data is not None) return self._data @data.setter def data(self, value): """Set the requests response and invoke each callback.""" self._data = value if self.callbacks: for callback in self.callbacks: callback(self) @property def exception(self): """Get the exception, if any.""" if isinstance(self.data, Exception): return self.data @property def response_headers(self): """Return the response headers.""" if not self.exception: return self.data.headers @property def raw(self): """Return the raw response body.""" if not self.exception: return self.data.content @property def parsed_uri(self): """Return the parsed requested uri.""" if not self._parsed_uri: self._parsed_uri = urlparse(self.uri) return self._parsed_uri @property def hostname(self): """Return the host of the request.""" return self.parsed_uri.netloc @property def status(self): """Return the HTTP response status. 
@rtype: int """ if not self.exception: return self.data.status_code @property def header_encoding(self): """Return charset given by the response header.""" if not hasattr(self, '_header_encoding'): pos = self.response_headers['content-type'].find('charset=') if pos >= 0: pos += len('charset=') encoding = self.response_headers['content-type'][pos:] self._header_encoding = encoding else: self._header_encoding = None return self._header_encoding @property def encoding(self): """Detect the response encoding.""" if not hasattr(self, '_encoding'): if not self.charset and not self.header_encoding: pywikibot.log(u"Http response doesn't contain a charset.") charset = 'latin1' else: charset = self.charset if (self.header_encoding and codecs.lookup(self.header_encoding) != (codecs.lookup(charset) if charset else None)): if charset: pywikibot.warning( 'Encoding "{0}" requested but "{1}" ' 'received in the header.'.format( charset, self.header_encoding)) try: # TODO: Buffer decoded content, weakref does remove it too # early (directly after this method) self.raw.decode(self.header_encoding) except UnicodeError as e: self._encoding = e else: self._encoding = self.header_encoding else: self._encoding = None if charset and (isinstance(self._encoding, Exception) or not self._encoding): try: self.raw.decode(charset) except UnicodeError as e: self._encoding = e else: self._encoding = charset if isinstance(self._encoding, Exception): raise self._encoding return self._encoding def decode(self, encoding, errors='strict'): """Return the decoded response.""" return self.raw.decode(encoding, errors) @property def content(self): """Return the response decoded by the detected encoding.""" return self.decode(self.encoding) def __unicode__(self): """Return the response decoded by the detected encoding.""" return self.content def __bytes__(self): """Return the undecoded response.""" return self.raw
mit
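The header_encoding property in the file above locates a charset= parameter inside the response's Content-Type header. A standalone sketch of that lookup (hypothetical function name; parsing is deliberately as naive as the property itself, ignoring quoting and parameter order):

def charset_from_content_type(content_type):
    """Return the charset named in a Content-Type value, or None."""
    marker = 'charset='
    pos = content_type.find(marker)
    if pos < 0:
        return None
    return content_type[pos + len(marker):].strip()

# charset_from_content_type('text/html; charset=utf-8') -> 'utf-8'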
madmouser1/beets
beetsplug/convert.py
8
18010
# -*- coding: utf-8 -*- # This file is part of beets. # Copyright 2016, Jakob Schnitzer. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Converts tracks or albums to external directory """ from __future__ import division, absolute_import, print_function import os import threading import subprocess import tempfile import shlex import six from string import Template from beets import ui, util, plugins, config from beets.plugins import BeetsPlugin from beets.util.confit import ConfigTypeError from beets import art from beets.util.artresizer import ArtResizer _fs_lock = threading.Lock() _temp_files = [] # Keep track of temporary transcoded files for deletion. # Some convenient alternate names for formats. ALIASES = { u'wma': u'windows media', u'vorbis': u'ogg', } LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav', 'aiff'] def replace_ext(path, ext): """Return the path with its extension replaced by `ext`. The new extension must not contain a leading dot. """ ext_dot = b'.' + ext return os.path.splitext(path)[0] + ext_dot def get_format(fmt=None): """Return the command template and the extension from the config. """ if not fmt: fmt = config['convert']['format'].as_str().lower() fmt = ALIASES.get(fmt, fmt) try: format_info = config['convert']['formats'][fmt].get(dict) command = format_info['command'] extension = format_info.get('extension', fmt) except KeyError: raise ui.UserError( u'convert: format {0} needs the "command" field' .format(fmt) ) except ConfigTypeError: command = config['convert']['formats'][fmt].get(str) extension = fmt # Convenience and backwards-compatibility shortcuts. keys = config['convert'].keys() if 'command' in keys: command = config['convert']['command'].as_str() elif 'opts' in keys: # Undocumented option for backwards compatibility with < 1.3.1. command = u'ffmpeg -i $source -y {0} $dest'.format( config['convert']['opts'].as_str() ) if 'extension' in keys: extension = config['convert']['extension'].as_str() return (command.encode('utf-8'), extension.encode('utf-8')) def should_transcode(item, fmt): """Determine whether the item should be transcoded as part of conversion (i.e., its bitrate is high or it has the wrong format). 
""" if config['convert']['never_convert_lossy_files'] and \ not (item.format.lower() in LOSSLESS_FORMATS): return False maxbr = config['convert']['max_bitrate'].get(int) return fmt.lower() != item.format.lower() or \ item.bitrate >= 1000 * maxbr class ConvertPlugin(BeetsPlugin): def __init__(self): super(ConvertPlugin, self).__init__() self.config.add({ u'dest': None, u'pretend': False, u'threads': util.cpu_count(), u'format': u'mp3', u'formats': { u'aac': { u'command': u'ffmpeg -i $source -y -vn -acodec libfaac ' u'-aq 100 $dest', u'extension': u'm4a', }, u'alac': { u'command': u'ffmpeg -i $source -y -vn -acodec alac $dest', u'extension': u'm4a', }, u'flac': u'ffmpeg -i $source -y -vn -acodec flac $dest', u'mp3': u'ffmpeg -i $source -y -vn -aq 2 $dest', u'opus': u'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest', u'ogg': u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest', u'wma': u'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest', }, u'max_bitrate': 500, u'auto': False, u'tmpdir': None, u'quiet': False, u'embed': True, u'paths': {}, u'never_convert_lossy_files': False, u'copy_album_art': False, u'album_art_maxwidth': 0, }) self.import_stages = [self.auto_convert] self.register_listener('import_task_files', self._cleanup) def commands(self): cmd = ui.Subcommand('convert', help=u'convert to external location') cmd.parser.add_option('-p', '--pretend', action='store_true', help=u'show actions but do nothing') cmd.parser.add_option('-t', '--threads', action='store', type='int', help=u'change the number of threads, \ defaults to maximum available processors') cmd.parser.add_option('-k', '--keep-new', action='store_true', dest='keep_new', help=u'keep only the converted \ and move the old files') cmd.parser.add_option('-d', '--dest', action='store', help=u'set the destination directory') cmd.parser.add_option('-f', '--format', action='store', dest='format', help=u'set the target format of the tracks') cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes', help=u'do not ask for confirmation') cmd.parser.add_album_option() cmd.func = self.convert_func return [cmd] def auto_convert(self, config, task): if self.config['auto']: for item in task.imported_items(): self.convert_on_import(config.lib, item) # Utilities converted from functions to methods on logging overhaul def encode(self, command, source, dest, pretend=False): """Encode `source` to `dest` using command template `command`. Raises `subprocess.CalledProcessError` if the command exited with a non-zero status code. """ # The paths and arguments must be bytes. assert isinstance(command, bytes) assert isinstance(source, bytes) assert isinstance(dest, bytes) quiet = self.config['quiet'].get(bool) if not quiet and not pretend: self._log.info(u'Encoding {0}', util.displayable_path(source)) # Substitute $source and $dest in the argument list. 
if not six.PY2: command = command.decode(util.arg_encoding(), 'surrogateescape') source = source.decode(util.arg_encoding(), 'surrogateescape') dest = dest.decode(util.arg_encoding(), 'surrogateescape') args = shlex.split(command) encode_cmd = [] for i, arg in enumerate(args): args[i] = Template(arg).safe_substitute({ 'source': source, 'dest': dest, }) if six.PY2: encode_cmd.append(args[i]) else: encode_cmd.append(args[i].encode(util.arg_encoding())) if pretend: self._log.info(u'{0}', u' '.join(ui.decargs(args))) return try: util.command_output(encode_cmd) except subprocess.CalledProcessError as exc: # Something went wrong (probably Ctrl+C), remove temporary files self._log.info(u'Encoding {0} failed. Cleaning up...', util.displayable_path(source)) self._log.debug(u'Command {0} exited with status {1}: {2}', args, exc.returncode, exc.output) util.remove(dest) util.prune_dirs(os.path.dirname(dest)) raise except OSError as exc: raise ui.UserError( u"convert: couldn't invoke '{0}': {1}".format( u' '.join(ui.decargs(args)), exc ) ) if not quiet and not pretend: self._log.info(u'Finished encoding {0}', util.displayable_path(source)) def convert_item(self, dest_dir, keep_new, path_formats, fmt, pretend=False): """A pipeline thread that converts `Item` objects from a library. """ command, ext = get_format(fmt) item, original, converted = None, None, None while True: item = yield (item, original, converted) dest = item.destination(basedir=dest_dir, path_formats=path_formats) # When keeping the new file in the library, we first move the # current (pristine) file to the destination. We'll then copy it # back to its old path or transcode it to a new path. if keep_new: original = dest converted = item.path if should_transcode(item, fmt): converted = replace_ext(converted, ext) else: original = item.path if should_transcode(item, fmt): dest = replace_ext(dest, ext) converted = dest # Ensure that only one thread tries to create directories at a # time. (The existence check is not atomic with the directory # creation inside this function.) if not pretend: with _fs_lock: util.mkdirall(dest) if os.path.exists(util.syspath(dest)): self._log.info(u'Skipping {0} (target file exists)', util.displayable_path(item.path)) continue if keep_new: if pretend: self._log.info(u'mv {0} {1}', util.displayable_path(item.path), util.displayable_path(original)) else: self._log.info(u'Moving to {0}', util.displayable_path(original)) util.move(item.path, original) if should_transcode(item, fmt): try: self.encode(command, original, converted, pretend) except subprocess.CalledProcessError: continue else: if pretend: self._log.info(u'cp {0} {1}', util.displayable_path(original), util.displayable_path(converted)) else: # No transcoding necessary. self._log.info(u'Copying {0}', util.displayable_path(item.path)) util.copy(original, converted) if pretend: continue # Write tags from the database to the converted file. item.try_write(path=converted) if keep_new: # If we're keeping the transcoded file, read it again (after # writing) to get new bitrate, duration, etc. item.path = converted item.read() item.store() # Store new path and audio data. 
if self.config['embed']: album = item.get_album() if album and album.artpath: self._log.debug(u'embedding album art from {}', util.displayable_path(album.artpath)) art.embed_item(self._log, item, album.artpath, itempath=converted) if keep_new: plugins.send('after_convert', item=item, dest=dest, keepnew=True) else: plugins.send('after_convert', item=item, dest=converted, keepnew=False) def copy_album_art(self, album, dest_dir, path_formats, pretend=False): """Copies or converts the associated cover art of the album. Album must have at least one track. """ if not album or not album.artpath: return album_item = album.items().get() # Album shouldn't be empty. if not album_item: return # Get the destination of the first item (track) of the album, we use # this function to format the path accordingly to path_formats. dest = album_item.destination(basedir=dest_dir, path_formats=path_formats) # Remove item from the path. dest = os.path.join(*util.components(dest)[:-1]) dest = album.art_destination(album.artpath, item_dir=dest) if album.artpath == dest: return if not pretend: util.mkdirall(dest) if os.path.exists(util.syspath(dest)): self._log.info(u'Skipping {0} (target file exists)', util.displayable_path(album.artpath)) return # Decide whether we need to resize the cover-art image. resize = False maxwidth = None if self.config['album_art_maxwidth']: maxwidth = self.config['album_art_maxwidth'].get(int) size = ArtResizer.shared.get_size(album.artpath) self._log.debug('image size: {}', size) if size: resize = size[0] > maxwidth else: self._log.warning(u'Could not get size of image (please see ' u'documentation for dependencies).') # Either copy or resize (while copying) the image. if resize: self._log.info(u'Resizing cover art from {0} to {1}', util.displayable_path(album.artpath), util.displayable_path(dest)) if not pretend: ArtResizer.shared.resize(maxwidth, album.artpath, dest) else: if pretend: self._log.info(u'cp {0} {1}', util.displayable_path(album.artpath), util.displayable_path(dest)) else: self._log.info(u'Copying cover art to {0}', util.displayable_path(dest)) util.copy(album.artpath, dest) def convert_func(self, lib, opts, args): dest = opts.dest or self.config['dest'].get() if not dest: raise ui.UserError(u'no convert destination set') dest = util.bytestring_path(dest) threads = opts.threads or self.config['threads'].get(int) path_formats = ui.get_path_formats(self.config['paths'] or None) fmt = opts.format or self.config['format'].as_str().lower() if opts.pretend is not None: pretend = opts.pretend else: pretend = self.config['pretend'].get(bool) if opts.album: albums = lib.albums(ui.decargs(args)) items = [i for a in albums for i in a.items()] if not pretend: for a in albums: ui.print_(format(a, u'')) else: items = list(lib.items(ui.decargs(args))) if not pretend: for i in items: ui.print_(format(i, u'')) if not items: self._log.error(u'Empty query result.') return if not (pretend or opts.yes or ui.input_yn(u"Convert? (Y/n)")): return if opts.album and self.config['copy_album_art']: for album in albums: self.copy_album_art(album, dest, path_formats, pretend) convert = [self.convert_item(dest, opts.keep_new, path_formats, fmt, pretend) for _ in range(threads)] pipe = util.pipeline.Pipeline([iter(items), convert]) pipe.run_parallel() def convert_on_import(self, lib, item): """Transcode a file automatically after it is imported into the library. 
""" fmt = self.config['format'].as_str().lower() if should_transcode(item, fmt): command, ext = get_format() # Create a temporary file for the conversion. tmpdir = self.config['tmpdir'].get() if tmpdir: tmpdir = util.py3_path(util.bytestring_path(tmpdir)) fd, dest = tempfile.mkstemp(util.py3_path(b'.' + ext), dir=tmpdir) os.close(fd) dest = util.bytestring_path(dest) _temp_files.append(dest) # Delete the transcode later. # Convert. try: self.encode(command, item.path, dest) except subprocess.CalledProcessError: return # Change the newly-imported database entry to point to the # converted file. item.path = dest item.write() item.read() # Load new audio information data. item.store() def _cleanup(self, task, session): for path in task.old_paths: if path in _temp_files: if os.path.isfile(path): util.remove(path) _temp_files.remove(path)
mit
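encode() in the plugin above splits the configured command template with shlex and substitutes $source/$dest per argument, which keeps paths containing spaces as single argv entries. A standalone sketch of just that step (hypothetical function name; the ffmpeg template and paths are illustrative):

import shlex
from string import Template

def build_encode_cmd(command, source, dest):
    # Substituting after splitting means a path with spaces
    # stays one argument instead of being re-tokenized.
    return [Template(arg).safe_substitute({'source': source, 'dest': dest})
            for arg in shlex.split(command)]

# build_encode_cmd('ffmpeg -i $source -y -vn -aq 2 $dest',
#                  '/music/my song.flac', '/tmp/out.mp3')
# -> ['ffmpeg', '-i', '/music/my song.flac', '-y', '-vn', '-aq', '2',
#     '/tmp/out.mp3']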
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/PIL/JpegImagePlugin.py
2
27663
# # The Python Imaging Library. # $Id$ # # JPEG (JFIF) file handling # # See "Digital Compression and Coding of Continuous-Tone Still Images, # Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1) # # History: # 1995-09-09 fl Created # 1995-09-13 fl Added full parser # 1996-03-25 fl Added hack to use the IJG command line utilities # 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug # 1996-05-28 fl Added draft support, JFIF version (0.1) # 1996-12-30 fl Added encoder options, added progression property (0.2) # 1997-08-27 fl Save mode 1 images as BW (0.3) # 1998-07-12 fl Added YCbCr to draft and save methods (0.4) # 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1) # 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2) # 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3) # 2003-04-25 fl Added experimental EXIF decoder (0.5) # 2003-06-06 fl Added experimental EXIF GPSinfo decoder # 2003-09-13 fl Extract COM markers # 2009-09-06 fl Added icc_profile support (from Florian Hoech) # 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6) # 2009-03-08 fl Added subsampling support (from Justin Huff). # # Copyright (c) 1997-2003 by Secret Labs AB. # Copyright (c) 1995-1996 by Fredrik Lundh. # # See the README file for information on usage and redistribution. # from __future__ import print_function import array import struct import io import warnings from . import Image, ImageFile, TiffImagePlugin from ._binary import i8, o8, i16be as i16 from .JpegPresets import presets from ._util import isStringType __version__ = "0.6" # # Parser def Skip(self, marker): n = i16(self.fp.read(2))-2 ImageFile._safe_read(self.fp, n) def APP(self, marker): # # Application marker. Store these in the APP dictionary. # Also look for well-known application markers. n = i16(self.fp.read(2))-2 s = ImageFile._safe_read(self.fp, n) app = "APP%d" % (marker & 15) self.app[app] = s # compatibility self.applist.append((app, s)) if marker == 0xFFE0 and s[:4] == b"JFIF": # extract JFIF information self.info["jfif"] = version = i16(s, 5) # version self.info["jfif_version"] = divmod(version, 256) # extract JFIF properties try: jfif_unit = i8(s[7]) jfif_density = i16(s, 8), i16(s, 10) except: pass else: if jfif_unit == 1: self.info["dpi"] = jfif_density self.info["jfif_unit"] = jfif_unit self.info["jfif_density"] = jfif_density elif marker == 0xFFE1 and s[:5] == b"Exif\0": if "exif" not in self.info: # extract Exif information (incomplete) self.info["exif"] = s # FIXME: value will change elif marker == 0xFFE2 and s[:5] == b"FPXR\0": # extract FlashPix information (incomplete) self.info["flashpix"] = s # FIXME: value will change elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0": # Since an ICC profile can be larger than the maximum size of # a JPEG marker (64K), we need provisions to split it into # multiple markers. The format defined by the ICC specifies # one or more APP2 markers containing the following data: # Identifying string ASCII "ICC_PROFILE\0" (12 bytes) # Marker sequence number 1, 2, etc (1 byte) # Number of markers Total of APP2's used (1 byte) # Profile data (remainder of APP2 data) # Decoders should use the marker sequence numbers to # reassemble the profile, rather than assuming that the APP2 # markers appear in the correct sequence. 
self.icclist.append(s) elif marker == 0xFFEE and s[:5] == b"Adobe": self.info["adobe"] = i16(s, 5) # extract Adobe custom properties try: adobe_transform = i8(s[1]) except: pass else: self.info["adobe_transform"] = adobe_transform elif marker == 0xFFE2 and s[:4] == b"MPF\0": # extract MPO information self.info["mp"] = s[4:] # offset is current location minus buffer size # plus constant header size self.info["mpoffset"] = self.fp.tell() - n + 4 # If DPI isn't in JPEG header, fetch from EXIF if "dpi" not in self.info and "exif" in self.info: try: exif = self._getexif() resolution_unit = exif[0x0128] x_resolution = exif[0x011A] try: dpi = x_resolution[0] / x_resolution[1] except TypeError: dpi = x_resolution if resolution_unit == 3: # cm # 1 dpcm = 2.54 dpi dpi *= 2.54 self.info["dpi"] = dpi, dpi except (KeyError, SyntaxError, ZeroDivisionError): # SyntaxError for invalid/unreadable exif # KeyError for dpi not included # ZeroDivisionError for invalid dpi rational value self.info["dpi"] = 72, 72 def COM(self, marker): # # Comment marker. Store these in the APP dictionary. n = i16(self.fp.read(2))-2 s = ImageFile._safe_read(self.fp, n) self.app["COM"] = s # compatibility self.applist.append(("COM", s)) def SOF(self, marker): # # Start of frame marker. Defines the size and mode of the # image. JPEG is colour blind, so we use some simple # heuristics to map the number of layers to an appropriate # mode. Note that this could be made a bit brighter, by # looking for JFIF and Adobe APP markers. n = i16(self.fp.read(2))-2 s = ImageFile._safe_read(self.fp, n) self._size = i16(s[3:]), i16(s[1:]) self.bits = i8(s[0]) if self.bits != 8: raise SyntaxError("cannot handle %d-bit layers" % self.bits) self.layers = i8(s[5]) if self.layers == 1: self.mode = "L" elif self.layers == 3: self.mode = "RGB" elif self.layers == 4: self.mode = "CMYK" else: raise SyntaxError("cannot handle %d-layer images" % self.layers) if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]: self.info["progressive"] = self.info["progression"] = 1 if self.icclist: # fixup icc profile self.icclist.sort() # sort by sequence number if i8(self.icclist[0][13]) == len(self.icclist): profile = [] for p in self.icclist: profile.append(p[14:]) icc_profile = b"".join(profile) else: icc_profile = None # wrong number of fragments self.info["icc_profile"] = icc_profile self.icclist = None for i in range(6, len(s), 3): t = s[i:i+3] # 4-tuples: id, vsamp, hsamp, qtable self.layer.append((t[0], i8(t[1])//16, i8(t[1]) & 15, i8(t[2]))) def DQT(self, marker): # # Define quantization table. Support baseline 8-bit tables # only. Note that there might be more than one table in # each marker. # FIXME: The quantization tables can be used to estimate the # compression quality. n = i16(self.fp.read(2))-2 s = ImageFile._safe_read(self.fp, n) while len(s): if len(s) < 65: raise SyntaxError("bad quantization table marker") v = i8(s[0]) if v//16 == 0: self.quantization[v & 15] = array.array("B", s[1:65]) s = s[65:] else: return # FIXME: add code to read 16-bit tables! 
# raise SyntaxError, "bad quantization table element size" # # JPEG marker table MARKER = { 0xFFC0: ("SOF0", "Baseline DCT", SOF), 0xFFC1: ("SOF1", "Extended Sequential DCT", SOF), 0xFFC2: ("SOF2", "Progressive DCT", SOF), 0xFFC3: ("SOF3", "Spatial lossless", SOF), 0xFFC4: ("DHT", "Define Huffman table", Skip), 0xFFC5: ("SOF5", "Differential sequential DCT", SOF), 0xFFC6: ("SOF6", "Differential progressive DCT", SOF), 0xFFC7: ("SOF7", "Differential spatial", SOF), 0xFFC8: ("JPG", "Extension", None), 0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF), 0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF), 0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF), 0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip), 0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF), 0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF), 0xFFCF: ("SOF15", "Differential spatial (AC)", SOF), 0xFFD0: ("RST0", "Restart 0", None), 0xFFD1: ("RST1", "Restart 1", None), 0xFFD2: ("RST2", "Restart 2", None), 0xFFD3: ("RST3", "Restart 3", None), 0xFFD4: ("RST4", "Restart 4", None), 0xFFD5: ("RST5", "Restart 5", None), 0xFFD6: ("RST6", "Restart 6", None), 0xFFD7: ("RST7", "Restart 7", None), 0xFFD8: ("SOI", "Start of image", None), 0xFFD9: ("EOI", "End of image", None), 0xFFDA: ("SOS", "Start of scan", Skip), 0xFFDB: ("DQT", "Define quantization table", DQT), 0xFFDC: ("DNL", "Define number of lines", Skip), 0xFFDD: ("DRI", "Define restart interval", Skip), 0xFFDE: ("DHP", "Define hierarchical progression", SOF), 0xFFDF: ("EXP", "Expand reference component", Skip), 0xFFE0: ("APP0", "Application segment 0", APP), 0xFFE1: ("APP1", "Application segment 1", APP), 0xFFE2: ("APP2", "Application segment 2", APP), 0xFFE3: ("APP3", "Application segment 3", APP), 0xFFE4: ("APP4", "Application segment 4", APP), 0xFFE5: ("APP5", "Application segment 5", APP), 0xFFE6: ("APP6", "Application segment 6", APP), 0xFFE7: ("APP7", "Application segment 7", APP), 0xFFE8: ("APP8", "Application segment 8", APP), 0xFFE9: ("APP9", "Application segment 9", APP), 0xFFEA: ("APP10", "Application segment 10", APP), 0xFFEB: ("APP11", "Application segment 11", APP), 0xFFEC: ("APP12", "Application segment 12", APP), 0xFFED: ("APP13", "Application segment 13", APP), 0xFFEE: ("APP14", "Application segment 14", APP), 0xFFEF: ("APP15", "Application segment 15", APP), 0xFFF0: ("JPG0", "Extension 0", None), 0xFFF1: ("JPG1", "Extension 1", None), 0xFFF2: ("JPG2", "Extension 2", None), 0xFFF3: ("JPG3", "Extension 3", None), 0xFFF4: ("JPG4", "Extension 4", None), 0xFFF5: ("JPG5", "Extension 5", None), 0xFFF6: ("JPG6", "Extension 6", None), 0xFFF7: ("JPG7", "Extension 7", None), 0xFFF8: ("JPG8", "Extension 8", None), 0xFFF9: ("JPG9", "Extension 9", None), 0xFFFA: ("JPG10", "Extension 10", None), 0xFFFB: ("JPG11", "Extension 11", None), 0xFFFC: ("JPG12", "Extension 12", None), 0xFFFD: ("JPG13", "Extension 13", None), 0xFFFE: ("COM", "Comment", COM) } def _accept(prefix): return prefix[0:1] == b"\377" ## # Image plugin for JPEG and JFIF images. 
class JpegImageFile(ImageFile.ImageFile): format = "JPEG" format_description = "JPEG (ISO 10918)" def _open(self): s = self.fp.read(1) if i8(s) != 255: raise SyntaxError("not a JPEG file") # Create attributes self.bits = self.layers = 0 # JPEG specifics (internal) self.layer = [] self.huffman_dc = {} self.huffman_ac = {} self.quantization = {} self.app = {} # compatibility self.applist = [] self.icclist = [] while True: i = i8(s) if i == 0xFF: s = s + self.fp.read(1) i = i16(s) else: # Skip non-0xFF junk s = self.fp.read(1) continue if i in MARKER: name, description, handler = MARKER[i] if handler is not None: handler(self, i) if i == 0xFFDA: # start of scan rawmode = self.mode if self.mode == "CMYK": rawmode = "CMYK;I" # assume adobe conventions self.tile = [("jpeg", (0, 0) + self.size, 0, (rawmode, ""))] # self.__offset = self.fp.tell() break s = self.fp.read(1) elif i == 0 or i == 0xFFFF: # padded marker or junk; move on s = b"\xff" elif i == 0xFF00: # Skip extraneous data (escaped 0xFF) s = self.fp.read(1) else: raise SyntaxError("no marker found") def load_read(self, read_bytes): """ internal: read more image data For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker so libjpeg can finish decoding """ s = self.fp.read(read_bytes) if not s and ImageFile.LOAD_TRUNCATED_IMAGES: # Premature EOF. # Pretend file is finished adding EOI marker return b"\xFF\xD9" return s def draft(self, mode, size): if len(self.tile) != 1: return # Protect from second call if self.decoderconfig: return d, e, o, a = self.tile[0] scale = 0 if a[0] == "RGB" and mode in ["L", "YCbCr"]: self.mode = mode a = mode, "" if size: scale = min(self.size[0] // size[0], self.size[1] // size[1]) for s in [8, 4, 2, 1]: if scale >= s: break e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1] self._size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s) scale = s self.tile = [(d, e, o, a)] self.decoderconfig = (scale, 0) return self def load_djpeg(self): # ALTERNATIVE: handle JPEGs via the IJG command line utilities import subprocess import tempfile import os f, path = tempfile.mkstemp() os.close(f) if os.path.exists(self.filename): subprocess.check_call(["djpeg", "-outfile", path, self.filename]) else: raise ValueError("Invalid Filename") try: _im = Image.open(path) _im.load() self.im = _im.im finally: try: os.unlink(path) except OSError: pass self.mode = self.im.mode self._size = self.im.size self.tile = [] def _getexif(self): return _getexif(self) def _getmp(self): return _getmp(self) def _fixup_dict(src_dict): # Helper function for _getexif() # returns a dict with any single item tuples/lists as individual values def _fixup(value): try: if len(value) == 1 and not isinstance(value, dict): return value[0] except: pass return value return {k: _fixup(v) for k, v in src_dict.items()} def _getexif(self): # Extract EXIF information. This method is highly experimental, # and is likely to be replaced with something better in a future # version. # The EXIF record consists of a TIFF file embedded in a JPEG # application marker (!). try: data = self.info["exif"] except KeyError: return None file = io.BytesIO(data[6:]) head = file.read(8) # process dictionary info = TiffImagePlugin.ImageFileDirectory_v1(head) info.load(file) exif = dict(_fixup_dict(info)) # get exif extension try: # exif field 0x8769 is an offset pointer to the location # of the nested embedded exif ifd. # It should be a long, but may be corrupted. 
file.seek(exif[0x8769]) except (KeyError, TypeError): pass else: info = TiffImagePlugin.ImageFileDirectory_v1(head) info.load(file) exif.update(_fixup_dict(info)) # get gpsinfo extension try: # exif field 0x8825 is an offset pointer to the location # of the nested embedded gps exif ifd. # It should be a long, but may be corrupted. file.seek(exif[0x8825]) except (KeyError, TypeError): pass else: info = TiffImagePlugin.ImageFileDirectory_v1(head) info.load(file) exif[0x8825] = _fixup_dict(info) return exif def _getmp(self): # Extract MP information. This method was inspired by the "highly # experimental" _getexif version that's been in use for years now, # itself based on the ImageFileDirectory class in the TIFF plug-in. # The MP record essentially consists of a TIFF file embedded in a JPEG # application marker. try: data = self.info["mp"] except KeyError: return None file_contents = io.BytesIO(data) head = file_contents.read(8) endianness = '>' if head[:4] == b'\x4d\x4d\x00\x2a' else '<' # process dictionary try: info = TiffImagePlugin.ImageFileDirectory_v2(head) info.load(file_contents) mp = dict(info) except: raise SyntaxError("malformed MP Index (unreadable directory)") # it's an error not to have a number of images try: quant = mp[0xB001] except KeyError: raise SyntaxError("malformed MP Index (no number of images)") # get MP entries mpentries = [] try: rawmpentries = mp[0xB002] for entrynum in range(0, quant): unpackedentry = struct.unpack_from( '{}LLLHH'.format(endianness), rawmpentries, entrynum * 16) labels = ('Attribute', 'Size', 'DataOffset', 'EntryNo1', 'EntryNo2') mpentry = dict(zip(labels, unpackedentry)) mpentryattr = { 'DependentParentImageFlag': bool(mpentry['Attribute'] & (1 << 31)), 'DependentChildImageFlag': bool(mpentry['Attribute'] & (1 << 30)), 'RepresentativeImageFlag': bool(mpentry['Attribute'] & (1 << 29)), 'Reserved': (mpentry['Attribute'] & (3 << 27)) >> 27, 'ImageDataFormat': (mpentry['Attribute'] & (7 << 24)) >> 24, 'MPType': mpentry['Attribute'] & 0x00FFFFFF } if mpentryattr['ImageDataFormat'] == 0: mpentryattr['ImageDataFormat'] = 'JPEG' else: raise SyntaxError("unsupported picture format in MPO") mptypemap = { 0x000000: 'Undefined', 0x010001: 'Large Thumbnail (VGA Equivalent)', 0x010002: 'Large Thumbnail (Full HD Equivalent)', 0x020001: 'Multi-Frame Image (Panorama)', 0x020002: 'Multi-Frame Image: (Disparity)', 0x020003: 'Multi-Frame Image: (Multi-Angle)', 0x030000: 'Baseline MP Primary Image' } mpentryattr['MPType'] = mptypemap.get(mpentryattr['MPType'], 'Unknown') mpentry['Attribute'] = mpentryattr mpentries.append(mpentry) mp[0xB002] = mpentries except KeyError: raise SyntaxError("malformed MP Index (bad MP Entry)") # Next we should try and parse the individual image unique ID list; # we don't because I've never seen this actually used in a real MPO # file and so can't test it. 
return mp # -------------------------------------------------------------------- # stuff to save JPEG files RAWMODE = { "1": "L", "L": "L", "RGB": "RGB", "RGBX": "RGB", "CMYK": "CMYK;I", # assume adobe conventions "YCbCr": "YCbCr", } zigzag_index = (0, 1, 5, 6, 14, 15, 27, 28, 2, 4, 7, 13, 16, 26, 29, 42, 3, 8, 12, 17, 25, 30, 41, 43, 9, 11, 18, 24, 31, 40, 44, 53, 10, 19, 23, 32, 39, 45, 52, 54, 20, 22, 33, 38, 46, 51, 55, 60, 21, 34, 37, 47, 50, 56, 59, 61, 35, 36, 48, 49, 57, 58, 62, 63) samplings = {(1, 1, 1, 1, 1, 1): 0, (2, 1, 1, 1, 1, 1): 1, (2, 2, 1, 1, 1, 1): 2, } def convert_dict_qtables(qtables): qtables = [qtables[key] for key in range(len(qtables)) if key in qtables] for idx, table in enumerate(qtables): qtables[idx] = [table[i] for i in zigzag_index] return qtables def get_sampling(im): # There's no subsampling when image have only 1 layer # (grayscale images) or when they are CMYK (4 layers), # so set subsampling to default value. # # NOTE: currently Pillow can't encode JPEG to YCCK format. # If YCCK support is added in the future, subsampling code will have # to be updated (here and in JpegEncode.c) to deal with 4 layers. if not hasattr(im, 'layers') or im.layers in (1, 4): return -1 sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3] return samplings.get(sampling, -1) def _save(im, fp, filename): try: rawmode = RAWMODE[im.mode] except KeyError: raise IOError("cannot write mode %s as JPEG" % im.mode) info = im.encoderinfo dpi = [int(round(x)) for x in info.get("dpi", (0, 0))] quality = info.get("quality", 0) subsampling = info.get("subsampling", -1) qtables = info.get("qtables") if quality == "keep": quality = 0 subsampling = "keep" qtables = "keep" elif quality in presets: preset = presets[quality] quality = 0 subsampling = preset.get('subsampling', -1) qtables = preset.get('quantization') elif not isinstance(quality, int): raise ValueError("Invalid quality setting") else: if subsampling in presets: subsampling = presets[subsampling].get('subsampling', -1) if isStringType(qtables) and qtables in presets: qtables = presets[qtables].get('quantization') if subsampling == "4:4:4": subsampling = 0 elif subsampling == "4:2:2": subsampling = 1 elif subsampling == "4:2:0": subsampling = 2 elif subsampling == "4:1:1": # For compatibility. Before Pillow 4.3, 4:1:1 actually meant 4:2:0. # Set 4:2:0 if someone is still using that value. 
subsampling = 2 elif subsampling == "keep": if im.format != "JPEG": raise ValueError( "Cannot use 'keep' when original image is not a JPEG") subsampling = get_sampling(im) def validate_qtables(qtables): if qtables is None: return qtables if isStringType(qtables): try: lines = [int(num) for line in qtables.splitlines() for num in line.split('#', 1)[0].split()] except ValueError: raise ValueError("Invalid quantization table") else: qtables = [lines[s:s+64] for s in range(0, len(lines), 64)] if isinstance(qtables, (tuple, list, dict)): if isinstance(qtables, dict): qtables = convert_dict_qtables(qtables) elif isinstance(qtables, tuple): qtables = list(qtables) if not (0 < len(qtables) < 5): raise ValueError("None or too many quantization tables") for idx, table in enumerate(qtables): try: if len(table) != 64: raise TypeError table = array.array('B', table) except TypeError: raise ValueError("Invalid quantization table") else: qtables[idx] = list(table) return qtables if qtables == "keep": if im.format != "JPEG": raise ValueError( "Cannot use 'keep' when original image is not a JPEG") qtables = getattr(im, "quantization", None) qtables = validate_qtables(qtables) extra = b"" icc_profile = info.get("icc_profile") if icc_profile: ICC_OVERHEAD_LEN = 14 MAX_BYTES_IN_MARKER = 65533 MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN markers = [] while icc_profile: markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER]) icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:] i = 1 for marker in markers: size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker)) extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) + o8(len(markers)) + marker) i += 1 # "progressive" is the official name, but older documentation # says "progression" # FIXME: issue a warning if the wrong form is used (post-1.1.7) progressive = (info.get("progressive", False) or info.get("progression", False)) optimize = info.get("optimize", False) # get keyword arguments im.encoderconfig = ( quality, progressive, info.get("smooth", 0), optimize, info.get("streamtype", 0), dpi[0], dpi[1], subsampling, qtables, extra, info.get("exif", b"") ) # if we optimize, libjpeg needs a buffer big enough to hold the whole image # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is # channels*size, this is a value that's been used in a django patch. # https://github.com/matthewwithanm/django-imagekit/issues/50 bufsize = 0 if optimize or progressive: # CMYK can be bigger if im.mode == 'CMYK': bufsize = 4 * im.size[0] * im.size[1] # keep sets quality to 0, but the actual value may be high. elif quality >= 95 or quality == 0: bufsize = 2 * im.size[0] * im.size[1] else: bufsize = im.size[0] * im.size[1] # The exif info needs to be written as one block, + APP1, + one spare byte. # Ensure that our buffer is big enough. Same with the icc_profile block. bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif", b"")) + 5, len(extra) + 1) ImageFile._save(im, fp, [("jpeg", (0, 0)+im.size, 0, rawmode)], bufsize) def _save_cjpeg(im, fp, filename): # ALTERNATIVE: handle JPEGs via the IJG command line utilities. 
import os import subprocess tempfile = im._dump() subprocess.check_call(["cjpeg", "-outfile", filename, tempfile]) try: os.unlink(tempfile) except OSError: pass ## # Factory for making JPEG and MPO instances def jpeg_factory(fp=None, filename=None): im = JpegImageFile(fp, filename) try: mpheader = im._getmp() if mpheader[45057] > 1: # It's actually an MPO from .MpoImagePlugin import MpoImageFile im = MpoImageFile(fp, filename) except (TypeError, IndexError): # It is really a JPEG pass except SyntaxError: warnings.warn("Image appears to be a malformed MPO file, it will be " "interpreted as a base JPEG file") return im # --------------------------------------------------------------------- # Registry stuff Image.register_open(JpegImageFile.format, jpeg_factory, _accept) Image.register_save(JpegImageFile.format, _save) Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"]) Image.register_mime(JpegImageFile.format, "image/jpeg")
gpl-3.0
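The APP2 comments and the _save() code in the plugin above describe how an ICC profile larger than a single JPEG marker payload is split across several APP2 markers, each tagged ICC_PROFILE\0 with a 1-based sequence number and the total marker count. A sketch of just that chunking, extracted for illustration (hypothetical function name; Python 3 bytes handling assumed):

import struct

ICC_OVERHEAD_LEN = 14  # b"ICC_PROFILE\0" (12) + sequence byte + count byte
MAX_DATA_BYTES_IN_MARKER = 65533 - ICC_OVERHEAD_LEN

def icc_profile_markers(icc_profile):
    """Split an ICC profile into concatenated APP2 marker segments."""
    chunks = [icc_profile[i:i + MAX_DATA_BYTES_IN_MARKER]
              for i in range(0, len(icc_profile), MAX_DATA_BYTES_IN_MARKER)]
    extra = b""
    for seq, chunk in enumerate(chunks, start=1):  # seq must fit in one byte
        size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(chunk))
        extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" +
                  bytes([seq]) + bytes([len(chunks)]) + chunk)
    return extra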
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/oauth2client/contrib/_appengine_ndb.py
70
5381
# Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Google App Engine utilities helper. Classes that directly require App Engine's ndb library. Provided as a separate module in case of failure to import ndb while other App Engine libraries are present. """ import logging from google.appengine.ext import ndb from oauth2client import client NDB_KEY = ndb.Key """Key constant used by :mod:`oauth2client.contrib.appengine`.""" NDB_MODEL = ndb.Model """Model constant used by :mod:`oauth2client.contrib.appengine`.""" _LOGGER = logging.getLogger(__name__) class SiteXsrfSecretKeyNDB(ndb.Model): """NDB Model for storage for the sites XSRF secret key. Since this model uses the same kind as SiteXsrfSecretKey, it can be used interchangeably. This simply provides an NDB model for interacting with the same data the DB model interacts with. There should only be one instance stored of this model, the one used for the site. """ secret = ndb.StringProperty() @classmethod def _get_kind(cls): """Return the kind name for this class.""" return 'SiteXsrfSecretKey' class FlowNDBProperty(ndb.PickleProperty): """App Engine NDB datastore Property for Flow. Serves the same purpose as the DB FlowProperty, but for NDB models. Since PickleProperty inherits from BlobProperty, the underlying representation of the data in the datastore will be the same as in the DB case. Utility property that allows easy storage and retrieval of an oauth2client.Flow """ def _validate(self, value): """Validates a value as a proper Flow object. Args: value: A value to be set on the property. Raises: TypeError if the value is not an instance of Flow. """ _LOGGER.info('validate: Got type %s', type(value)) if value is not None and not isinstance(value, client.Flow): raise TypeError( 'Property {0} must be convertible to a flow ' 'instance; received: {1}.'.format(self._name, value)) class CredentialsNDBProperty(ndb.BlobProperty): """App Engine NDB datastore Property for Credentials. Serves the same purpose as the DB CredentialsProperty, but for NDB models. Since CredentialsProperty stores data as a blob and this inherits from BlobProperty, the data in the datastore will be the same as in the DB case. Utility property that allows easy storage and retrieval of Credentials and subclasses. """ def _validate(self, value): """Validates a value as a proper credentials object. Args: value: A value to be set on the property. Raises: TypeError if the value is not an instance of Credentials. """ _LOGGER.info('validate: Got type %s', type(value)) if value is not None and not isinstance(value, client.Credentials): raise TypeError( 'Property {0} must be convertible to a credentials ' 'instance; received: {1}.'.format(self._name, value)) def _to_base_type(self, value): """Converts our validated value to a JSON serialized string. Args: value: A value to be set in the datastore. Returns: A JSON serialized version of the credential, else '' if value is None. 
""" if value is None: return '' else: return value.to_json() def _from_base_type(self, value): """Converts our stored JSON string back to the desired type. Args: value: A value from the datastore to be converted to the desired type. Returns: A deserialized Credentials (or subclass) object, else None if the value can't be parsed. """ if not value: return None try: # Uses the from_json method of the implied class of value credentials = client.Credentials.new_from_json(value) except ValueError: credentials = None return credentials class CredentialsNDBModel(ndb.Model): """NDB Model for storage of OAuth 2.0 Credentials Since this model uses the same kind as CredentialsModel and has a property which can serialize and deserialize Credentials correctly, it can be used interchangeably with a CredentialsModel to access, insert and delete the same entities. This simply provides an NDB model for interacting with the same data the DB model interacts with. Storage of the model is keyed by the user.user_id(). """ credentials = CredentialsNDBProperty() @classmethod def _get_kind(cls): """Return the kind name for this class.""" return 'CredentialsModel'
gpl-3.0
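CredentialsNDBProperty in the module above serializes a credentials object to JSON on the way into the datastore and tolerantly deserializes on the way out, mapping empty or unparseable payloads to None. A minimal sketch of that round trip, with plain json standing in for oauth2client's to_json/new_from_json helpers (function names are hypothetical):

import json

def to_base_type(value):
    # None is stored as the empty string, mirroring the property above.
    return '' if value is None else json.dumps(value)

def from_base_type(value):
    if not value:
        return None
    try:
        return json.loads(value)
    except ValueError:
        # Unparseable payloads deserialize to None rather than raising.
        return None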
pombredanne/pyjs
examples/maparea/__main__.py
8
1049
#!/usr/bin/env python # -*- coding: utf-8 -*- TARGETS = [ 'MapAreaDemo.py', ] PACKAGE = { 'title': 'Map Area Demo', 'desc': '(unknown)', } def setup(targets): '''Setup example for translation, MUST call util.setup(targets).''' util.setup(targets) def translate(): '''Translate example, MUST call util.translate().''' util.translate() def install(package): '''Install and cleanup example module. MUST call util.install(package)''' util.install(package) ##---------------------------------------## # --------- (-: DO NOT EDIT :-) --------- # ##---------------------------------------## import sys import os examples = head = os.path.abspath(os.path.dirname(__file__)) while os.path.split(examples)[1].lower() != 'examples': parent = os.path.split(examples)[0] if parent == examples: # reached the filesystem root without finding 'examples' raise ValueError("Cannot determine examples directory") examples = parent sys.path.insert(0, os.path.join(examples)) from _examples import util sys.path.pop(0) util.init(head) setup(TARGETS) translate() install(PACKAGE)
apache-2.0
Francis-Liu/animated-broccoli
nova/tests/unit/pci/test_devspec.py
69
7568
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from nova import exception from nova import objects from nova.pci import devspec from nova import test dev = {"vendor_id": "8086", "product_id": "5057", "address": "1234:5678:8988.5", "phys_function": "0000:0a:00.0"} class PciAddressTestCase(test.NoDBTestCase): def test_wrong_address(self): pci_info = {"vendor_id": "8086", "address": "*: *: *.6", "product_id": "5057", "physical_network": "hr_net"} pci = devspec.PciDeviceSpec(pci_info) self.assertFalse(pci.match(dev)) def test_address_too_big(self): pci_info = {"address": "0000:0a:0b:00.5", "physical_network": "hr_net"} self.assertRaises(exception.PciDeviceWrongAddressFormat, devspec.PciDeviceSpec, pci_info) def test_address_invalid_character(self): pci_info = {"address": "0000:h4.12:6", "physical_network": "hr_net"} self.assertRaises(exception.PciDeviceWrongAddressFormat, devspec.PciDeviceSpec, pci_info) def test_max_func(self): pci_info = {"address": "0000:0a:00.%s" % (devspec.MAX_FUNC + 1), "physical_network": "hr_net"} exc = self.assertRaises(exception.PciDeviceInvalidAddressField, devspec.PciDeviceSpec, pci_info) msg = ('Invalid PCI Whitelist: ' 'The PCI address 0000:0a:00.%s has an invalid function.' % (devspec.MAX_FUNC + 1)) self.assertEqual(msg, six.text_type(exc)) def test_max_domain(self): pci_info = {"address": "%x:0a:00.5" % (devspec.MAX_DOMAIN + 1), "physical_network": "hr_net"} exc = self.assertRaises(exception.PciConfigInvalidWhitelist, devspec.PciDeviceSpec, pci_info) msg = ('Invalid PCI devices Whitelist config invalid domain %x' % (devspec.MAX_DOMAIN + 1)) self.assertEqual(msg, six.text_type(exc)) def test_max_bus(self): pci_info = {"address": "0000:%x:00.5" % (devspec.MAX_BUS + 1), "physical_network": "hr_net"} exc = self.assertRaises(exception.PciConfigInvalidWhitelist, devspec.PciDeviceSpec, pci_info) msg = ('Invalid PCI devices Whitelist config invalid bus %x' % (devspec.MAX_BUS + 1)) self.assertEqual(msg, six.text_type(exc)) def test_max_slot(self): pci_info = {"address": "0000:0a:%x.5" % (devspec.MAX_SLOT + 1), "physical_network": "hr_net"} exc = self.assertRaises(exception.PciConfigInvalidWhitelist, devspec.PciDeviceSpec, pci_info) msg = ('Invalid PCI devices Whitelist config invalid slot %x' % (devspec.MAX_SLOT + 1)) self.assertEqual(msg, six.text_type(exc)) def test_address_is_undefined(self): pci_info = {"vendor_id": "8086", "product_id": "5057"} pci = devspec.PciDeviceSpec(pci_info) self.assertTrue(pci.match(dev)) def test_partial_address(self): pci_info = {"address": ":0a:00.", "physical_network": "hr_net"} pci = devspec.PciDeviceSpec(pci_info) dev = {"vendor_id": "1137", "product_id": "0071", "address": "0000:0a:00.5", "phys_function": "0000:0a:00.0"} self.assertTrue(pci.match(dev)) @mock.patch('nova.pci.utils.is_physical_function', return_value = True) def test_address_is_pf(self, mock_is_physical_function): pci_info = {"address": "0000:0a:00.0", "physical_network": "hr_net"} pci = devspec.PciDeviceSpec(pci_info) self.assertTrue(pci.match(dev)) class 
PciDevSpecTestCase(test.NoDBTestCase): def test_spec_match(self): pci_info = {"vendor_id": "8086", "address": "*: *: *.5", "product_id": "5057", "physical_network": "hr_net"} pci = devspec.PciDeviceSpec(pci_info) self.assertTrue(pci.match(dev)) def test_invalid_vendor_id(self): pci_info = {"vendor_id": "8087", "address": "*: *: *.5", "product_id": "5057", "physical_network": "hr_net"} pci = devspec.PciDeviceSpec(pci_info) self.assertFalse(pci.match(dev)) def test_vendor_id_out_of_range(self): pci_info = {"vendor_id": "80860", "address": "*:*:*.5", "product_id": "5057", "physical_network": "hr_net"} exc = self.assertRaises(exception.PciConfigInvalidWhitelist, devspec.PciDeviceSpec, pci_info) self.assertEqual("Invalid PCI devices Whitelist config " "invalid vendor_id 80860", six.text_type(exc)) def test_invalid_product_id(self): pci_info = {"vendor_id": "8086", "address": "*: *: *.5", "product_id": "5056", "physical_network": "hr_net"} pci = devspec.PciDeviceSpec(pci_info) self.assertFalse(pci.match(dev)) def test_product_id_out_of_range(self): pci_info = {"vendor_id": "8086", "address": "*:*:*.5", "product_id": "50570", "physical_network": "hr_net"} exc = self.assertRaises(exception.PciConfigInvalidWhitelist, devspec.PciDeviceSpec, pci_info) self.assertEqual("Invalid PCI devices Whitelist config " "invalid product_id 50570", six.text_type(exc)) def test_devname_and_address(self): pci_info = {"devname": "eth0", "vendor_id": "8086", "address": "*:*:*.5", "physical_network": "hr_net"} self.assertRaises(exception.PciDeviceInvalidDeviceName, devspec.PciDeviceSpec, pci_info) @mock.patch('nova.pci.utils.get_function_by_ifname', return_value = ("0000:0a:00.0", True)) def test_by_name(self, mock_get_function_by_ifname): pci_info = {"devname": "eth0", "physical_network": "hr_net"} pci = devspec.PciDeviceSpec(pci_info) self.assertTrue(pci.match(dev)) @mock.patch('nova.pci.utils.get_function_by_ifname', return_value = (None, False)) def test_invalid_name(self, mock_get_function_by_ifname): pci_info = {"devname": "lo", "physical_network": "hr_net"} exc = self.assertRaises(exception.PciDeviceNotFoundById, devspec.PciDeviceSpec, pci_info) self.assertEqual('PCI device lo not found', six.text_type(exc)) def test_pci_obj(self): pci_info = {"vendor_id": "8086", "address": "*:*:*.5", "product_id": "5057", "physical_network": "hr_net"} pci = devspec.PciDeviceSpec(pci_info) pci_dev = { 'compute_node_id': 1, 'address': '0000:00:00.5', 'product_id': '5057', 'vendor_id': '8086', 'status': 'available', 'extra_k1': 'v1', } pci_obj = objects.PciDevice.create(pci_dev) self.assertTrue(pci.match_pci_obj(pci_obj))
apache-2.0
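The test cases above exercise devspec.PciDeviceSpec's bounds checking on domain:bus:slot.function PCI addresses. A hypothetical re-implementation of just the concrete-address check, for illustration (wildcard and partial addresses, which PciDeviceSpec also accepts, are out of scope; the bounds are the PCI-defined field widths):

import re

_PCI_ADDR_RE = re.compile(
    r'^([0-9a-fA-F]{1,4}):([0-9a-fA-F]{1,2}):([0-9a-fA-F]{1,2})'
    r'\.([0-9a-fA-F])$')

def is_valid_pci_address(address):
    match = _PCI_ADDR_RE.match(address)
    if not match:
        return False
    domain, bus, slot, func = (int(g, 16) for g in match.groups())
    # Field widths: 16-bit domain, 8-bit bus, 5-bit slot, 3-bit function.
    return domain <= 0xffff and bus <= 0xff and slot <= 0x1f and func <= 7

# is_valid_pci_address('0000:0a:00.5')    -> True
# is_valid_pci_address('0000:0a:0b:00.5') -> False (too many fields)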
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/test/test_memoryio.py
6
28418
"""Unit tests for memory-based file-like objects. StringIO -- for unicode strings BytesIO -- for bytes """ from __future__ import unicode_literals from __future__ import print_function import unittest from test import test_support as support import io import _pyio as pyio import pickle class MemorySeekTestMixin: def testInit(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) def testRead(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) self.assertEqual(buf[:1], bytesIo.read(1)) self.assertEqual(buf[1:5], bytesIo.read(4)) self.assertEqual(buf[5:], bytesIo.read(900)) self.assertEqual(self.EOF, bytesIo.read()) def testReadNoArgs(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) self.assertEqual(buf, bytesIo.read()) self.assertEqual(self.EOF, bytesIo.read()) def testSeek(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) bytesIo.read(5) bytesIo.seek(0) self.assertEqual(buf, bytesIo.read()) bytesIo.seek(3) self.assertEqual(buf[3:], bytesIo.read()) self.assertRaises(TypeError, bytesIo.seek, 0.0) def testTell(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) self.assertEqual(0, bytesIo.tell()) bytesIo.seek(5) self.assertEqual(5, bytesIo.tell()) bytesIo.seek(10000) self.assertEqual(10000, bytesIo.tell()) class MemoryTestMixin: def test_detach(self): buf = self.ioclass() self.assertRaises(self.UnsupportedOperation, buf.detach) def write_ops(self, f, t): self.assertEqual(f.write(t("blah.")), 5) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(t("Hello.")), 6) self.assertEqual(f.tell(), 6) self.assertEqual(f.seek(5), 5) self.assertEqual(f.tell(), 5) self.assertEqual(f.write(t(" world\n\n\n")), 9) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(t("h")), 1) self.assertEqual(f.truncate(12), 12) self.assertEqual(f.tell(), 1) def test_write(self): buf = self.buftype("hello world\n") memio = self.ioclass(buf) self.write_ops(memio, self.buftype) self.assertEqual(memio.getvalue(), buf) memio = self.ioclass() self.write_ops(memio, self.buftype) self.assertEqual(memio.getvalue(), buf) self.assertRaises(TypeError, memio.write, None) memio.close() self.assertRaises(ValueError, memio.write, self.buftype("")) def test_writelines(self): buf = self.buftype("1234567890") memio = self.ioclass() self.assertEqual(memio.writelines([buf] * 100), None) self.assertEqual(memio.getvalue(), buf * 100) memio.writelines([]) self.assertEqual(memio.getvalue(), buf * 100) memio = self.ioclass() self.assertRaises(TypeError, memio.writelines, [buf] + [1]) self.assertEqual(memio.getvalue(), buf) self.assertRaises(TypeError, memio.writelines, None) memio.close() self.assertRaises(ValueError, memio.writelines, []) def test_writelines_error(self): memio = self.ioclass() def error_gen(): yield self.buftype('spam') raise KeyboardInterrupt self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen()) def test_truncate(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertRaises(ValueError, memio.truncate, -1) memio.seek(6) self.assertEqual(memio.truncate(), 6) self.assertEqual(memio.getvalue(), buf[:6]) self.assertEqual(memio.truncate(4), 4) self.assertEqual(memio.getvalue(), buf[:4]) # truncate() accepts long objects self.assertEqual(memio.truncate(4L), 4) self.assertEqual(memio.getvalue(), buf[:4]) self.assertEqual(memio.tell(), 6) memio.seek(0, 2) memio.write(buf) self.assertEqual(memio.getvalue(), buf[:4] + buf) pos = memio.tell() self.assertEqual(memio.truncate(None), pos) self.assertEqual(memio.tell(), 
pos) self.assertRaises(TypeError, memio.truncate, '0') memio.close() self.assertRaises(ValueError, memio.truncate, 0) def test_init(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.getvalue(), buf) memio = self.ioclass(None) self.assertEqual(memio.getvalue(), self.EOF) memio.__init__(buf * 2) self.assertEqual(memio.getvalue(), buf * 2) memio.__init__(buf) self.assertEqual(memio.getvalue(), buf) def test_read(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.read(0), self.EOF) self.assertEqual(memio.read(1), buf[:1]) # read() accepts long objects self.assertEqual(memio.read(4L), buf[1:5]) self.assertEqual(memio.read(900), buf[5:]) self.assertEqual(memio.read(), self.EOF) memio.seek(0) self.assertEqual(memio.read(), buf) self.assertEqual(memio.read(), self.EOF) self.assertEqual(memio.tell(), 10) memio.seek(0) self.assertEqual(memio.read(-1), buf) memio.seek(0) self.assertEqual(type(memio.read()), type(buf)) memio.seek(100) self.assertEqual(type(memio.read()), type(buf)) memio.seek(0) self.assertEqual(memio.read(None), buf) self.assertRaises(TypeError, memio.read, '') memio.close() self.assertRaises(ValueError, memio.read) def test_readline(self): buf = self.buftype("1234567890\n") memio = self.ioclass(buf * 2) self.assertEqual(memio.readline(0), self.EOF) self.assertEqual(memio.readline(), buf) self.assertEqual(memio.readline(), buf) self.assertEqual(memio.readline(), self.EOF) memio.seek(0) self.assertEqual(memio.readline(5), buf[:5]) # readline() accepts long objects self.assertEqual(memio.readline(5L), buf[5:10]) self.assertEqual(memio.readline(5), buf[10:15]) memio.seek(0) self.assertEqual(memio.readline(-1), buf) memio.seek(0) self.assertEqual(memio.readline(0), self.EOF) buf = self.buftype("1234567890\n") memio = self.ioclass((buf * 3)[:-1]) self.assertEqual(memio.readline(), buf) self.assertEqual(memio.readline(), buf) self.assertEqual(memio.readline(), buf[:-1]) self.assertEqual(memio.readline(), self.EOF) memio.seek(0) self.assertEqual(type(memio.readline()), type(buf)) self.assertEqual(memio.readline(), buf) self.assertRaises(TypeError, memio.readline, '') memio.close() self.assertRaises(ValueError, memio.readline) def test_readlines(self): buf = self.buftype("1234567890\n") memio = self.ioclass(buf * 10) self.assertEqual(memio.readlines(), [buf] * 10) memio.seek(5) self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9) memio.seek(0) # readlines() accepts long objects self.assertEqual(memio.readlines(15L), [buf] * 2) memio.seek(0) self.assertEqual(memio.readlines(-1), [buf] * 10) memio.seek(0) self.assertEqual(memio.readlines(0), [buf] * 10) memio.seek(0) self.assertEqual(type(memio.readlines()[0]), type(buf)) memio.seek(0) self.assertEqual(memio.readlines(None), [buf] * 10) self.assertRaises(TypeError, memio.readlines, '') memio.close() self.assertRaises(ValueError, memio.readlines) def test_iterator(self): buf = self.buftype("1234567890\n") memio = self.ioclass(buf * 10) self.assertEqual(iter(memio), memio) self.assertTrue(hasattr(memio, '__iter__')) self.assertTrue(hasattr(memio, 'next')) i = 0 for line in memio: self.assertEqual(line, buf) i += 1 self.assertEqual(i, 10) memio.seek(0) i = 0 for line in memio: self.assertEqual(line, buf) i += 1 self.assertEqual(i, 10) memio = self.ioclass(buf * 2) memio.close() self.assertRaises(ValueError, next, memio) def test_getvalue(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.getvalue(), buf) memio.read() 
self.assertEqual(memio.getvalue(), buf) self.assertEqual(type(memio.getvalue()), type(buf)) memio = self.ioclass(buf * 1000) self.assertEqual(memio.getvalue()[-3:], self.buftype("890")) memio = self.ioclass(buf) memio.close() self.assertRaises(ValueError, memio.getvalue) def test_seek(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) memio.read(5) self.assertRaises(ValueError, memio.seek, -1) self.assertRaises(ValueError, memio.seek, 1, -1) self.assertRaises(ValueError, memio.seek, 1, 3) self.assertEqual(memio.seek(0), 0) self.assertEqual(memio.seek(0, 0), 0) self.assertEqual(memio.read(), buf) self.assertEqual(memio.seek(3), 3) # seek() accepts long objects self.assertEqual(memio.seek(3L), 3) self.assertEqual(memio.seek(0, 1), 3) self.assertEqual(memio.read(), buf[3:]) self.assertEqual(memio.seek(len(buf)), len(buf)) self.assertEqual(memio.read(), self.EOF) memio.seek(len(buf) + 1) self.assertEqual(memio.read(), self.EOF) self.assertEqual(memio.seek(0, 2), len(buf)) self.assertEqual(memio.read(), self.EOF) memio.close() self.assertRaises(ValueError, memio.seek, 0) def test_overseek(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.seek(len(buf) + 1), 11) self.assertEqual(memio.read(), self.EOF) self.assertEqual(memio.tell(), 11) self.assertEqual(memio.getvalue(), buf) memio.write(self.EOF) self.assertEqual(memio.getvalue(), buf) memio.write(buf) self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf) def test_tell(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.tell(), 0) memio.seek(5) self.assertEqual(memio.tell(), 5) memio.seek(10000) self.assertEqual(memio.tell(), 10000) memio.close() self.assertRaises(ValueError, memio.tell) def test_flush(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.flush(), None) def test_flags(self): memio = self.ioclass() self.assertEqual(memio.writable(), True) self.assertEqual(memio.readable(), True) self.assertEqual(memio.seekable(), True) self.assertEqual(memio.isatty(), False) self.assertEqual(memio.closed, False) memio.close() self.assertRaises(ValueError, memio.writable) self.assertRaises(ValueError, memio.readable) self.assertRaises(ValueError, memio.seekable) self.assertRaises(ValueError, memio.isatty) self.assertEqual(memio.closed, True) def test_subclassing(self): buf = self.buftype("1234567890") def test1(): class MemIO(self.ioclass): pass m = MemIO(buf) return m.getvalue() def test2(): class MemIO(self.ioclass): def __init__(me, a, b): self.ioclass.__init__(me, a) m = MemIO(buf, None) return m.getvalue() self.assertEqual(test1(), buf) self.assertEqual(test2(), buf) def test_instance_dict_leak(self): # Test case for issue #6242. # This will be caught by regrtest.py -R if this leak. for _ in range(100): memio = self.ioclass() memio.foo = 1 def test_pickling(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) memio.foo = 42 memio.seek(2) class PickleTestMemIO(self.ioclass): def __init__(me, initvalue, foo): self.ioclass.__init__(me, initvalue) me.foo = foo # __getnewargs__ is undefined on purpose. This checks that PEP 307 # is used to provide pickling support. # Pickle expects the class to be on the module level. Here we use a # little hack to allow the PickleTestMemIO class to derive from # self.ioclass without having to define all combinations explicitly on # the module-level. 
import __main__ PickleTestMemIO.__module__ = '__main__' PickleTestMemIO.__qualname__ = PickleTestMemIO.__name__ __main__.PickleTestMemIO = PickleTestMemIO submemio = PickleTestMemIO(buf, 80) submemio.seek(2) # We only support pickle protocol 2 and onward since we use extended # __reduce__ API of PEP 307 to provide pickling support. for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): for obj in (memio, submemio): obj2 = pickle.loads(pickle.dumps(obj, protocol=proto)) self.assertEqual(obj.getvalue(), obj2.getvalue()) self.assertEqual(obj.__class__, obj2.__class__) self.assertEqual(obj.foo, obj2.foo) self.assertEqual(obj.tell(), obj2.tell()) obj.close() self.assertRaises(ValueError, pickle.dumps, obj, proto) del __main__.PickleTestMemIO class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin, unittest.TestCase): # Test _pyio.BytesIO; class also inherited for testing C implementation UnsupportedOperation = pyio.UnsupportedOperation @staticmethod def buftype(s): return s.encode("ascii") ioclass = pyio.BytesIO EOF = b"" def test_read1(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertRaises(TypeError, memio.read1) self.assertEqual(memio.read(), buf) def test_readinto(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) b = bytearray(b"hello") self.assertEqual(memio.readinto(b), 5) self.assertEqual(b, b"12345") self.assertEqual(memio.readinto(b), 5) self.assertEqual(b, b"67890") self.assertEqual(memio.readinto(b), 0) self.assertEqual(b, b"67890") b = bytearray(b"hello world") memio.seek(0) self.assertEqual(memio.readinto(b), 10) self.assertEqual(b, b"1234567890d") b = bytearray(b"") memio.seek(0) self.assertEqual(memio.readinto(b), 0) self.assertEqual(b, b"") self.assertRaises(TypeError, memio.readinto, '') import array a = array.array(b'b', b"hello world") memio = self.ioclass(buf) memio.readinto(a) self.assertEqual(a.tostring(), b"1234567890d") memio.close() self.assertRaises(ValueError, memio.readinto, b) memio = self.ioclass(b"123") b = bytearray() memio.seek(42) memio.readinto(b) self.assertEqual(b, b"") def test_relative_seek(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.seek(-1, 1), 0) self.assertEqual(memio.seek(3, 1), 3) self.assertEqual(memio.seek(-4, 1), 0) self.assertEqual(memio.seek(-1, 2), 9) self.assertEqual(memio.seek(1, 1), 10) self.assertEqual(memio.seek(1, 2), 11) memio.seek(-3, 2) self.assertEqual(memio.read(), buf[-3:]) memio.seek(0) memio.seek(1, 1) self.assertEqual(memio.read(), buf[1:]) def test_unicode(self): memio = self.ioclass() self.assertRaises(TypeError, self.ioclass, "1234567890") self.assertRaises(TypeError, memio.write, "1234567890") self.assertRaises(TypeError, memio.writelines, ["1234567890"]) def test_bytes_array(self): buf = b"1234567890" import array a = array.array(b'b', buf) memio = self.ioclass(a) self.assertEqual(memio.getvalue(), buf) self.assertEqual(memio.write(a), 10) self.assertEqual(memio.getvalue(), buf) def test_issue5449(self): buf = self.buftype("1234567890") self.ioclass(initial_bytes=buf) self.assertRaises(TypeError, self.ioclass, buf, foo=None) class TextIOTestMixin: def test_newlines_property(self): memio = self.ioclass(newline=None) # The C StringIO decodes newlines in write() calls, but the Python # implementation only does when reading. This function forces them to # be decoded for testing. 
def force_decode(): memio.seek(0) memio.read() self.assertEqual(memio.newlines, None) memio.write("a\n") force_decode() self.assertEqual(memio.newlines, "\n") memio.write("b\r\n") force_decode() self.assertEqual(memio.newlines, ("\n", "\r\n")) memio.write("c\rd") force_decode() self.assertEqual(memio.newlines, ("\r", "\n", "\r\n")) def test_relative_seek(self): memio = self.ioclass() self.assertRaises(IOError, memio.seek, -1, 1) self.assertRaises(IOError, memio.seek, 3, 1) self.assertRaises(IOError, memio.seek, -3, 1) self.assertRaises(IOError, memio.seek, -1, 2) self.assertRaises(IOError, memio.seek, 1, 1) self.assertRaises(IOError, memio.seek, 1, 2) def test_textio_properties(self): memio = self.ioclass() # These are just dummy values but we nevertheless check them for fear # of unexpected breakage. self.assertIsNone(memio.encoding) self.assertIsNone(memio.errors) self.assertFalse(memio.line_buffering) def test_newline_default(self): memio = self.ioclass("a\nb\r\nc\rd") self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") memio = self.ioclass() self.assertEqual(memio.write("a\nb\r\nc\rd"), 8) memio.seek(0) self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") def test_newline_none(self): # newline=None memio = self.ioclass("a\nb\r\nc\rd", newline=None) self.assertEqual(list(memio), ["a\n", "b\n", "c\n", "d"]) memio.seek(0) self.assertEqual(memio.read(1), "a") self.assertEqual(memio.read(2), "\nb") self.assertEqual(memio.read(2), "\nc") self.assertEqual(memio.read(1), "\n") self.assertEqual(memio.getvalue(), "a\nb\nc\nd") memio = self.ioclass(newline=None) self.assertEqual(2, memio.write("a\n")) self.assertEqual(3, memio.write("b\r\n")) self.assertEqual(3, memio.write("c\rd")) memio.seek(0) self.assertEqual(memio.read(), "a\nb\nc\nd") self.assertEqual(memio.getvalue(), "a\nb\nc\nd") memio = self.ioclass("a\r\nb", newline=None) self.assertEqual(memio.read(3), "a\nb") def test_newline_empty(self): # newline="" memio = self.ioclass("a\nb\r\nc\rd", newline="") self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"]) memio.seek(0) self.assertEqual(memio.read(4), "a\nb\r") self.assertEqual(memio.read(2), "\nc") self.assertEqual(memio.read(1), "\r") self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") memio = self.ioclass(newline="") self.assertEqual(2, memio.write("a\n")) self.assertEqual(2, memio.write("b\r")) self.assertEqual(2, memio.write("\nc")) self.assertEqual(2, memio.write("\rd")) memio.seek(0) self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") def test_newline_lf(self): # newline="\n" memio = self.ioclass("a\nb\r\nc\rd", newline="\n") self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") memio = self.ioclass(newline="\n") self.assertEqual(memio.write("a\nb\r\nc\rd"), 8) memio.seek(0) self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") def test_newline_cr(self): # newline="\r" memio = self.ioclass("a\nb\r\nc\rd", newline="\r") self.assertEqual(memio.read(), "a\rb\r\rc\rd") memio.seek(0) self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"]) self.assertEqual(memio.getvalue(), "a\rb\r\rc\rd") memio = self.ioclass(newline="\r") self.assertEqual(memio.write("a\nb\r\nc\rd"), 8) memio.seek(0) self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"]) memio.seek(0) self.assertEqual(memio.readlines(), ["a\r", 
"b\r", "\r", "c\r", "d"]) self.assertEqual(memio.getvalue(), "a\rb\r\rc\rd") def test_newline_crlf(self): # newline="\r\n" memio = self.ioclass("a\nb\r\nc\rd", newline="\r\n") self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd") memio.seek(0) self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"]) memio.seek(0) self.assertEqual(memio.readlines(), ["a\r\n", "b\r\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\r\nb\r\r\nc\rd") memio = self.ioclass(newline="\r\n") self.assertEqual(memio.write("a\nb\r\nc\rd"), 8) memio.seek(0) self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\r\nb\r\r\nc\rd") def test_issue5265(self): # StringIO can duplicate newlines in universal newlines mode memio = self.ioclass("a\r\nb\r\n", newline=None) self.assertEqual(memio.read(5), "a\nb\n") self.assertEqual(memio.getvalue(), "a\nb\n") class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin, TextIOTestMixin, unittest.TestCase): buftype = unicode ioclass = pyio.StringIO UnsupportedOperation = pyio.UnsupportedOperation EOF = "" def test_lone_surrogates(self): # Issue #20424 surrogate = unichr(0xd800) memio = self.ioclass(surrogate) self.assertEqual(memio.read(), surrogate) memio = self.ioclass() memio.write(surrogate) self.assertEqual(memio.getvalue(), surrogate) class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase): """Test if pickle restores properly the internal state of StringIO. """ buftype = unicode UnsupportedOperation = pyio.UnsupportedOperation EOF = "" class ioclass(pyio.StringIO): def __new__(cls, *args, **kwargs): return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs))) def __init__(self, *args, **kwargs): pass class CBytesIOTest(PyBytesIOTest): ioclass = io.BytesIO UnsupportedOperation = io.UnsupportedOperation test_bytes_array = unittest.skip( "array.array() does not have the new buffer API" )(PyBytesIOTest.test_bytes_array) def test_getstate(self): memio = self.ioclass() state = memio.__getstate__() self.assertEqual(len(state), 3) bytearray(state[0]) # Check if state[0] supports the buffer interface. self.assertIsInstance(state[1], int) if state[2] is not None: self.assertIsInstance(state[2], dict) memio.close() self.assertRaises(ValueError, memio.__getstate__) def test_setstate(self): # This checks whether __setstate__ does proper input validation. 
memio = self.ioclass() memio.__setstate__((b"no error", 0, None)) memio.__setstate__((bytearray(b"no error"), 0, None)) memio.__setstate__((b"no error", 0, {'spam': 3})) self.assertRaises(ValueError, memio.__setstate__, (b"", -1, None)) self.assertRaises(TypeError, memio.__setstate__, ("unicode", 0, None)) self.assertRaises(TypeError, memio.__setstate__, (b"", 0.0, None)) self.assertRaises(TypeError, memio.__setstate__, (b"", 0, 0)) self.assertRaises(TypeError, memio.__setstate__, (b"len-test", 0)) self.assertRaises(TypeError, memio.__setstate__) self.assertRaises(TypeError, memio.__setstate__, 0) memio.close() self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None)) check_sizeof = support.check_sizeof @support.cpython_only def test_sizeof(self): basesize = support.calcobjsize(b'P2PP2P') check = self.check_sizeof self.assertEqual(object.__sizeof__(io.BytesIO()), basesize) check(io.BytesIO(), basesize ) check(io.BytesIO(b'a'), basesize + 1 + 1 ) check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 ) class CStringIOTest(PyStringIOTest): ioclass = io.StringIO UnsupportedOperation = io.UnsupportedOperation # XXX: For the Python version of io.StringIO, this is highly # dependent on the encoding used for the underlying buffer. def test_widechar(self): buf = self.buftype("\U0002030a\U00020347") memio = self.ioclass(buf) self.assertEqual(memio.getvalue(), buf) self.assertEqual(memio.write(buf), len(buf)) self.assertEqual(memio.tell(), len(buf)) self.assertEqual(memio.getvalue(), buf) self.assertEqual(memio.write(buf), len(buf)) self.assertEqual(memio.tell(), len(buf) * 2) self.assertEqual(memio.getvalue(), buf + buf) def test_getstate(self): memio = self.ioclass() state = memio.__getstate__() self.assertEqual(len(state), 4) self.assertIsInstance(state[0], unicode) self.assertIsInstance(state[1], str) self.assertIsInstance(state[2], int) if state[3] is not None: self.assertIsInstance(state[3], dict) memio.close() self.assertRaises(ValueError, memio.__getstate__) def test_setstate(self): # This checks whether __setstate__ does proper input validation. memio = self.ioclass() memio.__setstate__(("no error", "\n", 0, None)) memio.__setstate__(("no error", "", 0, {'spam': 3})) self.assertRaises(ValueError, memio.__setstate__, ("", "f", 0, None)) self.assertRaises(ValueError, memio.__setstate__, ("", "", -1, None)) self.assertRaises(TypeError, memio.__setstate__, (b"", "", 0, None)) # trunk is more tolerant than py3k on the type of the newline param #self.assertRaises(TypeError, memio.__setstate__, ("", b"", 0, None)) self.assertRaises(TypeError, memio.__setstate__, ("", "", 0.0, None)) self.assertRaises(TypeError, memio.__setstate__, ("", "", 0, 0)) self.assertRaises(TypeError, memio.__setstate__, ("len-test", 0)) self.assertRaises(TypeError, memio.__setstate__) self.assertRaises(TypeError, memio.__setstate__, 0) memio.close() self.assertRaises(ValueError, memio.__setstate__, ("closed", "", 0, None)) class CStringIOPickleTest(PyStringIOPickleTest): UnsupportedOperation = io.UnsupportedOperation class ioclass(io.StringIO): def __new__(cls, *args, **kwargs): return pickle.loads(pickle.dumps(io.StringIO(*args, **kwargs), protocol=2)) def __init__(self, *args, **kwargs): pass def test_main(): tests = [PyBytesIOTest, PyStringIOTest, CBytesIOTest, CStringIOTest, PyStringIOPickleTest, CStringIOPickleTest] support.run_unittest(*tests) if __name__ == '__main__': test_main()
gpl-3.0
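The overseek behaviour exercised by test_overseek above is easy to check by hand with the stdlib io module. A minimal sketch (Python 3 syntax, whereas the tests above target Python 2):

import io

buf = b"1234567890"
memio = io.BytesIO(buf)
memio.seek(len(buf) + 1)         # seeking past EOF is allowed
assert memio.read() == b""       # reads at that position just hit EOF
assert memio.getvalue() == buf   # the buffer is unchanged until a write...
memio.write(buf)                 # ...which zero-fills the gap first
assert memio.getvalue() == buf + b"\x00" + buf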
noironetworks/python-neutronclient
neutronclient/tests/unit/osc/v2/dynamic_routing/test_bgp_peer.py
2
4946
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from neutronclient.osc.v2.dynamic_routing import bgp_peer from neutronclient.tests.unit.osc.v2.dynamic_routing import fakes class TestListBgpPeer(fakes.TestNeutronDynamicRoutingOSCV2): _bgp_peers = fakes.FakeBgpPeer.create_bgp_peers(count=1) columns = ('ID', 'Name', 'Peer IP', 'Remote AS') data = [] for _bgp_peer in _bgp_peers['bgp_peers']: data.append(( _bgp_peer['id'], _bgp_peer['name'], _bgp_peer['peer_ip'], _bgp_peer['remote_as'])) def setUp(self): super(TestListBgpPeer, self).setUp() self.neutronclient.list_bgp_peers = mock.Mock( return_value=self._bgp_peers ) # Get the command object to test self.cmd = bgp_peer.ListBgpPeer(self.app, self.namespace) def test_bgp_peer_list(self): parsed_args = self.check_parser(self.cmd, [], []) columns, data = self.cmd.take_action(parsed_args) self.neutronclient.list_bgp_peers.assert_called_once_with() self.assertEqual(self.columns, columns) self.assertEqual(self.data, list(data)) class TestDeleteBgpPeer(fakes.TestNeutronDynamicRoutingOSCV2): _bgp_peer = fakes.FakeBgpPeer.create_one_bgp_peer() def setUp(self): super(TestDeleteBgpPeer, self).setUp() self.neutronclient.delete_bgp_peer = mock.Mock(return_value=None) self.cmd = bgp_peer.DeleteBgpPeer(self.app, self.namespace) def test_delete_bgp_peer(self): arglist = [ self._bgp_peer['name'], ] verifylist = [ ('bgp_peer', self._bgp_peer['name']), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.neutronclient.delete_bgp_peer.assert_called_once_with( self._bgp_peer['name']) self.assertIsNone(result) class TestShowBgpPeer(fakes.TestNeutronDynamicRoutingOSCV2): _one_bgp_peer = fakes.FakeBgpPeer.create_one_bgp_peer() data = ( _one_bgp_peer['auth_type'], _one_bgp_peer['id'], _one_bgp_peer['name'], _one_bgp_peer['peer_ip'], _one_bgp_peer['remote_as'], _one_bgp_peer['tenant_id'] ) _bgp_peer = {'bgp_peer': _one_bgp_peer} _bgp_peer_name = _one_bgp_peer['name'] columns = ( 'auth_type', 'id', 'name', 'peer_ip', 'remote_as', 'tenant_id' ) def setUp(self): super(TestShowBgpPeer, self).setUp() self.neutronclient.show_bgp_peer = mock.Mock( return_value=self._bgp_peer ) bgp_peer.get_bgp_peer_id = mock.Mock(return_value=self._bgp_peer_name) # Get the command object to test self.cmd = bgp_peer.ShowBgpPeer(self.app, self.namespace) def test_bgp_peer_list(self): arglist = [ self._bgp_peer_name, ] verifylist = [ ('bgp_peer', self._bgp_peer_name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) data = self.cmd.take_action(parsed_args) self.neutronclient.show_bgp_peer.assert_called_once_with( self._bgp_peer_name) self.assertEqual(self.columns, data[0]) self.assertEqual(self.data, data[1]) class TestSetBgpPeer(fakes.TestNeutronDynamicRoutingOSCV2): _one_bgp_peer = fakes.FakeBgpPeer.create_one_bgp_peer() _bgp_peer_name = _one_bgp_peer['name'] def setUp(self): super(TestSetBgpPeer, self).setUp() self.neutronclient.update_bgp_peer = mock.Mock(return_value=None) bgp_peer.get_bgp_peer_id = 
mock.Mock(return_value=self._bgp_peer_name) self.cmd = bgp_peer.SetBgpPeer(self.app, self.namespace) def test_set_bgp_peer(self): arglist = [ self._bgp_peer_name, '--name', 'noob', ] verifylist = [ ('bgp_peer', self._bgp_peer_name), ('name', 'noob'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {'bgp_peer': { 'name': 'noob', 'password': None} } self.neutronclient.update_bgp_peer.assert_called_once_with( self._bgp_peer_name, attrs) self.assertIsNone(result)
apache-2.0
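The tests above all follow the same mock-and-verify pattern: patch a client method, run the command, then assert the exact call. A self-contained sketch of that pattern (using unittest.mock rather than the standalone mock package the file imports):

from unittest import mock

client = mock.Mock()
client.delete_bgp_peer.return_value = None

client.delete_bgp_peer('peer-name')   # the code under test would make this call
client.delete_bgp_peer.assert_called_once_with('peer-name')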
MakeHer/edx-platform
openedx/core/lib/xblock_utils.py
11
15939
""" Functions that can are used to modify XBlock fragments for use in the LMS and Studio """ import datetime import json import logging import markupsafe import re import static_replace import uuid from lxml import html, etree from contracts import contract from django.conf import settings from django.utils.timezone import UTC from django.utils.html import escape from django.contrib.auth.models import User from edxmako.shortcuts import render_to_string from xblock.core import XBlock from xblock.exceptions import InvalidScopeError from xblock.fragment import Fragment from xmodule.seq_module import SequenceModule from xmodule.vertical_block import VerticalBlock from xmodule.x_module import shim_xmodule_js, XModuleDescriptor, XModule, PREVIEW_VIEWS, STUDIO_VIEW from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore log = logging.getLogger(__name__) def wrap_fragment(fragment, new_content): """ Returns a new Fragment that has `new_content` and all as its content, and all of the resources from fragment """ wrapper_frag = Fragment(content=new_content) wrapper_frag.add_frag_resources(fragment) return wrapper_frag def request_token(request): """ Return a unique token for the supplied request. This token will be the same for all calls to `request_token` made on the same request object. """ # pylint: disable=protected-access if not hasattr(request, '_xblock_token'): request._xblock_token = uuid.uuid1().get_hex() return request._xblock_token def wrap_xblock( runtime_class, block, view, frag, context, # pylint: disable=unused-argument usage_id_serializer, request_token, # pylint: disable=redefined-outer-name display_name_only=False, extra_data=None ): """ Wraps the results of rendering an XBlock view in a standard <section> with identifying data so that the appropriate javascript module can be loaded onto it. :param runtime_class: The name of the javascript runtime class to use to load this block :param block: An XBlock (that may be an XModule or XModuleDescriptor) :param view: The name of the view that rendered the fragment being wrapped :param frag: The :class:`Fragment` to be wrapped :param context: The context passed to the view being rendered :param usage_id_serializer: A function to serialize the block's usage_id for use by the front-end Javascript Runtime. :param request_token: An identifier that is unique per-request, so that only xblocks rendered as part of this request will have their javascript initialized. :param display_name_only: If true, don't render the fragment content at all. 
Instead, just render the `display_name` of `block` :param extra_data: A dictionary with extra data values to be set on the wrapper """ if extra_data is None: extra_data = {} # If any mixins have been applied, then use the unmixed class class_name = getattr(block, 'unmixed_class', block.__class__).__name__ data = {} data.update(extra_data) css_classes = [ 'xblock', 'xblock-{}'.format(markupsafe.escape(view)), 'xblock-{}-{}'.format( markupsafe.escape(view), markupsafe.escape(block.scope_ids.block_type), ) ] if isinstance(block, (XModule, XModuleDescriptor)): if view in PREVIEW_VIEWS: # The block is acting as an XModule css_classes.append('xmodule_display') elif view == STUDIO_VIEW: # The block is acting as an XModuleDescriptor css_classes.append('xmodule_edit') if getattr(block, 'HIDDEN', False): css_classes.append('is-hidden') css_classes.append('xmodule_' + markupsafe.escape(class_name)) data['type'] = block.js_module_name shim_xmodule_js(block, frag) if frag.js_init_fn: data['init'] = frag.js_init_fn data['runtime-class'] = runtime_class data['runtime-version'] = frag.js_init_version data['block-type'] = block.scope_ids.block_type data['usage-id'] = usage_id_serializer(block.scope_ids.usage_id) data['request-token'] = request_token if block.name: data['name'] = block.name template_context = { 'content': block.display_name if display_name_only else frag.content, 'classes': css_classes, 'display_name': block.display_name_with_default_escaped, 'data_attributes': u' '.join(u'data-{}="{}"'.format(markupsafe.escape(key), markupsafe.escape(value)) for key, value in data.iteritems()), } if hasattr(frag, 'json_init_args') and frag.json_init_args is not None: # Replace / with \/ so that "</script>" in the data won't break things. template_context['js_init_parameters'] = json.dumps(frag.json_init_args).replace("/", r"\/") else: template_context['js_init_parameters'] = "" return wrap_fragment(frag, render_to_string('xblock_wrapper.html', template_context)) def replace_jump_to_id_urls(course_id, jump_to_id_base_url, block, view, frag, context): # pylint: disable=unused-argument """ This will replace a link between courseware in the format /jump_to_id/<id> with a URL for a page that will correctly redirect This is similar to replace_course_urls, but much more flexible and durable for Studio authored courses. See more comments in static_replace.replace_jump_to_urls course_id: The course_id in which this rewrite happens jump_to_id_base_url: A app-tier (e.g. LMS) absolute path to the base of the handler that will perform the redirect. e.g. /courses/<org>/<course>/<run>/jump_to_id. NOTE the <id> will be appended to the end of this URL at re-write time output: a new :class:`~xblock.fragment.Fragment` that modifies `frag` with content that has been update with /jump_to_id links replaced """ return wrap_fragment(frag, static_replace.replace_jump_to_id_urls(frag.content, course_id, jump_to_id_base_url)) def replace_course_urls(course_id, block, view, frag, context): # pylint: disable=unused-argument """ Updates the supplied module with a new get_html function that wraps the old get_html function and substitutes urls of the form /course/... with urls that are /courses/<course_id>/... 
""" return wrap_fragment(frag, static_replace.replace_course_urls(frag.content, course_id)) def replace_static_urls(data_dir, block, view, frag, context, course_id=None, static_asset_path=''): # pylint: disable=unused-argument """ Updates the supplied module with a new get_html function that wraps the old get_html function and substitutes urls of the form /static/... with urls that are /static/<prefix>/... """ return wrap_fragment(frag, static_replace.replace_static_urls( frag.content, data_dir, course_id, static_asset_path=static_asset_path )) def grade_histogram(module_id): ''' Print out a histogram of grades on a given problem in staff member debug info. Warning: If a student has just looked at an xmodule and not attempted it, their grade is None. Since there will always be at least one such student this function almost always returns []. ''' from django.db import connection cursor = connection.cursor() query = """\ SELECT courseware_studentmodule.grade, COUNT(courseware_studentmodule.student_id) FROM courseware_studentmodule WHERE courseware_studentmodule.module_id=%s GROUP BY courseware_studentmodule.grade""" # Passing module_id this way prevents sql-injection. cursor.execute(query, [module_id.to_deprecated_string()]) grades = list(cursor.fetchall()) grades.sort(key=lambda x: x[0]) # Add ORDER BY to sql query? if len(grades) >= 1 and grades[0][0] is None: return [] return grades def sanitize_html_id(html_id): """ Template uses element_id in js function names, so can't allow dashes and colons. """ sanitized_html_id = re.sub(r'[:-]', '_', html_id) return sanitized_html_id @contract(user=User, has_instructor_access=bool, block=XBlock, view=basestring, frag=Fragment, context="dict|None") def add_staff_markup(user, has_instructor_access, disable_staff_debug_info, block, view, frag, context): # pylint: disable=unused-argument """ Updates the supplied module with a new get_html function that wraps the output of the old get_html function with additional information for admin users only, including a histogram of student answers, the definition of the xmodule, and a link to view the module in Studio if it is a Studio edited, mongo stored course. Does nothing if module is a SequenceModule. """ # TODO: make this more general, eg use an XModule attribute instead if isinstance(block, VerticalBlock) and (not context or not context.get('child_of_vertical', False)): # check that the course is a mongo backed Studio course before doing work is_mongo_course = modulestore().get_modulestore_type(block.location.course_key) != ModuleStoreEnum.Type.xml is_studio_course = block.course_edit_method == "Studio" if is_studio_course and is_mongo_course: # build edit link to unit in CMS. 
Can't use reverse here as lms doesn't load cms's urls.py edit_link = "//" + settings.CMS_BASE + '/container/' + unicode(block.location) # return edit link in rendered HTML for display return wrap_fragment( frag, render_to_string( "edit_unit_link.html", {'frag_content': frag.content, 'edit_link': edit_link} ) ) else: return frag if isinstance(block, SequenceModule) or getattr(block, 'HIDDEN', False): return frag block_id = block.location if block.has_score and settings.FEATURES.get('DISPLAY_HISTOGRAMS_TO_STAFF'): histogram = grade_histogram(block_id) render_histogram = len(histogram) > 0 else: histogram = None render_histogram = False if settings.FEATURES.get('ENABLE_LMS_MIGRATION') and hasattr(block.runtime, 'filestore'): [filepath, filename] = getattr(block, 'xml_attributes', {}).get('filename', ['', None]) osfs = block.runtime.filestore if filename is not None and osfs.exists(filename): # if original, unmangled filename exists then use it (github # doesn't like symlinks) filepath = filename data_dir = block.static_asset_path or osfs.root_path.rsplit('/')[-1] giturl = block.giturl or 'https://github.com/MITx' edit_link = "%s/%s/tree/master/%s" % (giturl, data_dir, filepath) else: edit_link = False # Need to define all the variables that are about to be used giturl = "" data_dir = "" source_file = block.source_file # source used to generate the problem XML, eg latex or word # Useful to indicate to staff if problem has been released or not. # TODO (ichuang): use _has_access_descriptor.can_load in lms.courseware.access, # instead of now>mstart comparison here. now = datetime.datetime.now(UTC()) is_released = "unknown" mstart = block.start if mstart is not None: is_released = "<font color='red'>Yes!</font>" if (now > mstart) else "<font color='green'>Not yet</font>" field_contents = [] for name, field in block.fields.items(): try: field_contents.append((name, field.read_from(block))) except InvalidScopeError: log.warning("Unable to read field in Staff Debug information", exc_info=True) field_contents.append((name, "WARNING: Unable to read field")) staff_context = { 'fields': field_contents, 'xml_attributes': getattr(block, 'xml_attributes', {}), 'tags': block._class_tags, # pylint: disable=protected-access 'location': block.location, 'xqa_key': block.xqa_key, 'source_file': source_file, 'source_url': '%s/%s/tree/master/%s' % (giturl, data_dir, source_file), 'category': str(block.__class__.__name__), 'element_id': sanitize_html_id(block.location.html_id()), 'edit_link': edit_link, 'user': user, 'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"), 'histogram': json.dumps(histogram), 'render_histogram': render_histogram, 'block_content': frag.content, 'is_released': is_released, 'has_instructor_access': has_instructor_access, 'disable_staff_debug_info': disable_staff_debug_info, } return wrap_fragment(frag, render_to_string("staff_problem_info.html", staff_context)) def get_course_update_items(course_updates, provided_index=0): """ Returns list of course_updates data dictionaries either from new format if available or from old. This function don't modify old data to new data (in db), instead returns data in common old dictionary format. New Format: {"items" : [{"id": computed_id, "date": date, "content": html-string}], "data": "<ol>[<li><h2>date</h2>content</li>]</ol>"} Old Format: {"data": "<ol>[<li><h2>date</h2>content</li>]</ol>"} """ def _course_info_content(html_parsed): """ Constructs the HTML for the course info update, not including the header. 
""" if len(html_parsed) == 1: # could enforce that update[0].tag == 'h2' content = html_parsed[0].tail else: content = html_parsed[0].tail if html_parsed[0].tail is not None else "" content += "\n".join([html.tostring(ele) for ele in html_parsed[1:]]) return content if course_updates and getattr(course_updates, "items", None): if provided_index and 0 < provided_index <= len(course_updates.items): return course_updates.items[provided_index - 1] else: # return list in reversed order (old format: [4,3,2,1]) for compatibility return list(reversed(course_updates.items)) course_update_items = [] if course_updates: # old method to get course updates # purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break. try: course_html_parsed = html.fromstring(course_updates.data) except (etree.XMLSyntaxError, etree.ParserError): log.error("Cannot parse: " + course_updates.data) escaped = escape(course_updates.data) course_html_parsed = html.fromstring("<ol><li>" + escaped + "</li></ol>") # confirm that root is <ol>, iterate over <li>, pull out <h2> subs and then rest of val if course_html_parsed.tag == 'ol': # 0 is the newest for index, update in enumerate(course_html_parsed): if len(update) > 0: content = _course_info_content(update) # make the id on the client be 1..len w/ 1 being the oldest and len being the newest computed_id = len(course_html_parsed) - index payload = { "id": computed_id, "date": update.findtext("h2"), "content": content } if provided_index == 0: course_update_items.append(payload) elif provided_index == computed_id: return payload return course_update_items
agpl-3.0
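Most helpers in the file above need a full XBlock runtime, but sanitize_html_id is a pure function and shows the idea on its own. A standalone copy for illustration:

import re

def sanitize_html_id(html_id):
    # same substitution as the helper above: ':' and '-' are unsafe in the
    # JS function names the templates build, so both become '_'
    return re.sub(r'[:-]', '_', html_id)

assert sanitize_html_id('block-v1:Org+Course') == 'block_v1_Org+Course'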
slightlymadphoenix/activityPointsApp
activitypoints/lib/python3.5/site-packages/django/db/backends/oracle/schema.py
56
5290
import binascii import copy import datetime import re from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.utils import DatabaseError from django.utils import six from django.utils.text import force_text class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s" sql_alter_column_type = "MODIFY %(column)s %(type)s" sql_alter_column_null = "MODIFY %(column)s NULL" sql_alter_column_not_null = "MODIFY %(column)s NOT NULL" sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s" sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS" def quote_value(self, value): if isinstance(value, (datetime.date, datetime.time, datetime.datetime)): return "'%s'" % value elif isinstance(value, six.string_types): return "'%s'" % six.text_type(value).replace("\'", "\'\'") elif isinstance(value, six.buffer_types): return "'%s'" % force_text(binascii.hexlify(value)) elif isinstance(value, bool): return "1" if value else "0" else: return str(value) def delete_model(self, model): # Run superclass action super(DatabaseSchemaEditor, self).delete_model(model) # Clean up any autoincrement trigger self.execute(""" DECLARE i INTEGER; BEGIN SELECT COUNT(1) INTO i FROM USER_SEQUENCES WHERE SEQUENCE_NAME = '%(sq_name)s'; IF i = 1 THEN EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"'; END IF; END; /""" % {'sq_name': self.connection.ops._get_sequence_name(model._meta.db_table)}) def alter_field(self, model, old_field, new_field, strict=False): try: super(DatabaseSchemaEditor, self).alter_field(model, old_field, new_field, strict) except DatabaseError as e: description = str(e) # If we're changing type to an unsupported type we need a # SQLite-ish workaround if 'ORA-22858' in description or 'ORA-22859' in description: self._alter_field_type_workaround(model, old_field, new_field) else: raise def _alter_field_type_workaround(self, model, old_field, new_field): """ Oracle refuses to change from some type to other type. What we need to do instead is: - Add a nullable version of the desired field with a temporary name - Update the table to transfer values from old to new - Drop old column - Rename the new column and possibly drop the nullable property """ # Make a new field that's like the new one but with a temporary # column name. new_temp_field = copy.deepcopy(new_field) new_temp_field.null = True new_temp_field.column = self._generate_temp_name(new_field.column) # Add it self.add_field(model, new_temp_field) # Explicit data type conversion # https://docs.oracle.com/cd/B19306_01/server.102/b14200/sql_elements002.htm#sthref340 new_value = self.quote_name(old_field.column) old_type = old_field.db_type(self.connection) if re.match('^N?CLOB', old_type): new_value = "TO_CHAR(%s)" % new_value old_type = 'VARCHAR2' if re.match('^N?VARCHAR2', old_type): new_internal_type = new_field.get_internal_type() if new_internal_type == 'DateField': new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value elif new_internal_type == 'DateTimeField': new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value elif new_internal_type == 'TimeField': # TimeField are stored as TIMESTAMP with a 1900-01-01 date part. 
new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value # Transfer values across self.execute("UPDATE %s set %s=%s" % ( self.quote_name(model._meta.db_table), self.quote_name(new_temp_field.column), new_value, )) # Drop the old field self.remove_field(model, old_field) # Rename and possibly make the new field NOT NULL super(DatabaseSchemaEditor, self).alter_field(model, new_temp_field, new_field) def normalize_name(self, name): """ Get the properly shortened and uppercased identifier as returned by quote_name(), but without the actual quotes. """ nn = self.quote_name(name) if nn[0] == '"' and nn[-1] == '"': nn = nn[1:-1] return nn def _generate_temp_name(self, for_name): """ Generates temporary names for workarounds that need temp columns """ suffix = hex(hash(for_name)).upper()[1:] return self.normalize_name(for_name + "_" + suffix) def prepare_default(self, value): return self.quote_value(value)
mit
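The quoting rules in quote_value above are self-contained enough to try outside Django. A Python 3 simplification (dropping the six shims and the buffer branch, which is an assumption about the intended behaviour):

import datetime

def quote_value(value):
    # mirrors DatabaseSchemaEditor.quote_value above
    if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
        return "'%s'" % value
    elif isinstance(value, str):
        return "'%s'" % value.replace("'", "''")  # double embedded quotes
    elif isinstance(value, bool):
        return '1' if value else '0'
    return str(value)

assert quote_value("O'Reilly") == "'O''Reilly'"
assert quote_value(True) == '1'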
ClusterLabs/pacemaker-1.0
shell/modules/idmgmt.py
2
3879
# Copyright (C) 2008 Dejan Muhamedagic <dmuhamedagic@suse.de> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # from vars import Vars from xmlutil import * from msg import * class IdMgmt(Singleton): ''' Make sure that ids are unique. ''' def __init__(self): self._id_store = {} self.ok = True # error var def new(self,node,pfx): ''' Create a unique id for the xml node. ''' name = node.getAttribute("name") if node.tagName == "nvpair": node_id = "%s-%s" % (pfx,name) elif node.tagName == "op": interval = node.getAttribute("interval") if interval: node_id = "%s-%s-%s" % (pfx,name,interval) else: node_id = "%s-%s" % (pfx,name) else: try: subpfx = vars.subpfx_list[node.tagName] except: subpfx = '' if subpfx: node_id = "%s-%s" % (pfx,subpfx) else: node_id = "%s" % pfx if self.is_used(node_id): for cnt in range(99): # shouldn't really get here try_id = "%s-%d" % (node_id,cnt) if not self.is_used(try_id): node_id = try_id break self.save(node_id) return node_id def check_node(self,node,lvl): node_id = node.getAttribute("id") if not node_id: return if self.id_in_use(node_id): common_error("id_store: id %s is in use" % node_id) self.ok = False return def _store_node(self,node,lvl): self.save(node.getAttribute("id")) def _drop_node(self,node,lvl): self.remove(node.getAttribute("id")) def check_xml(self,node): self.ok = True xmltraverse_thin(node,self.check_node) return self.ok def store_xml(self,node): if not self.check_xml(node): return False xmltraverse_thin(node,self._store_node) return True def remove_xml(self,node): xmltraverse_thin(node,self._drop_node) def replace_xml(self,oldnode,newnode): self.remove_xml(oldnode) if not self.store_xml(newnode): self.store_xml(oldnode) return False return True def is_used(self,node_id): return node_id in self._id_store def id_in_use(self,obj_id): if self.is_used(obj_id): id_used_err(obj_id) return True return False def save(self,node_id): if not node_id: return common_debug("id_store: saved %s" % node_id) self._id_store[node_id] = 1 def rename(self,old_id,new_id): if not old_id or not new_id: return if not self.is_used(old_id): return if self.is_used(new_id): return self.remove(old_id) self.save(new_id) def remove(self,node_id): if not node_id: return try: del self._id_store[node_id] common_debug("id_store: removed %s" % node_id) except KeyError: pass def clear(self): self._id_store = {} vars = Vars.getInstance() # vim:ts=4:sw=4:et:
gpl-2.0
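The collision strategy in IdMgmt.new() is worth seeing in isolation: if the computed id is taken, suffixes -0, -1, ... are tried until one is free. A minimal sketch of the same fallback loop:

def unique_id(base, used):
    # same strategy as IdMgmt.new() above
    if base not in used:
        return base
    for cnt in range(99):  # "shouldn't really get here", as the original notes
        candidate = "%s-%d" % (base, cnt)
        if candidate not in used:
            return candidate

used = {"rsc-meta"}
assert unique_id("rsc-meta", used) == "rsc-meta-0"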
ByteInternet/python-social-auth
social/backends/skyrock.py
83
1195
""" Skyrock OAuth1 backend, docs at: http://psa.matiasaguirre.net/docs/backends/skyrock.html """ from social.backends.oauth import BaseOAuth1 class SkyrockOAuth(BaseOAuth1): """Skyrock OAuth authentication backend""" name = 'skyrock' ID_KEY = 'id_user' AUTHORIZATION_URL = 'https://api.skyrock.com/v2/oauth/authenticate' REQUEST_TOKEN_URL = 'https://api.skyrock.com/v2/oauth/initiate' ACCESS_TOKEN_URL = 'https://api.skyrock.com/v2/oauth/token' EXTRA_DATA = [('id', 'id')] def get_user_details(self, response): """Return user details from Skyrock account""" fullname, first_name, last_name = self.get_user_names( first_name=response['firstname'], last_name=response['name'] ) return {'username': response['username'], 'email': response['email'], 'fullname': fullname, 'first_name': first_name, 'last_name': last_name} def user_data(self, access_token): """Return user data provided""" return self.get_json('https://api.skyrock.com/v2/user/get.json', auth=self.oauth_auth(access_token))
bsd-3-clause
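The only Skyrock-specific logic is the field mapping: the API's 'firstname'/'name' pair becomes first/last name. A sketch with a fabricated API response (all values illustrative):

response = {'username': 'jdoe', 'email': 'jdoe@example.com',
            'firstname': 'Jean', 'name': 'Dupont'}   # fabricated payload
fullname = '%s %s' % (response['firstname'], response['name'])
details = {'username': response['username'], 'email': response['email'],
           'fullname': fullname, 'first_name': response['firstname'],
           'last_name': response['name']}
assert details['fullname'] == 'Jean Dupont'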
neteler/QGIS
python/plugins/processing/core/ProcessingResults.py
34
1461
# -*- coding: utf-8 -*-

"""
***************************************************************************
    ProcessingResults.py
    ---------------------
    Date                 : August 2012
    Copyright            : (C) 2012 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'


class ProcessingResults:

    results = []

    @staticmethod
    def addResult(name, result):
        ProcessingResults.results.append(Result(name, result))

    @staticmethod
    def getResults():
        return ProcessingResults.results


class Result:

    def __init__(self, name, filename):
        self.name = name
        self.filename = filename
gpl-2.0
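Usage is a two-call affair: register an output, then iterate the registry. A sketch assuming the module is importable (i.e. running inside a QGIS/Processing session; the file path is hypothetical):

from processing.core.ProcessingResults import ProcessingResults

ProcessingResults.addResult('Buffered layer', '/tmp/buffered.shp')  # hypothetical path
for result in ProcessingResults.getResults():
    print(result.name, result.filename)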
mattdangerw/inkscape
share/extensions/bezmisc.py
6
8729
#!/usr/bin/env python ''' Copyright (C) 2010 Nick Drobchenko, nick@cnc-club.ru Copyright (C) 2005 Aaron Spike, aaron@ekips.org This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ''' import math, cmath def rootWrapper(a,b,c,d): if a: # Monics formula see http://en.wikipedia.org/wiki/Cubic_function#Monic_formula_of_roots a,b,c = (b/a, c/a, d/a) m = 2.0*a**3 - 9.0*a*b + 27.0*c k = a**2 - 3.0*b n = m**2 - 4.0*k**3 w1 = -.5 + .5*cmath.sqrt(-3.0) w2 = -.5 - .5*cmath.sqrt(-3.0) if n < 0: m1 = pow(complex((m+cmath.sqrt(n))/2),1./3) n1 = pow(complex((m-cmath.sqrt(n))/2),1./3) else: if m+math.sqrt(n) < 0: m1 = -pow(-(m+math.sqrt(n))/2,1./3) else: m1 = pow((m+math.sqrt(n))/2,1./3) if m-math.sqrt(n) < 0: n1 = -pow(-(m-math.sqrt(n))/2,1./3) else: n1 = pow((m-math.sqrt(n))/2,1./3) x1 = -1./3 * (a + m1 + n1) x2 = -1./3 * (a + w1*m1 + w2*n1) x3 = -1./3 * (a + w2*m1 + w1*n1) return (x1,x2,x3) elif b: det=c**2.0-4.0*b*d if det: return (-c+cmath.sqrt(det))/(2.0*b),(-c-cmath.sqrt(det))/(2.0*b) else: return -c/(2.0*b), elif c: return 1.0*(-d/c), return () def bezierparameterize(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))): #parametric bezier x0=bx0 y0=by0 cx=3*(bx1-x0) bx=3*(bx2-bx1)-cx ax=bx3-x0-cx-bx cy=3*(by1-y0) by=3*(by2-by1)-cy ay=by3-y0-cy-by return ax,ay,bx,by,cx,cy,x0,y0 #ax,ay,bx,by,cx,cy,x0,y0=bezierparameterize(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))) def linebezierintersect(((lx1,ly1),(lx2,ly2)),((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))): #parametric line dd=lx1 cc=lx2-lx1 bb=ly1 aa=ly2-ly1 if aa: coef1=cc/aa coef2=1 else: coef1=1 coef2=aa/cc ax,ay,bx,by,cx,cy,x0,y0=bezierparameterize(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))) #cubic intersection coefficients a=coef1*ay-coef2*ax b=coef1*by-coef2*bx c=coef1*cy-coef2*cx d=coef1*(y0-bb)-coef2*(x0-dd) roots = rootWrapper(a,b,c,d) retval = [] for i in roots: if type(i) is complex and i.imag==0: i = i.real if type(i) is not complex and 0<=i<=1: retval.append(bezierpointatt(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3)),i)) return retval def bezierpointatt(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3)),t): ax,ay,bx,by,cx,cy,x0,y0=bezierparameterize(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))) x=ax*(t**3)+bx*(t**2)+cx*t+x0 y=ay*(t**3)+by*(t**2)+cy*t+y0 return x,y def bezierslopeatt(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3)),t): ax,ay,bx,by,cx,cy,x0,y0=bezierparameterize(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))) dx=3*ax*(t**2)+2*bx*t+cx dy=3*ay*(t**2)+2*by*t+cy return dx,dy def beziertatslope(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3)),(dy,dx)): ax,ay,bx,by,cx,cy,x0,y0=bezierparameterize(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))) #quadratic coefficents of slope formula if dx: slope = 1.0*(dy/dx) a=3*ay-3*ax*slope b=2*by-2*bx*slope c=cy-cx*slope elif dy: slope = 1.0*(dx/dy) a=3*ax-3*ay*slope b=2*bx-2*by*slope c=cx-cy*slope else: return [] roots = rootWrapper(0,a,b,c) retval = [] for i in roots: if type(i) is complex and i.imag==0: i = i.real if type(i) is not 
complex and 0<=i<=1: retval.append(i) return retval def tpoint((x1,y1),(x2,y2),t): return x1+t*(x2-x1),y1+t*(y2-y1) def beziersplitatt(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3)),t): m1=tpoint((bx0,by0),(bx1,by1),t) m2=tpoint((bx1,by1),(bx2,by2),t) m3=tpoint((bx2,by2),(bx3,by3),t) m4=tpoint(m1,m2,t) m5=tpoint(m2,m3,t) m=tpoint(m4,m5,t) return ((bx0,by0),m1,m4,m),(m,m5,m3,(bx3,by3)) ''' Approximating the arc length of a bezier curve according to <http://www.cit.gu.edu.au/~anthony/info/graphics/bezier.curves> if: L1 = |P0 P1| +|P1 P2| +|P2 P3| L0 = |P0 P3| then: L = 1/2*L0 + 1/2*L1 ERR = L1-L0 ERR approaches 0 as the number of subdivisions (m) increases 2^-4m Reference: Jens Gravesen <gravesen@mat.dth.dk> "Adaptive subdivision and the length of Bezier curves" mat-report no. 1992-10, Mathematical Institute, The Technical University of Denmark. ''' def pointdistance((x1,y1),(x2,y2)): return math.sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2)) def Gravesen_addifclose(b, len, error = 0.001): box = 0 for i in range(1,4): box += pointdistance(b[i-1], b[i]) chord = pointdistance(b[0], b[3]) if (box - chord) > error: first, second = beziersplitatt(b, 0.5) Gravesen_addifclose(first, len, error) Gravesen_addifclose(second, len, error) else: len[0] += (box / 2.0) + (chord / 2.0) def bezierlengthGravesen(b, error = 0.001): len = [0] Gravesen_addifclose(b, len, error) return len[0] # balf = Bezier Arc Length Function balfax,balfbx,balfcx,balfay,balfby,balfcy = 0,0,0,0,0,0 def balf(t): retval = (balfax*(t**2) + balfbx*t + balfcx)**2 + (balfay*(t**2) + balfby*t + balfcy)**2 return math.sqrt(retval) def Simpson(f, a, b, n_limit, tolerance): n = 2 multiplier = (b - a)/6.0 endsum = f(a) + f(b) interval = (b - a)/2.0 asum = 0.0 bsum = f(a + interval) est1 = multiplier * (endsum + (2.0 * asum) + (4.0 * bsum)) est0 = 2.0 * est1 #print multiplier, endsum, interval, asum, bsum, est1, est0 while n < n_limit and abs(est1 - est0) > tolerance: n *= 2 multiplier /= 2.0 interval /= 2.0 asum += bsum bsum = 0.0 est0 = est1 for i in xrange(1, n, 2): bsum += f(a + (i * interval)) est1 = multiplier * (endsum + (2.0 * asum) + (4.0 * bsum)) #print multiplier, endsum, interval, asum, bsum, est1, est0 return est1 def bezierlengthSimpson(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3)), tolerance = 0.001): global balfax,balfbx,balfcx,balfay,balfby,balfcy ax,ay,bx,by,cx,cy,x0,y0=bezierparameterize(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))) balfax,balfbx,balfcx,balfay,balfby,balfcy = 3*ax,2*bx,cx,3*ay,2*by,cy return Simpson(balf, 0.0, 1.0, 4096, tolerance) def beziertatlength(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3)), l = 0.5, tolerance = 0.001): global balfax,balfbx,balfcx,balfay,balfby,balfcy ax,ay,bx,by,cx,cy,x0,y0=bezierparameterize(((bx0,by0),(bx1,by1),(bx2,by2),(bx3,by3))) balfax,balfbx,balfcx,balfay,balfby,balfcy = 3*ax,2*bx,cx,3*ay,2*by,cy t = 1.0 tdiv = t curlen = Simpson(balf, 0.0, t, 4096, tolerance) targetlen = l * curlen diff = curlen - targetlen while abs(diff) > tolerance: tdiv /= 2.0 if diff < 0: t += tdiv else: t -= tdiv curlen = Simpson(balf, 0.0, t, 4096, tolerance) diff = curlen - targetlen return t #default bezier length method bezierlength = bezierlengthSimpson if __name__ == '__main__': import timing #print linebezierintersect(((,),(,)),((,),(,),(,),(,))) #print linebezierintersect(((0,1),(0,-1)),((-1,0),(-.5,0),(.5,0),(1,0))) tol = 0.00000001 curves = [((0,0),(1,5),(4,5),(5,5)), ((0,0),(0,0),(5,0),(10,0)), ((0,0),(0,0),(5,1),(10,0)), ((-10,0),(0,0),(10,0),(10,10)), ((15,10),(0,0),(10,0),(-5,10))] ''' for curve in curves: 
timing.start() g = bezierlengthGravesen(curve,tol) timing.finish() gt = timing.micro() timing.start() s = bezierlengthSimpson(curve,tol) timing.finish() st = timing.micro() print g, gt print s, st ''' for curve in curves: print beziertatlength(curve,0.5) # vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
gpl-2.0
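A quick driver for the module above; note that the tuple-unpacking parameters make it Python 2 only, and bezmisc must be on sys.path:

import bezmisc

curve = ((0.0, 0.0), (1.0, 5.0), (4.0, 5.0), (5.0, 5.0))
x, y = bezmisc.bezierpointatt(curve, 0.5)     # point at parameter t = 0.5
length = bezmisc.bezierlength(curve)          # Simpson-rule arc length
t_mid = bezmisc.beziertatlength(curve, 0.5)   # t at half the arc length
print x, y, length, t_mid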
landryb/QGIS
python/plugins/processing/algs/lidar/lastools/lasview.py
12
3347
# -*- coding: utf-8 -*-

"""
***************************************************************************
    lasview.py
    ---------------------
    Date                 : August 2012
    Copyright            : (C) 2012 by Victor Olaya
    Email                : volayaf at gmail dot com
    ---------------------
    Date                 : September 2013
    Copyright            : (C) 2013 by Martin Isenburg
    Email                : martin near rapidlasso point com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm

from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterNumber


class lasview(LAStoolsAlgorithm):

    POINTS = "POINTS"
    SIZE = "SIZE"
    SIZES = ["1024 768", "800 600", "1200 900", "1200 400", "1550 900",
             "1550 1150"]
    COLORING = "COLORING"
    COLORINGS = ["default", "classification", "elevation1", "elevation2",
                 "intensity", "return", "flightline", "rgb"]

    def defineCharacteristics(self):
        self.name, self.i18n_name = self.trAlgorithm('lasview')
        self.group, self.i18n_group = self.trAlgorithm('LAStools')
        self.addParametersVerboseGUI()
        self.addParametersPointInputGUI()
        self.addParameter(ParameterNumber(lasview.POINTS,
                                          self.tr("max number of points sampled"),
                                          100000, 20000000, 5000000))
        self.addParameter(ParameterSelection(lasview.COLORING,
                                             self.tr("color by"),
                                             lasview.COLORINGS, 0))
        self.addParameter(ParameterSelection(lasview.SIZE,
                                             self.tr("window size (x y) in pixels"),
                                             lasview.SIZES, 0))
        self.addParametersAdditionalGUI()

    def processAlgorithm(self, progress):
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasview")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputCommands(commands)
        points = self.getParameterValue(lasview.POINTS)
        commands.append("-points " + unicode(points))
        coloring = self.getParameterValue(lasview.COLORING)
        if coloring != 0:
            commands.append("-color_by_" + lasview.COLORINGS[coloring])
        size = self.getParameterValue(lasview.SIZE)
        if size != 0:
            commands.append("-win " + lasview.SIZES[size])
        self.addParametersAdditionalCommands(commands)
        print commands
        LAStoolsUtils.runLAStools(commands, progress)
gpl-2.0
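processAlgorithm above builds a plain argument list, one flag-plus-value string per option. The equivalent built by hand, with a hypothetical install path and illustrative parameter values:

import os

commands = [os.path.join('/opt/LAStools', 'bin', 'lasview')]  # hypothetical path
commands.append('-points 5000000')         # the default sample size above
commands.append('-color_by_elevation1')    # COLORINGS[2]
commands.append('-win 800 600')            # SIZES[1]; index 0 appends nothing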
joker946/nova
nova/db/sqlalchemy/migrate_repo/versions/233_add_stats_in_compute_nodes.py
81
1460
# Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text


def upgrade(engine):
    meta = MetaData()
    meta.bind = engine

    # Drop the compute_node_stats table and add a 'stats' column to
    # compute_nodes directly. The data itself is transient and doesn't
    # need to be copied over.
    table_names = ('compute_node_stats', 'shadow_compute_node_stats')
    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)
        table.drop()

    # Add a new stats column to compute nodes
    table_names = ('compute_nodes', 'shadow_compute_nodes')
    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)
        stats = Column('stats', Text, default='{}')
        table.create_column(stats)
apache-2.0
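The migration above relies on sqlalchemy-migrate's table.create_column(). With plain SQLAlchemy, the shape of the new column can be sketched like this (in-memory SQLite, illustrative only; the real migration alters existing tables rather than creating them):

from sqlalchemy import Column, MetaData, Table, Text, create_engine

engine = create_engine('sqlite://')   # throwaway in-memory database
meta = MetaData()
nodes = Table('compute_nodes', meta, Column('stats', Text, default='{}'))
meta.create_all(engine)               # creates the table with the JSON-text column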
marqh/iris
lib/iris/tests/test_nimrod.py
16
3813
# (C) British Crown Copyright 2010 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # import iris tests first so that some things can be initialised before # importing anything else import iris.tests as tests import numpy as np import iris import iris.fileformats.nimrod_load_rules as nimrod_load_rules def mock_nimrod_field(): field = iris.fileformats.nimrod.NimrodField() field.int_mdi = -32767 field.float32_mdi = -32767.0 return field class TestLoad(tests.IrisTest): @tests.skip_data def test_multi_field_load(self): # load a cube with two fields cube = iris.load(tests.get_data_path( ('NIMROD', 'uk2km', 'WO0000000003452', '201007020900_u1096_ng_ey00_visibility0180_screen_2km'))) self.assertCML(cube, ("nimrod", "load_2flds.cml")) def test_orography(self): # Mock an orography field we've seen. field = mock_nimrod_field() cube = iris.cube.Cube(np.arange(100).reshape(10, 10)) field.dt_year = field.dt_month = field.dt_day = field.int_mdi field.dt_hour = field.dt_minute = field.int_mdi field.proj_biaxial_ellipsoid = 0 field.tm_meridian_scaling = 0.999601 field.field_code = 73 field.vertical_coord_type = 1 field.title = "(MOCK) 2km mean orography" field.units = "metres" field.source = "GLOBE DTM" nimrod_load_rules.name(cube, field) nimrod_load_rules.units(cube, field) nimrod_load_rules.reference_time(cube, field) nimrod_load_rules.proj_biaxial_ellipsoid(cube, field) nimrod_load_rules.tm_meridian_scaling(cube, field) nimrod_load_rules.vertical_coord(cube, field) nimrod_load_rules.attributes(cube, field) self.assertCML(cube, ("nimrod", "mockography.cml")) def test_levels_below_ground(self): # Mock a soil temperature field we've seen. field = mock_nimrod_field() cube = iris.cube.Cube(np.arange(100).reshape(10, 10)) field.field_code = -1 # Not orography field.reference_vertical_coord_type = field.int_mdi # Not bounded field.vertical_coord_type = 12 field.vertical_coord = 42 nimrod_load_rules.vertical_coord(cube, field) self.assertCML(cube, ("nimrod", "levels_below_ground.cml")) def test_period_of_interest(self): # mock a pressure field field = mock_nimrod_field() cube = iris.cube.Cube(np.arange(100).reshape(10, 10)) field.field_code = 0 field.vt_year = 2013 field.vt_month = 5 field.vt_day = 7 field.vt_hour = 6 field.vt_minute = 0 field.vt_second = 0 field.dt_year = 2013 field.dt_month = 5 field.dt_day = 7 field.dt_hour = 6 field.dt_minute = 0 field.dt_second = 0 field.period_minutes = 60 nimrod_load_rules.time(cube, field) self.assertCML(cube, ("nimrod", "period_of_interest.cml")) if __name__ == "__main__": tests.main()
lgpl-3.0
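The mock_nimrod_field() helper shows the testing pattern: build a bare field object and set only the header words a load rule actually reads, with the MDI sentinels standing in for "unset". A dependency-free sketch of the same idea:

class FakeField(object):
    """Stand-in for iris.fileformats.nimrod.NimrodField (illustrative only)."""

field = FakeField()
field.int_mdi = -32767          # integer missing-data indicator
field.float32_mdi = -32767.0    # float missing-data indicator
field.vertical_coord_type = 12  # levels below ground, as in the test above
field.vertical_coord = 42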
unseenlaser/python-for-android
python-modules/twisted/twisted/protocols/ident.py
56
7774
# -*- test-case-name: twisted.test.test_ident -*- # Copyright (c) 2001-2010 Twisted Matrix Laboratories. # See LICENSE for details. """ Ident protocol implementation. @author: Jean-Paul Calderone """ from __future__ import generators import struct from twisted.internet import defer from twisted.protocols import basic from twisted.python import log, failure _MIN_PORT = 1 _MAX_PORT = 2 ** 16 - 1 class IdentError(Exception): """ Can't determine connection owner; reason unknown. """ identDescription = 'UNKNOWN-ERROR' def __str__(self): return self.identDescription class NoUser(IdentError): """ The connection specified by the port pair is not currently in use or currently not owned by an identifiable entity. """ identDescription = 'NO-USER' class InvalidPort(IdentError): """ Either the local or foreign port was improperly specified. This should be returned if either or both of the port ids were out of range (TCP port numbers are from 1-65535), negative integers, reals or in any fashion not recognized as a non-negative integer. """ identDescription = 'INVALID-PORT' class HiddenUser(IdentError): """ The server was able to identify the user of this port, but the information was not returned at the request of the user. """ identDescription = 'HIDDEN-USER' class IdentServer(basic.LineOnlyReceiver): """ The Identification Protocol (a.k.a., "ident", a.k.a., "the Ident Protocol") provides a means to determine the identity of a user of a particular TCP connection. Given a TCP port number pair, it returns a character string which identifies the owner of that connection on the server's system. Server authors should subclass this class and override the lookup method. The default implementation returns an UNKNOWN-ERROR response for every query. """ def lineReceived(self, line): parts = line.split(',') if len(parts) != 2: self.invalidQuery() else: try: portOnServer, portOnClient = map(int, parts) except ValueError: self.invalidQuery() else: if _MIN_PORT <= portOnServer <= _MAX_PORT and _MIN_PORT <= portOnClient <= _MAX_PORT: self.validQuery(portOnServer, portOnClient) else: self._ebLookup(failure.Failure(InvalidPort()), portOnServer, portOnClient) def invalidQuery(self): self.transport.loseConnection() def validQuery(self, portOnServer, portOnClient): """ Called when a valid query is received to look up and deliver the response. @param portOnServer: The server port from the query. @param portOnClient: The client port from the query. """ serverAddr = self.transport.getHost().host, portOnServer clientAddr = self.transport.getPeer().host, portOnClient defer.maybeDeferred(self.lookup, serverAddr, clientAddr ).addCallback(self._cbLookup, portOnServer, portOnClient ).addErrback(self._ebLookup, portOnServer, portOnClient ) def _cbLookup(self, (sysName, userId), sport, cport): self.sendLine('%d, %d : USERID : %s : %s' % (sport, cport, sysName, userId)) def _ebLookup(self, failure, sport, cport): if failure.check(IdentError): self.sendLine('%d, %d : ERROR : %s' % (sport, cport, failure.value)) else: log.err(failure) self.sendLine('%d, %d : ERROR : %s' % (sport, cport, IdentError(failure.value))) def lookup(self, serverAddress, clientAddress): """Lookup user information about the specified address pair. Return value should be a two-tuple of system name and username. 
Acceptable values for the system name may be found online at:: U{http://www.iana.org/assignments/operating-system-names} This method may also raise any IdentError subclass (or IdentError itself) to indicate user information will not be provided for the given query. A Deferred may also be returned. @param serverAddress: A two-tuple representing the server endpoint of the address being queried. The first element is a string holding a dotted-quad IP address. The second element is an integer representing the port. @param clientAddress: Like L{serverAddress}, but represents the client endpoint of the address being queried. """ raise IdentError() class ProcServerMixin: """Implements lookup() to grab entries for responses from /proc/net/tcp """ SYSTEM_NAME = 'LINUX' try: from pwd import getpwuid def getUsername(self, uid, getpwuid=getpwuid): return getpwuid(uid)[0] del getpwuid except ImportError: def getUsername(self, uid): raise IdentError() def entries(self): f = file('/proc/net/tcp') f.readline() for L in f: yield L.strip() def dottedQuadFromHexString(self, hexstr): return '.'.join(map(str, struct.unpack('4B', struct.pack('=L', int(hexstr, 16))))) def unpackAddress(self, packed): addr, port = packed.split(':') addr = self.dottedQuadFromHexString(addr) port = int(port, 16) return addr, port def parseLine(self, line): parts = line.strip().split() localAddr, localPort = self.unpackAddress(parts[1]) remoteAddr, remotePort = self.unpackAddress(parts[2]) uid = int(parts[7]) return (localAddr, localPort), (remoteAddr, remotePort), uid def lookup(self, serverAddress, clientAddress): for ent in self.entries(): localAddr, remoteAddr, uid = self.parseLine(ent) if remoteAddr == clientAddress and localAddr[1] == serverAddress[1]: return (self.SYSTEM_NAME, self.getUsername(uid)) raise NoUser() class IdentClient(basic.LineOnlyReceiver): errorTypes = (IdentError, NoUser, InvalidPort, HiddenUser) def __init__(self): self.queries = [] def lookup(self, portOnServer, portOnClient): """Lookup user information about the specified address pair. """ self.queries.append((defer.Deferred(), portOnServer, portOnClient)) if len(self.queries) > 1: return self.queries[-1][0] self.sendLine('%d, %d' % (portOnServer, portOnClient)) return self.queries[-1][0] def lineReceived(self, line): if not self.queries: log.msg("Unexpected server response: %r" % (line,)) else: d, _, _ = self.queries.pop(0) self.parseResponse(d, line) if self.queries: self.sendLine('%d, %d' % (self.queries[0][1], self.queries[0][2])) def connectionLost(self, reason): for q in self.queries: q[0].errback(IdentError(reason)) self.queries = [] def parseResponse(self, deferred, line): parts = line.split(':', 2) if len(parts) != 3: deferred.errback(IdentError(line)) else: ports, type, addInfo = map(str.strip, parts) if type == 'ERROR': for et in self.errorTypes: if et.identDescription == addInfo: deferred.errback(et(line)) return deferred.errback(IdentError(line)) else: deferred.callback((type, addInfo)) __all__ = ['IdentError', 'NoUser', 'InvalidPort', 'HiddenUser', 'IdentServer', 'IdentClient', 'ProcServerMixin']
apache-2.0
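ProcServerMixin above answers ident queries by parsing /proc/net/tcp, where IPv4 addresses appear as eight hex digits in host byte order. A stand-alone sketch of that decoding step; because '=L' packs with native byte order, the printed address assumes a little-endian machine:

import struct

def dotted_quad_from_hex(hexstr):
    # Repack the 32-bit value and split it into four octets, exactly as
    # ProcServerMixin.dottedQuadFromHexString does.
    return '.'.join(map(str, struct.unpack('4B',
                                           struct.pack('=L', int(hexstr, 16)))))

def unpack_address(packed):
    # A /proc/net/tcp endpoint looks like '0100007F:1F90'.
    addr, port = packed.split(':')
    return dotted_quad_from_hex(addr), int(port, 16)

print(unpack_address('0100007F:1F90'))   # ('127.0.0.1', 8080) on little-endian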
jerli/sympy
sympy/stats/rv_interface.py
88
5205
from __future__ import print_function, division from .rv import (probability, expectation, density, where, given, pspace, cdf, sample, sample_iter, random_symbols, independent, dependent, sampling_density) from sympy import sqrt __all__ = ['P', 'E', 'density', 'where', 'given', 'sample', 'cdf', 'pspace', 'sample_iter', 'variance', 'std', 'skewness', 'covariance', 'dependent', 'independent', 'random_symbols', 'correlation', 'moment', 'cmoment', 'sampling_density'] def moment(X, n, c=0, condition=None, **kwargs): """ Return the nth moment of a random expression about c i.e. E((X-c)**n) Default value of c is 0. Examples ======== >>> from sympy.stats import Die, moment, E >>> X = Die('X', 6) >>> moment(X, 1, 6) -5/2 >>> moment(X, 2) 91/6 >>> moment(X, 1) == E(X) True """ return expectation((X - c)**n, condition, **kwargs) def variance(X, condition=None, **kwargs): """ Variance of a random expression Expectation of (X-E(X))**2 Examples ======== >>> from sympy.stats import Die, E, Bernoulli, variance >>> from sympy import simplify, Symbol >>> X = Die('X', 6) >>> p = Symbol('p') >>> B = Bernoulli('B', p, 1, 0) >>> variance(2*X) 35/3 >>> simplify(variance(B)) p*(-p + 1) """ return cmoment(X, 2, condition, **kwargs) def standard_deviation(X, condition=None, **kwargs): """ Standard Deviation of a random expression Square root of the Expectation of (X-E(X))**2 Examples ======== >>> from sympy.stats import Bernoulli, std >>> from sympy import Symbol, simplify >>> p = Symbol('p') >>> B = Bernoulli('B', p, 1, 0) >>> simplify(std(B)) sqrt(p*(-p + 1)) """ return sqrt(variance(X, condition, **kwargs)) std = standard_deviation def covariance(X, Y, condition=None, **kwargs): """ Covariance of two random expressions The expectation that the two variables will rise and fall together Covariance(X,Y) = E( (X-E(X)) * (Y-E(Y)) ) Examples ======== >>> from sympy.stats import Exponential, covariance >>> from sympy import Symbol >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> X = Exponential('X', rate) >>> Y = Exponential('Y', rate) >>> covariance(X, X) lambda**(-2) >>> covariance(X, Y) 0 >>> covariance(X, Y + rate*X) 1/lambda """ return expectation( (X - expectation(X, condition, **kwargs)) * (Y - expectation(Y, condition, **kwargs)), condition, **kwargs) def correlation(X, Y, condition=None, **kwargs): """ Correlation of two random expressions, also known as correlation coefficient or Pearson's correlation The normalized expectation that the two variables will rise and fall together Correlation(X,Y) = E( (X-E(X)) * (Y-E(Y)) / (sigma(X) * sigma(Y)) ) Examples ======== >>> from sympy.stats import Exponential, correlation >>> from sympy import Symbol >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> X = Exponential('X', rate) >>> Y = Exponential('Y', rate) >>> correlation(X, X) 1 >>> correlation(X, Y) 0 >>> correlation(X, Y + rate*X) 1/sqrt(1 + lambda**(-2)) """ return covariance(X, Y, condition, **kwargs)/(std(X, condition, **kwargs) * std(Y, condition, **kwargs)) def cmoment(X, n, condition=None, **kwargs): """ Return the nth central moment of a random expression about its mean i.e. E((X - E(X))**n) Examples ======== >>> from sympy.stats import Die, cmoment, variance >>> X = Die('X', 6) >>> cmoment(X, 3) 0 >>> cmoment(X, 2) 35/12 >>> cmoment(X, 2) == variance(X) True """ mu = expectation(X, condition, **kwargs) return moment(X, n, mu, condition, **kwargs) def smoment(X, n, condition=None, **kwargs): """ Return the nth Standardized moment of a random expression i.e. 
E( ((X - mu)/sigma(X))**n ) Examples ======== >>> from sympy.stats import skewness, Exponential, smoment >>> from sympy import Symbol >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> Y = Exponential('Y', rate) >>> smoment(Y, 4) 9 >>> smoment(Y, 4) == smoment(3*Y, 4) True >>> smoment(Y, 3) == skewness(Y) True """ sigma = std(X, condition, **kwargs) return (1/sigma)**n*cmoment(X, n, condition, **kwargs) def skewness(X, condition=None, **kwargs): """ Measure of the asymmetry of the probability distribution Positive skew indicates that most of the values lie to the right of the mean skewness(X) = E( ((X - E(X))/sigma)**3 ) Examples ======== >>> from sympy.stats import skewness, Exponential, Normal >>> from sympy import Symbol >>> X = Normal('X', 0, 1) >>> skewness(X) 0 >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> Y = Exponential('Y', rate) >>> skewness(Y) 2 """ return smoment(X, 3, condition, **kwargs) P = probability E = expectation
bsd-3-clause
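The docstrings above already carry doctests; as a quick cross-check, here is a short script exercising the relationships they state (it assumes a SymPy installation exposing these names from sympy.stats):

from sympy.stats import Die, E, variance, moment, cmoment, skewness

X = Die('X', 6)
assert moment(X, 1) == E(X)           # the first raw moment is the mean
assert cmoment(X, 2) == variance(X)   # the second central moment is the variance
print(variance(X))                    # 35/12 for a fair six-sided die
print(skewness(X))                    # 0: the distribution is symmetric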
drmateo/ecto
test/scripts/pyecto/__init__.py
5
1607
# # Copyright (c) 2011, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # from MyModule import *
bsd-3-clause
nathanbjenx/cairis
cairis/core/Trace.py
1
1187
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. __author__ = 'Shamal Faily' class Trace: def __init__(self,fObjt,fName,tObjt,tName): self.theFromObject = fObjt self.theFromName = fName self.theToObject = tObjt self.theToName = tName def fromObject(self): return self.theFromObject def fromName(self): return self.theFromName def toObject(self): return self.theToObject def toName(self): return self.theToName
apache-2.0
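A minimal usage sketch of the Trace value object, assuming the module is importable under the path shown in this record:

from cairis.core.Trace import Trace

trace = Trace('requirement', 'R1', 'task', 'T3')
assert (trace.fromObject(), trace.fromName()) == ('requirement', 'R1')
assert (trace.toObject(), trace.toName()) == ('task', 'T3')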
sathnaga/virt-test
virttest/ovirt.py
3
25249
""" oVirt SDK wrapper module. @copyright: 2008-2012 Red Hat Inc. """ import time, logging try: from ovirtsdk.api import API from ovirtsdk.xml import params as param except ImportError: logging.info("ovirtsdk module not present, please install it") import virt_vm _api = None _connected = False def connect(params): """ Connect ovirt manager API. """ url = params.get('ovirt_engine_url') username = params.get('ovirt_engine_user') password = params.get('ovirt_engine_password') version = params.get('ovirt_engine_version') if url is None or username is None or password is None: logging.error('ovirt_engine[url|user|password] are necessary!!') if version is None: version = param.Version(major='3', minor='0') else: version = param.Version(version) global _api, _connected try: # Try to connect oVirt API if connection doesn't exist, # otherwise, directly return existing API connection. if not _connected: _api = API(url, username, password) _connected = True return (_api, version) else: return (_api, version) except Exception, e: logging.error('Failed to connect: %s\n' % str(e)) else: logging.info('Succeed to connect oVirt/Rhevm manager\n') def disconnect(): """ Disconnect ovirt manager connection. """ global _api, _connected if _connected: return _api.disconnect() class VMManager(virt_vm.BaseVM): """ This class handles all basic VM operations for oVirt. """ def __init__(self, params, root_dir, address_cache=None, state=None): """ Initialize the object and set a few attributes. @param name: The name of the object @param params: A dict containing VM params (see method make_qemu_command for a full description) @param root_dir: Base directory for relative filenames @param address_cache: A dict that maps MAC addresses to IP addresses @param state: If provided, use this as self.__dict__ """ if state: self.__dict__ = state else: self.process = None self.serial_console = None self.redirs = {} self.vnc_port = 5900 self.vnclisten = "0.0.0.0" self.pci_assignable = None self.netdev_id = [] self.device_id = [] self.pci_devices = [] self.uuid = None self.only_pty = False self.spice_port = 8000 self.name = params.get("vm_name", "") self.params = params self.root_dir = root_dir self.address_cache = address_cache self.vnclisten = "0.0.0.0" self.driver_type = "v2v" super(VMManager, self).__init__(self.name, params) (self.api, self.version) = connect(params) if self.name: self.instance = self.api.vms.get(self.name) def list(self): """ List all of VMs. """ vm_list = [] try: vms = self.api.vms.list(query='name=*') for i in range(len(vms)): vm_list.append(vms[i].name) return vm_list except Exception, e: logging.error('Failed to get vms:\n%s' % str(e)) def state(self): """ Return VM state. """ try: return self.instance.status.state except Exception, e: logging.error('Failed to get %s status:\n%s' % (self.name, str(e))) def get_mac_address(self): """ Return MAC address of a VM. """ try: return self.instance.nics.get().get_mac().get_address() except Exception, e: logging.error('Failed to get %s status:\n%s' % (self.name, str(e))) def lookup_by_storagedomains(self, storage_name): """ Lookup VM object in storage domain according to VM name. """ try: storage = self.api.storagedomains.get(storage_name) return storage.vms.get(self.name) except Exception, e: logging.error('Failed to get %s from %s:\n%s' % (self.name, storage_name, str(e))) def is_alive(self): """ Judge if a VM is alive. 
""" if self.state() == 'up': logging.info('The %s status is <Up>' % self.name) return True else: logging.debug('The %s status is <not Up>' % self.name) return False def is_dead(self): """ Judge if a VM is dead. """ if self.state() == 'down': logging.info('The %s status is <Down>' % self.name) return True else: logging.debug('The %s status is <not Down>' % self.name) return False def is_paused(self): return False def start(self): """ Start a VM. """ try: if self.state() != 'up': logging.info('Starting VM %s' % self.name) self.instance.start() logging.info('Waiting for VM to reach <Up> status ...') while self.state() != 'up': self.instance = self.api.vms.get(self.name) time.sleep(1) else: logging.debug('VM already up') except Exception, e: logging.error('Failed to start VM:\n%s' % str(e)) def suspend(self): """ Suspend a VM. """ while self.state() != 'suspended': try: logging.info('Suspend VM %s' % self.name) self.instance.suspend() logging.info('Waiting for VM to reach <Suspended> status ...') while self.state() != 'suspended': self.instance = self.api.vms.get(self.name) time.sleep(1) except Exception, e: if e.reason == 'Bad Request' \ and 'asynchronous running tasks' in e.detail: logging.warning("VM has asynchronous running tasks, " "trying again") time.sleep(1) else: logging.error('Failed to suspend VM:\n%s' % str(e)) break def resume(self): """ Resume a suspended VM. """ try: if self.state() != 'up': logging.info('Resume VM %s' % self.name) self.instance.start() logging.info('Waiting for VM to <Resume> status ...') while self.state() != 'up': self.instance = self.api.vms.get(self.name) time.sleep(1) else: logging.debug('VM already up') except Exception, e: logging.error('Failed to resume VM:\n%s' % str(e)) def shutdown(self): """ Shut down a running VM. """ try: if self.state() != 'down': logging.info('Stop VM %s' % self.name) self.instance.stop() logging.info('Waiting for VM to reach <Down> status ...') while self.state() != 'down': self.instance = self.api.vms.get(self.name) time.sleep(1) else: logging.debug('VM already down') except Exception, e: logging.error('Failed to Stop VM:\n%s' % str(e)) def delete(self): """ Delete a VM. """ try: if self.state() == 'down': logging.info('Delete VM %s' % self.name) self.instance.delete() logging.info('Waiting for VM to be <Deleted> ...') while self.name in [self.instance.name for self.instance \ in self.api.vms.list()]: time.sleep(1) logging.info('VM was removed successfully') else: logging.debug('VM already is down status') except Exception, e: logging.error('Failed to remove VM:\n%s' % str(e)) def destroy(self): """ Destroy a VM. """ if self.api.vms is None: return self.shutdown() def delete_from_export_domain(self, export_name): """ Remove a VM from specified export domain. @export_name: export domain name. """ vm = self.lookup_by_storagedomains(export_name) try: vm.delete() except Exception, e: logging.error('Failed to remove VM:\n%s' % str(e)) def import_from_export_domain(self, export_name, storage_name, cluster_name): """ Import a VM from export domain to data domain. @export_name: Export domain name. @storage_name: Storage domain name. @cluster_name: Cluster name. 
""" vm = self.lookup_by_storagedomains(export_name) storage_domains = self.api.storagedomains.get(storage_name) clusters = self.api.clusters.get(cluster_name) try: logging.info('Import VM %s' % self.name) vm.import_vm(param.Action(storage_domain=storage_domains, cluster=clusters)) logging.info('Waiting for VM to reach <Down> status ...') while self.state() != 'down': self.instance = self.api.vms.get(self.name) time.sleep(1) logging.info('VM was imported successfully') except Exception, e: logging.error('Failed to import VM:\n%s' % str(e)) def export_from_export_domain(self, export_name): """ Export a VM from storage domain to export domain. @export_name: Export domain name. """ storage_domains = self.api.storagedomains.get(export_name) try: logging.info('Export VM %s' % self.name) self.instance.export(param.Action(storage_domain=storage_domains)) logging.info('Waiting for VM to reach <Down> status ...') while self.state() != 'down': self.instance = self.api.vms.get(self.name) time.sleep(1) logging.info('VM was exported successfully') except Exception, e: logging.error('Failed to export VM:\n%s' % str(e)) def snapshot(self, snapshot_name='my_snapshot'): """ Create a snapshot to VM. @snapshot_name: 'my_snapshot' is default snapshot name. """ snap_params = param.Snapshot(description=snapshot_name, vm=self.instance) try: logging.info('Creating a snapshot %s for VM %s' % (snapshot_name, self.name)) self.instance.snapshots.add(snap_params) logging.info('Waiting for snapshot creation to finish ...') while self.state() == 'image_locked': self.instance = self.api.vms.get(self.name) time.sleep(1) logging.info('Snapshot was created successfully') except Exception, e: logging.error('Failed to create a snapshot:\n%s' % str(e)) def create_template(self, cluster_name, template_name='my_template'): """ Create a template from VM. @cluster_name: cluster name. @template_name: 'my_template' is default template name. """ cluster = self.api.clusters.get(cluster_name) tmpl_params = param.Template(name=template_name, vm=self.instance, cluster=cluster) try: logging.info('Creating a template %s from VM %s' % (template_name, self.name)) self.api.templates.add(tmpl_params) logging.info('Waiting for VM to reach <Down> status ...') while self.state() != 'down': self.instance = self.api.vms.get(self.name) time.sleep(1) except Exception, e: logging.error('Failed to create a template from VM:\n%s' % str(e)) def add(self, memory, disk_size, cluster_name, storage_name, nic_name='eth0', network_interface='virtio', network_name='ovirtmgmt', disk_interface='virtio', disk_format='raw', template_name='Blank'): """ Create VM with one NIC and one Disk. @memory: VM's memory size such as 1024*1024*1024=1GB. @disk_size: VM's disk size such as 512*1024=512MB. @nic_name: VM's NICs name such as 'eth0'. @network_interface: VM's network interface such as 'virtio'. @network_name: network such as ovirtmgmt for ovirt, rhevm for rhel. @disk_format: VM's disk format such as 'raw' or 'cow'. @disk_interface: VM's disk interface such as 'virtio'. @cluster_name: cluster name. @storage_name: storage domain name. @template_name: VM's template name, default is 'Blank'. """ # network name is ovirtmgmt for ovirt, rhevm for rhel. 
vm_params = param.VM(name=self.name, memory=memory, cluster=self.api.clusters.get(cluster_name), template=self.api.templates.get(template_name)) storage = self.api.storagedomains.get(storage_name) storage_params = param.StorageDomains(storage_domain=[storage]) nic_params = param.NIC(name=nic_name, network=param.Network(name=network_name), interface=network_interface) disk_params = param.Disk(storage_domains=storage_params, size=disk_size, type_='system', status=None, interface=disk_interface, format=disk_format, sparse=True, bootable=True) try: logging.info('Creating a VM %s' % self.name) self.api.vms.add(vm_params) logging.info('NIC is added to VM %s' % self.name) self.instance.nics.add(nic_params) logging.info('Disk is added to VM %s' % self.name) self.instance.disks.add(disk_params) logging.info('Waiting for VM to reach <Down> status ...') while self.state() != 'down': time.sleep(1) except Exception, e: logging.error('Failed to create VM with disk and NIC\n%s' % str(e)) def add_vm_from_template(self, cluster_name, template_name='Blank', new_name='my_new_vm'): """ Create a VM from template. @cluster_name: cluster name. @template_name: default template is 'Blank'. @new_name: 'my_new_vm' is a default new VM's name. """ vm_params = param.VM(name=new_name, cluster=self.api.clusters.get(cluster_name), template=self.api.templates.get(template_name)) try: logging.info('Creating a VM %s from template %s' % (new_name, template_name)) self.api.vms.add(vm_params) logging.info('Waiting for VM to reach <Down> status ...') while self.state() != 'down': self.instance = self.api.vms.get(self.name) time.sleep(1) logging.info('VM was created from template successfully') except Exception, e: logging.error('Failed to create VM from template:\n%s' % str(e)) def get_address(self, index=0): """ Return the address of the guest through ovirt node tcpdump cache. @param index: Name or index of the NIC whose address is requested. @return: IP address of NIC. @raise VMIPAddressMissingError: If no IP address is found for the the NIC's MAC address """ nic = self.virtnet[index] if nic.nettype == 'bridge': mac = self.get_mac_address() ip = self.address_cache.get(mac) # TODO: Verify MAC-IP address mapping on remote ovirt node if not ip: raise virt_vm.VMIPAddressMissingError(mac) return ip else: raise ValueError("Ovirt only support bridge nettype now.") class DataCenterManager(object): """ This class handles all basic datacenter operations. """ def __init__(self, params): self.name = params.get("dc_name", "") self.params = params (self.api, self.version) = connect(params) if self.name: self.instance = self.api.datacenters.get(self.name) def list(self): """ List all of datacenters. """ dc_list = [] try: logging.info('List Data centers') dcs = self.api.datacenters.list(query='name=*') for i in range(len(dcs)): dc_list.append(dcs[i].name) return dc_list except Exception, e: logging.error('Failed to get data centers:\n%s' % str(e)) def add(self, storage_type): """ Add a new data center. """ if not self.name: self.name = "my_datacenter" try: logging.info('Creating a %s type datacenter %s' % (storage_type, self.name)) if self.api.datacenters.add(param.DataCenter( name=self.name, storage_type=storage_type, version=self.version)): logging.info('Data center was created successfully') except Exception, e: logging.error('Failed to create data center:\n%s' % str(e)) class ClusterManager(object): """ This class handles all basic cluster operations. 
""" def __init__(self, params): self.name = params.get("cluster_name", "") self.params = params (self.api, self.version) = connect(params) if self.name: self.instance = self.api.clusters.get(self.name) def list(self): """ List all of clusters. """ cluster_list = [] try: logging.info('List clusters') clusters = self.api.clusters.list(query='name=*') for i in range(len(clusters)): cluster_list.append(clusters[i].name) return cluster_list except Exception, e: logging.error('Failed to get clusters:\n%s' % str(e)) def add(self, dc_name, cpu_type='Intel Nehalem Family'): """ Add a new cluster into data center. """ if not self.name: self.name = "my_cluster" dc = self.api.datacenters.get(dc_name) try: logging.info('Creating a cluster %s in datacenter %s' % (self.name, dc_name)) if self.api.clusters.add(param.Cluster(name=self.name, cpu=param.CPU(id=cpu_type), data_center=dc, version=self.version)): logging.info('Cluster was created successfully') except Exception, e: logging.error('Failed to create cluster:\n%s' % str(e)) class HostManager(object): """ This class handles all basic host operations. """ def __init__(self, params): self.name = params.get("hostname", "") self.params = params (self.api, self.version) = connect(params) if self.name: self.instance = self.api.hosts.get(self.name) def list(self): """ List all of hosts. """ host_list = [] try: logging.info('List hosts') hosts = self.api.hosts.list(query='name=*') for i in range(len(hosts)): host_list.append(hosts[i].name) return host_list except Exception, e: logging.error('Failed to get hosts:\n%s' % str(e)) def state(self): """ Return host state. """ try: return self.instance.status.state except Exception, e: logging.error('Failed to get %s status:\n%s' % (self.name, str(e))) def add(self, host_address, host_password, cluster_name): """ Register a host into specified cluster. """ if not self.name: self.name = 'my_host' clusters = self.api.clusters.get(cluster_name) host_params = param.Host(name=self.name, address=host_address, cluster=clusters, root_password=host_password) try: logging.info('Registing a host %s into cluster %s' % (self.name, cluster_name)) if self.api.hosts.add(host_params): logging.info('Waiting for host to reach the <Up> status ...') while self.state() != 'up': time.sleep(1) else: logging.info('Host is up') logging.info('Host was installed successfully') except Exception, e: logging.error('Failed to install host:\n%s' % str(e)) def get_address(self): """ Return host IP address. """ try: logging.info('Get host %s IP' % self.name) return self.instance.get_address() except Exception, e: logging.error('Failed to get host %s IP address:\n%s' % (self.name, str(e))) class StorageDomainManager(object): """ This class handles all basic storage domain operations. """ def __init__(self, params): self.name = params.get("storage_name", "") self.params = params (self.api, self.version) = connect(params) if self.name: self.instance = self.api.storagedomains.get(self.name) def list(self): """ List all of storagedomains. """ storage_list = [] try: logging.info('List storage domains') storages = self.api.storagedomains.list() for i in range(len(storages)): storage_list.append(storages[i].name) return storage_list except Exception, e: logging.error('Failed to get storage domains:\n%s' % str(e)) def attach_iso_export_domain_into_datacenter(self, address, path, dc_name, host_name, domain_type, storage_type='nfs', name='my_iso'): """ Attach ISO/export domain into data center. @name: ISO or Export name. @host_name: host name. 
@dc_name: data center name. @path: ISO/export domain path. @address: ISO/export domain address. @domain_type: storage domain type, it may be 'iso' or 'export'. @storage_type: storage type, it may be 'nfs', 'iscsi', or 'fc'. """ dc = self.api.datacenters.get(dc_name) host = self.api.hosts.get(host_name) storage_params = param.Storage(type_=storage_type, address=address, path=path) storage_domain__params = param.StorageDomain(name=name, data_center=dc, type_=domain_type, host=host, storage = storage_params) try: logging.info('Create/import ISO storage domain %s' % name) if self.api.storagedomains.add(storage_domain__params): logging.info('%s domain was created/imported successfully' % domain_type) logging.info('Attach ISO storage domain %s' % name) if self.api.datacenters.get(dc_name).storagedomains.add( self.api.storagedomains.get(name)): logging.info('%s domain was attached successfully' % domain_type) logging.info('Activate ISO storage domain %s' % name) if self.api.datacenters.get(dc_name).storagedomains.get( name).activate(): logging.info('%s domain was activated successfully' % domain_type) except Exception, e: logging.error('Failed to add %s domain:\n%s' % (domain_type, str(e)))
gpl-2.0
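A hypothetical driver script for the wrapper above. It assumes a Python 2 interpreter (the module uses the old 'except Exception, e' syntax), an installed ovirtsdk, the virt-test tree on sys.path, and a reachable engine; every parameter value below is a placeholder:

from virttest import ovirt

params = {
    'ovirt_engine_url': 'https://engine.example.com/api',   # placeholder
    'ovirt_engine_user': 'admin@internal',                  # placeholder
    'ovirt_engine_password': 'secret',                      # placeholder
    'ovirt_engine_version': None,   # falls back to Version(major='3', minor='0')
    'vm_name': 'test-vm',
}

vm = ovirt.VMManager(params, root_dir='/tmp')
if vm.is_dead():
    vm.start()       # start() polls until state() == 'up'
print(vm.list())     # names of all VMs the engine knows about
ovirt.disconnect()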
mflu/openvstorage_centos
ovs/dal/dataobjectlist.py
1
6441
# Copyright 2014 CloudFounders NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DataObjectList module """ from ovs.dal.exceptions import ObjectNotFoundException class DataObjectList(object): """ The DataObjectList works on the resulting dataset from a DataList query. It uses the descriptor metadata to provide a list-alike experience """ def __init__(self, query_result, cls, reduced=False): """ Initializes a DataObjectList object, using a query result and a class type The reduced flag is both used internally and is used to create a DataObjectList which will yield reduced objects (with only their guid) for faster code in case not all properties are required """ self._guids = query_result self.type = cls self._objects = {} self._reduced = reduced self._query_result = query_result @property def reduced(self): if not self._reduced: dataobjectlist = DataObjectList(self._query_result, self.type, reduced=True) dataobjectlist._guids = self._guids # Keep sorting return dataobjectlist def merge(self, query_result): """ This method merges in a new query result, preservice objects that might already be cached. It also maintains previous sorting, appending new items to the end of the list """ # Maintaining order is very important here old_guids = self._guids[:] new_guids = query_result self._guids = [] for guid in old_guids: if guid in new_guids: self._guids.append(guid) for guid in new_guids: if guid not in self._guids: self._guids.append(guid) # Cleaning out old cached objects for guid in self._objects.keys(): if guid not in self._guids: del self._objects[guid] if not self._reduced: self.reduced.merge(query_result) def _get_object(self, guid): """ Yields an instance with a given guid, or a fake class with only a guid property in case of a reduced list """ if guid not in self._objects: if self._reduced: self._objects[guid] = type(self.type.__name__, (), {})() setattr(self._objects[guid], 'guid', guid) else: self._objects[guid] = self.type(guid) return self._objects[guid] def index(self, value): """ Returns the index of a given value (hybrid) """ return self._guids.index(value.guid) def count(self, value): """ Returns the count for a given value (hybrid) """ return self._guids.count(value.guid) def sort(self, **kwargs): """ Sorts the list with a given set of parameters. However, the sorting will be applied to the guids only """ if len(kwargs) == 0: self._guids.sort() else: self.load() objects = [self._objects[guid] for guid in self._guids] objects.sort(**kwargs) self._guids = [obj.guid for obj in objects] def reverse(self): """ Reverses the list """ self._guids.reverse() def load(self): """ Loads all objects (to use on e.g. 
sorting) """ for guid in self._guids: if guid not in self._objects: self._get_object(guid) def iterloaded(self): """ Allows to iterate only over the objects that are already loaded preventing unnessesary object loading """ for guid in self._guids: if guid in self._objects: yield self._objects[guid] def itersafe(self): """ Allows to iterate over all objects, but not caring about objects that doesn't exist """ for guid in self._guids: try: yield self._get_object(guid) except ObjectNotFoundException: pass def __add__(self, other): if not isinstance(other, DataObjectList): raise TypeError('Both operands should be of type DataObjectList') new_dol = DataObjectList(self._query_result, self.type) new_dol.merge(other._query_result) return new_dol def __radd__(self, other): # This will typically called when "other" is no DataObjectList. if other is None: return self elif isinstance(other, list) and other == []: return self elif not isinstance(other, DataObjectList): raise TypeError('Both operands should be of type DataObjectList') new_dol = DataObjectList(self._query_result, self.type) new_dol.merge(other._query_result) return new_dol def __iter__(self): """ Yields object instances """ for guid in self._guids: yield self._get_object(guid) def __len__(self): """ Returns the length of the list """ return len(self._guids) def __getitem__(self, item): """ Provide indexer behavior to the list """ if isinstance(item, slice): guids = self._guids[item.start:item.stop] result = [qr_item for qr_item in self._query_result if qr_item in guids] data_object_list = DataObjectList(result, self.type) # Overwrite some internal fields, making sure we keep already fetched objects # and we preseve existing sorting data_object_list._objects = dict(item for item in self._objects.iteritems() if item[0] in guids) data_object_list._guids = guids return data_object_list else: guid = self._guids[item] return self._get_object(guid)
apache-2.0
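A small sketch of the lazy loading and merge semantics, assuming the module is importable under the path in this record. DataObjectList only requires that cls(guid) constructs an object carrying a guid attribute, so a trivial stand-in class suffices:

from ovs.dal.dataobjectlist import DataObjectList

class Dummy(object):
    # Minimal hybrid stand-in for the sketch.
    def __init__(self, guid):
        self.guid = guid

dol = DataObjectList(['a', 'b', 'c'], Dummy)
assert len(dol) == 3
dol.merge(['b', 'c', 'd'])   # 'a' drops out, 'd' is appended; order is kept
assert [o.guid for o in dol] == ['b', 'c', 'd']
assert dol[0].guid == 'b'    # objects are instantiated lazily, then cached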
blaskovic/fedora-gooey-karma
src/sendkarma.py
1
3089
# -*- coding: utf-8 -*- # Fedora Gooey Karma prototype # based on the https://github.com/mkrizek/fedora-gooey-karma # # Copyright (C) 2013 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Author: Branislav Blaskovic <branislav@blaskovic.sk> import threading from fedora.client import AuthError from fedora.client import ServerError from fedora.client.bodhi import BodhiClient from idlequeue import * class SendKarma(threading.Thread): def __init__(self, package, username, password, message, karma, main_thread, parent=None): #super(SendKarma, self).__init__(parent) threading.Thread.__init__(self) self.package = package self.username = username self.password = password self.message = message self.karma = karma self.main_thread = main_thread def run(self): bc = BodhiClient() bc.username = self.username bc.password = self.password # Try send it 3 times if ServerError occours for retry in range(3): try: # Send comment to bodhi bc.comment(self.package["title"], self.message, karma=self.karma) message = "Comment submitted successfully." message2 = ("Reloading bodhi update for " + str(self.package['itemlist_name']) + "...") main_thread_call(self.main_thread.add_status_item, message) main_thread_call(self.main_thread.add_status_item, message2) # Reload bodhi update main_thread_call(self.main_thread.bodhi_workers_queue.put, ['package_update', self.package['dnf_package']]) # Clean up after sending main_thread_call(self.main_thread.sending_done, self.username, self.password) return except AuthError: message = "Invalid username or password. Please try again." main_thread_call(self.main_thread.add_status_item, message) break except ServerError, e: message = "Server error %s" % str(e) main_thread_call(self.main_thread.add_status_item, message) # In case of errors, return button and others back main_thread_call(self.main_thread.sending_done, self.username, self.password) # vim: set expandtab ts=4 sts=4 sw=4 :
gpl-3.0
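SendKarma.run() retries transient ServerErrors up to three times but aborts immediately on AuthError. A library-free sketch of that retry shape, with stub exception classes standing in for the fedora.client ones:

class AuthError(Exception):
    pass

class ServerError(Exception):
    pass

def send_with_retry(send, retries=3):
    # Transient server errors are retried; auth failures abort at once.
    for attempt in range(retries):
        try:
            send()
            return True
        except AuthError:
            print('Invalid username or password.')
            break
        except ServerError as e:
            print('Server error %s (attempt %d)' % (e, attempt + 1))
    return False

calls = {'n': 0}
def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise ServerError('busy')

assert send_with_retry(flaky)   # succeeds on the third attempt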
Hasimir/pyjs
examples/misc/swfupload/server.py
6
3807
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler from SocketServer import ThreadingMixIn, ForkingMixIn import sys import os import cgi import mimetypes import shutil import urlparse import posixpath import urllib class Server: def __init__(self): server_address = ('', 8080) httpd = TestHTTPServer(server_address, TestRequestHandler) httpd.serve_forever() class TestHTTPServer(ThreadingMixIn, HTTPServer): pass class TestRequestHandler(BaseHTTPRequestHandler): def __init__(self, request, client_address, server): BaseHTTPRequestHandler.__init__(self, request, client_address, server) self.protocol_version = 'HTTP/1.1' def do_GET(self): self.handle_data() def do_POST(self): self.form = cgi.FieldStorage( fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD':'POST', 'CONTENT_TYPE':self.headers['Content-Type'], }, keep_blank_values=True, strict_parsing=False) self.handle_data() def handle_data(self): if self.path == '/': p = '/html/swfu.html' elif self.path.endswith('upload.html'): self.handleUpload() return else: p = self.path path = self.translate_path(p) if not os.path.exists(path): p = '/html'+p path = self.translate_path(p) ctype = self.guess_type(path) try: f = open(path) except IOError: print 'File not found %s' % path self.send_error(404, 'File not found') return self.send_response(200) self.send_header('Content-type', ctype) self.send_header('Last-Modified', self.date_time_string()) self.end_headers() self.copyfile(f, self.wfile) f.close() def handleUpload(self): self.send_response(200) self.end_headers() fileitem = self.form['Filedata'] filename = os.path.basename(fileitem.filename) filepath = os.path.join(os.getcwd(), 'upload', filename) f = open(filepath, 'wb', 10000) def fbuffer(f, chunk_size=10000): while True: chunk = f.read(chunk_size) if not chunk: break yield chunk for chunk in fbuffer(fileitem.file): f.write(chunk) f.close() self.wfile.write('Upload done') return def translate_path(self, path): path = path.decode('utf-8') path = urlparse.urlparse(path)[2] path = posixpath.normpath(urllib.unquote(path)) words = path.split('/') words = filter(None, words) path = os.getcwd() for word in words: drive, word = os.path.splitdrive(word) head, word = os.path.split(word) if word in (os.curdir, os.pardir): continue path = os.path.join(path, word) return path def copyfile(self, source, outputfile): shutil.copyfileobj(source, outputfile) def guess_type(self, path): base, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() if ext in self.extensions_map: return self.extensions_map[ext] else: return self.extensions_map[''] if not mimetypes.inited: mimetypes.init() # try to read system mime.types extensions_map = mimetypes.types_map.copy() extensions_map.update({ '': 'application/octet-stream', # Default }) if __name__ == '__main__': Server()
apache-2.0
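handleUpload() above streams the uploaded file through the fbuffer generator instead of reading it into memory whole. The same idea, exercised stand-alone against an in-memory buffer:

import io

def fbuffer(f, chunk_size=10000):
    # Yield fixed-size chunks until the file is exhausted.
    while True:
        chunk = f.read(chunk_size)
        if not chunk:
            break
        yield chunk

src = io.BytesIO(b'x' * 25000)
sizes = [len(c) for c in fbuffer(src)]
assert sizes == [10000, 10000, 5000]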
Prashant-Surya/addons-server
src/olympia/stats/management/commands/download_counts_from_file.py
1
6821
import codecs from datetime import datetime, timedelta from optparse import make_option from os import path, unlink from django.conf import settings from django.core.management.base import BaseCommand, CommandError import commonware.log from olympia.addons.models import Addon from olympia.files.models import File from olympia.stats.models import update_inc, DownloadCount from olympia.zadmin.models import DownloadSource from . import get_date_from_file, save_stats_to_file log = commonware.log.getLogger('adi.downloadcountsfromfile') def is_valid_source(src, fulls, prefixes): """Return True if the source is valid. A source is valid if it is in the list of valid full sources or prefixed by a prefix in the list of valid prefix sources. """ return src in fulls or any(p in src for p in prefixes) class Command(BaseCommand): """Update download count metrics from a file in the database. Usage: ./manage.py download_counts_from_file <folder> --date=YYYY-MM-DD If no date is specified, the default is the day before. If not folder is specified, the default is `hive_results/YYYY-MM-DD/`. This folder will be located in `<settings.NETAPP_STORAGE>/tmp`. We get a row for each "addon download" request, in this format: <count> <file id or add-on id or add-on slug> <click source> We insert one DownloadCount entry per addon per day, and each row holds the json-ified dict of click sources/counters. Eg, for the above request: date: <the date of the day the queries were made> count: <the number of requests for this addon, for this day> addon: <the addon that has this id> src: {'dp-btn-primary': 1} """ help = __doc__ option_list = BaseCommand.option_list + ( make_option('--date', action='store', type='string', dest='date', help='Date in the YYYY-MM-DD format.'), make_option('--separator', action='store', type='string', default='\t', dest='separator', help='Field separator in file.'), ) def handle(self, *args, **options): start = datetime.now() # Measure the time it takes to run the script. day = options['date'] if not day: day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d') folder = args[0] if args else 'hive_results' folder = path.join(settings.TMP_PATH, folder, day) sep = options['separator'] filepath = path.join(folder, 'download_counts.hive') # Make sure we're not trying to update with mismatched data. if get_date_from_file(filepath, sep) != day: raise CommandError('%s file contains data for another day' % filepath) # First, make sure we don't have any existing counts for the same day, # or it would just increment again the same data. DownloadCount.objects.filter(date=day).delete() # Memoize the files to addon relations and the DownloadCounts. download_counts = {} # Perf: preload all the files and slugs once and for all. # This builds two dicts: # - One where each key (the file_id we get from the hive query) has # the addon_id as value. # - One where each key (the add-on slug) has the add-on_id as value. files_to_addon = dict(File.objects.values_list('id', 'version__addon_id')) slugs_to_addon = dict(Addon.objects.values_list('slug', 'id')) # Only accept valid sources, which are listed in the DownloadSource # model. The source must either be exactly one of the "full" valid # sources, or prefixed by one of the "prefix" valid sources. 
fulls = set(DownloadSource.objects.filter(type='full').values_list( 'name', flat=True)) prefixes = DownloadSource.objects.filter(type='prefix').values_list( 'name', flat=True) with codecs.open(filepath, encoding='utf8') as count_file: for index, line in enumerate(count_file): if index and (index % 1000000) == 0: log.info('Processed %s lines' % index) splitted = line[:-1].split(sep) if len(splitted) != 4: log.debug('Badly formatted row: %s' % line) continue day, counter, id_or_slug, src = splitted try: # Clean up data. id_or_slug = id_or_slug.strip() counter = int(counter) except ValueError: # Ignore completely invalid data. continue if id_or_slug.strip().isdigit(): # If it's a digit, then it should be a file id. try: id_or_slug = int(id_or_slug) except ValueError: continue # Does this file exist? if id_or_slug in files_to_addon: addon_id = files_to_addon[id_or_slug] # Maybe it's an add-on ? elif id_or_slug in files_to_addon.values(): addon_id = id_or_slug else: # It's an integer we don't recognize, ignore the row. continue else: # It's probably a slug. if id_or_slug in slugs_to_addon: addon_id = slugs_to_addon[id_or_slug] else: # We've exhausted all possibilities, ignore this row. continue if not is_valid_source(src, fulls=fulls, prefixes=prefixes): continue # Memoize the DownloadCount. if addon_id in download_counts: dc = download_counts[addon_id] else: dc = DownloadCount(date=day, addon_id=addon_id, count=0) download_counts[addon_id] = dc # We can now fill the DownloadCount object. dc.count += counter dc.sources = update_inc(dc.sources, src, counter) # Create in bulk: this is much faster. DownloadCount.objects.bulk_create(download_counts.values(), 100) for download_count in download_counts.values(): save_stats_to_file(download_count) log.info('Processed a total of %s lines' % (index + 1)) log.debug('Total processing time: %s' % (datetime.now() - start)) # Clean up file. log.debug('Deleting {path}'.format(path=filepath)) unlink(filepath)
bsd-3-clause
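Note that is_valid_source() checks prefixes with substring containment ('p in src') rather than startswith, so a "prefix" matching anywhere in the string passes. The function exercised stand-alone with made-up source names:

def is_valid_source(src, fulls, prefixes):
    # Copied from the command above: exact match against the full
    # sources, containment match against the prefix sources.
    return src in fulls or any(p in src for p in prefixes)

fulls = {'dp-btn-primary'}
prefixes = ['search-']
assert is_valid_source('dp-btn-primary', fulls, prefixes)
assert is_valid_source('search-suggestions', fulls, prefixes)
assert is_valid_source('my-search-box', fulls, prefixes)   # substring, not prefix
assert not is_valid_source('unknown-source', fulls, prefixes)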
DevHugo/zds-site
zds/utils/templatetags/captureas.py
3
1600
# -*- coding: utf-8 -*- from django import template from django.utils.safestring import mark_safe register = template.Library() """ Define a tag allowing to capture template content as a variable. """ @register.tag(name='captureas') def do_captureas(parser, token): """ Define a tag allowing to capture template content as a variable. :param parser: The django template parser :param token: tag token (tag_name + variable_name) :return: Template node. """ try: _, variable_name = token.split_contents() except ValueError: raise template.TemplateSyntaxError("'captureas' node requires a variable name.") nodelist = parser.parse(('endcaptureas',)) parser.delete_first_token() return CaptureasNode(nodelist, variable_name) class CaptureasNode(template.Node): """ Capture end render node content to a variable name. """ def __init__(self, nodelist, variable_name): """ Create a template node which render `nodelist` to `variable_name`. :param nodelist: The node list to capture. :param variable_name: The variable name which will gain the rendered content. """ self.__node_list = nodelist self.__variable_name = variable_name def render(self, context): """ Render the node list to the variable name. :param context: Current context. :return: Empty string :rtype: str """ output = self.__node_list.render(context) context[self.__variable_name] = mark_safe(output.strip()) return ''
gpl-3.0
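A library-free sketch of what CaptureasNode.render() does: render the captured body against the context, strip it, store it under the chosen variable name, and emit nothing. FakeNodeList below is a stand-in for Django's compiled node list:

class FakeNodeList(object):
    def __init__(self, template_text):
        self.template_text = template_text
    def render(self, context):
        return self.template_text.format(**context)

def captureas(nodelist, variable_name, context):
    # Mirrors CaptureasNode.render(): capture into the context, output nothing.
    context[variable_name] = nodelist.render(context).strip()
    return ''

context = {'name': 'Ada'}
body = FakeNodeList('  Hello, {name}!  ')
assert captureas(body, 'greeting', context) == ''
assert context['greeting'] == 'Hello, Ada!'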
samdoran/ansible
lib/ansible/modules/cloud/openstack/os_keystone_service.py
27
6477
#!/usr/bin/python # Copyright 2016 Sam Yaple # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: os_keystone_service short_description: Manage OpenStack Identity services extends_documentation_fragment: openstack author: "Sam Yaple (@SamYaple)" version_added: "2.2" description: - Create, update, or delete OpenStack Identity service. If a service with the supplied name already exists, it will be updated with the new description and enabled attributes. options: name: description: - Name of the service required: true description: description: - Description of the service required: false default: None enabled: description: - Is the service enabled required: false default: True service_type: description: - The type of service required: true state: description: - Should the resource be present or absent. choices: [present, absent] default: present availability_zone: description: - Ignored. Present for backwards compatibility required: false requirements: - "python >= 2.6" - "shade" ''' EXAMPLES = ''' # Create a service for glance - os_keystone_service: cloud: mycloud state: present name: glance service_type: image description: OpenStack Image Service # Delete a service - os_keystone_service: cloud: mycloud state: absent name: glance service_type: image ''' RETURN = ''' service: description: Dictionary describing the service. returned: On success when I(state) is 'present' type: complex contains: id: description: Service ID. type: string sample: "3292f020780b4d5baf27ff7e1d224c44" name: description: Service name. type: string sample: "glance" service_type: description: Service type. type: string sample: "image" description: description: Service description. type: string sample: "OpenStack Image Service" enabled: description: Service status. type: boolean sample: True id: description: The service ID. 
returned: On success when I(state) is 'present' type: string sample: "3292f020780b4d5baf27ff7e1d224c44" ''' try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False from distutils.version import StrictVersion def _needs_update(module, service): if service.enabled != module.params['enabled']: return True if service.description is not None and \ service.description != module.params['description']: return True return False def _system_state_change(module, service): state = module.params['state'] if state == 'absent' and service: return True if state == 'present': if service is None: return True return _needs_update(module, service) return False def main(): argument_spec = openstack_full_argument_spec( description=dict(default=None), enabled=dict(default=True, type='bool'), name=dict(required=True), service_type=dict(required=True), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') if StrictVersion(shade.__version__) < StrictVersion('1.6.0'): module.fail_json(msg="To utilize this module, the installed version of" "the shade library MUST be >=1.6.0") description = module.params['description'] enabled = module.params['enabled'] name = module.params['name'] state = module.params['state'] service_type = module.params['service_type'] try: cloud = shade.operator_cloud(**module.params) services = cloud.search_services(name_or_id=name, filters=dict(type=service_type)) if len(services) > 1: module.fail_json(msg='Service name %s and type %s are not unique' % (name, service_type)) elif len(services) == 1: service = services[0] else: service = None if module.check_mode: module.exit_json(changed=_system_state_change(module, service)) if state == 'present': if service is None: service = cloud.create_service(name=name, description=description, type=service_type, enabled=True) changed = True else: if _needs_update(module, service): service = cloud.update_service( service.id, name=name, type=service_type, enabled=enabled, description=description) changed = True else: changed = False module.exit_json(changed=changed, service=service, id=service.id) elif state == 'absent': if service is None: changed=False else: cloud.delete_service(service.id) changed=True module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=str(e)) from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
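The module's idempotency hinges on _needs_update(); here it is exercised stand-alone with a stub service object. Note that a service whose description is None never triggers a description-only update:

class Service(object):
    def __init__(self, enabled, description):
        self.enabled = enabled
        self.description = description

def needs_update(service, params):
    # Same logic as _needs_update(), minus the Ansible module plumbing.
    if service.enabled != params['enabled']:
        return True
    if service.description is not None and \
            service.description != params['description']:
        return True
    return False

params = {'enabled': True, 'description': 'OpenStack Image Service'}
assert not needs_update(Service(True, 'OpenStack Image Service'), params)
assert needs_update(Service(False, 'OpenStack Image Service'), params)
assert needs_update(Service(True, 'old text'), params)
assert not needs_update(Service(True, None), params)   # None description is skipped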
twilio/twilio-python
twilio/rest/preview/sync/service/sync_list/__init__.py
1
16233
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import deserialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page from twilio.rest.preview.sync.service.sync_list.sync_list_item import SyncListItemList from twilio.rest.preview.sync.service.sync_list.sync_list_permission import SyncListPermissionList class SyncListList(ListResource): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, service_sid): """ Initialize the SyncListList :param Version version: Version that contains the resource :param service_sid: The service_sid :returns: twilio.rest.preview.sync.service.sync_list.SyncListList :rtype: twilio.rest.preview.sync.service.sync_list.SyncListList """ super(SyncListList, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, } self._uri = '/Services/{service_sid}/Lists'.format(**self._solution) def create(self, unique_name=values.unset): """ Create the SyncListInstance :param unicode unique_name: The unique_name :returns: The created SyncListInstance :rtype: twilio.rest.preview.sync.service.sync_list.SyncListInstance """ data = values.of({'UniqueName': unique_name, }) payload = self._version.create(method='POST', uri=self._uri, data=data, ) return SyncListInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def stream(self, limit=None, page_size=None): """ Streams SyncListInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.sync.service.sync_list.SyncListInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(page_size=limits['page_size'], ) return self._version.stream(page, limits['limit']) def list(self, limit=None, page_size=None): """ Lists SyncListInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. 
min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.sync.service.sync_list.SyncListInstance] """ return list(self.stream(limit=limit, page_size=page_size, )) def page(self, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of SyncListInstance records from the API. Request is executed immediately :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of SyncListInstance :rtype: twilio.rest.preview.sync.service.sync_list.SyncListPage """ data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page(method='GET', uri=self._uri, params=data, ) return SyncListPage(self._version, response, self._solution) def get_page(self, target_url): """ Retrieve a specific page of SyncListInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of SyncListInstance :rtype: twilio.rest.preview.sync.service.sync_list.SyncListPage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return SyncListPage(self._version, response, self._solution) def get(self, sid): """ Constructs a SyncListContext :param sid: The sid :returns: twilio.rest.preview.sync.service.sync_list.SyncListContext :rtype: twilio.rest.preview.sync.service.sync_list.SyncListContext """ return SyncListContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __call__(self, sid): """ Constructs a SyncListContext :param sid: The sid :returns: twilio.rest.preview.sync.service.sync_list.SyncListContext :rtype: twilio.rest.preview.sync.service.sync_list.SyncListContext """ return SyncListContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Preview.Sync.SyncListList>' class SyncListPage(Page): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, response, solution): """ Initialize the SyncListPage :param Version version: Version that contains the resource :param Response response: Response from the API :param service_sid: The service_sid :returns: twilio.rest.preview.sync.service.sync_list.SyncListPage :rtype: twilio.rest.preview.sync.service.sync_list.SyncListPage """ super(SyncListPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of SyncListInstance :param dict payload: Payload response from the API :returns: twilio.rest.preview.sync.service.sync_list.SyncListInstance :rtype: twilio.rest.preview.sync.service.sync_list.SyncListInstance """ return SyncListInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Preview.Sync.SyncListPage>' class SyncListContext(InstanceContext): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. 
If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, service_sid, sid): """ Initialize the SyncListContext :param Version version: Version that contains the resource :param service_sid: The service_sid :param sid: The sid :returns: twilio.rest.preview.sync.service.sync_list.SyncListContext :rtype: twilio.rest.preview.sync.service.sync_list.SyncListContext """ super(SyncListContext, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, 'sid': sid, } self._uri = '/Services/{service_sid}/Lists/{sid}'.format(**self._solution) # Dependents self._sync_list_items = None self._sync_list_permissions = None def fetch(self): """ Fetch the SyncListInstance :returns: The fetched SyncListInstance :rtype: twilio.rest.preview.sync.service.sync_list.SyncListInstance """ payload = self._version.fetch(method='GET', uri=self._uri, ) return SyncListInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) def delete(self): """ Deletes the SyncListInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._version.delete(method='DELETE', uri=self._uri, ) @property def sync_list_items(self): """ Access the sync_list_items :returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemList :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemList """ if self._sync_list_items is None: self._sync_list_items = SyncListItemList( self._version, service_sid=self._solution['service_sid'], list_sid=self._solution['sid'], ) return self._sync_list_items @property def sync_list_permissions(self): """ Access the sync_list_permissions :returns: twilio.rest.preview.sync.service.sync_list.sync_list_permission.SyncListPermissionList :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_permission.SyncListPermissionList """ if self._sync_list_permissions is None: self._sync_list_permissions = SyncListPermissionList( self._version, service_sid=self._solution['service_sid'], list_sid=self._solution['sid'], ) return self._sync_list_permissions def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Preview.Sync.SyncListContext {}>'.format(context) class SyncListInstance(InstanceResource): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. 
""" def __init__(self, version, payload, service_sid, sid=None): """ Initialize the SyncListInstance :returns: twilio.rest.preview.sync.service.sync_list.SyncListInstance :rtype: twilio.rest.preview.sync.service.sync_list.SyncListInstance """ super(SyncListInstance, self).__init__(version) # Marshaled Properties self._properties = { 'sid': payload.get('sid'), 'unique_name': payload.get('unique_name'), 'account_sid': payload.get('account_sid'), 'service_sid': payload.get('service_sid'), 'url': payload.get('url'), 'links': payload.get('links'), 'revision': payload.get('revision'), 'date_created': deserialize.iso8601_datetime(payload.get('date_created')), 'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')), 'created_by': payload.get('created_by'), } # Context self._context = None self._solution = {'service_sid': service_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncListContext for this SyncListInstance :rtype: twilio.rest.preview.sync.service.sync_list.SyncListContext """ if self._context is None: self._context = SyncListContext( self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) return self._context @property def sid(self): """ :returns: The sid :rtype: unicode """ return self._properties['sid'] @property def unique_name(self): """ :returns: The unique_name :rtype: unicode """ return self._properties['unique_name'] @property def account_sid(self): """ :returns: The account_sid :rtype: unicode """ return self._properties['account_sid'] @property def service_sid(self): """ :returns: The service_sid :rtype: unicode """ return self._properties['service_sid'] @property def url(self): """ :returns: The url :rtype: unicode """ return self._properties['url'] @property def links(self): """ :returns: The links :rtype: unicode """ return self._properties['links'] @property def revision(self): """ :returns: The revision :rtype: unicode """ return self._properties['revision'] @property def date_created(self): """ :returns: The date_created :rtype: datetime """ return self._properties['date_created'] @property def date_updated(self): """ :returns: The date_updated :rtype: datetime """ return self._properties['date_updated'] @property def created_by(self): """ :returns: The created_by :rtype: unicode """ return self._properties['created_by'] def fetch(self): """ Fetch the SyncListInstance :returns: The fetched SyncListInstance :rtype: twilio.rest.preview.sync.service.sync_list.SyncListInstance """ return self._proxy.fetch() def delete(self): """ Deletes the SyncListInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._proxy.delete() @property def sync_list_items(self): """ Access the sync_list_items :returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemList :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemList """ return self._proxy.sync_list_items @property def sync_list_permissions(self): """ Access the sync_list_permissions :returns: twilio.rest.preview.sync.service.sync_list.sync_list_permission.SyncListPermissionList :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_permission.SyncListPermissionList """ return self._proxy.sync_list_permissions def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str 
""" context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Preview.Sync.SyncListInstance {}>'.format(context)
mit
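The generated SyncListList/SyncListContext/SyncListInstance triple above follows the usual twilio-python pattern: the list resource creates and pages, the context addresses one record, and the instance proxies actions back to a lazily built context. A minimal usage sketch, assuming placeholder account and Sync service SIDs and auth token (the Preview API itself is subject to change, as the class docstrings warn):

# Usage sketch for the generated SyncList resource above; the SIDs and
# auth token are placeholders, not real credentials.
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
service = client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")

# create() POSTs to /Services/{service_sid}/Lists
todo = service.sync_lists.create(unique_name="todo")

# stream() lazily pages through the collection, never loading more
# than `limit` records into memory
for sync_list in service.sync_lists.stream(limit=20):
    print("%s %s" % (sync_list.sid, sync_list.unique_name))

# get()/__call__ build a context without a network round trip;
# fetch() then performs the GET
fetched = service.sync_lists(todo.sid).fetch()
print(fetched.revision)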
hinesmr/monitor-core
gmetad-python/Gmetad/gmetad_config.py
1
13583
#/******************************************************************************* #* Portions Copyright (C) 2008 Novell, Inc. All rights reserved. #* #* Redistribution and use in source and binary forms, with or without #* modification, are permitted provided that the following conditions are met: #* #* - Redistributions of source code must retain the above copyright notice, #* this list of conditions and the following disclaimer. #* #* - Redistributions in binary form must reproduce the above copyright notice, #* this list of conditions and the following disclaimer in the documentation #* and/or other materials provided with the distribution. #* #* - Neither the name of Novell, Inc. nor the names of its #* contributors may be used to endorse or promote products derived from this #* software without specific prior written permission. #* #* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' #* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS #* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #* POSSIBILITY OF SUCH DAMAGE. #* #* Authors: Matt Ryan (mrayn novell.com) #* Brad Nicholes (bnicholes novell.com) #******************************************************************************/ from socket import getfqdn import os import sys import optparse class GmetadDataSource: def __init__(self, name, hosts=['localhost'], interval=15): self.name = name self.hosts = hosts self.interval = interval self.time_to_next_read = 0 class GmetadConfig: _shared_state = {} _isInitialized = False DEBUG_LEVEL = 'debug_level' LOGFILE = 'logfile' PIDFILE = 'pidfile' DATA_SOURCE = 'data_source' RRAS = 'RRAs' SCALABLE = 'scalable' GRIDNAME = 'gridname' AUTHORITY = 'authority' TRUSTED_HOSTS = 'trusted_hosts' ALL_TRUSTED = 'all_trusted' SETUID = 'setuid' SETUID_USERNAME = 'setuid_username' XML_PORT = 'xml_port' INTERACTIVE_PORT = 'interactive_port' SERVER_THREADS = 'server_threads' VERSION = '@GANGLIA_VERSION@' PLUGINS_DIR = 'plugins_dir' SYSLOG_ADDRESS = 'syslog_address' SYSLOG_PORT = 'syslog_port' SYSLOG_FACILITY = 'syslog_facility' _cfgDefaults = { DEBUG_LEVEL : 2, LOGFILE : None, PIDFILE : None, DATA_SOURCE : [], SCALABLE : True, GRIDNAME : 'unspecified', AUTHORITY : 'http://%s/ganglia/' % getfqdn(), TRUSTED_HOSTS : [], ALL_TRUSTED : False, SETUID : True, SETUID_USERNAME : 'nobody', XML_PORT : 8651, INTERACTIVE_PORT : 8652, SERVER_THREADS : 4, PLUGINS_DIR : '@libdir@/ganglia/python_modules/gmetad/plugins', SYSLOG_ADDRESS: "127.0.0.1", SYSLOG_PORT: 514, SYSLOG_FACILITY: "23" } def __init__(self, cfgpath=None): self.__dict__ = GmetadConfig._shared_state if cfgpath is not None: self.path = cfgpath self.resetToDefaults() self.sections = {} self.kwHandlers = { GmetadConfig.DEBUG_LEVEL : self.parseDbgLevel, GmetadConfig.LOGFILE : self.parseLogfile, GmetadConfig.DATA_SOURCE : self.parseDataSource, GmetadConfig.SCALABLE : self.parseScalable, GmetadConfig.GRIDNAME : self.parseGridname, GmetadConfig.AUTHORITY : self.parseAuthority, 
GmetadConfig.TRUSTED_HOSTS : self.parseTrustedHosts, GmetadConfig.ALL_TRUSTED : self.parseAllTrusted, GmetadConfig.SETUID : self.parseSetuid, GmetadConfig.SETUID_USERNAME : self.parseSetuidUsername, GmetadConfig.XML_PORT : self.parseXmlPort, GmetadConfig.INTERACTIVE_PORT : self.parseInteractivePort, GmetadConfig.SERVER_THREADS : self.parseServerThreads, GmetadConfig.PLUGINS_DIR : self.parsePluginsDir } self.updateConfig() GmetadConfig._isInitialized = True def GmetadReadline(self, f): prev_line = None kw = None args = None while 1: line = f.readline() if not line: break if line.startswith('#'): continue if 0 >= len(line.strip()): continue if line.strip().endswith('\\'): if prev_line is None: prev_line = line.strip().strip('\\') else: prev_line += line.strip().strip('\\') continue elif prev_line is not None: prev_line += line.strip() line = prev_line prev_line = None try: kw, args = line.strip().split(None,1) except ValueError: kw = line pass break return kw, args def updateConfig(self): f = open(self.path, 'r') prev_line = None while 1: kw, args = self.GmetadReadline(f) if kw is None: break if args is None: continue if self.kwHandlers.has_key(kw): self.kwHandlers[kw](args) elif (args.strip().startswith('{')): self._setSection(kw,f) def __setitem__(self, k, v): self.cfg[k] = v def __getitem__(self, k): return self.cfg[k] def getSection(self, id): ret = None secID = id.lower() try: ret = self.sections[secID] except KeyError: pass return ret def _setSection(self, id, f): kw = '' secID = id.lower() self.sections[secID] = [] while kw.strip() != '}': kw,args = self.GmetadReadline(f) if kw is None: break if args is None: continue self.sections[secID].append( [kw,args]) def resetToDefaults(self): self.cfg = GmetadConfig._cfgDefaults def parseDbgLevel(self, level): v = level.strip() if v.isdigit(): self.cfg[GmetadConfig.DEBUG_LEVEL] = v def parseLogfile(self, logfile): self.cfg[GmetadConfig.LOGFILE] = logfile.strip().strip('"') def parseDataSource(self, args): a = args.split('"') name = a[1] a = a[2].strip().split() if a[0].isdigit(): interval = int(a[0]) hosts = a[1:] else: interval = 15 hosts = a[0:] self.cfg[GmetadConfig.DATA_SOURCE].append(GmetadDataSource(name, hosts, interval)) def parseScalable(self, arg): v = arg.strip().lower() if v == 'off' or v == 'false' or v == 'no': self.cfg[GmetadConfig.SCALABLE] = False else: self.cfg[GmetadConfig.SCALABLE] = True def parseGridname(self, arg): self.cfg[GmetadConfig.GRIDNAME] = arg.strip().strip('"') def parseAuthority(self, arg): self.cfg[GmetadConfig.AUTHORITY] = arg.strip().strip('"') def parseTrustedHosts(self, args): if len(args): self.cfg[GmetadConfig.TRUSTED_HOSTS] = args def parseAllTrusted(self, arg): v = arg.strip().lower() if v == 'on' or v == 'true' or v == 'yes': self.cfg[GmetadConfig.ALL_TRUSTED] = True else: self.cfg[GmetadConfig.ALL_TRUSTED] = False def parseSetuid(self, arg): v = arg.strip().lower() if v == 'off' or v == 'false' or v == 'no': self.cfg[GmetadConfig.SETUID] = False else: self.cfg[GmetadConfig.SETUID] = True def parseSetuidUsername(self, arg): self.cfg[GmetadConfig.SETUID_USERNAME] = arg.strip().strip('"') def parseXmlPort(self, arg): v = arg.strip() if v.isdigit(): self.cfg[GmetadConfig.XML_PORT] = int(v) def parseInteractivePort(self, arg): v = arg.strip() if v.isdigit(): self.cfg[GmetadConfig.INTERACTIVE_PORT] = int(v) def parseServerThreads(self, arg): v = arg.strip() if v.isdigit(): self.cfg[GmetadConfig.SERVER_THREADS] = int(v) def parsePluginsDir(self, arg): v = arg.strip().strip('"') if os.path.isdir(v): 
self.cfg[GmetadConfig.PLUGINS_DIR] = v def parseSection(self, name, fhandle): self._setSection(name, fhandle) def getConfig(args=sys.argv): if GmetadConfig._isInitialized: return GmetadConfig() dbgLevelDefault = GmetadConfig._cfgDefaults[GmetadConfig.DEBUG_LEVEL] iPortDefault = GmetadConfig._cfgDefaults[GmetadConfig.INTERACTIVE_PORT] xPortDefault = GmetadConfig._cfgDefaults[GmetadConfig.XML_PORT] syslognDefault = GmetadConfig._cfgDefaults[GmetadConfig.SYSLOG_ADDRESS] syslogpDefault = GmetadConfig._cfgDefaults[GmetadConfig.SYSLOG_PORT] syslogfDefault = GmetadConfig._cfgDefaults[GmetadConfig.SYSLOG_FACILITY] parser = optparse.OptionParser(version = GmetadConfig.VERSION) parser.add_option('-d', '--debug', action='store', help='Debug level. If five (5) or greater, daemon will stay in foreground. Values are:\n\ 0 - FATAL\n\ 1 - CRITICAL\n\ 2 - ERROR (default)\n\ 3 - WARNING\n\ 4 - INFO\n\ 5 - DEBUG', default='%d' % dbgLevelDefault) parser.add_option('-p', '--pid_file', action='store', help='Write process-id to file', default=None) parser.add_option('-c', '--conf', action='store', help='Location of gmetad configuration file (default=\'/etc/ganglia/gmetad-python.conf\')', default='/etc/ganglia/gmetad-python.conf') parser.add_option('-l', '--logfile', action='store', help='Log messages to this path in addition to syslog; overrides configuration', default=None) parser.add_option('-i', '--interactive_port', action='store', help='Interactive port to listen on (default=%d)' % iPortDefault, default='%d' % iPortDefault) parser.add_option('-x', '--xml_port', action='store', help='XML port to listen on (default=%d)' % xPortDefault, default='%d' % xPortDefault) parser.add_option("--syslogn", dest = "syslogn", metavar ="LNAME", \ default = "127.0.0.1" , \ help = "Set the syslog's ip/hostname" ) parser.add_option("--syslogp", dest = "syslogp", metavar ="LPORT", \ default = 514, help = "Set the syslog's port" ) parser.add_option("--syslogf", dest = "syslogf", metavar ="LFACILITY", \ default = 23, help = "Set the syslog's facility" ) parser.add_option("--cn", dest = "cn", metavar = "CN", \ default = None, \ help ="Set the Cloud name") options, arguments = parser.parse_args() if not options.debug.isdigit(): print 'Invalid numeric value for --debug: %s' % options.debug parser.print_help() sys.exit() elif not options.interactive_port.isdigit(): print 'Invalid numeric value for --interactive_port: %s' % options.interactive_port sys.exit() elif not options.xml_port.isdigit(): print 'Invalid numeric value for --xml_port: %s' % options.xml_port sys.exit() elif not os.path.exists(options.conf): print 'No such configuration file: %s' % options.conf parser.print_help() sys.exit() cfg = GmetadConfig(options.conf) # Update configuration if non-default values were provided.
if int(dbgLevelDefault) != int(options.debug): cfg[GmetadConfig.DEBUG_LEVEL] = options.debug if int(iPortDefault) != int(options.interactive_port): cfg[GmetadConfig.INTERACTIVE_PORT] = options.interactive_port if int(xPortDefault) != int(options.xml_port): cfg[GmetadConfig.XML_PORT] = options.xml_port if options.logfile is not None: cfg[GmetadConfig.LOGFILE] = options.logfile if options.pid_file is not None: cfg[GmetadConfig.PIDFILE] = options.pid_file if syslognDefault != options.syslogn : cfg[GmetadConfig.SYSLOG_ADDRESS] = options.syslogn if syslogpDefault != options.syslogp : cfg[GmetadConfig.SYSLOG_PORT] = options.syslogp if syslogfDefault != options.syslogf : cfg[GmetadConfig.SYSLOG_FACILITY] = options.syslogf return cfg
bsd-3-clause
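For reference, the parseDataSource handler above consumes gmetad.conf lines of the form data_source "cluster name" [poll_interval] host1 host2 ..., falling back to GmetadDataSource's 15-second default interval when the interval is omitted. A standalone sketch of the same splitting logic (illustrative only, not an import of the module):

# Illustrative re-implementation of GmetadConfig.parseDataSource's parsing.
def parse_data_source(args):
    # args is everything after the data_source keyword, e.g.
    # '"my cluster" 15 host1:8649 host2:8649'
    parts = args.split('"')
    name = parts[1]                      # quoted cluster name
    rest = parts[2].strip().split()
    if rest[0].isdigit():                # optional polling interval
        interval, hosts = int(rest[0]), rest[1:]
    else:
        interval, hosts = 15, rest       # default used by GmetadDataSource
    return name, interval, hosts

print(parse_data_source('"my cluster" 15 host1:8649 host2:8649'))
# ('my cluster', 15, ['host1:8649', 'host2:8649'])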
Kim-Seonghyeon/youtube_8m
mean_average_precision_calculator.py
17
4065
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Calculate the mean average precision. It provides an interface for calculating mean average precision for an entire list or the top-n ranked items. Example usages: We first call the function accumulate many times to process parts of the ranked list. After processing all the parts, we call peek_map_at_n to calculate the mean average precision. ``` import random p = np.array([[random.random() for _ in xrange(50)] for _ in xrange(1000)]) a = np.array([[random.choice([0, 1]) for _ in xrange(50)] for _ in xrange(1000)]) # mean average precision for 50 classes. calculator = mean_average_precision_calculator.MeanAveragePrecisionCalculator( num_class=50) calculator.accumulate(p, a) aps = calculator.peek_map_at_n() ``` """ import numpy import average_precision_calculator class MeanAveragePrecisionCalculator(object): """This class is to calculate mean average precision. """ def __init__(self, num_class): """Construct a calculator to calculate the (macro) average precision. Args: num_class: A positive Integer specifying the number of classes. top_n_array: A list of positive integers specifying the top n for each class. The top n in each class will be used to calculate its average precision at n. The size of the array must be num_class. Raises: ValueError: An error occurred when num_class is not a positive integer; or the top_n_array is not a list of positive integers. """ if not isinstance(num_class, int) or num_class <= 1: raise ValueError("num_class must be a positive integer.") self._ap_calculators = [] # member of AveragePrecisionCalculator self._num_class = num_class # total number of classes for i in range(num_class): self._ap_calculators.append( average_precision_calculator.AveragePrecisionCalculator()) def accumulate(self, predictions, actuals, num_positives=None): """Accumulate the predictions and their ground truth labels. Args: predictions: A list of lists storing the prediction scores. The outer dimension corresponds to classes. actuals: A list of lists storing the ground truth labels. The dimensions should correspond to the predictions input. Any value larger than 0 will be treated as positives, otherwise as negatives. num_positives: If provided, it is a list of numbers representing the number of true positives for each class. If not provided, the number of true positives will be inferred from the 'actuals' array. Raises: ValueError: An error occurred when the shape of predictions and actuals does not match. 
""" if not num_positives: num_positives = [None for i in predictions.shape[1]] calculators = self._ap_calculators for i in range(len(predictions)): calculators[i].accumulate(predictions[i], actuals[i], num_positives[i]) def clear(self): for calculator in self._ap_calculators: calculator.clear() def is_empty(self): return ([calculator.heap_size for calculator in self._ap_calculators] == [0 for _ in range(self._num_class)]) def peek_map_at_n(self): """Peek the non-interpolated mean average precision at n. Returns: An array of non-interpolated average precision at n (default 0) for each class. """ aps = [self._ap_calculators[i].peek_ap_at_n() for i in range(self._num_class)] return aps
apache-2.0
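peek_map_at_n above simply collects the per-class average precisions computed by AveragePrecisionCalculator. As a reminder of the underlying metric, here is a minimal, self-contained non-interpolated average-precision computation; this is a sketch of the standard definition, not the module's exact implementation, which can also normalize by an externally supplied positive count:

import numpy as np

def average_precision(predictions, actuals):
    """Non-interpolated AP: mean of precision@k taken at each positive hit."""
    order = np.argsort(-np.asarray(predictions, dtype=float))  # best score first
    hits = np.asarray(actuals)[order] > 0
    if not hits.any():
        return 0.0
    precision_at_k = np.cumsum(hits) / (np.arange(len(hits)) + 1.0)
    return float(precision_at_k[hits].mean())

print(average_precision([0.9, 0.1, 0.8, 0.4], [1, 0, 0, 1]))  # ~0.8333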
pytrainer/pytrainer
pytrainer/gui/color.py
1
1430
# -*- coding: utf-8 -*- #Copyright (C) Nathan Jones ncjones@users.sourceforge.net #This program is free software; you can redistribute it and/or #modify it under the terms of the GNU General Public License #as published by the Free Software Foundation; either version 2 #of the License, or (at your option) any later version. #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. #You should have received a copy of the GNU General Public License #along with this program; if not, write to the Free Software #Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. from gi.repository import Gdk from pytrainer.util.color import Color class ColorConverter(object): """Converts between Pytrainer and GDK color instances.""" def convert_to_gdk_color(self, color): """Convert a Pytrainer color to a GDK color.""" color_format = "#{0:06x}".format(color.rgb_val) return Gdk.color_parse(color_format) def convert_to_color(self, gdk_col): """Convert a GDK color to a Pytrainer color.""" red = gdk_col.red >> 8 green = gdk_col.green >> 8 blue = gdk_col.blue >> 8 rgb_val = (red << 16) + (green << 8) + blue return Color(rgb_val)
gpl-2.0
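The converter above packs three 8-bit channels into a single rgb_val integer and bridges to GDK's 16-bit-per-channel colors by shifting each channel by 8 bits. The arithmetic in isolation, using hypothetical channel values:

# The bit arithmetic behind ColorConverter, with placeholder values.
red, green, blue = 0x12, 0x34, 0x56

# Pytrainer packs the channels into one integer: 0x123456
rgb_val = (red << 16) + (green << 8) + blue
assert "#{0:06x}".format(rgb_val) == "#123456"  # string handed to Gdk.color_parse

# Gdk.color_parse("#123456") yields 16-bit channels (0x12 becomes 0x1212),
# so the reverse conversion narrows each channel with >> 8:
gdk_red = 0x1212
assert gdk_red >> 8 == red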
mleist/ukmdb_graph
setup.py
1
1861
#!/usr/bin/env python # -*- coding: utf-8 -*- # pylint: disable=C0103 try: from setuptools import setup except ImportError: from distutils.core import setup # pylint: disable=E0401,E0611 with open('README.rst') as readme_file: readme = readme_file.read() with open('HISTORY.rst') as history_file: history = history_file.read() requirements = [ ] test_requirements = [ ] setup( name='ukmdb_graph', version='0.0.9', description="UKMDB graph database adapter.", long_description=readme + '\n\n' + history, author="Markus Leist", author_email='markus@lei.st', url='https://github.com/mleist/ukmdb_graph', packages=[ 'ukmdb_graph', ], package_dir={'ukmdb_graph': 'ukmdb_graph'}, include_package_data=True, install_requires=requirements, license="GPLv3", zip_safe=False, keywords='ukmdb_graph', entry_points={ 'console_scripts': [ 'ukm_graph = ukmdb_graph.worker:main', ], }, classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: System Administrators', 'Intended Audience :: Information Technology', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: System :: Monitoring', 'Topic :: System :: Logging', 'Topic :: Internet :: Log Analysis', 'Topic :: Software Development :: Libraries :: Python Modules', ], test_suite='tests', tests_require=test_requirements )
gpl-3.0
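The entry_points block in the setup above makes pip install generate a ukm_graph executable that imports ukmdb_graph.worker and calls its main(). The real worker module is not shown in this record; a hypothetical minimal module satisfying that contract would be:

# ukmdb_graph/worker.py -- hypothetical skeleton implied by the
# 'ukm_graph = ukmdb_graph.worker:main' console_scripts entry point.
import sys

def main(argv=None):
    args = sys.argv[1:] if argv is None else argv
    print("ukm_graph invoked with: %r" % (args,))
    return 0  # return value becomes the wrapper script's exit status

if __name__ == "__main__":
    sys.exit(main())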
yesudeep/greatship
app/jinja2/jinja2/sandbox.py
6
9260
# -*- coding: utf-8 -*- """ jinja2.sandbox ~~~~~~~~~~~~~~ Adds a sandbox layer to Jinja as it was the default behavior in the old Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the default behavior is easier to use. The behavior can be changed by subclassing the environment. :copyright: (c) 2009 by the Jinja Team. :license: BSD. """ import operator from jinja2.runtime import Undefined from jinja2.environment import Environment from jinja2.exceptions import SecurityError from jinja2.utils import FunctionType, MethodType, TracebackType, CodeType, \ FrameType, GeneratorType #: maximum number of items a range may produce MAX_RANGE = 100000 #: attributes of function objects that are considered unsafe. UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict', 'func_defaults', 'func_globals']) #: unsafe method attributes. function attributes are unsafe for methods too UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self']) import warnings # make sure we don't warn in python 2.6 about stuff we don't care about warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning, module='jinja2.sandbox') from collections import deque from UserDict import UserDict, DictMixin from UserList import UserList _mutable_set_types = (set,) _mutable_mapping_types = (UserDict, DictMixin, dict) _mutable_sequence_types = (UserList, list) # if sets is still available, register the mutable set from there as well try: from sets import Set _mutable_set_types += (Set,) except ImportError: pass #: register Python 2.6 abstract base classes try: from collections import MutableSet, MutableMapping, MutableSequence _mutable_set_types += (MutableSet,) _mutable_mapping_types += (MutableMapping,) _mutable_sequence_types += (MutableSequence,) except ImportError: pass _mutable_spec = ( (_mutable_set_types, frozenset([ 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove', 'symmetric_difference_update', 'update' ])), (_mutable_mapping_types, frozenset([ 'clear', 'pop', 'popitem', 'setdefault', 'update' ])), (_mutable_sequence_types, frozenset([ 'append', 'reverse', 'insert', 'sort', 'extend', 'remove' ])), (deque, frozenset([ 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop', 'popleft', 'remove', 'rotate' ])) ) def safe_range(*args): """A range that can't generate ranges with a length of more than MAX_RANGE items. """ rng = xrange(*args) if len(rng) > MAX_RANGE: raise OverflowError('range too big, maximum size for range is %d' % MAX_RANGE) return rng def unsafe(f): """ Mark a function or method as unsafe:: @unsafe def delete(self): pass """ f.unsafe_callable = True return f def is_internal_attribute(obj, attr): """Test if the attribute given is an internal python attribute. For example this function returns `True` for the `func_code` attribute of python objects. This is useful if the environment method :meth:`~SandboxedEnvironment.is_safe_attribute` is overriden. 
>>> from jinja2.sandbox import is_internal_attribute >>> is_internal_attribute(lambda: None, "func_code") True >>> is_internal_attribute((lambda x:x).func_code, 'co_code') True >>> is_internal_attribute(str, "upper") False """ if isinstance(obj, FunctionType): if attr in UNSAFE_FUNCTION_ATTRIBUTES: return True elif isinstance(obj, MethodType): if attr in UNSAFE_FUNCTION_ATTRIBUTES or \ attr in UNSAFE_METHOD_ATTRIBUTES: return True elif isinstance(obj, type): if attr == 'mro': return True elif isinstance(obj, (CodeType, TracebackType, FrameType)): return True elif isinstance(obj, GeneratorType): if attr == 'gi_frame': return True return attr.startswith('__') def modifies_known_mutable(obj, attr): """This function checks if an attribute on a builtin mutable object (list, dict, set or deque) would modify it if called. It also supports the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and with Python 2.6 onwards the abstract base classes `MutableSet`, `MutableMapping`, and `MutableSequence`. >>> modifies_known_mutable({}, "clear") True >>> modifies_known_mutable({}, "keys") False >>> modifies_known_mutable([], "append") True >>> modifies_known_mutable([], "index") False If called with an unsupported object (such as unicode) `False` is returned. >>> modifies_known_mutable("foo", "upper") False """ for typespec, unsafe in _mutable_spec: if isinstance(obj, typespec): return attr in unsafe return False class SandboxedEnvironment(Environment): """The sandboxed environment. It works like the regular environment but tells the compiler to generate sandboxed code. Additionally subclasses of this environment may override the methods that tell the runtime what attributes or functions are safe to access. If the template tries to access insecure code a :exc:`SecurityError` is raised. However also other exceptions may occour during the rendering so the caller has to ensure that all exceptions are catched. """ sandboxed = True def __init__(self, *args, **kwargs): Environment.__init__(self, *args, **kwargs) self.globals['range'] = safe_range def is_safe_attribute(self, obj, attr, value): """The sandboxed environment will call this method to check if the attribute of an object is safe to access. Per default all attributes starting with an underscore are considered private as well as the special attributes of internal python objects as returned by the :func:`is_internal_attribute` function. """ return not (attr.startswith('_') or is_internal_attribute(obj, attr)) def is_safe_callable(self, obj): """Check if an object is safely callable. Per default a function is considered safe unless the `unsafe_callable` attribute exists and is True. Override this method to alter the behavior, but this won't affect the `unsafe` decorator from this module. """ return not (getattr(obj, 'unsafe_callable', False) or \ getattr(obj, 'alters_data', False)) def getitem(self, obj, argument): """Subscribe an object from sandboxed code.""" try: return obj[argument] except (TypeError, LookupError): if isinstance(argument, basestring): try: attr = str(argument) except: pass else: try: value = getattr(obj, attr) except AttributeError: pass else: if self.is_safe_attribute(obj, argument, value): return value return self.unsafe_undefined(obj, argument) return self.undefined(obj=obj, name=argument) def getattr(self, obj, attribute): """Subscribe an object from sandboxed code and prefer the attribute. The attribute passed *must* be a bytestring. 
""" try: value = getattr(obj, attribute) except AttributeError: try: return obj[attribute] except (TypeError, LookupError): pass else: if self.is_safe_attribute(obj, attribute, value): return value return self.unsafe_undefined(obj, attribute) return self.undefined(obj=obj, name=attribute) def unsafe_undefined(self, obj, attribute): """Return an undefined object for unsafe attributes.""" return self.undefined('access to attribute %r of %r ' 'object is unsafe.' % ( attribute, obj.__class__.__name__ ), name=attribute, obj=obj, exc=SecurityError) def call(__self, __context, __obj, *args, **kwargs): """Call an object from sandboxed code.""" # the double prefixes are to avoid double keyword argument # errors when proxying the call. if not __self.is_safe_callable(__obj): raise SecurityError('%r is not safely callable' % (__obj,)) return __context.call(__obj, *args, **kwargs) class ImmutableSandboxedEnvironment(SandboxedEnvironment): """Works exactly like the regular `SandboxedEnvironment` but does not permit modifications on the builtin mutable objects `list`, `set`, and `dict` by using the :func:`modifies_known_mutable` function. """ def is_safe_attribute(self, obj, attr, value): if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value): return False return not modifies_known_mutable(obj, attr)
mit
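A short demonstration of the sandbox above, written against this Python 2-era Jinja2 (func_globals is one of the UNSAFE_FUNCTION_ATTRIBUTES here; modern Jinja2 blocks __globals__ instead):

from jinja2.sandbox import SandboxedEnvironment, ImmutableSandboxedEnvironment
from jinja2.exceptions import SecurityError

env = SandboxedEnvironment()
print(env.from_string("{{ 1 + 1 }}").render())  # ordinary rendering still works

try:
    # Attribute access falls through to unsafe_undefined(), which raises
    # SecurityError as soon as the undefined value is rendered.
    env.from_string("{{ f.func_globals }}").render(f=lambda: None)
except SecurityError as e:
    print("blocked: %s" % e)

# The immutable variant additionally rejects mutating methods on builtin
# containers via modifies_known_mutable():
ienv = ImmutableSandboxedEnvironment()
try:
    ienv.from_string("{{ items.append(4) }}").render(items=[1, 2, 3])
except SecurityError as e:
    print("blocked: %s" % e)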
infowantstobeseen/pyglet-darwincore
tools/wraptypes/preprocessor.py
20
48296
#!/usr/bin/env python '''Preprocess a C source file. Limitations: * Whitespace is not preserved. * # and ## operators not handled. Reference is C99: * http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf * Also understands Objective-C #import directive * Also understands GNU #include_next ''' __docformat__ = 'restructuredtext' __version__ = '$Id$' import operator import os.path import cPickle import re import sys import lex from lex import TOKEN import yacc tokens = ( 'HEADER_NAME', 'IDENTIFIER', 'PP_NUMBER', 'CHARACTER_CONSTANT', 'STRING_LITERAL', 'OTHER', 'PTR_OP', 'INC_OP', 'DEC_OP', 'LEFT_OP', 'RIGHT_OP', 'LE_OP', 'GE_OP', 'EQ_OP', 'NE_OP', 'AND_OP', 'OR_OP', 'MUL_ASSIGN', 'DIV_ASSIGN', 'MOD_ASSIGN', 'ADD_ASSIGN', 'SUB_ASSIGN', 'LEFT_ASSIGN', 'RIGHT_ASSIGN', 'AND_ASSIGN', 'XOR_ASSIGN', 'OR_ASSIGN', 'HASH_HASH', 'PERIOD', 'ELLIPSIS', 'IF', 'IFDEF', 'IFNDEF', 'ELIF', 'ELSE', 'ENDIF', 'INCLUDE', 'INCLUDE_NEXT', 'DEFINE', 'UNDEF', 'LINE', 'ERROR', 'PRAGMA', 'DEFINED', 'IMPORT', 'NEWLINE', 'LPAREN' ) subs = { 'D': '[0-9]', 'L': '[a-zA-Z_]', 'H': '[a-fA-F0-9]', 'E': '[Ee][+-]?{D}+', 'FS': '[FflL]', 'IS': '[uUlL]*', } # Helper: substitute {foo} with subs[foo] in string (makes regexes more lexy) sub_pattern = re.compile('{([^}]*)}') def sub_repl_match(m): return subs[m.groups()[0]] def sub(s): return sub_pattern.sub(sub_repl_match, s) CHARACTER_CONSTANT = sub(r"L?'(\\.|[^\\'])+'") STRING_LITERAL = sub(r'L?"(\\.|[^\\"])*"') IDENTIFIER = sub('{L}({L}|{D})*') # -------------------------------------------------------------------------- # Token value types # -------------------------------------------------------------------------- # Numbers represented as int and float types. # For all other tokens, type is just str representation. class StringLiteral(str): def __new__(cls, value): assert value[0] == '"' and value[-1] == '"' # Unescaping probably not perfect but close enough. 
value = value[1:-1].decode('string_escape') return str.__new__(cls, value) class SystemHeaderName(str): def __new__(cls, value): assert value[0] == '<' and value[-1] == '>' return str.__new__(cls, value[1:-1]) def __repr__(self): return '<%s>' % (str(self)) # -------------------------------------------------------------------------- # Token declarations # -------------------------------------------------------------------------- punctuators = { # value: (regex, type) r'...': (r'\.\.\.', 'ELLIPSIS'), r'>>=': (r'>>=', 'RIGHT_ASSIGN'), r'<<=': (r'<<=', 'LEFT_ASSIGN'), r'+=': (r'\+=', 'ADD_ASSIGN'), r'-=': (r'-=', 'SUB_ASSIGN'), r'*=': (r'\*=', 'MUL_ASSIGN'), r'/=': (r'/=', 'DIV_ASSIGN'), r'%=': (r'%=', 'MOD_ASSIGN'), r'&=': (r'&=', 'AND_ASSIGN'), r'^=': (r'\^=', 'XOR_ASSIGN'), r'|=': (r'\|=', 'OR_ASSIGN'), r'>>': (r'>>', 'RIGHT_OP'), r'<<': (r'<<', 'LEFT_OP'), r'++': (r'\+\+', 'INC_OP'), r'--': (r'--', 'DEC_OP'), r'->': (r'->', 'PTR_OP'), r'&&': (r'&&', 'AND_OP'), r'||': (r'\|\|', 'OR_OP'), r'<=': (r'<=', 'LE_OP'), r'>=': (r'>=', 'GE_OP'), r'==': (r'==', 'EQ_OP'), r'!=': (r'!=', 'NE_OP'), r'<:': (r'<:', '['), r':>': (r':>', ']'), r'<%': (r'<%', '{'), r'%>': (r'%>', '}'), r'%:%:': (r'%:%:', 'HASH_HASH'), r';': (r';', ';'), r'{': (r'{', '{'), r'}': (r'}', '}'), r',': (r',', ','), r':': (r':', ':'), r'=': (r'=', '='), r')': (r'\)', ')'), r'[': (r'\[', '['), r']': (r']', ']'), r'.': (r'\.', 'PERIOD'), r'&': (r'&', '&'), r'!': (r'!', '!'), r'~': (r'~', '~'), r'-': (r'-', '-'), r'+': (r'\+', '+'), r'*': (r'\*', '*'), r'/': (r'/', '/'), r'%': (r'%', '%'), r'<': (r'<', '<'), r'>': (r'>', '>'), r'^': (r'\^', '^'), r'|': (r'\|', '|'), r'?': (r'\?', '?'), r'#': (r'\#', '#'), } def punctuator_regex(punctuators): punctuator_regexes = [v[0] for v in punctuators.values()] punctuator_regexes.sort(lambda a, b: -cmp(len(a), len(b))) return '(%s)' % '|'.join(punctuator_regexes) def t_clinecomment(t): r'//[^\n]*' t.lexer.lineno += 1 def t_cr(t): r'\r' # Skip over CR characters. Only necessary on urlopen'd files. # C /* comments */. Copied from the ylex.py example in PLY: it's not 100% # correct for ANSI C, but close enough for anything that's not crazy. def t_ccomment(t): r'/\*(.|\n)*?\*/' t.lexer.lineno += t.value.count('\n') def t_header_name(t): r'<([\/]?[^\/\*\n>])*[\/]?>(?=[ \t\f\v\r\n])' # Should allow any character from charset, but that wreaks havok (skips # comment delimiter, for instance), so also don't permit '*' or '//' # The non-matching group at the end prevents false-positives with # operators like '>='. # In the event of a false positive (e.g. "if (a < b || c > d)"), the # token will be split and rescanned if it appears in a text production; # see PreprocessorParser.write. # Is also r'"[^\n"]"', but handled in STRING_LITERAL instead. 
t.type = 'HEADER_NAME' t.value = SystemHeaderName(t.value) return t def t_directive(t): r'\#[ \t]*(ifdef|ifndef|if|elif|else|endif|define|undef|include_next|include|import|line|error|pragma)' if t.lexer.lasttoken in ('NEWLINE', None): t.type = t.value[1:].lstrip().upper() else: # TODO t.type = '#' t.lexer.nexttoken = ('IDENTIFIER', t.value[1:].lstrip()) return t @TOKEN(punctuator_regex(punctuators)) def t_punctuator(t): t.type = punctuators[t.value][1] return t @TOKEN(CHARACTER_CONSTANT) def t_character_constant(t): t.type = 'CHARACTER_CONSTANT' return t @TOKEN(IDENTIFIER) def t_identifier(t): if t.value == 'defined': t.type = 'DEFINED' else: t.type = 'IDENTIFIER' return t # missing: universal-character-constant @TOKEN(sub(r'({D}|\.{D})({D}|{L}|e[+-]|E[+-]|p[+-]|P[+-]|\.)*')) def t_pp_number(t): t.type = 'PP_NUMBER' return t @TOKEN(STRING_LITERAL) def t_string_literal(t): t.type = 'STRING_LITERAL' t.value = StringLiteral(t.value) return t def t_lparen(t): r'\(' if t.lexpos == 0 or t.lexer.lexdata[t.lexpos-1] not in (' \t\f\v\n'): t.type = 'LPAREN' else: t.type = '(' return t def t_continuation(t): r'\\\n' t.lexer.lineno += 1 return None def t_newline(t): r'\n' t.lexer.lineno += 1 t.type = 'NEWLINE' return t def t_error(t): t.type = 'OTHER' return t t_ignore = ' \t\v\f' # -------------------------------------------------------------------------- # Expression Object Model # -------------------------------------------------------------------------- class EvaluationContext(object): '''Interface for evaluating expression nodes. ''' def is_defined(self, identifier): return False class ExpressionNode(object): def evaluate(self, context): return 0 def __str__(self): return '' class ConstantExpressionNode(ExpressionNode): def __init__(self, value): self.value = value def evaluate(self, context): return self.value def __str__(self): return str(self.value) class UnaryExpressionNode(ExpressionNode): def __init__(self, op, op_str, child): self.op = op self.op_str = op_str self.child = child def evaluate(self, context): return self.op(self.child.evaluate(context)) def __str__(self): return '(%s %s)' % (self.op_str, self.child) class BinaryExpressionNode(ExpressionNode): def __init__(self, op, op_str, left, right): self.op = op self.op_str = op_str self.left = left self.right = right def evaluate(self, context): return self.op(self.left.evaluate(context), self.right.evaluate(context)) def __str__(self): return '(%s %s %s)' % (self.left, self.op_str, self.right) class LogicalAndExpressionNode(ExpressionNode): def __init__(self, left, right): self.left = left self.right = right def evaluate(self, context): return self.left.evaluate(context) and self.right.evaluate(context) def __str__(self): return '(%s && %s)' % (self.left, self.right) class LogicalOrExpressionNode(ExpressionNode): def __init__(self, left, right): self.left = left self.right = right def evaluate(self, context): return self.left.evaluate(context) or self.right.evaluate(context) def __str__(self): return '(%s || %s)' % (self.left, self.right) class ConditionalExpressionNode(ExpressionNode): def __init__(self, condition, left, right): self.condition = condition self.left = left self.right = right def evaluate(self, context): if self.condition.evaluate(context): return self.left.evaluate(context) else: return self.right.evaluate(context) def __str__(self): return '(%s ? 
%s : %s)' % (self.condition, self.left, self.right) # -------------------------------------------------------------------------- # Lexers # -------------------------------------------------------------------------- class PreprocessorLexer(lex.Lexer): def __init__(self): lex.Lexer.__init__(self) self.filename = '<input>' def input(self, data, filename=None): if filename: self.filename = filename self.lasttoken = None self.input_stack = [] lex.Lexer.input(self, data) def push_input(self, data, filename): self.input_stack.append( (self.lexdata, self.lexpos, self.filename, self.lineno)) self.lexdata = data self.lexpos = 0 self.lineno = 1 self.filename = filename self.lexlen = len(self.lexdata) def pop_input(self): self.lexdata, self.lexpos, self.filename, self.lineno = \ self.input_stack.pop() self.lexlen = len(self.lexdata) def token(self): result = lex.Lexer.token(self) while result is None and self.input_stack: self.pop_input() result = lex.Lexer.token(self) if result: self.lasttoken = result.type result.filename = self.filename else: self.lasttoken = None return result class TokenListLexer(object): def __init__(self, tokens): self.tokens = tokens self.pos = 0 def token(self): if self.pos < len(self.tokens): t = self.tokens[self.pos] self.pos += 1 return t else: return None def symbol_to_token(sym): if isinstance(sym, yacc.YaccSymbol): return sym.value elif isinstance(sym, lex.LexToken): return sym else: assert False, 'Not a symbol: %r' % sym def create_token(type, value, production=None): '''Create a token of type and value, at the position where 'production' was reduced. Don't specify production if the token is built-in''' t = lex.LexToken() t.type = type t.value = value t.lexpos = -1 if production: t.lineno = production.slice[1].lineno t.filename = production.slice[1].filename else: t.lineno = -1 t.filename = '<builtin>' return t # -------------------------------------------------------------------------- # Grammars # -------------------------------------------------------------------------- class Grammar(object): prototype = None name = 'grammar' @classmethod def get_prototype(cls): if not cls.prototype: instance = cls() tabmodule = '%stab' % cls.name cls.prototype = yacc.yacc(module=instance, tabmodule=tabmodule) return cls.prototype class PreprocessorGrammar(Grammar): tokens = tokens name = 'pp' def p_preprocessing_file(self, p): '''preprocessing_file : group_opt ''' def p_group_opt(self, p): '''group_opt : group | ''' def p_group(self, p): '''group : group_part | group group_part ''' def p_group_part(self, p): '''group_part : if_section | control_line | text_line ''' def p_if_section(self, p): '''if_section : if_group elif_groups_opt else_group_opt endif_line ''' def p_if_group(self, p): '''if_group : if_line group_opt ''' def p_if_line(self, p): '''if_line : IF replaced_constant_expression NEWLINE | IFDEF IDENTIFIER NEWLINE | IFNDEF IDENTIFIER NEWLINE ''' if p.parser.enable_declaratives(): type = p.slice[1].type if type == 'IF': if p[2]: result = p[2].evaluate(p.parser.namespace) else: # error result = False elif type == 'IFDEF': result = p.parser.namespace.is_defined(p[2]) elif type == 'IFNDEF': result = not p.parser.namespace.is_defined(p[2]) p.parser.write((create_token('PP_IFNDEF', p[2], p),)) else: result = False p.parser.condition_if(result) def p_elif_groups_opt(self, p): '''elif_groups_opt : elif_groups | ''' def p_elif_groups(self, p): '''elif_groups : elif_group | elif_groups elif_group ''' def p_elif_group(self, p): '''elif_group : elif_line group_opt ''' def 
p_elif_line(self, p): '''elif_line : ELIF replaced_elif_constant_expression NEWLINE ''' result = p[2].evaluate(p.parser.namespace) p.parser.condition_elif(result) def p_else_group_opt(self, p): '''else_group_opt : else_group | ''' def p_else_group(self, p): '''else_group : else_line group_opt ''' def p_else_line(self, p): '''else_line : ELSE NEWLINE ''' p.parser.condition_else() def p_endif_line(self, p): '''endif_line : ENDIF pp_tokens_opt NEWLINE ''' # pp_tokens needed (ignored) here for Apple. p.parser.condition_endif() def p_control_line(self, p): '''control_line : include_line NEWLINE | define_object | define_function | undef_line | LINE pp_tokens NEWLINE | error_line | PRAGMA pp_tokens_opt NEWLINE ''' def p_include_line(self, p): '''include_line : INCLUDE pp_tokens | INCLUDE_NEXT pp_tokens | IMPORT pp_tokens ''' if p.parser.enable_declaratives(): tokens = p[2] tokens = p.parser.namespace.apply_macros(tokens) if len(tokens) > 0: if p.slice[1].type == 'INCLUDE': if tokens[0].type == 'STRING_LITERAL': p.parser.include(tokens[0].value) return elif tokens[0].type == 'HEADER_NAME': p.parser.include_system(tokens[0].value) return elif p.slice[1].type == 'INCLUDE_NEXT': p.parser.include_next(tokens[0].value, p.slice[1].filename) return else: if tokens[0].type == 'STRING_LITERAL': p.parser.import_(tokens[0].value) return elif tokens[0].type == 'HEADER_NAME': p.parser.import_system(tokens[0].value) return # TODO print >> sys.stderr, 'Invalid #include' def p_define_object(self, p): '''define_object : DEFINE IDENTIFIER replacement_list NEWLINE ''' if p.parser.enable_declaratives(): p.parser.namespace.define_object(p[2], p[3]) # Try to parse replacement list as an expression tokens = p.parser.namespace.apply_macros(p[3]) lexer = TokenListLexer(tokens) expr_parser = StrictConstantExpressionParser(lexer, p.parser.namespace) value = expr_parser.parse(debug=False) if value is not None: value = value.evaluate(p.parser.namespace) p.parser.write( (create_token('PP_DEFINE_CONSTANT', (p[2], value), p),)) else: # Didn't parse, pass on as string value = ' '.join([str(t.value) for t in p[3]]) p.parser.write((create_token('PP_DEFINE', (p[2], value), p),)) def p_define_function(self, p): '''define_function : DEFINE IDENTIFIER LPAREN define_function_params ')' pp_tokens_opt NEWLINE ''' if p.parser.enable_declaratives(): p.parser.namespace.define_function(p[2], p[4], p[6]) def p_define_function_params(self, p): '''define_function_params : identifier_list_opt | ELLIPSIS | identifier_list ',' ELLIPSIS ''' if len(p) == 2: if p[1] == 'ELLIPSIS': p[0] = ('...',) else: p[0] = p[1] else: p[0] = p[1] + ('...',) def p_undef_line(self, p): '''undef_line : UNDEF IDENTIFIER NEWLINE ''' if p.parser.enable_declaratives(): p.parser.namespace.undef(p[2]) def p_error_line(self, p): '''error_line : ERROR pp_tokens_opt NEWLINE ''' if p.parser.enable_declaratives(): p.parser.error(' '.join([t.value for t in p[2]]), p.slice[1].filename, p.slice[1].lineno) def p_text_line(self, p): '''text_line : pp_tokens_opt NEWLINE ''' if p.parser.enable_declaratives(): tokens = p[1] tokens = p.parser.namespace.apply_macros(tokens) p.parser.write(tokens) def p_replacement_list(self, p): '''replacement_list : | preprocessing_token_no_lparen | preprocessing_token_no_lparen pp_tokens ''' if len(p) == 3: p[0] = (p[1],) + p[2] elif len(p) == 2: p[0] = (p[1],) else: p[0] = () def p_identifier_list_opt(self, p): '''identifier_list_opt : identifier_list | ''' if len(p) == 2: p[0] = p[1] else: p[0] = () def p_identifier_list(self, p): '''identifier_list : 
IDENTIFIER | identifier_list ',' IDENTIFIER ''' if len(p) > 2: p[0] = p[1] + (p[3],) else: p[0] = (p[1],) def p_replaced_constant_expression(self, p): '''replaced_constant_expression : pp_tokens''' if p.parser.enable_conditionals(): tokens = p[1] tokens = p.parser.namespace.apply_macros(tokens) lexer = TokenListLexer(tokens) parser = ConstantExpressionParser(lexer, p.parser.namespace) p[0] = parser.parse(debug=True) else: p[0] = ConstantExpressionNode(0) def p_replaced_elif_constant_expression(self, p): '''replaced_elif_constant_expression : pp_tokens''' if p.parser.enable_elif_conditionals(): tokens = p[1] tokens = p.parser.namespace.apply_macros(tokens) lexer = TokenListLexer(tokens) parser = ConstantExpressionParser(lexer, p.parser.namespace) p[0] = parser.parse(debug=True) else: p[0] = ConstantExpressionNode(0) def p_pp_tokens_opt(self, p): '''pp_tokens_opt : pp_tokens | ''' if len(p) == 2: p[0] = p[1] else: p[0] = () def p_pp_tokens(self, p): '''pp_tokens : preprocessing_token | pp_tokens preprocessing_token ''' if len(p) == 2: p[0] = (p[1],) else: p[0] = p[1] + (p[2],) def p_preprocessing_token_no_lparen(self, p): '''preprocessing_token_no_lparen : HEADER_NAME | IDENTIFIER | PP_NUMBER | CHARACTER_CONSTANT | STRING_LITERAL | punctuator | DEFINED | OTHER ''' p[0] = symbol_to_token(p.slice[1]) def p_preprocessing_token(self, p): '''preprocessing_token : preprocessing_token_no_lparen | LPAREN ''' p[0] = symbol_to_token(p.slice[1]) def p_punctuator(self, p): '''punctuator : ELLIPSIS | RIGHT_ASSIGN | LEFT_ASSIGN | ADD_ASSIGN | SUB_ASSIGN | MUL_ASSIGN | DIV_ASSIGN | MOD_ASSIGN | AND_ASSIGN | XOR_ASSIGN | OR_ASSIGN | RIGHT_OP | LEFT_OP | INC_OP | DEC_OP | PTR_OP | AND_OP | OR_OP | LE_OP | GE_OP | EQ_OP | NE_OP | HASH_HASH | ';' | '{' | '}' | ',' | ':' | '=' | '(' | ')' | '[' | ']' | PERIOD | '&' | '!' | '~' | '-' | '+' | '*' | '/' | '%' | '<' | '>' | '^' | '|' | '?' | '#' ''' p[0] = symbol_to_token(p.slice[1]) def p_error(self, t): if not t: # Crap, no way to get to Parser instance. FIXME TODO print >> sys.stderr, 'Syntax error at end of file.' else: # TODO print >> sys.stderr, '%s:%d Syntax error at %r' % \ (t.lexer.filename, t.lexer.lineno, t.value) #t.lexer.cparser.handle_error('Syntax error at %r' % t.value, # t.lexer.filename, t.lexer.lineno) # Don't alter lexer: default behaviour is to pass error production # up until it hits the catch-all at declaration, at which point # parsing continues (synchronisation). 
class ConstantExpressionParseException(Exception): pass class ConstantExpressionGrammar(Grammar): name = 'expr' tokens = tokens def p_constant_expression(self, p): '''constant_expression : conditional_expression ''' p[0] = p[1] p.parser.result = p[0] def p_character_constant(self, p): '''character_constant : CHARACTER_CONSTANT ''' try: value = ord(eval(p[1].lstrip('L'))) except StandardError: value = 0 p[0] = ConstantExpressionNode(value) def p_constant(self, p): '''constant : PP_NUMBER ''' value = p[1].rstrip('LlUu') try: if value[:2] == '0x': value = int(value[2:], 16) elif value[0] == '0': value = int(value, 8) else: value = int(value) except ValueError: value = value.rstrip('eEfF') try: value = float(value) except ValueError: value = 0 p[0] = ConstantExpressionNode(value) def p_identifier(self, p): '''identifier : IDENTIFIER ''' p[0] = ConstantExpressionNode(0) def p_primary_expression(self, p): '''primary_expression : constant | character_constant | identifier | '(' expression ')' | LPAREN expression ')' ''' if p[1] == '(': p[0] = p[2] else: p[0] = p[1] def p_postfix_expression(self, p): '''postfix_expression : primary_expression ''' p[0] = p[1] def p_unary_expression(self, p): '''unary_expression : postfix_expression | unary_operator cast_expression ''' if len(p) == 2: p[0] = p[1] elif type(p[1]) == tuple: # unary_operator reduces to (op, op_str) p[0] = UnaryExpressionNode(p[1][0], p[1][1], p[2]) else: # TODO p[0] = None def p_unary_operator(self, p): '''unary_operator : '+' | '-' | '~' | '!' ''' # reduces to (op, op_str) p[0] = ({ '+': operator.pos, '-': operator.neg, '~': operator.inv, '!': operator.not_}[p[1]], p[1]) def p_cast_expression(self, p): '''cast_expression : unary_expression ''' p[0] = p[len(p) - 1] def p_multiplicative_expression(self, p): '''multiplicative_expression : cast_expression | multiplicative_expression '*' cast_expression | multiplicative_expression '/' cast_expression | multiplicative_expression '%' cast_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = BinaryExpressionNode({ '*': operator.mul, '/': operator.div, '%': operator.mod}[p[2]], p[2], p[1], p[3]) def p_additive_expression(self, p): '''additive_expression : multiplicative_expression | additive_expression '+' multiplicative_expression | additive_expression '-' multiplicative_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = BinaryExpressionNode({ '+': operator.add, '-': operator.sub}[p[2]], p[2], p[1], p[3]) def p_shift_expression(self, p): '''shift_expression : additive_expression | shift_expression LEFT_OP additive_expression | shift_expression RIGHT_OP additive_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = BinaryExpressionNode({ '<<': operator.lshift, '>>': operator.rshift}[p[2]], p[2], p[1], p[3]) def p_relational_expression(self, p): '''relational_expression : shift_expression | relational_expression '<' shift_expression | relational_expression '>' shift_expression | relational_expression LE_OP shift_expression | relational_expression GE_OP shift_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = BinaryExpressionNode({ '>': operator.gt, '<': operator.lt, '<=': operator.le, '>=': operator.ge}[p[2]], p[2], p[1], p[3]) def p_equality_expression(self, p): '''equality_expression : relational_expression | equality_expression EQ_OP relational_expression | equality_expression NE_OP relational_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = BinaryExpressionNode({ '==': operator.eq, '!=': operator.ne}[p[2]], p[2], p[1], p[3]) def p_and_expression(self, p): 
'''and_expression : equality_expression | and_expression '&' equality_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = BinaryExpressionNode(operator.and_, '&', p[1], p[3]) def p_exclusive_or_expression(self, p): '''exclusive_or_expression : and_expression | exclusive_or_expression '^' and_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = BinaryExpressionNode(operator.xor, '^', p[1], p[3]) def p_inclusive_or_expression(self, p): '''inclusive_or_expression : exclusive_or_expression | inclusive_or_expression '|' exclusive_or_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = BinaryExpressionNode(operator.or_, '|', p[1], p[3]) def p_logical_and_expression(self, p): '''logical_and_expression : inclusive_or_expression | logical_and_expression AND_OP inclusive_or_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = LogicalAndExpressionNode(p[1], p[3]) def p_logical_or_expression(self, p): '''logical_or_expression : logical_and_expression | logical_or_expression OR_OP logical_and_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = LogicalOrExpressionNode(p[1], p[3]) def p_conditional_expression(self, p): '''conditional_expression : logical_or_expression | logical_or_expression '?' expression ':' conditional_expression ''' if len(p) == 2: p[0] = p[1] else: p[0] = ConditionalExpressionNode(p[1], p[3], p[5]) def p_assignment_expression(self, p): '''assignment_expression : conditional_expression | unary_expression assignment_operator assignment_expression ''' # TODO assignment if len(p) == 2: p[0] = p[1] def p_assignment_operator(self, p): '''assignment_operator : '=' | MUL_ASSIGN | DIV_ASSIGN | MOD_ASSIGN | ADD_ASSIGN | SUB_ASSIGN | LEFT_ASSIGN | RIGHT_ASSIGN | AND_ASSIGN | XOR_ASSIGN | OR_ASSIGN ''' def p_expression(self, p): '''expression : assignment_expression | expression ',' assignment_expression ''' # TODO sequence if len(p) == 2: p[0] = p[1] def p_error(self, t): raise ConstantExpressionParseException() class StrictConstantExpressionGrammar(ConstantExpressionGrammar): name = 'strict_expr' tokens = tokens def p_identifier(self, p): '''identifier : IDENTIFIER ''' raise ConstantExpressionParseException() class ExecutionState(object): def __init__(self, parent_enabled, enabled): self.enabled = parent_enabled and enabled self.context_enabled = enabled self.parent_enabled = parent_enabled def enable(self, result): if result: self.enabled = self.parent_enabled and not self.context_enabled self.context_enabled = True else: self.enabled = False class PreprocessorParser(yacc.Parser): def __init__(self, gcc_search_path=True): yacc.Parser.__init__(self) self.lexer = lex.lex(cls=PreprocessorLexer) PreprocessorGrammar.get_prototype().init_parser(self) # Map system header name to data, overrides path search and open() self.system_headers = {} self.include_path = ['/usr/local/include', '/usr/include'] if sys.platform == 'darwin': self.framework_path = ['/System/Library/Frameworks', '/Library/Frameworks'] else: self.framework_path = [] if gcc_search_path: self.add_gcc_search_path() self.lexer.filename = '' self.defines = {} self.namespace = PreprocessorNamespace() def define(self, name, value): self.defines[name] = value def add_gcc_search_path(self): from subprocess import Popen, PIPE path = Popen('gcc -print-file-name=include', shell=True, stdout=PIPE).communicate()[0].strip() if path: self.include_path.append(path) def parse(self, filename=None, data=None, namespace=None, debug=False): self.output = [] if not namespace: namespace = self.namespace for name, value in 
self.defines.items(): namespace.define_object(name, (create_token('IDENTIFIER', value),)) self.namespace = namespace self.imported_headers = set() self.condition_stack = [ExecutionState(True, True)] if filename: if not data: data = open(filename, 'r').read() self.lexer.input(data, filename) elif data: self.lexer.input(data, '<input>') return yacc.Parser.parse(self, debug=debug) def push_file(self, filename, data=None): print >> sys.stderr, filename if not data: data = open(filename).read() self.lexer.push_input(data, filename) def include(self, header): path = self.get_header_path(header) if path: self.push_file(path) else: print >> sys.stderr, '"%s" not found' % header # TODO def include_system(self, header): if header in self.system_headers: self.push_file(header, self.system_headers[header]) return path = self.get_system_header_path(header) if path: self.push_file(path) else: print >> sys.stderr, '"%s" not found' % header # TODO def include_next(self, header, reference): # XXX doesn't go via get_system_header next = False for path in self.include_path: p = os.path.join(path, header) if os.path.exists(p): if next: self.push_file(p) return elif p == reference: next = True print >> sys.stderr, '%s: cannot include_next from %s' % \ (header, reference) # TODO def import_(self, header): path = self.get_header_path(header) if path: if path not in self.imported_headers: self.imported_headers.add(path) self.push_file(path) else: print >> sys.stderr, '"%s" not found' % header # TODO def import_system(self, header): if header in self.system_headers: if path not in self.imported_headers: self.imported_headers.add(path) self.push_file(header, self.system_headers[header]) return path = self.get_system_header_path(header) if path: if path not in self.imported_headers: self.imported_headers.add(path) self.push_file(path) else: print >> sys.stderr, '"%s" not found' % header # TODO def get_header_path(self, header): p = os.path.join(os.path.dirname(self.lexer.filename), header) if os.path.exists(p): self.push_file(p) return p elif sys.platform == 'darwin': p = self.get_framework_header_path(header) if not p: p = self.get_system_header_path(header) return p def get_system_header_path(self, header): for path in self.include_path: p = os.path.join(path, header) if os.path.exists(p): return p if sys.platform == 'darwin': return self.get_framework_header_path(header) def get_framework_header_path(self, header): if '/' in header: # header is 'Framework/Framework.h' (e.g. OpenGL/OpenGL.h). 
framework, header = header.split('/', 1) paths = self.framework_path[:] # Add ancestor frameworks of current file localpath = '' for parent in self.lexer.filename.split('.framework/')[:-1]: localpath += parent + '.framework' paths.append(os.path.join(localpath, 'Frameworks')) for path in paths: p = os.path.join(path, '%s.framework' % framework, 'Headers', header) if os.path.exists(p): return p def error(self, message, filename, line): print >> sys.stderr, '%s:%d #error %s' % (filename, line, message) def condition_if(self, result): self.condition_stack.append( ExecutionState(self.condition_stack[-1].enabled, result)) def condition_elif(self, result): self.condition_stack[-1].enable(result) def condition_else(self): self.condition_stack[-1].enable(True) def condition_endif(self): self.condition_stack.pop() def enable_declaratives(self): return self.condition_stack[-1].enabled def enable_conditionals(self): return self.condition_stack[-1].enabled def enable_elif_conditionals(self): return self.condition_stack[-1].parent_enabled and \ not self.condition_stack[-1].context_enabled def write(self, tokens): for t in tokens: if t.type == 'HEADER_NAME': # token was mis-parsed. Do it again, without the '<', '>'. ta = create_token('<', '<') ta.filename = t.filename ta.lineno = t.lineno self.output.append(ta) l = lex.lex(cls=PreprocessorLexer) l.input(t.value, t.filename) l.lineno = t.lineno tb = l.token() while tb is not None: if hasattr(tb, 'lexer'): del tb.lexer self.output.append(tb) tb = l.token() tc = create_token('>', '>') tc.filename = t.filename tc.lineno = t.lineno self.output.append(tc) continue if hasattr(t, 'lexer'): del t.lexer self.output.append(t) def get_memento(self): return (set(self.namespace.objects.keys()), set(self.namespace.functions.keys())) class ConstantExpressionParser(yacc.Parser): _const_grammar = ConstantExpressionGrammar def __init__(self, lexer, namespace): yacc.Parser.__init__(self) self.lexer = lexer self.namespace = namespace self._const_grammar.get_prototype().init_parser(self) def parse(self, debug=False): self.result = None try: yacc.Parser.parse(self, lexer=self.lexer, debug=debug) except ConstantExpressionParseException: # XXX warning here? pass return self.result class StrictConstantExpressionParser(ConstantExpressionParser): _const_grammar = StrictConstantExpressionGrammar class PreprocessorNamespace(EvaluationContext): def __init__(self, gcc_macros=True, stdc_macros=True, workaround_macros=True): self.objects = {} self.functions = {} if stdc_macros: self.add_stdc_macros() if gcc_macros: self.add_gcc_macros() if workaround_macros: self.add_workaround_macros() def add_stdc_macros(self): '''Add macros defined in 6.10.8 except __FILE__ and __LINE__. This is potentially dangerous, as this preprocessor is not ISO compliant in many ways (the most obvious is the lack of # and ## operators). It is required for Apple headers, however, which otherwise assume some truly bizarre syntax is ok. 
        '''
        import time
        date = time.strftime('%b %d %Y') # XXX %d should have leading space
        t = time.strftime('%H:%M:%S')
        self.define_object('__DATE__',
                           (create_token('STRING_LITERAL', date),))
        self.define_object('__TIME__',
                           (create_token('STRING_LITERAL', t),))
        self.define_object('__STDC__', (create_token('PP_NUMBER', '1'),))
        self.define_object('__STDC_HOSTED__',
                           (create_token('PP_NUMBER', '1'),))
        self.define_object('__STDC_VERSION__',
                           (create_token('PP_NUMBER', '199901L'),))

    def add_gcc_macros(self):
        import platform
        import sys

        gcc_macros = ('__GLIBC_HAVE_LONG_LONG',
                      '__GNUC__',)

        # Get these from `gcc -E -dD empty.c`
        machine_macros = {
            'x86_64': ('__amd64', '__amd64__', '__x86_64', '__x86_64__',
                       '__tune_k8__', '__MMX__', '__SSE__', '__SSE2__',
                       '__SSE_MATH__', '__k8', '__k8__'),
            'Power Macintosh': ('_ARCH_PPC', '__BIG_ENDIAN__',
                                '_BIG_ENDIAN', '__ppc__', '__POWERPC__'),
            # TODO everyone else.
        }.get(platform.machine(), ())
        platform_macros = {
            'linux': ('__gnu_linux__', '__linux', '__linux__', 'linux',
                      '__unix', '__unix__', 'unix'),
            'linux2': ('__gnu_linux__', '__linux', '__linux__', 'linux',
                       '__unix', '__unix__', 'unix'),
            'linux3': ('__gnu_linux__', '__linux', '__linux__', 'linux',
                       '__unix', '__unix__', 'unix'),
            'darwin': ('__MACH__', '__APPLE__', '__DYNAMIC__',
                       '__APPLE_CC__'),
            'win32': ('_WIN32',),
            # TODO everyone else
        }.get(sys.platform, ())

        tok1 = lex.LexToken()
        tok1.type = 'PP_NUMBER'
        tok1.value = '1'
        tok1.lineno = -1
        tok1.lexpos = -1

        for macro in machine_macros + platform_macros + gcc_macros:
            self.define_object(macro, (tok1,))

        self.define_object('inline', ())
        self.define_object('__inline', ())
        self.define_object('__inline__', ())
        self.define_object('__const',
                           (create_token('IDENTIFIER', 'const'),))

    def add_workaround_macros(self):
        if sys.platform == 'darwin':
            self.define_object('CF_INLINE', ())

    def is_defined(self, name):
        return name in self.objects or name in self.functions

    def undef(self, name):
        if name in self.objects:
            del self.objects[name]
        if name in self.functions:
            del self.functions[name]

    def define_object(self, name, replacements):
        # TODO check not already existing in objects or functions
        for r in replacements:
            if hasattr(r, 'lexer'):
                del r.lexer
        self.objects[name] = replacements

    def define_function(self, name, params, replacements):
        # TODO check not already existing in objects or functions
        for r in replacements:
            if hasattr(r, 'lexer'):
                del r.lexer

        replacements = list(replacements)
        params = list(params)
        numargs = len(params)
        for i, t in enumerate(replacements):
            if hasattr(t, 'lexer'):
                del t.lexer
            if t.type == 'IDENTIFIER' and t.value in params:
                replacements[i] = params.index(t.value)
            elif t.type == 'IDENTIFIER' and t.value == '__VA_ARGS__' and \
                    '...' in params:
                replacements[i] = len(params) - 1

        self.functions[name] = replacements, numargs

    def apply_macros(self, tokens, replacing=None):
        repl = []
        i = 0
        while i < len(tokens):
            token = tokens[i]
            if token.type == 'IDENTIFIER' and token.value in self.objects:
                r = self.objects[token.value]
                if token.value != replacing and r:
                    repl += self.apply_macros(r, token.value)
            elif token.type == 'IDENTIFIER' and \
                    token.value in self.functions and \
                    len(tokens) - i > 2 and \
                    tokens[i+1].value == '(':
                r, numargs = self.functions[token.value][:]

                # build params list
                i += 2
                params = [[]]
                parens = 0  # balance parentheses within each arg
                while i < len(tokens):
                    if tokens[i].value == ',' and parens == 0 and \
                            len(params) < numargs:
                        params.append([])
                    elif tokens[i].value == ')' and parens == 0:
                        break
                    else:
                        if tokens[i].value == '(':
                            parens += 1
                        elif tokens[i].value == ')':
                            parens -= 1
                        params[-1].append(tokens[i])
                    i += 1

                if token.value != replacing and r:
                    newr = []
                    for t in r:
                        if type(t) == int:
                            newr += params[t]
                        else:
                            newr.append(t)
                    repl += self.apply_macros(newr, token.value)
            elif token.type == 'DEFINED':
                if len(tokens) - i > 3 and \
                        tokens[i + 1].type in ('(', 'LPAREN') and \
                        tokens[i + 2].type == 'IDENTIFIER' and \
                        tokens[i + 3].type == ')':
                    result = self.is_defined(tokens[i + 2].value)
                    i += 3
                elif len(tokens) - i > 1 and \
                        tokens[i + 1].type == 'IDENTIFIER':
                    result = self.is_defined(tokens[i + 1].value)
                    i += 1
                else:
                    # TODO
                    print >> sys.stderr, 'Invalid use of "defined"'
                    result = 0
                t = lex.LexToken()
                t.value = str(int(result))
                t.type = 'PP_NUMBER'
                t.lexpos = token.lexpos
                t.lineno = token.lineno
                repl.append(t)
            else:
                repl.append(token)
            i += 1
        return repl

    def copy(self):
        n = PreprocessorNamespace(gcc_macros=False, workaround_macros=False)
        n.functions = self.functions.copy()
        n.objects = self.objects.copy()
        return n

if __name__ == '__main__':
    filename = sys.argv[1]
    parser = PreprocessorParser()
    parser.parse(filename, debug=True)
    print ' '.join([str(t.value) for t in parser.output])
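
# A minimal sketch of object-macro expansion with PreprocessorNamespace;
# it assumes create_token() builds tokens the same way the methods above
# use it, and it is illustrative rather than part of the module's API.
def _demo_apply_macros():
    ns = PreprocessorNamespace(gcc_macros=False, stdc_macros=False,
                               workaround_macros=False)
    ns.define_object('ANSWER', (create_token('PP_NUMBER', '42'),))
    tokens = [create_token('IDENTIFIER', 'ANSWER')]
    # apply_macros() replaces the IDENTIFIER with its recorded expansion.
    return ' '.join(t.value for t in ns.apply_macros(tokens)) # -> '42'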
bsd-3-clause
skbkontur/Diamond
src/collectors/numa/test/testnuma.py
31
1570
#!/usr/bin/python # coding=utf-8 ########################################################################## from test import CollectorTestCase from test import get_collector_config from test import unittest from mock import patch from mock import Mock from diamond.collector import Collector from numa import NumaCollector ########################################################################## class TestNumaCollector(CollectorTestCase): def setUp(self): config = get_collector_config('NumaCollector', { 'interval': 10, 'bin': 'true' }) self.collector = NumaCollector(config, None) def test_import(self): self.assertTrue(NumaCollector) @patch.object(Collector, 'publish') def test(self, publish_mock): self.collector.collect() metrics = { 'node_0_free_MB': 342, 'node_0_size_MB': 15976 } patch_communicate = patch( 'subprocess.Popen.communicate', Mock(return_value=( self.getFixture('single_node.txt').getvalue(), ''))) patch_communicate.start() self.collector.collect() patch_communicate.stop() self.setDocExample( collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path']) self.assertPublishedMany(publish_mock, metrics) ########################################################################## if __name__ == "__main__": unittest.main()
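
# The patching above can also be written with a context manager, which
# restores subprocess.Popen.communicate even if collect() raises; a
# sketch under the same fixture assumptions:
#
#     with patch('subprocess.Popen.communicate',
#                Mock(return_value=(
#                    self.getFixture('single_node.txt').getvalue(), ''))):
#         self.collector.collect()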
mit
npalermo10/auto_choice_assay_train-test
venv/lib/python2.7/site-packages/pip/_vendor/_markerlib/markers.py
1761
3979
# -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.

EXPR [in|==|!=|not in] EXPR [or|and] ...

where EXPR belongs to any of those:

    python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    python_full_version = sys.version.split()[0]
    os.name = os.name
    sys.platform = sys.platform
    platform.version = platform.version()
    platform.machine = platform.machine()
    platform.python_implementation = platform.python_implementation()
    a free string, like '2.6', or 'win32'
"""

__all__ = ['default_environment', 'compile', 'interpret']

import ast
import os
import platform
import sys
import weakref

_builtin_compile = compile

try:
    from platform import python_implementation
except ImportError:
    if os.name == "java":
        # Jython 2.5 has ast module, but not
        # platform.python_implementation() function.
        def python_implementation():
            return "Jython"
    else:
        raise


# restricted set of variables
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.version is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None # wheel extension
        }

for var in list(_VARS.keys()):
    if '.' in var:
        _VARS[var.replace('.', '_')] = _VARS[var]


def default_environment():
    """Return copy of default PEP 345 globals dictionary."""
    return dict(_VARS)


class ASTWhitelist(ast.NodeTransformer):
    def __init__(self, statement):
        self.statement = statement # for error messages

    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load,
               ast.Str)
    # Bool operations
    ALLOWED += (ast.And, ast.Or)
    # Comparison operations
    ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt,
                ast.LtE, ast.NotEq, ast.NotIn)

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if not isinstance(node, self.ALLOWED):
            raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                              (self.statement,
                               (' ' * node.col_offset) + '^'))
        return ast.NodeTransformer.visit(self, node)

    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(new_node, node)


def parse_marker(marker):
    tree = ast.parse(marker, mode='eval')
    new_tree = ASTWhitelist(marker).generic_visit(tree)
    return new_tree


def compile_marker(parsed_marker):
    return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
                            dont_inherit=True)


_cache = weakref.WeakValueDictionary()


def compile(marker):
    """Return compiled marker as a function accepting an environment dict."""
    try:
        return _cache[marker]
    except KeyError:
        pass
    if not marker.strip():
        def marker_fn(environment=None, override=None):
            """"""
            return True
    else:
        compiled_marker = compile_marker(parse_marker(marker))
        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            return eval(compiled_marker, environment)
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return _cache[marker]


def interpret(marker, environment=None):
    return compile(marker)(environment)
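
# A minimal usage sketch, assuming evaluation against the running
# interpreter; compile() memoizes marker functions, and entries in
# `override` shadow keys from default_environment().
if __name__ == '__main__':
    print(interpret("python_version >= '2.6'"))
    marker_fn = compile("os.name == 'posix' and sys.platform != 'win32'")
    print(marker_fn(override={'os.name': 'posix',
                              'sys.platform': 'linux2'})) # -> True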
gpl-3.0
bpgc-cte/python2017
Week 7/django/lib/python3.6/site-packages/django/contrib/gis/utils/ogrinfo.py
135
1935
""" This module includes some utility functions for inspecting the layout of a GDAL data source -- the functionality is analogous to the output produced by the `ogrinfo` utility. """ from django.contrib.gis.gdal import DataSource from django.contrib.gis.gdal.geometries import GEO_CLASSES def ogrinfo(data_source, num_features=10): """ Walks the available layers in the supplied `data_source`, displaying the fields for the first `num_features` features. """ # Checking the parameters. if isinstance(data_source, str): data_source = DataSource(data_source) elif isinstance(data_source, DataSource): pass else: raise Exception('Data source parameter must be a string or a DataSource object.') for i, layer in enumerate(data_source): print("data source : %s" % data_source.name) print("==== layer %s" % i) print(" shape type: %s" % GEO_CLASSES[layer.geom_type.num].__name__) print(" # features: %s" % len(layer)) print(" srs: %s" % layer.srs) extent_tup = layer.extent.tuple print(" extent: %s - %s" % (extent_tup[0:2], extent_tup[2:4])) print("Displaying the first %s features ====" % num_features) width = max(*map(len, layer.fields)) fmt = " %%%ss: %%s" % width for j, feature in enumerate(layer[:num_features]): print("=== Feature %s" % j) for fld_name in layer.fields: type_name = feature[fld_name].type_name output = fmt % (fld_name, type_name) val = feature.get(fld_name) if val: if isinstance(val, str): val_fmt = ' ("%s")' else: val_fmt = ' (%s)' output += val_fmt % val else: output += ' (None)' print(output)
mit
kemiz/tosca-vcloud-plugin
server_plugin/volume.py
2
5161
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cloudify import ctx
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation

from vcloud_plugin_common import (wait_for_task, with_vca_client,
                                  get_vcloud_config, get_mandatory)
from network_plugin import get_vapp_name


@operation
@with_vca_client
def create_volume(vca_client, **kwargs):
    """
        create new volume, e.g.:
        {
            'use_external_resource': False,
            'volume': {
                'name': 'some-other',
                'size': 11
            }
        }
    """
    if ctx.node.properties.get('use_external_resource'):
        ctx.logger.info("External resource has been used")
        return
    vdc_name = get_vcloud_config()['vdc']
    name = ctx.node.properties['volume']['name']
    size = ctx.node.properties['volume']['size']
    size_in_Mb = size * 1024 * 1024
    success, disk = vca_client.add_disk(vdc_name, name, size_in_Mb)
    if success:
        wait_for_task(vca_client, disk.get_Tasks()[0])
        ctx.logger.info("Volume node {} has been created".format(name))
    else:
        raise cfy_exc.NonRecoverableError(
            "Disk creation error: {0}".format(disk))


@operation
@with_vca_client
def delete_volume(vca_client, **kwargs):
    """
        drop volume
    """
    if ctx.node.properties.get('use_external_resource'):
        ctx.logger.info("External resource has been used")
        return
    vdc_name = get_vcloud_config()['vdc']
    name = ctx.node.properties['volume']['name']
    success, task = vca_client.delete_disk(vdc_name, name)
    if success:
        wait_for_task(vca_client, task)
        ctx.logger.info("Volume node {} has been deleted".format(name))
    else:
        raise cfy_exc.NonRecoverableError(
            "Disk deletion error: {0}".format(task))


@operation
@with_vca_client
def creation_validation(vca_client, **kwargs):
    """
        check volume description
    """
    vdc_name = get_vcloud_config()['vdc']
    disks_names = [
        disk.name for [disk, _vms] in vca_client.get_disks(vdc_name)
    ]
    if ctx.node.properties.get('use_external_resource'):
        resource_id = get_mandatory(ctx.node.properties, 'resource_id')
        if resource_id not in disks_names:
            raise cfy_exc.NonRecoverableError(
                "Disk {} doesn't exist".format(resource_id))
    else:
        volume = get_mandatory(ctx.node.properties, 'volume')
        name = get_mandatory(volume, 'name')
        if name in disks_names:
            raise cfy_exc.NonRecoverableError(
                "Disk {} already exists".format(name))
        get_mandatory(volume, 'size')


@operation
@with_vca_client
def attach_volume(vca_client, **kwargs):
    """
        attach volume
    """
    _volume_operation(vca_client, "ATTACH")


@operation
@with_vca_client
def detach_volume(vca_client, **kwargs):
    """
        detach volume
    """
    _volume_operation(vca_client, "DETACH")


def _volume_operation(vca_client, operation):
    """
        attach/detach volume
    """
    vdc_name = get_vcloud_config()['vdc']
    vdc = vca_client.get_vdc(vdc_name)
    vmName = get_vapp_name(ctx.target.instance.runtime_properties)
    if ctx.source.node.properties.get('use_external_resource'):
        volumeName = ctx.source.node.properties['resource_id']
    else:
        volumeName = ctx.source.node.properties['volume']['name']
    vapp = vca_client.get_vapp(vdc, vmName)
    for ref in vca_client.get_diskRefs(vdc):
        if ref.name == volumeName:
            if operation == 'ATTACH':
                task = vapp.attach_disk_to_vm(vmName, ref)
                if task:
                    wait_for_task(vca_client, task)
                    ctx.logger.info(
                        "Volume node {} has been attached".format(volumeName))
                else:
                    raise cfy_exc.NonRecoverableError(
                        "Can't attach disk: {0}".format(volumeName))
            elif operation == 'DETACH':
                task = vapp.detach_disk_from_vm(vmName, ref)
                if task:
                    wait_for_task(vca_client, task)
                    ctx.logger.info(
                        "Volume node {} has been detached".format(volumeName))
                else:
                    raise cfy_exc.NonRecoverableError(
                        "Can't detach disk: {0}".format(volumeName))
            else:
                raise cfy_exc.NonRecoverableError(
                    "Unknown operation {0}".format(operation))
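
# A sketch of the node properties create_volume() expects, mirroring its
# docstring above; the values are illustrative only.
EXAMPLE_VOLUME_NODE_PROPERTIES = {
    'use_external_resource': False,
    'volume': {
        'name': 'some-other',
        'size': 11,
    },
}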
apache-2.0