repo_name: stringlengths 6-100
path: stringlengths 4-294
copies: stringlengths 1-5
size: stringlengths 4-6
content: stringlengths 606-896k
license: stringclasses, 15 values
miguelinux/vbox
src/VBox/ValidationKit/testmanager/batch/regen_sched_queues.py
3
4295
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: regen_sched_queues.py $
# pylint: disable=C0301

"""
Interface used by the admin to regenerate scheduling queues.
"""

__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation

This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.

The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.

You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 101450 $"

# Standard python imports
import sys;
import os;
from optparse import OptionParser;

# Add Test Manager's modules path
g_ksTestManagerDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksTestManagerDir);

# Test Manager imports
from testmanager.core.db import TMDatabaseConnection;
from testmanager.core.schedulerbase import SchedulerBase;
from testmanager.core.schedgroup import SchedGroupLogic;


class RegenSchedQueues(object): # pylint: disable=R0903
    """
    Regenerates all the scheduling queues.
    """

    def __init__(self):
        """
        Parse command line.
        """
        oParser = OptionParser();
        oParser.add_option('-q', '--quiet', dest = 'fQuiet', action = 'store_true', default = False,
                           help = 'Quiet execution');
        oParser.add_option('-u', '--uid', dest = 'uid', action = 'store', type = 'int', default = 1,
                           help = 'User ID to accredit with this job');
        oParser.add_option('--profile', dest = 'fProfile', action = 'store_true', default = False,
                           help = 'Profile the queue regeneration');

        (self.oConfig, _) = oParser.parse_args();

    def doIt(self):
        """
        Does the job.
        """
        oDb = TMDatabaseConnection();
        aoGroups = SchedGroupLogic(oDb).getAll();
        iRc = 0;
        for oGroup in aoGroups:
            if not self.oConfig.fQuiet:
                print '%s (ID %#d):' % (oGroup.sName, oGroup.idSchedGroup,);
            try:
                (aoErrors, asMessages) = SchedulerBase.recreateQueue(oDb, self.oConfig.uid, oGroup.idSchedGroup, 2);
            except Exception as oXcpt:
                oDb.rollback();
                print '  !!Hit exception processing "%s": %s' % (oGroup.sName, oXcpt,);
            else:
                if len(aoErrors) == 0:
                    if not self.oConfig.fQuiet:
                        print '  Successfully regenerated.';
                else:
                    iRc = 1;
                    print '  %d errors:' % (len(aoErrors,));
                    for oError in aoErrors:
                        if oError[1] is None:
                            print '  !!%s' % (oError[0],);
                        else:
                            print '  !!%s (%s)' % (oError[0], oError[1],);
                if len(asMessages) > 0 and not self.oConfig.fQuiet:
                    print '  %d messages:' % (len(asMessages),);
                    for sMsg in asMessages:
                        print '   ##%s' % (sMsg,);
        return iRc;

    @staticmethod
    def main():
        """ Main function. """
        oMain = RegenSchedQueues();
        if oMain.oConfig.fProfile is not True:
            iRc = oMain.doIt();
        else:
            import cProfile;
            oProfiler = cProfile.Profile();
            iRc = oProfiler.runcall(oMain.doIt);
            oProfiler.print_stats(sort = 'time');
            oProfiler = None;
        return iRc;

if __name__ == '__main__':
    sys.exit(RegenSchedQueues().main());
gpl-2.0
tadeas482/android_kernel_u8500
tools/perf/scripts/python/futex-contention.py
11261
1486
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention

import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *

process_names = {}
thread_thislock = {}
thread_blocktime = {}

lock_waits = {}  # long-lived stats on (tid,lock) blockage elapsed time
process_names = {}  # long-lived pid-to-execname mapping

def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, nr,
                              uaddr, op, val, utime, uaddr2, val3):
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return  # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)

def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, nr, ret):
    if thread_blocktime.has_key(tid):
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    for (tid, lock) in lock_waits:
        min, max, avg, count = lock_waits[tid, lock]
        print "%s[%d] lock %x contended %d times, %d avg ns" % \
              (process_names[tid], tid, lock, count, avg)
gpl-2.0
arunhotra/tensorflow
tensorflow/python/ops/sparse_ops_test.py
5
7592
"""Tests for Python ops defined in sparse_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.python.platform import numpy as np from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.framework import types from tensorflow.python.ops import constant_op from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import googletest class SparseToIndicatorTest(test_util.TensorFlowTestCase): def _SparseTensor_5x6(self, dtype): ind = np.array([ [0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]) val = np.array([0, 10, 13, 14, 32, 33]) shape = np.array([5, 6]) return ops.SparseTensor( constant_op.constant(ind, types.int64), constant_op.constant(val, dtype), constant_op.constant(shape, types.int64)) def _SparseTensor_2x3x4(self, dtype): ind = np.array([ [0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1], [1, 1, 3], [1, 2, 2]]) val = np.array([1, 10, 12, 103, 111, 113, 122]) shape = np.array([2, 3, 4]) return ops.SparseTensor( constant_op.constant(ind, types.int64), constant_op.constant(val, dtype), constant_op.constant(shape, types.int64)) def testInt32(self): with self.test_session(use_gpu=False): sp_input = self._SparseTensor_5x6(types.int32) output = sparse_ops.sparse_to_indicator(sp_input, 50).eval() expected_output = np.zeros((5, 50), dtype=np.bool) expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)) for expected_true in expected_trues: expected_output[expected_true] = True self.assertAllEqual(output, expected_output) def testInt64(self): with self.test_session(use_gpu=False): sp_input = self._SparseTensor_5x6(types.int64) output = sparse_ops.sparse_to_indicator(sp_input, 50).eval() expected_output = np.zeros((5, 50), dtype=np.bool) expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)] for expected_true in expected_trues: expected_output[expected_true] = True self.assertAllEqual(output, expected_output) def testHigherRank(self): with self.test_session(use_gpu=False): sp_input = self._SparseTensor_2x3x4(types.int64) output = sparse_ops.sparse_to_indicator(sp_input, 200).eval() expected_output = np.zeros((2, 3, 200), dtype=np.bool) expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103), (1, 1, 111), (1, 1, 113), (1, 2, 122)] for expected_true in expected_trues: expected_output[expected_true] = True self.assertAllEqual(output, expected_output) class SparseRetainTest(test_util.TensorFlowTestCase): def _SparseTensor_5x6(self): ind = np.array([ [0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]) val = np.array([0, 10, 13, 14, 32, 33]) shape = np.array([5, 6]) return ops.SparseTensor( constant_op.constant(ind, types.int64), constant_op.constant(val, types.int32), constant_op.constant(shape, types.int64)) def testBasic(self): with self.test_session(use_gpu=False) as sess: sp_input = self._SparseTensor_5x6() to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool) sp_output = sparse_ops.sparse_retain(sp_input, to_retain) output = sess.run(sp_output) self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]]) self.assertAllEqual(output.values, [0, 14, 32]) self.assertAllEqual(output.shape, [5, 6]) def testRetainNone(self): with self.test_session(use_gpu=False) as sess: sp_input = self._SparseTensor_5x6() to_retain = np.zeros((6,), dtype=np.bool) sp_output = sparse_ops.sparse_retain(sp_input, to_retain) output = sess.run(sp_output) self.assertAllEqual(output.indices, np.array([]).reshape((0, 2))) 
self.assertAllEqual(output.values, []) self.assertAllEqual(output.shape, [5, 6]) def testMismatchedRetainShape(self): with self.test_session(use_gpu=False): sp_input = self._SparseTensor_5x6() to_retain = np.array([1, 0, 0, 1, 0], dtype=np.bool) with self.assertRaises(ValueError): sparse_ops.sparse_retain(sp_input, to_retain) class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase): def _SparseTensor_5x6(self): ind = np.array([ [0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]) val = np.array([0, 10, 13, 14, 32, 33]) shape = np.array([5, 6]) return ops.SparseTensor( constant_op.constant(ind, types.int64), constant_op.constant(val, types.int32), constant_op.constant(shape, types.int64)) def _SparseTensor_String5x6(self): ind = np.array([ [0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]) val = np.array(["a", "b", "c", "d", "e", "f"]) shape = np.array([5, 6]) return ops.SparseTensor( constant_op.constant(ind, types.int64), constant_op.constant(val, types.string), constant_op.constant(shape, types.int64)) def _SparseTensor_2x6(self): ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]]) val = np.array([0, 10, 13, 14]) shape = np.array([2, 6]) return ops.SparseTensor( constant_op.constant(ind, types.int64), constant_op.constant(val, types.int32), constant_op.constant(shape, types.int64)) def testFillNumber(self): with self.test_session(use_gpu=False) as sess: sp_input = self._SparseTensor_5x6() sp_output, empty_row_indicator = ( sparse_ops.sparse_fill_empty_rows(sp_input, -1)) output, empty_row_indicator_out = sess.run( [sp_output, empty_row_indicator]) self.assertAllEqual( output.indices, [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]]) self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1]) self.assertAllEqual(output.shape, [5, 6]) self.assertAllEqual(empty_row_indicator_out, np.array([0, 0, 1, 0, 1]).astype(np.bool)) def testFillString(self): with self.test_session(use_gpu=False) as sess: sp_input = self._SparseTensor_String5x6() sp_output, empty_row_indicator = ( sparse_ops.sparse_fill_empty_rows(sp_input, "")) output, empty_row_indicator_out = sess.run( [sp_output, empty_row_indicator]) self.assertAllEqual( output.indices, [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]]) self.assertAllEqual(output.values, ["a", "b", "c", "d", "", "e", "f", ""]) self.assertAllEqual(output.shape, [5, 6]) self.assertAllEqual(empty_row_indicator_out, np.array([0, 0, 1, 0, 1]).astype(np.bool)) def testNoEmptyRows(self): with self.test_session(use_gpu=False) as sess: sp_input = self._SparseTensor_2x6() sp_output, empty_row_indicator = ( sparse_ops.sparse_fill_empty_rows(sp_input, -1)) output, empty_row_indicator_out = sess.run( [sp_output, empty_row_indicator]) self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]]) self.assertAllEqual(output.values, [0, 10, 13, 14]) self.assertAllEqual(output.shape, [2, 6]) self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool)) if __name__ == "__main__": googletest.main()
apache-2.0
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/docs/_ext/applyxrefs.py
322
1834
"""Adds xref targets to the top of files.""" import os import sys testing = False DONT_TOUCH = ( './index.txt', ) def target_name(fn): if fn.endswith('.txt'): fn = fn[:-4] return '_' + fn.lstrip('./').replace('/', '-') def process_file(fn, lines): lines.insert(0, '\n') lines.insert(0, '.. %s:\n' % target_name(fn)) try: with open(fn, 'w') as fp: fp.writelines(lines) except IOError: print("Can't open %s for writing. Not touching it." % fn) def has_target(fn): try: with open(fn, 'r') as fp: lines = fp.readlines() except IOError: print("Can't open or read %s. Not touching it." % fn) return (True, None) # print fn, len(lines) if len(lines) < 1: print("Not touching empty file %s." % fn) return (True, None) if lines[0].startswith('.. _'): return (True, None) return (False, lines) def main(argv=None): if argv is None: argv = sys.argv if len(argv) == 1: argv.extend('.') files = [] for root in argv[1:]: for (dirpath, dirnames, filenames) in os.walk(root): files.extend((dirpath, f) for f in filenames) files.sort() files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')] # print files for fn in files: if fn in DONT_TOUCH: print("Skipping blacklisted file %s." % fn) continue target_found, lines = has_target(fn) if not target_found: if testing: print('%s: %s' % (fn, lines[0])) else: print("Adding xref to %s" % fn) process_file(fn, lines) else: print("Skipping %s: already has a xref" % fn) if __name__ == '__main__': sys.exit(main())
mit
inmomentsoftware/teams
wtforms/ext/django/fields.py
175
4580
""" Useful form fields for use with the Django ORM. """ from __future__ import unicode_literals import datetime import operator try: from django.conf import settings from django.utils import timezone has_timezone = True except ImportError: has_timezone = False from wtforms import fields, widgets from wtforms.compat import string_types from wtforms.validators import ValidationError __all__ = ( 'ModelSelectField', 'QuerySetSelectField', 'DateTimeField' ) class QuerySetSelectField(fields.SelectFieldBase): """ Given a QuerySet either at initialization or inside a view, will display a select drop-down field of choices. The `data` property actually will store/keep an ORM model instance, not the ID. Submitting a choice which is not in the queryset will result in a validation error. Specify `get_label` to customize the label associated with each option. If a string, this is the name of an attribute on the model object to use as the label text. If a one-argument callable, this callable will be passed model instance and expected to return the label text. Otherwise, the model object's `__str__` or `__unicode__` will be used. If `allow_blank` is set to `True`, then a blank choice will be added to the top of the list. Selecting this choice will result in the `data` property being `None`. The label for the blank choice can be set by specifying the `blank_text` parameter. """ widget = widgets.Select() def __init__(self, label=None, validators=None, queryset=None, get_label=None, allow_blank=False, blank_text='', **kwargs): super(QuerySetSelectField, self).__init__(label, validators, **kwargs) self.allow_blank = allow_blank self.blank_text = blank_text self._set_data(None) if queryset is not None: self.queryset = queryset.all() # Make sure the queryset is fresh if get_label is None: self.get_label = lambda x: x elif isinstance(get_label, string_types): self.get_label = operator.attrgetter(get_label) else: self.get_label = get_label def _get_data(self): if self._formdata is not None: for obj in self.queryset: if obj.pk == self._formdata: self._set_data(obj) break return self._data def _set_data(self, data): self._data = data self._formdata = None data = property(_get_data, _set_data) def iter_choices(self): if self.allow_blank: yield ('__None', self.blank_text, self.data is None) for obj in self.queryset: yield (obj.pk, self.get_label(obj), obj == self.data) def process_formdata(self, valuelist): if valuelist: if valuelist[0] == '__None': self.data = None else: self._data = None self._formdata = int(valuelist[0]) def pre_validate(self, form): if not self.allow_blank or self.data is not None: for obj in self.queryset: if self.data == obj: break else: raise ValidationError(self.gettext('Not a valid choice')) class ModelSelectField(QuerySetSelectField): """ Like a QuerySetSelectField, except takes a model class instead of a queryset and lists everything in it. """ def __init__(self, label=None, validators=None, model=None, **kwargs): super(ModelSelectField, self).__init__(label, validators, queryset=model._default_manager.all(), **kwargs) class DateTimeField(fields.DateTimeField): """ Adds support for Django's timezone utilities. 
Requires Django >= 1.5 """ def __init__(self, *args, **kwargs): if not has_timezone: raise ImportError('DateTimeField requires Django >= 1.5') super(DateTimeField, self).__init__(*args, **kwargs) def process_formdata(self, valuelist): super(DateTimeField, self).process_formdata(valuelist) date = self.data if settings.USE_TZ and date is not None and timezone.is_naive(date): current_timezone = timezone.get_current_timezone() self.data = timezone.make_aware(date, current_timezone) def _value(self): date = self.data if settings.USE_TZ and isinstance(date, datetime.datetime) and timezone.is_aware(date): self.data = timezone.localtime(date) return super(DateTimeField, self)._value()
agpl-3.0
WhiteNeo-/NeoKernel-L
scripts/gcc-wrapper.py
234
4095
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of The Linux Foundation nor
#       the names of its contributors may be used to endorse or promote
#       products derived from this software without specific prior written
#       permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.

import errno
import re
import os
import sys
import subprocess

# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.

allowed_warnings = set([
    "alignment.c:327",
    "mmu.c:602",
    "return_address.c:62",
    "swab.h:49",
    "SemaLambda.cpp:946",
    "CGObjCGNU.cpp:1414",
    "BugReporter.h:146",
    "RegionStore.cpp:1904",
    "SymbolManager.cpp:484",
    "RewriteObjCFoundationAPI.cpp:737",
    "RewriteObjCFoundationAPI.cpp:696",
    "CommentParser.cpp:394",
    "CommentParser.cpp:391",
    "CommentParser.cpp:356",
    "LegalizeDAG.cpp:3646",
    "IRBuilder.h:844",
    "DataLayout.cpp:193",
    "transport.c:653",
    "xt_socket.c:307",
    "xt_socket.c:161",
    "inet_hashtables.h:356",
    "xc4000.c:1049",
    "xc4000.c:1063",
    "f_qdss.c:586",
    "mipi_tc358764_dsi2lvds.c:746",
    "dynamic_debug.h:75",
    "hci_conn.c:407",
    "f_qdss.c:740",
    "mipi_novatek.c:569",
    "swab.h:34",
])

# Capture the name of the object file, can find it.
ofile = None

warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc.  The messages we care about have a filename, and a warning"""
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        sys.exit(1)

def run_gcc():
    args = sys.argv[1:]
    # Look for -o
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass

    compiler = sys.argv[0]

    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':', e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)

    return result

if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
gpl-2.0
boppreh/keyboard
setup.py
1
1333
""" Usage instructions: - If you are installing: `python setup.py install` - If you are developing: `python setup.py sdist --format=zip bdist_wheel --universal bdist_wininst && twine check dist/*` """ import keyboard from setuptools import setup setup( name='keyboard', version=keyboard.version, author='BoppreH', author_email='boppreh@gmail.com', packages=['keyboard'], url='https://github.com/boppreh/keyboard', license='MIT', description='Hook and simulate keyboard events on Windows and Linux', keywords = 'keyboard hook simulate hotkey', # Wheel creation breaks with Windows newlines. # https://github.com/pypa/setuptools/issues/1126 long_description=keyboard.__doc__.replace('\r\n', '\n'), long_description_content_type='text/markdown', install_requires=["pyobjc; sys_platform=='darwin'"], # OSX-specific dependency classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: MIT License', 'Operating System :: Microsoft :: Windows', 'Operating System :: Unix', 'Operating System :: MacOS :: MacOS X', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Utilities', ], )
mit
srsman/odoo
addons/l10n_be/wizard/l10n_be_vat_intra.py
332
14728
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    Adapted by Noviat to
#     - make the 'mand_id' field optional
#     - support Noviat tax code scheme
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time
import base64

from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.report import report_sxw


class partner_vat_intra(osv.osv_memory):
    """ Partner Vat Intra """
    _name = "partner.vat.intra"
    _description = 'Partner VAT Intra'

    def _get_xml_data(self, cr, uid, context=None):
        if context.get('file_save', False):
            return base64.encodestring(context['file_save'].encode('utf8'))
        return ''

    def _get_europe_country(self, cursor, user, context=None):
        return self.pool.get('res.country').search(cursor, user, [('code', 'in', ['AT', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'GB'])])

    _columns = {
        'name': fields.char('File Name'),
        'period_code': fields.char('Period Code', size=6, required=True, help='''This is where you have to set the period code for the intracom declaration using the format: ppyyyy
      PP can stand for a month: from '01' to '12'.
      PP can stand for a trimester: '31','32','33','34'
          The first figure means that it is a trimester,
          The second figure identify the trimester.
      PP can stand for a complete fiscal year: '00'.
      YYYY stands for the year (4 positions).
    '''
        ),
        'period_ids': fields.many2many('account.period', 'account_period_rel', 'acc_id', 'period_id', 'Period (s)', help='Select here the period(s) you want to include in your intracom declaration'),
        'tax_code_id': fields.many2one('account.tax.code', 'Company', domain=[('parent_id', '=', False)], help="Keep empty to use the user's company", required=True),
        'test_xml': fields.boolean('Test XML file', help="Sets the XML output as test file"),
        'mand_id' : fields.char('Reference', help="Reference given by the Representative of the sending company."),
        'msg': fields.text('File created', readonly=True),
        'no_vat': fields.text('Partner With No VAT', readonly=True, help="The Partner whose VAT number is not defined and they are not included in XML File."),
        'file_save' : fields.binary('Save File', readonly=True),
        'country_ids': fields.many2many('res.country', 'vat_country_rel', 'vat_id', 'country_id', 'European Countries'),
        'comments': fields.text('Comments'),
    }

    def _get_tax_code(self, cr, uid, context=None):
        obj_tax_code = self.pool.get('account.tax.code')
        obj_user = self.pool.get('res.users')
        company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
        tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context)
        return tax_code_ids and tax_code_ids[0] or False

    _defaults = {
        'country_ids': _get_europe_country,
        'file_save': _get_xml_data,
        'name': 'vat_intra.xml',
        'tax_code_id': _get_tax_code,
    }

    def _get_datas(self, cr, uid, ids, context=None):
        """Collects require data for vat intra xml
        :param ids: id of wizard.
        :return: dict of all data to be used to generate xml for Partner VAT Intra.
        :rtype: dict
        """
        if context is None:
            context = {}

        obj_user = self.pool.get('res.users')
        obj_sequence = self.pool.get('ir.sequence')
        obj_partner = self.pool.get('res.partner')

        xmldict = {}
        post_code = street = city = country = data_clientinfo = ''
        seq = amount_sum = 0

        wiz_data = self.browse(cr, uid, ids[0], context=context)
        comments = wiz_data.comments

        if wiz_data.tax_code_id:
            data_company = wiz_data.tax_code_id.company_id
        else:
            data_company = obj_user.browse(cr, uid, uid, context=context).company_id

        # Get Company vat
        company_vat = data_company.partner_id.vat
        if not company_vat:
            raise osv.except_osv(_('Insufficient Data!'), _('No VAT number associated with your company.'))
        company_vat = company_vat.replace(' ', '').upper()
        issued_by = company_vat[:2]

        if len(wiz_data.period_code) != 6:
            raise osv.except_osv(_('Error!'), _('Period code is not valid.'))

        if not wiz_data.period_ids:
            raise osv.except_osv(_('Insufficient Data!'), _('Please select at least one Period.'))

        p_id_list = obj_partner.search(cr, uid, [('vat', '!=', False)], context=context)
        if not p_id_list:
            raise osv.except_osv(_('Insufficient Data!'), _('No partner has a VAT number associated with him.'))

        seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
        dnum = company_vat[2:] + seq_declarantnum[-4:]
        addr = obj_partner.address_get(cr, uid, [data_company.partner_id.id], ['invoice'])
        email = data_company.partner_id.email or ''
        phone = data_company.partner_id.phone or ''

        if addr.get('invoice', False):
            ads = obj_partner.browse(cr, uid, [addr['invoice']])[0]
            city = (ads.city or '')
            post_code = (ads.zip or '')
            if ads.street:
                street = ads.street
            if ads.street2:
                street += ' '
                street += ads.street2
            if ads.country_id:
                country = ads.country_id.code

        if not country:
            country = company_vat[:2]
        if not email:
            raise osv.except_osv(_('Insufficient Data!'), _('No email address associated with the company.'))
        if not phone:
            raise osv.except_osv(_('Insufficient Data!'), _('No phone associated with the company.'))

        xmldict.update({
            'company_name': data_company.name,
            'company_vat': company_vat,
            'vatnum': company_vat[2:],
            'mand_id': wiz_data.mand_id,
            'sender_date': str(time.strftime('%Y-%m-%d')),
            'street': street,
            'city': city,
            'post_code': post_code,
            'country': country,
            'email': email,
            'phone': phone.replace('/', '').replace('.', '').replace('(', '').replace(')', '').replace(' ', ''),
            'period': wiz_data.period_code,
            'clientlist': [],
            'comments': comments,
            'issued_by': issued_by,
        })

        # tax code 44: services
        # tax code 46L: normal good deliveries
        # tax code 46T: ABC good deliveries
        # tax code 48xxx: credite note on tax code xxx
        codes = ('44', '46L', '46T', '48s44', '48s46L', '48s46T')
        cr.execute('''SELECT p.name As partner_name, l.partner_id AS partner_id, p.vat AS vat,
                      (CASE WHEN t.code = '48s44' THEN '44'
                            WHEN t.code = '48s46L' THEN '46L'
                            WHEN t.code = '48s46T' THEN '46T'
                            ELSE t.code END) AS intra_code,
                      SUM(CASE WHEN t.code in ('48s44','48s46L','48s46T') THEN -l.tax_amount ELSE l.tax_amount END) AS amount
                      FROM account_move_line l
                      LEFT JOIN account_tax_code t ON (l.tax_code_id = t.id)
                      LEFT JOIN res_partner p ON (l.partner_id = p.id)
                      WHERE t.code IN %s
                        AND l.period_id IN %s
                        AND t.company_id = %s
                      GROUP BY p.name, l.partner_id, p.vat, intra_code''', (codes, tuple([p.id for p in wiz_data.period_ids]), data_company.id))

        p_count = 0

        for row in cr.dictfetchall():
            if not row['vat']:
                row['vat'] = ''
                p_count += 1

            seq += 1
            amt = row['amount'] or 0.0
            amount_sum += amt

            intra_code = row['intra_code'] == '44' and 'S' or (row['intra_code'] == '46L' and 'L' or (row['intra_code'] == '46T' and 'T' or ''))

            xmldict['clientlist'].append({
                'partner_name': row['partner_name'],
                'seq': seq,
                'vatnum': row['vat'][2:].replace(' ', '').upper(),
                'vat': row['vat'],
                'country': row['vat'][:2],
                'amount': round(amt, 2),
                'intra_code': row['intra_code'],
                'code': intra_code})

        xmldict.update({'dnum': dnum, 'clientnbr': str(seq), 'amountsum': round(amount_sum, 2), 'partner_wo_vat': p_count})
        return xmldict

    def create_xml(self, cursor, user, ids, context=None):
        """Creates xml that is to be exported and sent to estate for partner vat intra.
        :return: Value for next action.
        :rtype: dict
        """
        mod_obj = self.pool.get('ir.model.data')
        xml_data = self._get_datas(cursor, user, ids, context=context)
        month_quarter = xml_data['period'][:2]
        year = xml_data['period'][2:]
        data_file = ''

        # Can't we do this by etree?
        data_head = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:IntraConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/IntraConsignment" IntraListingsNbr="1">
    <ns2:Representative>
        <RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(vatnum)s</RepresentativeID>
        <Name>%(company_name)s</Name>
        <Street>%(street)s</Street>
        <PostCode>%(post_code)s</PostCode>
        <City>%(city)s</City>
        <CountryCode>%(country)s</CountryCode>
        <EmailAddress>%(email)s</EmailAddress>
        <Phone>%(phone)s</Phone>
    </ns2:Representative>""" % (xml_data)

        if xml_data['mand_id']:
            data_head += '\n\t\t<ns2:RepresentativeReference>%(mand_id)s</ns2:RepresentativeReference>' % (xml_data)

        data_comp_period = '\n\t\t<ns2:Declarant>\n\t\t\t<VATNumber>%(vatnum)s</VATNumber>\n\t\t\t<Name>%(company_name)s</Name>\n\t\t\t<Street>%(street)s</Street>\n\t\t\t<PostCode>%(post_code)s</PostCode>\n\t\t\t<City>%(city)s</City>\n\t\t\t<CountryCode>%(country)s</CountryCode>\n\t\t\t<EmailAddress>%(email)s</EmailAddress>\n\t\t\t<Phone>%(phone)s</Phone>\n\t\t</ns2:Declarant>' % (xml_data)
        if month_quarter.startswith('3'):
            data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Quarter>' + month_quarter[1] + '</ns2:Quarter> \n\t\t\t<ns2:Year>' + year + '</ns2:Year>\n\t\t</ns2:Period>'
        elif month_quarter.startswith('0') and month_quarter.endswith('0'):
            data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Year>' + year + '</ns2:Year>\n\t\t</ns2:Period>'
        else:
            data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Month>' + month_quarter + '</ns2:Month> \n\t\t\t<ns2:Year>' + year + '</ns2:Year>\n\t\t</ns2:Period>'

        data_clientinfo = ''
        for client in xml_data['clientlist']:
            if not client['vatnum']:
                raise osv.except_osv(_('Insufficient Data!'), _('No vat number defined for %s.') % client['partner_name'])
            data_clientinfo += '\n\t\t<ns2:IntraClient SequenceNumber="%(seq)s">\n\t\t\t<ns2:CompanyVATNumber issuedBy="%(country)s">%(vatnum)s</ns2:CompanyVATNumber>\n\t\t\t<ns2:Code>%(code)s</ns2:Code>\n\t\t\t<ns2:Amount>%(amount).2f</ns2:Amount>\n\t\t</ns2:IntraClient>' % (client)

        data_decl = '\n\t<ns2:IntraListing SequenceNumber="1" ClientsNbr="%(clientnbr)s" DeclarantReference="%(dnum)s" AmountSum="%(amountsum).2f">' % (xml_data)

        data_file += data_head + data_decl + data_comp_period + data_clientinfo + '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>\n\t</ns2:IntraListing>\n</ns2:IntraConsignment>' % (xml_data)
        context = dict(context or {})
        context['file_save'] = data_file

        model_data_ids = mod_obj.search(cursor, user, [('model', '=', 'ir.ui.view'), ('name', '=', 'view_vat_intra_save')], context=context)
        resource_id = mod_obj.read(cursor, user, model_data_ids, fields=['res_id'], context=context)[0]['res_id']

        return {
            'name': _('Save'),
            'context': context,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'partner.vat.intra',
            'views': [(resource_id, 'form')],
            'view_id': 'view_vat_intra_save',
            'type': 'ir.actions.act_window',
            'target': 'new',
        }

    def preview(self, cr, uid, ids, context=None):
        xml_data = self._get_datas(cr, uid, ids, context=context)
        datas = {
            'ids': [],
            'model': 'partner.vat.intra',
            'form': xml_data
        }
        return self.pool['report'].get_action(
            cr, uid, [], 'l10n_be.report_l10nvatintraprint', data=datas, context=context
        )


class vat_intra_print(report_sxw.rml_parse):
    def __init__(self, cr, uid, name, context):
        super(vat_intra_print, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
        })


class wrapped_vat_intra_print(osv.AbstractModel):
    _name = 'report.l10n_be.report_l10nvatintraprint'
    _inherit = 'report.abstract_report'
    _template = 'l10n_be.report_l10nvatintraprint'
    _wrapped_report_class = vat_intra_print

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
2Minutes/davos-dev
davos/core/utils.py
1
7692
import re
import sys
import os
import os.path as osp
from fnmatch import fnmatch

from pytd.gui.dialogs import promptDialog
from pytd.util.logutils import logMsg
from pytd.util.sysutils import importModule, toStr, inDevMode, getCaller
from pytd.util.fsutils import pathSplitDirs, pathResolve, pathNorm, pathJoin
from pytd.util.fsutils import jsonRead, jsonWrite, isDirStat, parseDirContent
from pytd.util.strutils import padded

_VERS_SPLIT_REXP = re.compile(r'-(v[0-9]+)')

def getConfigModule(sProjectName):

    try:
        sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
        sConfigModule = sConfPkg + '.' + sProjectName
        modobj = importModule(sConfigModule)
    except ImportError:
        raise ImportError("No config module named '{}'".format(sConfigModule))

    return modobj

def versionFromName(sFileName):
    vers = _VERS_SPLIT_REXP.findall(sFileName)
    return int(vers[-1].strip('v')) if vers else None

def mkVersionSuffix(v):
    if not isinstance(v, int):
        raise TypeError("argument must be of type <int>. Got {}.".format(type(v)))
    return "".join(('-v', padded(v)))

def findVersionFields(s):
    return _VERS_SPLIT_REXP.findall(s)

def promptForComment(**kwargs):

    sComment = ""
    bOk = False

    result = promptDialog(title='Please...',
                          message='Leave a comment: ',
                          button=['OK', 'Cancel'],
                          defaultButton='OK',
                          cancelButton='Cancel',
                          dismissString='Cancel',
                          scrollableField=True,
                          **kwargs)

    if result == 'Cancel':
        logMsg("Cancelled !", warning=True)
    elif result == 'OK':
        sComment = promptDialog(query=True, text=True)
        bOk = True

    return sComment, bOk

def projectNameFromPath(p):

    sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
    pkg = importModule(sConfPkg)
    sPkgDirPath = os.path.dirname(pkg.__file__)

    sDirList = pathSplitDirs(p)

    for sFilename in os.listdir(sPkgDirPath):

        bIgnored = False
        for sPatrn in ("__*", ".*", "*.pyc"):
            if fnmatch(sFilename, sPatrn):
                bIgnored = True
                break

        if bIgnored:
            continue

        sModName = os.path.splitext(sFilename)[0]
        m = importModule(sConfPkg + '.' + sModName)

        sProjDir = m.project.dir_name
        if sProjDir in sDirList:
            return sModName

    return ""

def splitStep(sTaskName):
    return sTaskName.rsplit("|", 1) if ("|" in sTaskName) else ("", sTaskName)

def damasServerPort():
    return os.getenv("DAMAS_DEV_PORT", "8443") if inDevMode() else "8443"

def loadPrefs():

    global DAVOS_PREFS

    try:
        p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
        DAVOS_PREFS = jsonRead(p)
    except EnvironmentError:
        DAVOS_PREFS = {}

    return DAVOS_PREFS

def savePrefs():

    global DAVOS_PREFS

    if DAVOS_PREFS:
        p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
        jsonWrite(p, DAVOS_PREFS)

def setPref(in_sKey, value):

    global DAVOS_PREFS

    if "|" not in in_sKey:
        DAVOS_PREFS[in_sKey] = value
        return

    sKeyList = in_sKey.split("|")
    iLastKey = len(sKeyList) - 1

    currPrefs = DAVOS_PREFS
    sPrevKey = ""
    prevPrefs = None
    for i, sKey in enumerate(sKeyList):

        if not isinstance(currPrefs, dict):
            prevPrefs[sPrevKey] = {}
            currPrefs = prevPrefs[sPrevKey]

        if i == iLastKey:
            currPrefs[sKey] = value
            return

        if sKey not in currPrefs:
            currPrefs[sKey] = {}

        prevPrefs = currPrefs
        sPrevKey = sKey
        currPrefs = currPrefs[sKey]

def getPref(in_sKey, default=None):

    global DAVOS_PREFS

    if "|" not in in_sKey:
        return DAVOS_PREFS.get(in_sKey, default)

    sKeyList = in_sKey.split("|")
    iLastKey = len(sKeyList) - 1

    currPrefs = DAVOS_PREFS
    for i, sKey in enumerate(sKeyList):

        if not isinstance(currPrefs, dict):
            k = "|".join(sKeyList[:(i + 1)])
            logMsg("Not a pref dictionary: '{}'.".format(k), warning=True)
            return default

        if i == iLastKey:
            return currPrefs.get(sKey, default)

        if sKey in currPrefs:
            currPrefs = currPrefs[sKey]
        else:
            logMsg("No such pref: '{}'.".format(in_sKey), warning=True)
            return default

_ICON_DIR_PATH = ""

def mkIconPath(sRelPath):

    global _ICON_DIR_PATH

    if (not _ICON_DIR_PATH) or (not osp.exists(_ICON_DIR_PATH)):
        p = sys.modules["davos"].__file__
        p = osp.abspath(osp.join(osp.dirname(p), "..", "resources", "icon"))
        _ICON_DIR_PATH = p

    return pathJoin(_ICON_DIR_PATH, sRelPath)

def writePackContent(sPackDirPath, dirStat=None):

    sPackDirPath = pathNorm(sPackDirPath)

    if not dirStat:
        dirStat = os.stat(sPackDirPath)

    sJsonPath = mkPackFilePath(sPackDirPath)

    iMtime = 0
    if not osp.exists(sJsonPath):
        iMtime = dirStat.st_mtime
        iAtime = dirStat.st_atime

    try:
        open(sJsonPath, 'a+b').close()  # create json file so it is listed by parseDirContent()
        dirContent = parseDirContent(sPackDirPath)
        jsonWrite(sJsonPath, dirContent, sort_keys=True)
    finally:
        if iMtime:
            os.utime(sPackDirPath, (iAtime, iMtime))

    return dirContent

def readPackContent(sPackDirPath, fail=True):

    try:
        dirContent = jsonRead(mkPackFilePath(sPackDirPath))
    except EnvironmentError as e:
        if fail:
            raise
        logMsg(toStr(e), warning=True)
        dirContent = parseDirContent(sPackDirPath)

    return dirContent

def mkPackFilePath(sPackDirPath):
    return pathJoin(sPackDirPath, "_package.json")

_ISPACK_REXP = re.compile(r".+_pkg[^/\w].+", re.I)

def assertPack(p, dirStat=None):

    if not dirStat:
        dirStat = os.stat(pathNorm(p))

    if isPack(p, fail=True, dirStat=dirStat):
        return dirStat

    return None

def belowPack(p):

    p = pathNorm(p)

    if os.environ["IN_SEB_MODE"]:
        return True if _belowPack(p) else _belowOldPack(p)
    else:
        return _belowPack(p)

def isPack(p, fail=False, dirStat=None):

    p = pathNorm(p)

    if os.environ["IN_SEB_MODE"]:
        bPackPath = True if _isPack(p) else _isOldPack(p)
    else:
        bPackPath = _isPack(p)

    if not bPackPath:
        if fail:
            sMsg = ("Directory NOT a package (should start with 'pkg_' or 'lyr_'): '{}'."
                    .format(osp.basename(p)))
            raise EnvironmentError(sMsg)
        else:
            return False

    if dirStat and not isDirStat(dirStat):
        if fail:
            raise EnvironmentError("Package path NOT a directory: '{}'".format(p))
        else:
            return False

    return True

def _belowPack(p):

    p = osp.dirname(p)

    for sDirName in pathSplitDirs(p):
        if _isPack(sDirName):
            return True

    return False

def _isPack(p):

    sBaseName = osp.basename(p) if "/" in p else p

    if "_" not in sBaseName:
        return False

    sPrefix = sBaseName.split("_", 1)[0]
    if not sPrefix:
        return False

    return (sPrefix.lower() + "_") in ("pkg_", "lyr_")

def _belowOldPack(p):

    p = osp.dirname(p)

    if "_pkg/" in p.lower():
        return True

    if _ISPACK_REXP.match(p):
        return True

    return False

def _isOldPack(p):

    sName = osp.basename(p)

    if sName.lower().endswith("_pkg"):
        return True

    if _ISPACK_REXP.match(sName):
        return True

    return False
gpl-3.0
openstack/poppy
poppy/metrics/blueflood/services.py
2
4570
# Copyright (c) 2016 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_context import context as context_utils
from oslo_log import log

from poppy.metrics import base
from poppy.metrics.blueflood.utils import client
from poppy.metrics.blueflood.utils import errors
from poppy.metrics.blueflood.utils import helper

LOG = log.getLogger(__name__)


class ServicesController(base.ServicesController):

    def __init__(self, driver):
        super(ServicesController, self).__init__(driver)

        self.driver = driver

    def _result_formatter(self, response):
        resp_dict = []
        if not response.ok:
            LOG.warning("BlueFlood Metrics Response status Code:{0} "
                        "Response Text: {1} "
                        "Request URL: {2}".format(response.status_code,
                                                  response.text,
                                                  response.url))
            return resp_dict
        else:
            serialized_response = response.json()
            try:
                values = serialized_response['values']
                for val in values:
                    m = {}
                    m['timestamp'] = helper.datetime_from_epoch(
                        int(val['timestamp']))
                    m['count'] = val['sum']
                    resp_dict.append(m)
            except KeyError:
                msg = 'content from {0} not conforming ' \
                      'to API contracts'.format(response.url)
                LOG.warning(msg)
                raise errors.BlueFloodApiSchemaError(msg)
            # sort the resp_dict by timestamp ascending
            resp_dict = sorted(resp_dict, key=lambda x: x['timestamp'])
            return resp_dict

    def read(self, metric_names, from_timestamp, to_timestamp, resolution):
        """read metrics from metrics driver.
        """
        curr_resolution = \
            helper.resolution_converter_seconds_to_enum(resolution)

        context_dict = context_utils.get_current().to_dict()
        project_id = context_dict['tenant']
        auth_token = None
        if self.driver.metrics_conf.use_keystone_auth:
            auth_token = context_dict['auth_token']

        tenanted_blueflood_url = \
            self.driver.metrics_conf.blueflood_url.format(
                project_id=project_id
            )
        from_timestamp = int(helper.datetime_to_epoch(from_timestamp))
        to_timestamp = int(helper.datetime_to_epoch(to_timestamp))
        urls = []
        params = {
            'to': to_timestamp,
            'from': from_timestamp,
            'resolution': curr_resolution
        }
        for metric_name in metric_names:
            tenanted_blueflood_url_with_metric = helper.join_url(
                tenanted_blueflood_url, metric_name.strip().replace(" ", ""))
            LOG.info("Querying BlueFlood Metric: {0}".format(
                tenanted_blueflood_url_with_metric))
            urls.append(helper.set_qs_on_url(
                tenanted_blueflood_url_with_metric, **params))
        executors = self.driver.metrics_conf.no_of_executors
        blueflood_client = client.BlueFloodMetricsClient(token=auth_token,
                                                         project_id=project_id,
                                                         executors=executors)
        results = blueflood_client.async_requests(urls)
        reordered_metric_names = []
        for result in results:
            metric_name = helper.retrieve_last_relative_url(result.url)
            reordered_metric_names.append(metric_name)

        formatted_results = []
        for metric_name, result in zip(reordered_metric_names, results):
            formatted_result = self._result_formatter(result)
            # NOTE(TheSriram): Tuple to pass the associated metric name, along
            # with the formatted result
            formatted_results.append((metric_name, formatted_result))

        return formatted_results
apache-2.0
solarsail/aerosol-tools
clustatlib/clucsv.py
1
3752
import numpy as np
import os
import os.path


class csvbuilder:
    def __init__(self, cs):
        self.cs = cs
        if not os.path.isdir('csv'):
            os.mkdir('csv')

    def month_type_csv(self, site = None):
        label = 'all' if site == None else site
        values, percentages = self.cs.month_type_stat(site)
        header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
        header = "month," + header
        all = []
        for i in range(len(values)):
            all.append(values[i])
            all.append(percentages[i])
        mat = np.matrix(all)
        mat = mat.transpose().tolist()
        content = []
        for i in range(12):
            content.append("%d,%s" % (i+1, ','.join([str(field) for field in mat[i]])))
        content = '\n'.join(content)
        with open("csv/month_type_%s.csv" % label, 'w') as outfile:
            outfile.write('\n'.join((header, content)))

    def year_type_csv(self, start_year, end_year, site = None):
        label = 'all' if site == None else site
        values, percentages = self.cs.year_type_stat(start_year, end_year, site)
        header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
        header = "year," + header
        all = []
        for i in range(len(values)):
            all.append(values[i])
            all.append(percentages[i])
        mat = np.matrix(all)
        mat = mat.transpose().tolist()
        content = []
        for i in range(start_year, end_year+1):
            content.append("%d,%s" % (i, ','.join([str(field) for field in mat[i-start_year]])))
        content = '\n'.join(content)
        with open("csv/year_type_%s.csv" % label, 'w') as outfile:
            outfile.write('\n'.join((header, content)))

    def type_csv(self):
        header = "type,count,percentage%"
        all = self.cs.type_stat()
        content = '\n'.join([','.join([str(field) for field in row]) for row in all])
        with open("csv/type_count.csv", 'w') as outfile:
            outfile.write('\n'.join((header, content)))

    def site_type_csv(self):
        all, types = self.cs.site_type_stat()
        header = ",".join(["type{},%".format(t) for t in range(1, types+1)])
        header = "site," + header
        content = '\n'.join([','.join([str(field) for field in row]) for row in all])
        with open("csv/site_type_count.csv", 'w') as outfile:
            outfile.write('\n'.join((header, content)))

    def type_stat_csv(self):
        header = "type,refr440,refr675,refr870,refr1020,refi440,refi675,refi870,refi1020,volmedianradf,stddevf,volconf,volmedianradc,stddevc,volconc,ssa675,ssa870,ssa1020,asy440,asy675,asy870,sphericity"
        list1 = self.cs.type_means()
        list2 = self.cs.type_stddev()
        l = []
        for i in range(len(list1)):
            l.append(list1[i])
            stddevline = list(list2[i])
            stddevline[0] = "stddev"
            l.append(stddevline)
        content = '\n'.join([','.join([str(field) for field in row]) for row in l])
        with open("csv/type_stat.csv", 'w') as outfile:
            outfile.write('\n'.join((header, content)))

    def distances_csv(self):
        clus, dist_mat = self.cs.all_distances()
        header = "," + ",".join([str(cid) for cid in clus])
        lines = []
        first = 1
        cur = 0
        for clu in clus:
            lines.append(str(clu) + ',' * first + ','.join(str(d) for d in dist_mat[cur:cur+len(clus)-first+1]))
            cur += len(clus) - first + 1
            first += 1
        content = '\n'.join(lines)
        with open("csv/distance_stat.csv", 'w') as outfile:
            outfile.write('\n'.join((header, content)))
gpl-3.0
stenskjaer/scrapy
scrapy/commands/parse.py
108
8286
from __future__ import print_function

import logging

from w3lib.url import is_url

from scrapy.commands import ScrapyCommand
from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils import display
from scrapy.utils.conf import arglist_to_dict
from scrapy.utils.spider import iterate_spider_output, spidercls_for_request
from scrapy.exceptions import UsageError

logger = logging.getLogger(__name__)


class Command(ScrapyCommand):

    requires_project = True

    spider = None
    items = {}
    requests = {}

    first_response = None

    def syntax(self):
        return "[options] <url>"

    def short_desc(self):
        return "Parse URL (using its spider) and print the results"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("--spider", dest="spider", default=None,
            help="use this spider without looking for one")
        parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
            help="set spider argument (may be repeated)")
        parser.add_option("--pipelines", action="store_true",
            help="process items through pipelines")
        parser.add_option("--nolinks", dest="nolinks", action="store_true",
            help="don't show links to follow (extracted requests)")
        parser.add_option("--noitems", dest="noitems", action="store_true",
            help="don't show scraped items")
        parser.add_option("--nocolour", dest="nocolour", action="store_true",
            help="avoid using pygments to colorize the output")
        parser.add_option("-r", "--rules", dest="rules", action="store_true",
            help="use CrawlSpider rules to discover the callback")
        parser.add_option("-c", "--callback", dest="callback",
            help="use this callback for parsing, instead looking for a callback")
        parser.add_option("-d", "--depth", dest="depth", type="int", default=1,
            help="maximum depth for parsing requests [default: %default]")
        parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
            help="print each depth level one by one")

    @property
    def max_level(self):
        levels = self.items.keys() + self.requests.keys()
        if levels:
            return max(levels)
        else:
            return 0

    def add_items(self, lvl, new_items):
        old_items = self.items.get(lvl, [])
        self.items[lvl] = old_items + new_items

    def add_requests(self, lvl, new_reqs):
        old_reqs = self.requests.get(lvl, [])
        self.requests[lvl] = old_reqs + new_reqs

    def print_items(self, lvl=None, colour=True):
        if lvl is None:
            items = [item for lst in self.items.values() for item in lst]
        else:
            items = self.items.get(lvl, [])

        print("# Scraped Items ", "-"*60)
        display.pprint([dict(x) for x in items], colorize=colour)

    def print_requests(self, lvl=None, colour=True):
        if lvl is None:
            levels = self.requests.keys()
            if levels:
                requests = self.requests[max(levels)]
            else:
                requests = []
        else:
            requests = self.requests.get(lvl, [])

        print("# Requests ", "-"*65)
        display.pprint(requests, colorize=colour)

    def print_results(self, opts):
        colour = not opts.nocolour

        if opts.verbose:
            for level in xrange(1, self.max_level+1):
                print('\n>>> DEPTH LEVEL: %s <<<' % level)
                if not opts.noitems:
                    self.print_items(level, colour)
                if not opts.nolinks:
                    self.print_requests(level, colour)
        else:
            print('\n>>> STATUS DEPTH LEVEL %s <<<' % self.max_level)
            if not opts.noitems:
                self.print_items(colour=colour)
            if not opts.nolinks:
                self.print_requests(colour=colour)

    def run_callback(self, response, cb):
        items, requests = [], []

        for x in iterate_spider_output(cb(response)):
            if isinstance(x, (BaseItem, dict)):
                items.append(x)
            elif isinstance(x, Request):
                requests.append(x)
        return items, requests

    def get_callback_from_rules(self, spider, response):
        if getattr(spider, 'rules', None):
            for rule in spider.rules:
                if rule.link_extractor.matches(response.url) and rule.callback:
                    return rule.callback
        else:
            logger.error('No CrawlSpider rules found in spider %(spider)r, '
                         'please specify a callback to use for parsing',
                         {'spider': spider.name})

    def set_spidercls(self, url, opts):
        spider_loader = self.crawler_process.spider_loader
        if opts.spider:
            try:
                self.spidercls = spider_loader.load(opts.spider)
            except KeyError:
                logger.error('Unable to find spider: %(spider)s',
                             {'spider': opts.spider})
        else:
            self.spidercls = spidercls_for_request(spider_loader, Request(url))
            if not self.spidercls:
                logger.error('Unable to find spider for: %(url)s',
                             {'url': url})

        request = Request(url, opts.callback)
        _start_requests = lambda s: [self.prepare_request(s, request, opts)]
        self.spidercls.start_requests = _start_requests

    def start_parsing(self, url, opts):
        self.crawler_process.crawl(self.spidercls, **opts.spargs)
        self.pcrawler = list(self.crawler_process.crawlers)[0]
        self.crawler_process.start()

        if not self.first_response:
            logger.error('No response downloaded for: %(url)s',
                         {'url': url})

    def prepare_request(self, spider, request, opts):
        def callback(response):
            # memorize first request
            if not self.first_response:
                self.first_response = response

            # determine real callback
            cb = response.meta['_callback']
            if not cb:
                if opts.rules and self.first_response == response:
                    cb = self.get_callback_from_rules(spider, response)
                else:
                    cb = 'parse'

            if not callable(cb):
                cb_method = getattr(spider, cb, None)
                if callable(cb_method):
                    cb = cb_method
                else:
                    logger.error('Cannot find callback %(callback)r in spider: %(spider)s',
                                 {'callback': callback, 'spider': spider.name})
                    return

            # parse items and requests
            depth = response.meta['_depth']

            items, requests = self.run_callback(response, cb)
            if opts.pipelines:
                itemproc = self.pcrawler.engine.scraper.itemproc
                for item in items:
                    itemproc.process_item(item, spider)
            self.add_items(depth, items)
            self.add_requests(depth, requests)

            if depth < opts.depth:
                for req in requests:
                    req.meta['_depth'] = depth + 1
                    req.meta['_callback'] = req.callback
                    req.callback = callback
            return requests

        request.meta['_depth'] = 1
        request.meta['_callback'] = request.callback
        request.callback = callback
        return request

    def process_options(self, args, opts):
        ScrapyCommand.process_options(self, args, opts)
        try:
            opts.spargs = arglist_to_dict(opts.spargs)
        except ValueError:
            raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)

    def run(self, args, opts):
        # parse arguments
        if not len(args) == 1 or not is_url(args[0]):
            raise UsageError()
        else:
            url = args[0]

        # prepare spidercls
        self.set_spidercls(url, opts)

        if self.spidercls and opts.depth > 0:
            self.start_parsing(url, opts)
            self.print_results(opts)
bsd-3-clause
Benster900/mhn
server/mhn/api/views.py
9
12576
import json
from StringIO import StringIO
import csv
from uuid import uuid1

from sqlalchemy import func
from sqlalchemy.exc import IntegrityError
from flask import Blueprint, request, jsonify, make_response
from bson.errors import InvalidId

from mhn import db, csrf
from mhn.api import errors
from mhn.api.models import (
        Sensor, Rule, DeployScript as Script,
        DeployScript, RuleSource)
from mhn.api.decorators import deploy_auth, sensor_auth, token_auth
from mhn.common.utils import error_response
from mhn.common.clio import Clio
from mhn.auth import current_user, login_required


api = Blueprint('api', __name__, url_prefix='/api')


# Endpoints for the Sensor resource.
@api.route('/sensor/', methods=['POST'])
@csrf.exempt
@deploy_auth
def create_sensor():
    missing = Sensor.check_required(request.json)
    if missing:
        return error_response(
                errors.API_FIELDS_MISSING.format(missing), 400)
    else:
        sensor = Sensor(**request.json)
        sensor.uuid = str(uuid1())
        sensor.ip = request.remote_addr
        Clio().authkey.new(**sensor.new_auth_dict()).post()
        try:
            db.session.add(sensor)
            db.session.commit()
        except IntegrityError:
            return error_response(
                    errors.API_SENSOR_EXISTS.format(request.json['name']), 400)
        else:
            return jsonify(sensor.to_dict())


@api.route('/sensor/', methods=['GET'])
@token_auth
def get_sensors():
    req = request.args.to_dict()
    if 'api_key' in req:
        del req['api_key']
    resp = make_response(json.dumps([s.to_dict() for s in Sensor.query.filter_by(**req)]))
    resp.headers['Content-Type'] = "application/json"
    return resp


@api.route('/sensor/<uuid>/', methods=['PUT'])
@csrf.exempt
def update_sensor(uuid):
    sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
    for field in request.json.keys():
        if field in Sensor.editable_fields():
            setattr(sensor, field, request.json[field])
        elif field in Sensor.fields():
            return error_response(
                    errors.API_FIELD_NOT_EDITABLE.format(field), 400)
        else:
            return error_response(
                    errors.API_FIELD_INVALID.format(field), 400)
    else:
        try:
            db.session.commit()
        except IntegrityError:
            return error_response(
                    errors.API_SENSOR_EXISTS.format(request.json['name']), 400)
    return jsonify(sensor.to_dict())


@api.route('/sensor/<uuid>/', methods=['DELETE'])
@login_required
def delete_sensor(uuid):
    sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
    Clio().authkey.delete(identifier=uuid)
    db.session.delete(sensor)
    db.session.commit()
    return jsonify({})


@api.route('/sensor/<uuid>/connect/', methods=['POST'])
@csrf.exempt
@sensor_auth
def connect_sensor(uuid):
    sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
    sensor.ip = request.remote_addr
    db.session.commit()
    return jsonify(sensor.to_dict())


# Utility functions that generalize the GET
# requests of resources from Mnemosyne.
def _get_one_resource(resource, res_id):
    try:
        res = resource.get(_id=res_id)
    except InvalidId:
        res = None
    if not res:
        return error_response(errors.API_RESOURCE_NOT_FOUND, 404)
    else:
        return jsonify(res.to_dict())


def _get_query_resource(resource, query):
    options = {}
    if 'limit' in query:
        options['limit'] = int(query['limit'])
    results = list(resource.get(options, **query))
    return jsonify(
        data=[r.to_dict() for r in results],
        meta={
            'size': len(results),
            'query': query,
            'options': options
        }
    )


# Now let's make use these methods in the views.
@api.route('/feed/<feed_id>/', methods=['GET'])
@token_auth
def get_feed(feed_id):
    return _get_one_resource(Clio().hpfeed, feed_id)


@api.route('/session/<session_id>/', methods=['GET'])
@token_auth
def get_session(session_id):
    return _get_one_resource(Clio().session, session_id)


@api.route('/url/<url_id>/', methods=['GET'])
@token_auth
def get_url(url_id):
    return _get_one_resource(Clio().url, url_id)


@api.route('/file/<file_id>/', methods=['GET'])
@token_auth
def get_file(file_id):
    return _get_one_resource(Clio().file, file_id)


@api.route('/dork/<dork_id>/', methods=['GET'])
@token_auth
def get_dork(dork_id):
    return _get_one_resource(Clio().dork, dork_id)


@api.route('/metadata/<metadata_id>/', methods=['GET'])
@token_auth
def get_metadatum(metadata_id):
    return _get_one_resource(Clio().metadata, metadata_id)


@api.route('/feed/', methods=['GET'])
@token_auth
def get_feeds():
    return _get_query_resource(Clio().hpfeed, request.args.to_dict())


@api.route('/session/', methods=['GET'])
@token_auth
def get_sessions():
    return _get_query_resource(Clio().session, request.args.to_dict())


@api.route('/url/', methods=['GET'])
@token_auth
def get_urls():
    return _get_query_resource(Clio().url, request.args.to_dict())


@api.route('/file/', methods=['GET'])
@token_auth
def get_files():
    return _get_query_resource(Clio().file, request.args.to_dict())


@api.route('/dork/', methods=['GET'])
@token_auth
def get_dorks():
    return _get_query_resource(Clio().dork, request.args.to_dict())


@api.route('/metadata/', methods=['GET'])
@token_auth
def get_metadata():
    return _get_query_resource(Clio().metadata, request.args.to_dict())


@api.route('/top_attackers/', methods=['GET'])
@token_auth
def top_attackers():
    options = request.args.to_dict()
    limit = int(options.get('limit', '1000'))
    hours_ago = int(options.get('hours_ago', '4'))

    extra = dict(options)
    for name in ('hours_ago', 'limit', 'api_key',):
        if name in extra:
            del extra[name]

    for name in options.keys():
        if name not in ('hours_ago', 'limit',):
            del options[name]

    results = Clio().session._tops(['source_ip', 'honeypot'], top=limit, hours_ago=hours_ago, **extra)

    return jsonify(
        data=results,
        meta={
            'size': len(results),
            'query': 'top_attackers',
            'options': options
        }
    )


@api.route('/attacker_stats/<ip>/', methods=['GET'])
@token_auth
def attacker_stats(ip):
    options = request.args.to_dict()
    hours_ago = int(options.get('hours_ago', '720'))  # 30 days

    for name in options.keys():
        if name not in ('hours_ago', 'limit',):
            del options[name]

    results = Clio().session.attacker_stats(ip, hours_ago=hours_ago)
    return jsonify(
        data=results,
        meta={
            'query': 'attacker_stats',
            'options': options
        }
    )


def get_tags(rec):
    tags = [rec['honeypot'], rec['protocol'], 'port-{}'.format(rec['destination_port']),]
    meta = rec['meta']
    if len(meta) > 0:
        meta = meta[0]
    else:
        meta = {}

    for meta_key in ['app', 'os', 'link',]:
        value = meta.get(meta_key)
        if value:
            tags.append(value.replace(',', '').replace('\t', ' '))
    return tags


@api.route('/intel_feed.csv/', methods=['GET'])
@token_auth
def intel_feed_csv():
    fieldnames = ['source_ip', 'count', 'tags', ]
    results = get_intel_feed()
    outf = StringIO()
    wr = csv.DictWriter(outf, fieldnames=fieldnames, delimiter='\t', lineterminator='\n')
    wr.writeheader()
    for rec in results['data']:
        wr.writerow({
            'count': rec['count'],
            'source_ip': rec['source_ip'],
            'tags': ','.join(get_tags(rec)),
        })
    response_data = outf.getvalue()
    outf.close()
    response = make_response(response_data)
    response.headers['Content-type'] = 'text/plain'
    return response


@api.route('/intel_feed/', methods=['GET'])
@token_auth
def intel_feed():
    results = get_intel_feed()
    return jsonify(**results)


def get_intel_feed():
    options = request.args.to_dict()
    limit = int(options.get('limit', '1000'))
    hours_ago = int(options.get('hours_ago', '4'))
    extra = dict(options)
    for name in ('hours_ago', 'limit', 'api_key',):
        if name in extra:
            del extra[name]

    for name in options.keys():
        if name not in ('hours_ago', 'limit',):
            del options[name]

    extra['ne__protocol'] = 'pcap'
    results = Clio().session._tops(['source_ip', 'honeypot', 'protocol', 'destination_port'], top=limit, hours_ago=hours_ago, **extra)
    results = [r for r in results if r['protocol'] != 'ftpdatalisten']

    cache = {}
    for r in results:
        source_ip = r['source_ip']
        if source_ip not in cache:
            # TODO: may want to make one big query to mongo here...
            cache[source_ip] = [m.to_dict() for m in Clio().metadata.get(ip=r['source_ip'], honeypot='p0f')]
        r['meta'] = cache[source_ip]

    return {
        'data': results,
        'meta': {
            'size': len(results),
            'query': 'intel_feed',
            'options': options
        }
    }


@api.route('/rule/<rule_id>/', methods=['PUT'])
@token_auth
def update_rule(rule_id):
    rule = Rule.query.filter_by(id=rule_id).first_or_404()
    for field in request.json.keys():
        if field in Rule.editable_fields():
            setattr(rule, field, request.json[field])
        elif field in Rule.fields():
            return error_response(
                    errors.API_FIELD_NOT_EDITABLE.format(field), 400)
        else:
            return error_response(
                    errors.API_FIELD_INVALID.format(field), 400)
    else:
        db.session.commit()
    return jsonify(rule.to_dict())


@api.route('/rule/', methods=['GET'])
@sensor_auth
def get_rules():
    # Getting active rules.
    if request.args.get('plaintext') in ['1', 'true']:
        # Requested rendered rules in plaintext.
        resp = make_response(Rule.renderall())
        resp.headers['Content-Disposition'] = "attachment; filename=mhn.rules"
        return resp
    else:
        # Responding with active rules.
rules = Rule.query.filter_by(is_active=True).\ group_by(Rule.sid).\ having(func.max(Rule.rev)) resp = make_response(json.dumps([ru.to_dict() for ru in rules])) resp.headers['Content-Type'] = "application/json" return resp @api.route('/rulesources/', methods=['POST']) @login_required def create_rule_source(): missing = RuleSource.check_required(request.json) if missing: return error_response( errors.API_FIELDS_MISSING.format(missing), 400) else: rsource = RuleSource(**request.json) try: db.session.add(rsource) db.session.commit() except IntegrityError: return error_response( errors.API_SOURCE_EXISTS.format(request.json['uri']), 400) else: return jsonify(rsource.to_dict()) @api.route('/rulesources/<rs_id>/', methods=['DELETE']) @login_required def delete_rule_source(rs_id): source = RuleSource.query.filter_by(id=rs_id).first_or_404() db.session.delete(source) db.session.commit() return jsonify({}) @api.route('/script/', methods=['POST']) @login_required def create_script(): missing = Script.check_required(request.json) if missing: return error_response( errors.API_FIELDS_MISSING.format(missing), 400) else: script = Script(**request.json) script.user = current_user db.session.add(script) db.session.commit() return jsonify(script.to_dict()) @api.route('/script/', methods=['PUT', 'PATCH']) @login_required def update_script(): script = Script.query.get(request.json.get('id')) script.user = current_user for editable in Script.editable_fields(): if editable in request.json: setattr(script, editable, request.json[editable]) db.session.add(script) db.session.commit() return jsonify(script.to_dict()) @api.route('/script/', methods=['GET']) def get_script(): if request.args.get('script_id'): script = DeployScript.query.get(request.args.get('script_id')) else: script = DeployScript.query.order_by(DeployScript.date.desc()).first() if request.args.get('text') in ['1', 'true']: resp = make_response(script.script) resp.headers['Content-Disposition'] = "attachment; filename=deploy.sh" return resp else: return jsonify(script.to_dict())
lgpl-2.1
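A client-side sketch of the sensor endpoints above. The base URL and key values are illustrative assumptions, the required and editable Sensor fields depend on Sensor.check_required()/editable_fields(), and authentication is enforced server-side by the @deploy_auth and @token_auth decorators.

import requests

BASE = 'http://mhn.example.com'  # hypothetical MHN server

# POST /api/sensor/ creates a sensor; the response echoes the sensor dict,
# including the uuid generated by the server.
created = requests.post(BASE + '/api/sensor/', json={'name': 'sensor-01'}).json()

# GET /api/sensor/ filters on arbitrary model fields passed as query
# parameters; the view strips api_key before calling filter_by().
sensors = requests.get(BASE + '/api/sensor/',
                       params={'api_key': 'TOKEN', 'name': 'sensor-01'}).json()

# PUT /api/sensor/<uuid>/ accepts only fields listed in editable_fields().
requests.put(BASE + '/api/sensor/%s/' % created['uuid'],
             json={'name': 'sensor-01-renamed'})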
HazyResearch/metal
metal/logging/writer.py
1
4223
import copy
import json
import os
from collections import defaultdict
from subprocess import check_output
from time import strftime

from metal.utils import recursive_transform


class LogWriter(object):
    """Class for writing simple JSON logs at end of runs, with interface for
    storing per-iter data as well.

    Config contains:
        log_dir: (str) The path to the base log directory, or defaults to
            current working directory.
        run_dir: (str) The name of the sub-directory, or defaults to the date,
            strftime("%Y_%m_%d").
        run_name: (str) The name of the run + the time, or defaults to the
            time, strftime("%H_%M_%S").
        writer_metrics: (list) An optional whitelist of metrics to write,
            ignoring all others. (If None, write all available metrics).

    Log is saved to 'log_dir/run_dir/{run_name}_H_M_S.json'
    """

    def __init__(
        self,
        log_dir=None,
        run_dir=None,
        run_name=None,
        writer_metrics=[],
        verbose=True,
        **kwargs,
    ):
        start_date = strftime("%Y_%m_%d")
        start_time = strftime("%H_%M_%S")

        # Set logging subdirectory + make sure exists
        log_dir = log_dir or os.getcwd()
        run_dir = run_dir or start_date
        if run_name is not None:
            run_name = f"{run_name}_{start_time}"
        else:
            run_name = start_time
        self.log_subdir = os.path.join(log_dir, run_dir, run_name)
        if not os.path.exists(self.log_subdir):
            os.makedirs(self.log_subdir)

        # Save other settings
        self.writer_metrics = writer_metrics
        self.verbose = verbose

        # Initialize log
        # Note we have a separate section for during-run metrics
        commit = check_output(["git", "rev-parse", "--short", "HEAD"]).strip()
        self.log_dict = {
            "start_date": start_date,
            "start_time": start_time,
            "commit": str(commit),
            "config": None,
            "run_log": defaultdict(list),
        }

    def add_scalar(self, name, val, i):
        # Note: Does not handle deduplication of (name, val) entries w same i
        # The whitelist check must use the writer_metrics attribute; the
        # original tested membership in the write_metrics method.
        if not self.writer_metrics or name in self.writer_metrics:
            if val is not None:
                val = float(val)
            self.log_dict["run_log"][name].append((i, val))
            return True
        else:
            return False

    def write(self, config=None, metrics=None):
        # Call the log-dumping method defined below (the original called a
        # nonexistent write_run_log).
        self.write_log()
        if config is not None:
            self.write_config(config)
        if metrics is not None:
            self.write_metrics(metrics)

    def write_log(self):
        """Dump log output to file"""
        log_path = os.path.join(self.log_subdir, "log.json")
        if self.verbose:
            print(f"Writing log to {log_path}")
        with open(log_path, "w") as f:
            json.dump(self.log_dict, f, indent=1)

    def write_config(self, config, config_name="config"):
        """Dump config dict to file"""
        config_path = os.path.join(self.log_subdir, f"{config_name}.json")
        if self.verbose:
            print(f"Writing config to {config_path}")
        with open(config_path, "w") as f:
            config = self._sanitize_config(config)
            json.dump(config, f, indent=1)

    def write_metrics(self, metrics):
        metrics_path = os.path.join(self.log_subdir, "metrics.json")
        if self.verbose:
            print(f"Writing metrics to {metrics_path}")
        with open(metrics_path, "w") as f:
            json.dump(metrics, f, indent=1)

    def close(self):
        pass

    def _sanitize_config(self, config):
        config = copy.deepcopy(config)
        # Replace individual functions
        is_func = lambda x: callable(x)
        replace_with_name = lambda f: str(f)
        config = recursive_transform(config, is_func, replace_with_name)
        # Replace lists of functions
        is_func_list = lambda x: isinstance(x, list) and all(is_func(f) for f in x)
        replace_with_names = lambda x: [replace_with_name(f) for f in x]
        config = recursive_transform(config, is_func_list, replace_with_names)
        return config
apache-2.0
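A usage sketch for the LogWriter above; the paths and metric names are illustrative. Note the constructor shells out to git rev-parse, so it assumes it runs inside a git checkout.

from metal.logging.writer import LogWriter

writer = LogWriter(log_dir='logs', run_dir='demo', run_name='test')
for i in range(3):
    writer.add_scalar('train/loss', 1.0 / (i + 1), i)  # per-iteration scalar
# Dumps log.json, config.json, and metrics.json under logs/demo/test_<time>/.
writer.write(config={'lr': 0.01}, metrics={'accuracy': 0.9})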
alex1818/capstone
bindings/python/capstone/xcore_const.py
37
3353
# For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [xcore_const.py] # Operand type for instruction's operands XCORE_OP_INVALID = 0 XCORE_OP_REG = 1 XCORE_OP_IMM = 2 XCORE_OP_MEM = 3 # XCore registers XCORE_REG_INVALID = 0 XCORE_REG_CP = 1 XCORE_REG_DP = 2 XCORE_REG_LR = 3 XCORE_REG_SP = 4 XCORE_REG_R0 = 5 XCORE_REG_R1 = 6 XCORE_REG_R2 = 7 XCORE_REG_R3 = 8 XCORE_REG_R4 = 9 XCORE_REG_R5 = 10 XCORE_REG_R6 = 11 XCORE_REG_R7 = 12 XCORE_REG_R8 = 13 XCORE_REG_R9 = 14 XCORE_REG_R10 = 15 XCORE_REG_R11 = 16 # pseudo registers XCORE_REG_PC = 17 XCORE_REG_SCP = 18 XCORE_REG_SSR = 19 XCORE_REG_ET = 20 XCORE_REG_ED = 21 XCORE_REG_SED = 22 XCORE_REG_KEP = 23 XCORE_REG_KSP = 24 XCORE_REG_ID = 25 XCORE_REG_ENDING = 26 # XCore instruction XCORE_INS_INVALID = 0 XCORE_INS_ADD = 1 XCORE_INS_ANDNOT = 2 XCORE_INS_AND = 3 XCORE_INS_ASHR = 4 XCORE_INS_BAU = 5 XCORE_INS_BITREV = 6 XCORE_INS_BLA = 7 XCORE_INS_BLAT = 8 XCORE_INS_BL = 9 XCORE_INS_BF = 10 XCORE_INS_BT = 11 XCORE_INS_BU = 12 XCORE_INS_BRU = 13 XCORE_INS_BYTEREV = 14 XCORE_INS_CHKCT = 15 XCORE_INS_CLRE = 16 XCORE_INS_CLRPT = 17 XCORE_INS_CLRSR = 18 XCORE_INS_CLZ = 19 XCORE_INS_CRC8 = 20 XCORE_INS_CRC32 = 21 XCORE_INS_DCALL = 22 XCORE_INS_DENTSP = 23 XCORE_INS_DGETREG = 24 XCORE_INS_DIVS = 25 XCORE_INS_DIVU = 26 XCORE_INS_DRESTSP = 27 XCORE_INS_DRET = 28 XCORE_INS_ECALLF = 29 XCORE_INS_ECALLT = 30 XCORE_INS_EDU = 31 XCORE_INS_EEF = 32 XCORE_INS_EET = 33 XCORE_INS_EEU = 34 XCORE_INS_ENDIN = 35 XCORE_INS_ENTSP = 36 XCORE_INS_EQ = 37 XCORE_INS_EXTDP = 38 XCORE_INS_EXTSP = 39 XCORE_INS_FREER = 40 XCORE_INS_FREET = 41 XCORE_INS_GETD = 42 XCORE_INS_GET = 43 XCORE_INS_GETN = 44 XCORE_INS_GETR = 45 XCORE_INS_GETSR = 46 XCORE_INS_GETST = 47 XCORE_INS_GETTS = 48 XCORE_INS_INCT = 49 XCORE_INS_INIT = 50 XCORE_INS_INPW = 51 XCORE_INS_INSHR = 52 XCORE_INS_INT = 53 XCORE_INS_IN = 54 XCORE_INS_KCALL = 55 XCORE_INS_KENTSP = 56 XCORE_INS_KRESTSP = 57 XCORE_INS_KRET = 58 XCORE_INS_LADD = 59 XCORE_INS_LD16S = 60 XCORE_INS_LD8U = 61 XCORE_INS_LDA16 = 62 XCORE_INS_LDAP = 63 XCORE_INS_LDAW = 64 XCORE_INS_LDC = 65 XCORE_INS_LDW = 66 XCORE_INS_LDIVU = 67 XCORE_INS_LMUL = 68 XCORE_INS_LSS = 69 XCORE_INS_LSUB = 70 XCORE_INS_LSU = 71 XCORE_INS_MACCS = 72 XCORE_INS_MACCU = 73 XCORE_INS_MJOIN = 74 XCORE_INS_MKMSK = 75 XCORE_INS_MSYNC = 76 XCORE_INS_MUL = 77 XCORE_INS_NEG = 78 XCORE_INS_NOT = 79 XCORE_INS_OR = 80 XCORE_INS_OUTCT = 81 XCORE_INS_OUTPW = 82 XCORE_INS_OUTSHR = 83 XCORE_INS_OUTT = 84 XCORE_INS_OUT = 85 XCORE_INS_PEEK = 86 XCORE_INS_REMS = 87 XCORE_INS_REMU = 88 XCORE_INS_RETSP = 89 XCORE_INS_SETCLK = 90 XCORE_INS_SET = 91 XCORE_INS_SETC = 92 XCORE_INS_SETD = 93 XCORE_INS_SETEV = 94 XCORE_INS_SETN = 95 XCORE_INS_SETPSC = 96 XCORE_INS_SETPT = 97 XCORE_INS_SETRDY = 98 XCORE_INS_SETSR = 99 XCORE_INS_SETTW = 100 XCORE_INS_SETV = 101 XCORE_INS_SEXT = 102 XCORE_INS_SHL = 103 XCORE_INS_SHR = 104 XCORE_INS_SSYNC = 105 XCORE_INS_ST16 = 106 XCORE_INS_ST8 = 107 XCORE_INS_STW = 108 XCORE_INS_SUB = 109 XCORE_INS_SYNCR = 110 XCORE_INS_TESTCT = 111 XCORE_INS_TESTLCL = 112 XCORE_INS_TESTWCT = 113 XCORE_INS_TSETMR = 114 XCORE_INS_START = 115 XCORE_INS_WAITEF = 116 XCORE_INS_WAITET = 117 XCORE_INS_WAITEU = 118 XCORE_INS_XOR = 119 XCORE_INS_ZEXT = 120 XCORE_INS_ENDING = 121 # Group of XCore instructions XCORE_GRP_INVALID = 0 # Generic groups XCORE_GRP_JUMP = 1 XCORE_GRP_ENDING = 2
bsd-3-clause
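A sketch of how these constants are consumed through the Capstone Python bindings; the instruction bytes are arbitrary placeholders, not a known XCore encoding.

from capstone import Cs, CS_ARCH_XCORE, CS_MODE_BIG_ENDIAN
from capstone.xcore_const import XCORE_OP_REG, XCORE_OP_IMM

md = Cs(CS_ARCH_XCORE, CS_MODE_BIG_ENDIAN)
md.detail = True  # required so insn.operands is populated
for insn in md.disasm(b'\x00\x00\x00\x00', 0x1000):  # placeholder bytes
    print(insn.mnemonic, insn.op_str)
    for op in insn.operands:
        if op.type == XCORE_OP_REG:
            print('  reg:', insn.reg_name(op.reg))
        elif op.type == XCORE_OP_IMM:
            print('  imm:', op.imm)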
hawkeyexp/plugin.video.netflix
resources/lib/services/nfsession/session/base.py
1
2055
# -*- coding: utf-8 -*- """ Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix) Copyright (C) 2018 Caphm (original implementation module) Copyright (C) 2019 Stefano Gottardo - @CastagnaIT Initialize the netflix session SPDX-License-Identifier: MIT See LICENSES/MIT.md for more information. """ from __future__ import absolute_import, division, unicode_literals import resources.lib.common as common from resources.lib.database.db_utils import TABLE_SESSION from resources.lib.globals import G from resources.lib.utils.logging import LOG class SessionBase(object): """Initialize the netflix session""" session = None """The requests.session object to handle communication to Netflix""" verify_ssl = True """Use SSL verification when performing requests""" # Functions from derived classes to allow perform particular operations in parent classes external_func_activate_profile = None # (set by nfsession_op.py) def __init__(self): self.verify_ssl = bool(G.ADDON.getSettingBool('ssl_verification')) self._init_session() def _init_session(self): """Initialize the session to use for all future connections""" try: self.session.close() LOG.info('Session closed') except AttributeError: pass from requests import session self.session = session() self.session.max_redirects = 10 # Too much redirects should means some problem self.session.headers.update({ 'User-Agent': common.get_user_agent(enable_android_mediaflag_fix=True), 'Accept-Encoding': 'gzip, deflate, br', 'Host': 'www.netflix.com' }) LOG.info('Initialized new session') @property def auth_url(self): """Access rights to make HTTP requests on an endpoint""" return G.LOCAL_DB.get_value('auth_url', table=TABLE_SESSION) @auth_url.setter def auth_url(self, value): G.LOCAL_DB.set_value('auth_url', value, TABLE_SESSION)
mit
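A standalone sketch of the session-initialization pattern above, with the Kodi/addon globals removed. The user-agent string is an illustrative stand-in for common.get_user_agent(), and applying verify_ssl to session.verify is an adaptation; the original stores the flag and applies it elsewhere.

import requests

def init_session(verify_ssl=True):
    session = requests.session()
    session.verify = verify_ssl  # SSL verification toggle
    session.max_redirects = 10   # too many redirects should mean some problem
    session.headers.update({
        'User-Agent': 'Mozilla/5.0 (illustrative UA)',
        'Accept-Encoding': 'gzip, deflate, br',
        'Host': 'www.netflix.com',
    })
    return session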
Wuguanping/Server_Manage_Plugin
Openstack_Plugin/ironic-plugin-pike/ironic/tests/unit/drivers/modules/irmc/test_common.py
3
10969
# Copyright 2015 FUJITSU LIMITED # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test class for common methods used by iRMC modules. """ import mock from oslo_config import cfg from ironic.common import exception from ironic.conductor import task_manager from ironic.drivers.modules.irmc import common as irmc_common from ironic.tests.unit.conductor import mgr_utils from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils from ironic.tests.unit.drivers import third_party_driver_mock_specs \ as mock_specs from ironic.tests.unit.objects import utils as obj_utils class IRMCValidateParametersTestCase(db_base.DbTestCase): def setUp(self): super(IRMCValidateParametersTestCase, self).setUp() self.node = obj_utils.create_test_node( self.context, driver='fake_irmc', driver_info=db_utils.get_test_irmc_info()) def test_parse_driver_info(self): info = irmc_common.parse_driver_info(self.node) self.assertEqual('1.2.3.4', info['irmc_address']) self.assertEqual('admin0', info['irmc_username']) self.assertEqual('fake0', info['irmc_password']) self.assertEqual(60, info['irmc_client_timeout']) self.assertEqual(80, info['irmc_port']) self.assertEqual('digest', info['irmc_auth_method']) self.assertEqual('ipmitool', info['irmc_sensor_method']) self.assertEqual('v2c', info['irmc_snmp_version']) self.assertEqual(161, info['irmc_snmp_port']) self.assertEqual('public', info['irmc_snmp_community']) self.assertFalse(info['irmc_snmp_security']) def test_parse_driver_option_default(self): self.node.driver_info = { "irmc_address": "1.2.3.4", "irmc_username": "admin0", "irmc_password": "fake0", } info = irmc_common.parse_driver_info(self.node) self.assertEqual('basic', info['irmc_auth_method']) self.assertEqual(443, info['irmc_port']) self.assertEqual(60, info['irmc_client_timeout']) self.assertEqual('ipmitool', info['irmc_sensor_method']) def test_parse_driver_info_missing_address(self): del self.node.driver_info['irmc_address'] self.assertRaises(exception.MissingParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_missing_username(self): del self.node.driver_info['irmc_username'] self.assertRaises(exception.MissingParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_missing_password(self): del self.node.driver_info['irmc_password'] self.assertRaises(exception.MissingParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_invalid_timeout(self): self.node.driver_info['irmc_client_timeout'] = 'qwe' self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_invalid_port(self): self.node.driver_info['irmc_port'] = 'qwe' self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_invalid_auth_method(self): self.node.driver_info['irmc_auth_method'] = 'qwe' self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) def 
test_parse_driver_info_invalid_sensor_method(self): self.node.driver_info['irmc_sensor_method'] = 'qwe' self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_missing_multiple_params(self): del self.node.driver_info['irmc_password'] del self.node.driver_info['irmc_address'] e = self.assertRaises(exception.MissingParameterValue, irmc_common.parse_driver_info, self.node) self.assertIn('irmc_password', str(e)) self.assertIn('irmc_address', str(e)) def test_parse_driver_info_invalid_snmp_version(self): self.node.driver_info['irmc_snmp_version'] = 'v3x' self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_invalid_snmp_port(self): self.node.driver_info['irmc_snmp_port'] = '161' self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_invalid_snmp_community(self): self.node.driver_info['irmc_snmp_version'] = 'v2c' self.node.driver_info['irmc_snmp_community'] = 100 self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_invalid_snmp_security(self): self.node.driver_info['irmc_snmp_version'] = 'v3' self.node.driver_info['irmc_snmp_security'] = 100 self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) def test_parse_driver_info_empty_snmp_security(self): self.node.driver_info['irmc_snmp_version'] = 'v3' self.node.driver_info['irmc_snmp_security'] = '' self.assertRaises(exception.InvalidParameterValue, irmc_common.parse_driver_info, self.node) class IRMCCommonMethodsTestCase(db_base.DbTestCase): def setUp(self): super(IRMCCommonMethodsTestCase, self).setUp() mgr_utils.mock_the_extension_manager(driver="fake_irmc") self.info = db_utils.get_test_irmc_info() self.node = obj_utils.create_test_node( self.context, driver='fake_irmc', driver_info=self.info) @mock.patch.object(irmc_common, 'scci', spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC) def test_get_irmc_client(self, mock_scci): self.info['irmc_port'] = 80 self.info['irmc_auth_method'] = 'digest' self.info['irmc_client_timeout'] = 60 mock_scci.get_client.return_value = 'get_client' returned_mock_scci_get_client = irmc_common.get_irmc_client(self.node) mock_scci.get_client.assert_called_with( self.info['irmc_address'], self.info['irmc_username'], self.info['irmc_password'], port=self.info['irmc_port'], auth_method=self.info['irmc_auth_method'], client_timeout=self.info['irmc_client_timeout']) self.assertEqual('get_client', returned_mock_scci_get_client) def test_update_ipmi_properties(self): with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: ipmi_info = { "ipmi_address": "1.2.3.4", "ipmi_username": "admin0", "ipmi_password": "fake0", } task.node.driver_info = self.info irmc_common.update_ipmi_properties(task) actual_info = task.node.driver_info expected_info = dict(self.info, **ipmi_info) self.assertEqual(expected_info, actual_info) @mock.patch.object(irmc_common, 'scci', spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC) def test_get_irmc_report(self, mock_scci): self.info['irmc_port'] = 80 self.info['irmc_auth_method'] = 'digest' self.info['irmc_client_timeout'] = 60 mock_scci.get_report.return_value = 'get_report' returned_mock_scci_get_report = irmc_common.get_irmc_report(self.node) mock_scci.get_report.assert_called_with( self.info['irmc_address'], self.info['irmc_username'], self.info['irmc_password'], port=self.info['irmc_port'], 
auth_method=self.info['irmc_auth_method'], client_timeout=self.info['irmc_client_timeout']) self.assertEqual('get_report', returned_mock_scci_get_report) def test_out_range_port(self): self.assertRaises(ValueError, cfg.CONF.set_override, 'port', 60, 'irmc') def test_out_range_auth_method(self): self.assertRaises(ValueError, cfg.CONF.set_override, 'auth_method', 'fake', 'irmc') def test_out_range_sensor_method(self): self.assertRaises(ValueError, cfg.CONF.set_override, 'sensor_method', 'fake', 'irmc') @mock.patch.object(irmc_common, 'elcm', spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC) def test_set_secure_boot_mode_enable(self, mock_elcm): mock_elcm.set_secure_boot_mode.return_value = 'set_secure_boot_mode' info = irmc_common.parse_driver_info(self.node) irmc_common.set_secure_boot_mode(self.node, True) mock_elcm.set_secure_boot_mode.assert_called_once_with( info, True) @mock.patch.object(irmc_common, 'elcm', spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC) def test_set_secure_boot_mode_disable(self, mock_elcm): mock_elcm.set_secure_boot_mode.return_value = 'set_secure_boot_mode' info = irmc_common.parse_driver_info(self.node) irmc_common.set_secure_boot_mode(self.node, False) mock_elcm.set_secure_boot_mode.assert_called_once_with( info, False) @mock.patch.object(irmc_common, 'elcm', spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC) @mock.patch.object(irmc_common, 'scci', spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC) def test_set_secure_boot_mode_fail(self, mock_scci, mock_elcm): irmc_common.scci.SCCIError = Exception mock_elcm.set_secure_boot_mode.side_effect = Exception with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.IRMCOperationError, irmc_common.set_secure_boot_mode, task.node, True) info = irmc_common.parse_driver_info(task.node) mock_elcm.set_secure_boot_mode.assert_called_once_with( info, True)
apache-2.0
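For reference, the driver_info shape these tests exercise, reconstructed from the assertions in test_parse_driver_info; some of these values may come from parse_driver_info defaults rather than the fixture itself.

driver_info = {
    'irmc_address': '1.2.3.4',
    'irmc_username': 'admin0',
    'irmc_password': 'fake0',
    'irmc_client_timeout': 60,
    'irmc_port': 80,
    'irmc_auth_method': 'digest',
    'irmc_sensor_method': 'ipmitool',
    'irmc_snmp_version': 'v2c',
    'irmc_snmp_port': 161,
    'irmc_snmp_community': 'public',
}
# parse_driver_info validates these and, when keys are omitted, fills in
# defaults such as irmc_port=443 and irmc_auth_method='basic'.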
erickt/hue
apps/beeswax/src/beeswax/design.py
26
9530
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The HQLdesign class can (de)serialize a design to/from a QueryDict. """ import json import logging import os import re import urlparse import django.http from django import forms from desktop.lib.django_forms import BaseSimpleFormSet, MultiForm from desktop.lib.django_mako import render_to_string from hadoop.cluster import get_hdfs LOG = logging.getLogger(__name__) SERIALIZATION_VERSION = '0.4.1' def hql_query(hql, database='default', query_type=None): data_dict = json.loads('{"query": {"email_notify": false, "query": null, "type": 0, "is_parameterized": true, "database": "default"}, ' '"functions": [], "VERSION": "0.4.1", "file_resources": [], "settings": []}') if not (isinstance(hql, str) or isinstance(hql, unicode)): raise Exception('Requires a SQL text query of type <str>, <unicode> and not %s' % type(hql)) data_dict['query']['query'] = strip_trailing_semicolon(hql) data_dict['query']['database'] = database if query_type: data_dict['query']['type'] = query_type hql_design = HQLdesign() hql_design._data_dict = data_dict return hql_design class HQLdesign(object): """ Represents an HQL design, with methods to perform (de)serialization. We support queries that aren't parameterized, in case users want to use "$" natively, but we leave that as an advanced option to turn off. 
""" _QUERY_ATTRS = [ 'query', 'type', 'is_parameterized', 'email_notify', 'database' ] _SETTINGS_ATTRS = [ 'key', 'value' ] _FILE_RES_ATTRS = [ 'type', 'path' ] _FUNCTIONS_ATTRS = [ 'name', 'class_name' ] def __init__(self, form=None, query_type=None): """Initialize the design from a valid form data.""" if form is not None: assert isinstance(form, MultiForm) self._data_dict = { 'query': normalize_form_dict(form.query, HQLdesign._QUERY_ATTRS), 'settings': normalize_formset_dict(form.settings, HQLdesign._SETTINGS_ATTRS), 'file_resources': normalize_formset_dict(form.file_resources, HQLdesign._FILE_RES_ATTRS), 'functions': normalize_formset_dict(form.functions, HQLdesign._FUNCTIONS_ATTRS) } if query_type is not None: self._data_dict['query']['type'] = query_type def dumps(self): """Returns the serialized form of the design in a string""" dic = self._data_dict.copy() dic['VERSION'] = SERIALIZATION_VERSION return json.dumps(dic) @property def hql_query(self): return self._data_dict['query']['query'] @hql_query.setter def hql_query(self, query): self._data_dict['query']['query'] = query @property def query(self): return self._data_dict['query'].copy() @property def settings(self): return list(self._data_dict['settings']) @property def file_resources(self): return list(self._data_dict['file_resources']) @property def functions(self): return list(self._data_dict['functions']) def get_configuration_statements(self): configuration = [] for f in self.file_resources: if not urlparse.urlsplit(f['path']).scheme: scheme = get_hdfs().fs_defaultfs else: scheme = '' configuration.append(render_to_string("hql_resource.mako", dict(type=f['type'], path=f['path'], scheme=scheme))) for f in self.functions: configuration.append(render_to_string("hql_function.mako", f)) return configuration def get_query_dict(self): # We construct the mform to use its structure and prefix. We don't actually bind data to the forms. from beeswax.forms import QueryForm mform = QueryForm() mform.bind() res = django.http.QueryDict('', mutable=True) res.update(denormalize_form_dict( self._data_dict['query'], mform.query, HQLdesign._QUERY_ATTRS)) res.update(denormalize_formset_dict( self._data_dict['settings'], mform.settings, HQLdesign._SETTINGS_ATTRS)) res.update(denormalize_formset_dict( self._data_dict['file_resources'], mform.file_resources, HQLdesign._FILE_RES_ATTRS)) res.update(denormalize_formset_dict( self._data_dict['functions'], mform.functions, HQLdesign._FUNCTIONS_ATTRS)) return res @staticmethod def loads(data): """Returns an HQLdesign from the serialized form""" dic = json.loads(data) dic = dict(map(lambda k: (str(k), dic.get(k)), dic.keys())) if dic['VERSION'] != SERIALIZATION_VERSION: LOG.error('Design version mismatch. 
Found %s; expect %s' % (dic['VERSION'], SERIALIZATION_VERSION)) # Convert to latest version del dic['VERSION'] if 'type' not in dic['query'] or dic['query']['type'] is None: dic['query']['type'] = 0 if 'database' not in dic['query']: dic['query']['database'] = 'default' design = HQLdesign() design._data_dict = dic return design def get_query(self): return self._data_dict["query"] @property def statement_count(self): return len(self.statements) def get_query_statement(self, n=0): return self.statements[n] @property def statements(self): hql_query = strip_trailing_semicolon(self.hql_query) return [strip_trailing_semicolon(statement.strip()) for statement in split_statements(hql_query)] def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __ne__(self, other): return not self.__eq__(other) def split_statements(hql): """ Split statments at semicolons ignoring the ones inside quotes and comments. The comment symbols that come inside quotes should be ignored. """ statements = [] current = '' prev = '' between_quotes = None is_comment = None lines = hql.splitlines() for line in lines: for c in line: current += c if c in ('"', "'") and prev != '\\' and is_comment is None: if between_quotes == c: between_quotes = None elif between_quotes is None: between_quotes = c elif c == '-' and prev == '-' and between_quotes is None and is_comment is None: is_comment = True elif c == ';': if between_quotes is None and is_comment is None: current = current.strip() # Strip off the trailing semicolon current = current[:-1] if len(current) > 1: statements.append(current) current = '' # This character holds no significance if it was escaped within a string if prev == '\\' and between_quotes is not None: c = '' prev = c is_comment = None prev = os.linesep if current != '': current += os.linesep if current and current != ';': current = current.strip() statements.append(current) return statements def normalize_form_dict(form, attr_list): """ normalize_form_dict(form, attr_list) -> A dictionary of (attr, value) Each attr is a field name. And the value is obtained by looking up the form's data dict. 
""" assert isinstance(form, forms.Form) res = { } for attr in attr_list: res[attr] = form.cleaned_data.get(attr) return res def normalize_formset_dict(formset, attr_list): """ normalize_formset_dict(formset, attr_list) -> A list of dictionary of (attr, value) """ assert isinstance(formset, BaseSimpleFormSet) res = [ ] for form in formset.forms: res.append(normalize_form_dict(form, attr_list)) return res def denormalize_form_dict(data_dict, form, attr_list): """ denormalize_form_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set """ assert isinstance(form, forms.Form) res = django.http.QueryDict('', mutable=True) for attr in attr_list: try: res[str(form.add_prefix(attr))] = data_dict[attr] except KeyError: pass return res def denormalize_formset_dict(data_dict_list, formset, attr_list): """ denormalize_formset_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set """ assert isinstance(formset, BaseSimpleFormSet) res = django.http.QueryDict('', mutable=True) for i, data_dict in enumerate(data_dict_list): prefix = formset.make_prefix(i) form = formset.form(prefix=prefix) res.update(denormalize_form_dict(data_dict, form, attr_list)) res[prefix + '-_exists'] = 'True' res[str(formset.management_form.add_prefix('next_form_id'))] = str(len(data_dict_list)) return res def __str__(self): return '%s: %s' % (self.__class__, self.query) _SEMICOLON_WHITESPACE = re.compile(";\s*$") def strip_trailing_semicolon(query): """As a convenience, we remove trailing semicolons from queries.""" s = _SEMICOLON_WHITESPACE.split(query, 2) if len(s) > 1: assert len(s) == 2 assert s[1] == '' return s[0]
apache-2.0
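A behavior sketch for the helpers above: hql_query() wraps raw HQL in a design, and split_statements() splits on semicolons while ignoring those inside quotes and after -- comments. The output shown in the trailing comment follows from the algorithm as written.

from beeswax.design import hql_query

design = hql_query("SELECT 1;\nSELECT ';abc' FROM t;")
print(design.statements)
# The semicolon inside the quoted string is ignored, so this prints:
#   ['SELECT 1', "SELECT ';abc' FROM t"]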
playerNaN/NaNPyGameEngine
engine.py
1
5921
import pygame
import sys
import os
from collections import namedtuple
import time

import resourcemanager

ColorList = namedtuple("ColorList", "black white red green blue")
colors = ColorList((0,0,0),(0xFF,0xFF,0xFF),(0xFF,0,0),(0,0xFF,0),(0,0,0xFF))
PyListener = namedtuple("PyListener", "condition effect")
PyEventListener = namedtuple("PyEventListener","events condition effect")

class Pyengine:

    def __init__(self,size):
        pygame.init()
        self.__size = size
        self.__fps = 60
        self.__bg = colors.white
        self.__fg = colors.black
        self.__on_update = []
        self.__on_draw = []
        self.__keys_down = {}
        self.__listeners = []
        self.__event_handlers = []
        self.__mouse_down = {}
        self.__display = None
        self.__screen_centerX = size[0]/2
        self.__scaleX = 1.0
        self.__scaleY = 1.0
        self.__screen_centerY = size[1]/2
        self.__clock = pygame.time.Clock()
        self.__buffer_surface = None
        self.__resource_manager = resourcemanager.ResourceManager()
        self.__animators = {}

    def add_animator(self,name,animator):
        self.__animators[name] = animator

    def remove_animator(self,name):
        del self.__animators[name]

    def get_animator(self,name):
        return self.__animators[name]

    def set_scale_x(self,x):
        self.__scaleX = x

    def get_scale_x(self):
        return self.__scaleX

    def set_scale_y(self,y):
        self.__scaleY = y

    def get_scale_y(self):
        return self.__scaleY

    def set_scale(self,s):
        self.__scaleX = s[0]
        self.__scaleY = s[1]

    def get_scale(self):
        return (self.__scaleX,self.__scaleY)

    def set_fg(self,fg):
        self.__fg = fg

    def get_fg(self):
        return self.__fg

    def set_bg(self,bg):
        self.__bg = bg

    def get_bg(self):
        return self.__bg

    def get_display(self):
        # Return the display surface itself; calling it was a bug.
        return self.__display

    def set_screen_center_x(self,x):
        self.__screen_centerX = x

    def get_screen_center_x(self):
        return self.__screen_centerX

    def set_screen_center_y(self,y):
        self.__screen_centerY = y

    def get_screen_center_y(self):
        return self.__screen_centerY

    def set_screen_center(self,pos):
        self.__screen_centerX = pos[0]
        self.__screen_centerY = pos[1]

    def get_screen_center(self):
        return (self.__screen_centerX,self.__screen_centerY)

    def get_buffer_surface(self):
        return self.__buffer_surface

    def get_resource_manager(self):
        return self.__resource_manager

    def update_all_animators(self):
        ms = self.__clock.get_time()
        for i in self.__animators:
            self.__animators[i].update(ms)

    def draw_all_animators(self):
        for i in self.__animators:
            self.draw_image(self.__animators[i].get_current_image(),self.__animators[i].get_position())

    def handle_events(self):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.exit()
            elif event.type == pygame.KEYDOWN:
                self.__keys_down[event.key] = True
            elif event.type == pygame.KEYUP:
                self.__keys_down[event.key] = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # Track per-button state; assigning a bare bool here broke
                # is_mouse_button_down, which expects a dict.
                self.__mouse_down[event.button] = True
            elif event.type == pygame.MOUSEBUTTONUP:
                self.__mouse_down[event.button] = False
            for handler in self.__event_handlers:
                if event.type in handler.events and handler.condition(self,event):
                    handler.effect(self,event)

    def draw_image(self,name,pos):
        self.__buffer_surface.blit(self.__resource_manager.get_image(name),pos)

    def is_key_down(self,key):
        if not key in self.__keys_down:
            return False
        return self.__keys_down[key]

    def is_mouse_button_down(self,button):
        if not button in self.__mouse_down:
            return False
        return self.__mouse_down[button]

    def run(self):
        screen = pygame.display.set_mode(self.__size)
        self.__display = screen
        oldTime = time.time()
        while True:
            spf = 1.0 / self.__fps
            self.handle_events()
            self.update()
            self.draw(screen)
            self.__clock.tick(self.__fps)

    def exit(self):
        pygame.display.quit()
        pygame.quit()
        sys.exit()

    def update(self):
        self.update_all_animators()
        for l in self.__listeners:
            if l.condition(self):
                l.effect(self)

    def draw(self,display):
        self.__buffer_surface = pygame.Surface(display.get_size())
        display.fill(colors.red)
        self.__buffer_surface.fill(self.__bg)
        for od in self.__on_draw:
            od(self,self.__buffer_surface)
        self.draw_all_animators()
        src_size = (self.__size[0]/self.__scaleX,self.__size[1]/self.__scaleY)
        top = self.__screen_centerY - src_size[1] / 2
        left = self.__screen_centerX - src_size[0] / 2
        cropped = pygame.Surface(src_size)
        cropped.blit(self.__buffer_surface,(0,0),(left,top,src_size[0],src_size[1]))
        cropped = pygame.transform.scale(cropped,self.__size)
        display.blit(cropped,(0,0))
        pygame.display.update((0,0,self.__size[0],self.__size[1]))

    def add_draw_listener(self,f):
        self.__on_draw.append(f)

    def add_listener(self,condition,effect):
        self.__listeners.append(PyListener(condition,effect))

    def add_on_update(self,effect):
        # self.__add_listener did not exist (name mangling would look for
        # _Pyengine__add_listener); delegate to add_listener instead.
        self.add_listener(lambda s:True,effect)

    def add_event_listener(self,events,condition,effect):
        self.__event_handlers.append(PyEventListener(events,condition,effect))

    def set_fps(self,fps):
        self.__fps = fps

    def get_fps(self):
        return self.__fps
unlicense
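A usage sketch for the engine above; it assumes the accompanying resourcemanager module is importable, and the callbacks are illustrative.

import pygame
from engine import Pyengine, colors

engine = Pyengine((640, 480))
engine.set_fps(30)

# Draw listeners receive (engine, buffer_surface) every frame.
engine.add_draw_listener(
    lambda e, surf: pygame.draw.circle(surf, colors.blue, (320, 240), 40))

# Plain listeners are (condition, effect) pairs checked on every update.
engine.add_listener(lambda e: e.is_key_down(pygame.K_ESCAPE),
                    lambda e: e.exit())

engine.run()  # enters the blocking main loop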
pinnamur/titanium_mobile
support/iphone/provisioner.py
34
3613
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Install a provisioning profile # import os, sys, subprocess, re, time, poorjson, types from xml.dom.minidom import parseString import codecs from OpenSSL import crypto def dequote(s): if s[0:1] == '"': return s[1:-1] return s def getText(nodelist): rc = "" for node in nodelist: if node.nodeType == node.TEXT_NODE: rc = rc + node.data return rc def make_map(dict): props = {} curkey = None for i in dict.childNodes: if i.nodeType == 1: if i.nodeName == 'key': curkey = str(getText(i.childNodes)).strip() elif i.nodeName == 'dict': props[curkey] = make_map(i) curkey = None elif i.nodeName == 'array': s = i.getElementsByTagName('string') if len(s): txt = '' for t in s: txt+=getText(t.childNodes) props[curkey]=txt else: props[curkey]=None curkey = None else: props[curkey] = getText(i.childNodes) curkey = None return props def find_dict_element(dict,name): found = False for i in dict.childNodes: if i.nodeType == 1: if i.nodeName == 'key': if str(getText(i.childNodes)).strip() == name: found = True elif found: return i return None def get_cert(dict): certs_array = find_dict_element(dict, 'DeveloperCertificates') if certs_array: certs_array = certs_array.getElementsByTagName('data') if not certs_array or not len(certs_array): return None cert_text = str(getText(certs_array[0].childNodes)).strip() cert_text = "-----BEGIN CERTIFICATE-----\n" + cert_text + "\n-----END CERTIFICATE-----\n" cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_text) return cert def main(args): if len(args)!=2: print "%s <provisioning_file>" % os.path.basename(args[0]) sys.exit(1) try: xml = os.path.abspath(os.path.expanduser(dequote(args[1].decode("utf-8")))) f = open(xml,'rb').read() b = f.index('<?xml') e = f.index('</plist>') xml_content = f[b:e+8] dom = parseString(xml_content) dict = dom.getElementsByTagName('dict')[0] props = make_map(dict) profile_type = 'unknown' if len(re.findall('ProvisionedDevices',xml_content)) > 0: profile_type = 'development' try: cert = get_cert(dict) if cert and re.search('Distribution:', cert.get_subject().commonName): profile_type = 'adhoc' except Exception, e: sys.stderr.write('ERROR: %s\n' % str(e)) else: profile_type = 'distribution' name = props['Name'] name = name.decode('string_escape').decode('utf-8') entitlements = props['Entitlements'] appid = entitlements['application-identifier'] appid_prefix = props['ApplicationIdentifierPrefix'] uuid = props['UUID'] bundle_id = appid.replace(appid_prefix+'.','') # check to see if xcode is already running output = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE).communicate()[0] is_xcode = re.findall(r'Xcode.app',output) xcode = len(is_xcode) > 0 # now we need to install the cert # we essentially open xcode causing the cert to be installed # automagically (but -g tells it to stay in the background) cmd = "open -g \"%s\"" % xml os.system(cmd) # only kill Xcode if it wasn't already running if xcode == False: # give it a sec to install before killing it time.sleep(1.5) cmd = "killall Xcode" os.system(cmd) print poorjson.PoorJSON().dump({'type':profile_type,'appid':bundle_id, 'prefix':appid_prefix, 'name':name, 'uuid': uuid}) sys.exit(0) except Exception, e: print e sys.exit(10) if __name__ == "__main__": main(sys.argv)
apache-2.0
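An invocation sketch for the script above; the profile path is illustrative. On success it prints a poorjson dict describing the installed profile.

import subprocess

out = subprocess.check_output(
    ['python', 'provisioner.py', '/path/to/MyApp_AdHoc.mobileprovision'])
# e.g. {"type": "adhoc", "appid": "com.example.myapp",
#       "prefix": "ABCDE12345", "name": "MyApp AdHoc", "uuid": "..."}
print(out)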
gsehub/edx-platform
lms/djangoapps/lti_provider/tests/test_tasks.py
12
4312
""" Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py """ import ddt from django.test import TestCase from mock import MagicMock, patch from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator import lti_provider.tasks as tasks from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService from student.tests.factories import UserFactory class BaseOutcomeTest(TestCase): """ Super type for tests of both the leaf and composite outcome celery tasks. """ def setUp(self): super(BaseOutcomeTest, self).setUp() self.course_key = CourseLocator( org='some_org', course='some_course', run='some_run' ) self.usage_key = BlockUsageLocator( course_key=self.course_key, block_type='problem', block_id='block_id' ) self.user = UserFactory.create() self.consumer = LtiConsumer( consumer_name='Lti Consumer Name', consumer_key='consumer_key', consumer_secret='consumer_secret', instance_guid='tool_instance_guid' ) self.consumer.save() outcome = OutcomeService( lis_outcome_service_url='http://example.com/service_url', lti_consumer=self.consumer ) outcome.save() self.assignment = GradedAssignment( user=self.user, course_key=self.course_key, usage_key=self.usage_key, outcome_service=outcome, lis_result_sourcedid='sourcedid', version_number=1, ) self.assignment.save() self.send_score_update_mock = self.setup_patch( 'lti_provider.outcomes.send_score_update', None ) def setup_patch(self, function_name, return_value): """ Patch a method with a given return value, and return the mock """ mock = MagicMock(return_value=return_value) new_patch = patch(function_name, new=mock) new_patch.start() self.addCleanup(new_patch.stop) return mock @ddt.ddt class SendLeafOutcomeTest(BaseOutcomeTest): """ Tests for the send_leaf_outcome method in tasks.py """ shard = 4 @ddt.data( (2.0, 2.0, 1.0), (2.0, 0.0, 0.0), (1, 2, 0.5), ) @ddt.unpack def test_outcome_with_score(self, earned, possible, expected): tasks.send_leaf_outcome( self.assignment.id, earned, possible ) self.send_score_update_mock.assert_called_once_with(self.assignment, expected) @ddt.ddt class SendCompositeOutcomeTest(BaseOutcomeTest): """ Tests for the send_composite_outcome method in tasks.py """ shard = 4 def setUp(self): super(SendCompositeOutcomeTest, self).setUp() self.descriptor = MagicMock() self.descriptor.location = BlockUsageLocator( course_key=self.course_key, block_type='problem', block_id='problem', ) self.course_grade = MagicMock() self.course_grade_mock = self.setup_patch( 'lti_provider.tasks.CourseGradeFactory.read', self.course_grade ) self.module_store = MagicMock() self.module_store.get_item = MagicMock(return_value=self.descriptor) self.check_result_mock = self.setup_patch( 'lti_provider.tasks.modulestore', self.module_store ) @ddt.data( (2.0, 2.0, 1.0), (2.0, 0.0, 0.0), (1, 2, 0.5), ) @ddt.unpack def test_outcome_with_score_score(self, earned, possible, expected): self.course_grade.score_for_module = MagicMock(return_value=(earned, possible)) tasks.send_composite_outcome( self.user.id, unicode(self.course_key), self.assignment.id, 1 ) self.send_score_update_mock.assert_called_once_with(self.assignment, expected) def test_outcome_with_outdated_version(self): self.assignment.version_number = 2 self.assignment.save() tasks.send_composite_outcome( self.user.id, unicode(self.course_key), self.assignment.id, 1 ) self.assertEqual(self.course_grade_mock.call_count, 0)
agpl-3.0
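A worked instance of the scoring rule the ddt cases above pin down: the outcome tasks report earned/possible to the LTI consumer as a fraction.

earned, possible = 1, 2
score = earned / float(possible)
assert score == 0.5  # matches the (1, 2, 0.5) ddt tuple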
Azure/azure-sdk-for-python
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py
1
21363
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class VirtualMachineScaleSetRollingUpgradesOperations: """VirtualMachineScaleSetRollingUpgradesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.compute.v2018_06_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _cancel_initial( self, resource_group_name: str, vm_scale_set_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" # Construct URL url = self._cancel_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.post(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _cancel_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore async def begin_cancel( self, resource_group_name: str, vm_scale_set_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Cancels the current virtual machine scale set rolling upgrade. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. :type vm_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._cancel_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore async def _start_os_upgrade_initial( self, resource_group_name: str, vm_scale_set_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" # Construct URL url = self._start_os_upgrade_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': 
self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.post(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _start_os_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore async def begin_start_os_upgrade( self, resource_group_name: str, vm_scale_set_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Starts a rolling upgrade to move all virtual machine scale set instances to the latest available Platform Image OS version. Instances which are already running the latest available OS version are not affected. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. :type vm_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._start_os_upgrade_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_start_os_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore async def _start_extension_upgrade_initial( self, resource_group_name: str, vm_scale_set_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" # Construct URL url = self._start_extension_upgrade_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.post(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _start_extension_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # 
type: ignore async def begin_start_extension_upgrade( self, resource_group_name: str, vm_scale_set_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Starts a rolling upgrade to move all extensions for all virtual machine scale set instances to the latest available extension version. Instances which are already running the latest extension versions are not affected. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. :type vm_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._start_extension_upgrade_initial( resource_group_name=resource_group_name, vm_scale_set_name=vm_scale_set_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_start_extension_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore async def get_latest( self, resource_group_name: str, vm_scale_set_name: str, **kwargs: Any ) -> "_models.RollingUpgradeStatusInfo": """Gets the status of the latest virtual machine scale set rolling upgrade. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_scale_set_name: The name of the VM scale set. 
:type vm_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: RollingUpgradeStatusInfo, or the result of cls(response) :rtype: ~azure.mgmt.compute.v2018_06_01.models.RollingUpgradeStatusInfo :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" accept = "application/json" # Construct URL url = self.get_latest.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_latest.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest'} # type: ignore
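The operations above follow the standard azure-core long-running-operation pattern: each begin_* coroutine returns an AsyncLROPoller[None] driven by AsyncARMPolling, and get_latest returns a deserialized RollingUpgradeStatusInfo. A minimal usage sketch, assuming the aio ComputeManagementClient from azure-mgmt-compute and DefaultAzureCredential from azure-identity; the subscription, resource group, and scale set names are placeholders:

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.compute.aio import ComputeManagementClient


async def main():
    # Placeholder identifiers -- substitute real values.
    subscription_id = "00000000-0000-0000-0000-000000000000"
    group, vmss = "my-resource-group", "my-scale-set"

    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(credential, subscription_id) as client:
            ops = client.virtual_machine_scale_set_rolling_upgrades

            # Start the rolling OS upgrade; the poller wraps the 200/202
            # response and polls until the operation completes.
            poller = await ops.begin_start_os_upgrade(group, vmss)
            await poller.result()  # None on success, HttpResponseError otherwise

            # Inspect the status of the most recent rolling upgrade.
            status = await ops.get_latest(group, vmss)
            print(status.running_status)


asyncio.run(main())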
mit
hyqneuron/pylearn2-maxsom
pylearn2/sandbox/cuda_convnet/tests/test_common.py
49
2802
__authors__ = "Ian Goodfellow, David Warde-Farley" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow, David Warde-Farley"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" __email__ = "pylearn-dev@googlegroups" from pylearn2.testing.skip import skip_if_no_gpu skip_if_no_gpu() import numpy as np from theano import shared from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs from pylearn2.sandbox.cuda_convnet.img_acts import ImageActs from theano.sandbox.cuda import gpu_from_host from theano import function from theano.tensor import as_tensor_variable def test_reject_rect(): for cls in (FilterActs, ImageActs): # Tests that running FilterActs with a non-square # kernel is an error rng = np.random.RandomState([2012, 10, 9]) batch_size = 5 rows = 10 cols = 9 channels = 3 filter_rows = 4 filter_cols = filter_rows + 1 num_filters = 6 images = shared(rng.uniform(-1., 1., (channels, rows, cols, batch_size)).astype('float32'), name='images') filters = shared(rng.uniform(-1., 1., (channels, filter_rows, filter_cols, num_filters)).astype('float32'), name='filters') gpu_images = gpu_from_host(images) gpu_filters = gpu_from_host(filters) if cls is ImageActs: output = cls()(gpu_images, gpu_filters, as_tensor_variable((rows, cols))) else: output = cls()(gpu_images, gpu_filters) f = function([], output) try: output = f() except ValueError: continue assert False def test_reject_bad_filt_number(): for cls in (FilterActs, ImageActs): # Tests that running FilterActs with a # of filters per # group that is not 16 is an error rng = np.random.RandomState([2012, 10, 9]) batch_size = 5 rows = 10 cols = 9 channels = 3 filter_rows = 4 filter_cols = filter_rows num_filters = 6 images = shared(rng.uniform(-1., 1., (channels, rows, cols, batch_size)).astype('float32'), name='images') filters = shared(rng.uniform(-1., 1., (channels, filter_rows, filter_cols, num_filters)).astype('float32'), name='filters') gpu_images = gpu_from_host(images) gpu_filters = gpu_from_host(filters) if cls is ImageActs: output = cls()(gpu_images, gpu_filters, as_tensor_variable((rows, cols))) else: output = cls()(gpu_images, gpu_filters) f = function([], output) try: output = f() except ValueError: continue assert False
bsd-3-clause
pianomania/scikit-learn
sklearn/utils/tests/test_random.py
85
7349
from __future__ import division

import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal

from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
    assert_raises,
    assert_equal,
    assert_true)


###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
    assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")


def test_sample_without_replacement_algorithms():
    methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")

    for m in methods:
        def sample_without_replacement_method(n_population, n_samples,
                                              random_state=None):
            return sample_without_replacement(n_population, n_samples,
                                              method=m,
                                              random_state=random_state)

        check_edge_case_of_sample_int(sample_without_replacement_method)
        check_sample_int(sample_without_replacement_method)
        check_sample_int_distribution(sample_without_replacement_method)


def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_sample
    assert_raises(ValueError, sample_without_replacement, 0, 1)
    assert_raises(ValueError, sample_without_replacement, 1, 2)

    # n_population == n_samples
    assert_equal(sample_without_replacement(0, 0).shape, (0, ))
    assert_equal(sample_without_replacement(1, 1).shape, (1, ))

    # n_population >= n_samples
    assert_equal(sample_without_replacement(5, 0).shape, (0, ))
    assert_equal(sample_without_replacement(5, 1).shape, (1, ))

    # n_population < 0 or n_samples < 0
    assert_raises(ValueError, sample_without_replacement, -1, 5)
    assert_raises(ValueError, sample_without_replacement, 5, -1)


def check_sample_int(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # the sample is of the correct length and contains only unique items
    n_population = 100

    for n_samples in range(n_population + 1):
        s = sample_without_replacement(n_population, n_samples)
        assert_equal(len(s), n_samples)
        unique = np.unique(s)
        assert_equal(np.size(unique), n_samples)
        assert_true(np.all(unique < n_population))

    # test edge case n_population == n_samples == 0
    assert_equal(np.size(sample_without_replacement(0, 0)), 0)


def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # sample generates all possible permutations
    n_population = 10

    # a large number of trials prevents false negatives without slowing normal
    # case
    n_trials = 10000

    for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting
        # the number of permutations. However, it works with sampling
        # algorithms that do not provide a random permutation of the
        # subset of integers.
        n_expected = combinations(n_population, n_samples, exact=True)

        output = {}
        for i in range(n_trials):
            output[frozenset(sample_without_replacement(n_population,
                                                        n_samples))] = None

            if len(output) == n_expected:
                break
        else:
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)"
                % (len(output), n_expected))


def test_random_choice_csc(n_samples=10000, random_state=24):
    # Explicit class probabilities
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Implicit class probabilities
    classes = [[0, 1], [1, 2]]  # test for array-like support
    class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]

    got = random_choice_csc(n_samples=n_samples, classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Edge case probabilities 1.0 and 0.0
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel(),
                        minlength=len(class_probabilites[k])) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # One class target data
    classes = [[1], [0]]  # test for array-like support
    class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]

    got = random_choice_csc(n_samples=n_samples, classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)


def test_random_choice_csc_errors():
    # the length of an array in classes and class_probabilites is mismatched
    classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # the class dtype is not supported
    classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # the class dtype is not supported
    classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # Given probabilities don't sum to 1
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)
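For reference, a short usage sketch of the two helpers these tests exercise, mirroring the call signatures used above (in later scikit-learn releases random_choice_csc was made a private helper, so this matches the version this fork tracks):

import numpy as np
from sklearn.utils.random import sample_without_replacement, random_choice_csc

# Draw 5 distinct integers from range(100), reproducibly.
idx = sample_without_replacement(n_population=100, n_samples=5,
                                 method="auto", random_state=0)
print(sorted(idx))  # five unique values in [0, 100)

# Sample a sparse n_samples x n_outputs matrix of class labels, one
# column per output, with explicit per-class probabilities.
classes = [np.array([0, 1]), np.array([0, 1, 2])]
probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
labels = random_choice_csc(10, classes, probabilities, random_state=0)
print(labels.toarray())  # shape (10, 2), entries drawn column by column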
bsd-3-clause
Affix/CouchPotatoServer
libs/pyutil/repeatable_random.py
106
3622
""" If you execute force_repeatability() then the following things are changed in the runtime: 1. random.random() and its sibling functions, and random.Random.seed() in the random module are seeded with a known seed so that they will return the same sequence on each run. 2. os.urandom() is replaced by a fake urandom that returns a pseudorandom sequence. 3. time.time() is replaced by a fake time that returns an incrementing number. (Original time.time is available as time.realtime.) Which seed will be used? If the environment variable REPEATABLE_RANDOMNESS_SEED is set, then it will use that. Else, it will use the current real time. In either case it logs the seed that it used. Caveats: 1. If some code has acquired a random.Random object before force_repeatability() is executed, then that Random object will produce non-reproducible results. For example, the tempfile module in the Python Standard Library does this. 2. Likewise if some code called time.time() before force_repeatability() was called, then it will have gotten a real time stamp. For example, trial does this. (Then it later subtracts that real timestamp from a faketime timestamp to calculate elapsed time, resulting in a large negative elapsed time.) 3. Fake urandom has an added constraint for performance reasons -- you can't ask it for more than 64 bytes of randomness at a time. (I couldn't figure out how to generate large fake random strings efficiently.) """ import os, random, time if not hasattr(time, "realtime"): time.realtime = time.time if not hasattr(os, "realurandom"): os.realurandom = os.urandom if not hasattr(random, "realseed"): random.realseed = random.seed tdelta = 0 seeded = False def force_repeatability(): now = 1043659734.0 def faketime(): global tdelta tdelta += 1 return now + tdelta time.faketime = faketime time.time = faketime from idlib import i2b def fakeurandom(n): if n > 64: raise ("Can't produce more than 64 bytes of pseudorandomness efficiently.") elif n == 0: return '' else: z = i2b(random.getrandbits(n*8)) x = z + "0" * (n-len(z)) assert len(x) == n return x os.fakeurandom = fakeurandom os.urandom = fakeurandom global seeded if not seeded: SEED = os.environ.get('REPEATABLE_RANDOMNESS_SEED', None) if SEED is None: # Generate a seed which is integral and fairly short (to ease cut-and-paste, writing it down, etc.). t = time.realtime() subsec = t % 1 t += (subsec * 1000000) t %= 1000000 SEED = long(t) import sys sys.stdout.write("REPEATABLE_RANDOMNESS_SEED: %s\n" % SEED) ; sys.stdout.flush() sys.stdout.write("In order to reproduce this run of the code, set the environment variable \"REPEATABLE_RANDOMNESS_SEED\" to %s before executing.\n" % SEED) ; sys.stdout.flush() random.seed(SEED) def seed_which_refuses(a): sys.stdout.write("I refuse to reseed to %s. Go away!\n" % (a,)) ; sys.stdout.flush() return random.realseed = random.seed random.seed = seed_which_refuses seeded = True import setutil setutil.RandomSet.DETERMINISTIC = True def restore_real_clock(): time.time = time.realtime def restore_real_urandom(): os.urandom = os.realurandom def restore_real_seed(): random.seed = random.realseed def restore_non_repeatability(): restore_real_seed() restore_real_urandom() restore_real_clock()
gpl-3.0
avoinsystems/odoo
addons/account_voucher/account_voucher.py
132
85482
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from lxml import etree from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp from openerp.tools.translate import _ from openerp.tools import float_compare from openerp.report import report_sxw import openerp class res_currency(osv.osv): _inherit = "res.currency" def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None): if context is None: context = {} res = super(res_currency, self)._get_current_rate(cr, uid, ids, raise_on_no_rate, context=context) if context.get('voucher_special_currency') in ids and context.get('voucher_special_currency_rate'): res[context.get('voucher_special_currency')] = context.get('voucher_special_currency_rate') return res class account_voucher(osv.osv): def _check_paid(self, cr, uid, ids, name, args, context=None): res = {} for voucher in self.browse(cr, uid, ids, context=context): res[voucher.id] = any([((line.account_id.type, 'in', ('receivable', 'payable')) and line.reconcile_id) for line in voucher.move_ids]) return res def _get_type(self, cr, uid, context=None): if context is None: context = {} return context.get('type', False) def _get_period(self, cr, uid, context=None): if context is None: context = {} if context.get('period_id', False): return context.get('period_id') periods = self.pool.get('account.period').find(cr, uid, context=context) return periods and periods[0] or False def _make_journal_search(self, cr, uid, ttype, context=None): journal_pool = self.pool.get('account.journal') return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1) def _get_journal(self, cr, uid, context=None): if context is None: context = {} invoice_pool = self.pool.get('account.invoice') journal_pool = self.pool.get('account.journal') if context.get('invoice_id', False): invoice = invoice_pool.browse(cr, uid, context['invoice_id'], context=context) journal_id = journal_pool.search(cr, uid, [ ('currency', '=', invoice.currency_id.id), ('company_id', '=', invoice.company_id.id) ], limit=1, context=context) return journal_id and journal_id[0] or False if context.get('journal_id', False): return context.get('journal_id') if not context.get('journal_id', False) and context.get('search_default_journal_id', False): return context.get('search_default_journal_id') ttype = context.get('type', 'bank') if ttype in ('payment', 'receipt'): ttype = 'bank' res = self._make_journal_search(cr, uid, ttype, context=context) return res and res[0] or False def _get_tax(self, cr, uid, context=None): if context is None: context = {} journal_pool = self.pool.get('account.journal') journal_id = 
context.get('journal_id', False) if not journal_id: ttype = context.get('type', 'bank') res = journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1) if not res: return False journal_id = res[0] if not journal_id: return False journal = journal_pool.browse(cr, uid, journal_id, context=context) account_id = journal.default_credit_account_id or journal.default_debit_account_id if account_id and account_id.tax_ids: tax_id = account_id.tax_ids[0].id return tax_id return False def _get_payment_rate_currency(self, cr, uid, context=None): """ Return the default value for field payment_rate_currency_id: the currency of the journal if there is one, otherwise the currency of the user's company """ if context is None: context = {} journal_pool = self.pool.get('account.journal') journal_id = context.get('journal_id', False) if journal_id: journal = journal_pool.browse(cr, uid, journal_id, context=context) if journal.currency: return journal.currency.id #no journal given in the context, use company currency as default return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id def _get_currency(self, cr, uid, context=None): if context is None: context = {} journal_pool = self.pool.get('account.journal') journal_id = context.get('journal_id', False) if journal_id: if isinstance(journal_id, (list, tuple)): # sometimes journal_id is a pair (id, display_name) journal_id = journal_id[0] journal = journal_pool.browse(cr, uid, journal_id, context=context) if journal.currency: return journal.currency.id return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id def _get_partner(self, cr, uid, context=None): if context is None: context = {} return context.get('partner_id', False) def _get_reference(self, cr, uid, context=None): if context is None: context = {} return context.get('reference', False) def _get_narration(self, cr, uid, context=None): if context is None: context = {} return context.get('narration', False) def _get_amount(self, cr, uid, context=None): if context is None: context= {} return context.get('amount', 0.0) def name_get(self, cr, uid, ids, context=None): if not ids: return [] if context is None: context = {} return [(r['id'], (r['number'] or _('Voucher'))) for r in self.read(cr, uid, ids, ['number'], context, load='_classic_write')] def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False): mod_obj = self.pool.get('ir.model.data') if context is None: context = {} if view_type == 'form': if not view_id and context.get('invoice_type'): if context.get('invoice_type') in ('out_invoice', 'out_refund'): result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form') else: result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form') result = result and result[1] or False view_id = result if not view_id and context.get('line_type'): if context.get('line_type') == 'customer': result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form') else: result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form') result = result and result[1] or False view_id = result res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) doc = etree.XML(res['arch']) if context.get('type', 'sale') in ('purchase', 'payment'): nodes = doc.xpath("//field[@name='partner_id']") for 
node in nodes: node.set('context', "{'default_customer': 0, 'search_default_supplier': 1, 'default_supplier': 1}") if context.get('invoice_type','') in ('in_invoice', 'in_refund'): node.set('string', _("Supplier")) res['arch'] = etree.tostring(doc) return res def _compute_writeoff_amount(self, cr, uid, line_dr_ids, line_cr_ids, amount, type): debit = credit = 0.0 sign = type == 'payment' and -1 or 1 for l in line_dr_ids: if isinstance(l, dict): debit += l['amount'] for l in line_cr_ids: if isinstance(l, dict): credit += l['amount'] return amount - sign * (credit - debit) def onchange_line_ids(self, cr, uid, ids, line_dr_ids, line_cr_ids, amount, voucher_currency, type, context=None): context = context or {} if not line_dr_ids and not line_cr_ids: return {'value':{'writeoff_amount': 0.0}} # resolve lists of commands into lists of dicts line_dr_ids = self.resolve_2many_commands(cr, uid, 'line_dr_ids', line_dr_ids, ['amount'], context) line_cr_ids = self.resolve_2many_commands(cr, uid, 'line_cr_ids', line_cr_ids, ['amount'], context) #compute the field is_multi_currency that is used to hide/display options linked to secondary currency on the voucher is_multi_currency = False #loop on the voucher lines to see if one of these has a secondary currency. If yes, we need to see the options for voucher_line in line_dr_ids+line_cr_ids: line_id = voucher_line.get('id') and self.pool.get('account.voucher.line').browse(cr, uid, voucher_line['id'], context=context).move_line_id.id or voucher_line.get('move_line_id') if line_id and self.pool.get('account.move.line').browse(cr, uid, line_id, context=context).currency_id: is_multi_currency = True break return {'value': {'writeoff_amount': self._compute_writeoff_amount(cr, uid, line_dr_ids, line_cr_ids, amount, type), 'is_multi_currency': is_multi_currency}} def _get_journal_currency(self, cr, uid, ids, name, args, context=None): res = {} for voucher in self.browse(cr, uid, ids, context=context): res[voucher.id] = voucher.journal_id.currency and voucher.journal_id.currency.id or voucher.company_id.currency_id.id return res def _get_writeoff_amount(self, cr, uid, ids, name, args, context=None): if not ids: return {} currency_obj = self.pool.get('res.currency') res = {} for voucher in self.browse(cr, uid, ids, context=context): debit = credit = 0.0 sign = voucher.type == 'payment' and -1 or 1 for l in voucher.line_dr_ids: debit += l.amount for l in voucher.line_cr_ids: credit += l.amount currency = voucher.currency_id or voucher.company_id.currency_id res[voucher.id] = currency_obj.round(cr, uid, currency, voucher.amount - sign * (credit - debit)) return res def _paid_amount_in_company_currency(self, cr, uid, ids, name, args, context=None): if context is None: context = {} res = {} ctx = context.copy() for v in self.browse(cr, uid, ids, context=context): ctx.update({'date': v.date}) #make a new call to browse in order to have the right date in the context, to get the right currency rate voucher = self.browse(cr, uid, v.id, context=ctx) ctx.update({ 'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False, 'voucher_special_currency_rate': voucher.currency_id.rate * voucher.payment_rate,}) res[voucher.id] = self.pool.get('res.currency').compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, voucher.amount, context=ctx) return res def _get_currency_help_label(self, cr, uid, currency_id, payment_rate, payment_rate_currency_id, context=None): """ This function builds a string to help the 
users to understand the behavior of the payment rate fields they can specify on the voucher. This string is only used to improve the usability in the voucher form view and has no other effect. :param currency_id: the voucher currency :type currency_id: integer :param payment_rate: the value of the payment_rate field of the voucher :type payment_rate: float :param payment_rate_currency_id: the value of the payment_rate_currency_id field of the voucher :type payment_rate_currency_id: integer :return: translated string giving a tip on what's the effect of the current payment rate specified :rtype: str """ rml_parser = report_sxw.rml_parse(cr, uid, 'currency_help_label', context=context) currency_pool = self.pool.get('res.currency') currency_str = payment_rate_str = '' if currency_id: currency_str = rml_parser.formatLang(1, currency_obj=currency_pool.browse(cr, uid, currency_id, context=context)) if payment_rate_currency_id: payment_rate_str = rml_parser.formatLang(payment_rate, currency_obj=currency_pool.browse(cr, uid, payment_rate_currency_id, context=context)) currency_help_label = _('At the operation date, the exchange rate was\n%s = %s') % (currency_str, payment_rate_str) return currency_help_label def _fnct_currency_help_label(self, cr, uid, ids, name, args, context=None): res = {} for voucher in self.browse(cr, uid, ids, context=context): res[voucher.id] = self._get_currency_help_label(cr, uid, voucher.currency_id.id, voucher.payment_rate, voucher.payment_rate_currency_id.id, context=context) return res _name = 'account.voucher' _description = 'Accounting Voucher' _inherit = ['mail.thread'] _order = "date desc, id desc" # _rec_name = 'number' _track = { 'state': { 'account_voucher.mt_voucher_state_change': lambda self, cr, uid, obj, ctx=None: True, }, } _columns = { 'type':fields.selection([ ('sale','Sale'), ('purchase','Purchase'), ('payment','Payment'), ('receipt','Receipt'), ],'Default Type', readonly=True, states={'draft':[('readonly',False)]}), 'name':fields.char('Memo', readonly=True, states={'draft':[('readonly',False)]}), 'date':fields.date('Date', readonly=True, select=True, states={'draft':[('readonly',False)]}, help="Effective date for accounting entries", copy=False), 'journal_id':fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]}), 'account_id':fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}), 'line_ids':fields.one2many('account.voucher.line', 'voucher_id', 'Voucher Lines', readonly=True, copy=True, states={'draft':[('readonly',False)]}), 'line_cr_ids':fields.one2many('account.voucher.line','voucher_id','Credits', domain=[('type','=','cr')], context={'default_type':'cr'}, readonly=True, states={'draft':[('readonly',False)]}), 'line_dr_ids':fields.one2many('account.voucher.line','voucher_id','Debits', domain=[('type','=','dr')], context={'default_type':'dr'}, readonly=True, states={'draft':[('readonly',False)]}), 'period_id': fields.many2one('account.period', 'Period', required=True, readonly=True, states={'draft':[('readonly',False)]}), 'narration':fields.text('Notes', readonly=True, states={'draft':[('readonly',False)]}), 'currency_id': fields.function(_get_journal_currency, type='many2one', relation='res.currency', string='Currency', readonly=True, required=True), 'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}), 'state':fields.selection( [('draft','Draft'), 
('cancel','Cancelled'), ('proforma','Pro-forma'), ('posted','Posted') ], 'Status', readonly=True, track_visibility='onchange', copy=False, help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed Voucher. \ \n* The \'Pro-forma\' when voucher is in Pro-forma status,voucher does not have an voucher number. \ \n* The \'Posted\' status is used when user create voucher,a voucher number is generated and voucher entries are created in account \ \n* The \'Cancelled\' status is used when user cancel voucher.'), 'amount': fields.float('Total', digits_compute=dp.get_precision('Account'), required=True, readonly=True, states={'draft':[('readonly',False)]}), 'tax_amount':fields.float('Tax Amount', digits_compute=dp.get_precision('Account'), readonly=True), 'reference': fields.char('Ref #', readonly=True, states={'draft':[('readonly',False)]}, help="Transaction reference number.", copy=False), 'number': fields.char('Number', readonly=True, copy=False), 'move_id':fields.many2one('account.move', 'Account Entry', copy=False), 'move_ids': fields.related('move_id','line_id', type='one2many', relation='account.move.line', string='Journal Items', readonly=True), 'partner_id':fields.many2one('res.partner', 'Partner', change_default=1, readonly=True, states={'draft':[('readonly',False)]}), 'audit': fields.related('move_id','to_check', type='boolean', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.', relation='account.move', string='To Review'), 'paid': fields.function(_check_paid, string='Paid', type='boolean', help="The Voucher has been totally paid."), 'pay_now':fields.selection([ ('pay_now','Pay Directly'), ('pay_later','Pay Later or Group Funds'), ],'Payment', select=True, readonly=True, states={'draft':[('readonly',False)]}), 'tax_id': fields.many2one('account.tax', 'Tax', readonly=True, states={'draft':[('readonly',False)]}, domain=[('price_include','=', False)], help="Only for tax excluded from price"), 'pre_line':fields.boolean('Previous Payments ?', required=False), 'date_due': fields.date('Due Date', readonly=True, select=True, states={'draft':[('readonly',False)]}), 'payment_option':fields.selection([ ('without_writeoff', 'Keep Open'), ('with_writeoff', 'Reconcile Payment Balance'), ], 'Payment Difference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="This field helps you to choose what you want to do with the eventual difference between the paid amount and the sum of allocated amounts. 
You can either choose to keep open this difference on the partner's account, or reconcile it with the payment(s)"), 'writeoff_acc_id': fields.many2one('account.account', 'Counterpart Account', readonly=True, states={'draft': [('readonly', False)]}), 'comment': fields.char('Counterpart Comment', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'analytic_id': fields.many2one('account.analytic.account','Write-Off Analytic Account', readonly=True, states={'draft': [('readonly', False)]}), 'writeoff_amount': fields.function(_get_writeoff_amount, string='Difference Amount', type='float', readonly=True, help="Computed as the difference between the amount stated in the voucher and the sum of allocation on the voucher lines."), 'payment_rate_currency_id': fields.many2one('res.currency', 'Payment Rate Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}), 'payment_rate': fields.float('Exchange Rate', digits=(12,6), required=True, readonly=True, states={'draft': [('readonly', False)]}, help='The specific rate that will be used, in this voucher, between the selected currency (in \'Payment Rate Currency\' field) and the voucher currency.'), 'paid_amount_in_company_currency': fields.function(_paid_amount_in_company_currency, string='Paid Amount in Company Currency', type='float', readonly=True), 'is_multi_currency': fields.boolean('Multi Currency Voucher', help='Fields with internal purpose only that depicts if the voucher is a multi currency one or not'), 'currency_help_label': fields.function(_fnct_currency_help_label, type='text', string="Helping Sentence", help="This sentence helps you to know how to specify the payment rate by giving you the direct effect it has"), } _defaults = { 'period_id': _get_period, 'partner_id': _get_partner, 'journal_id':_get_journal, 'currency_id': _get_currency, 'reference': _get_reference, 'narration':_get_narration, 'amount': _get_amount, 'type':_get_type, 'state': 'draft', 'pay_now': 'pay_now', 'name': '', 'date': fields.date.context_today, 'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.voucher',context=c), 'tax_id': _get_tax, 'payment_option': 'without_writeoff', 'comment': _('Write-Off'), 'payment_rate': 1.0, 'payment_rate_currency_id': _get_payment_rate_currency, } def compute_tax(self, cr, uid, ids, context=None): tax_pool = self.pool.get('account.tax') partner_pool = self.pool.get('res.partner') position_pool = self.pool.get('account.fiscal.position') voucher_line_pool = self.pool.get('account.voucher.line') voucher_pool = self.pool.get('account.voucher') if context is None: context = {} for voucher in voucher_pool.browse(cr, uid, ids, context=context): voucher_amount = 0.0 for line in voucher.line_ids: voucher_amount += line.untax_amount or line.amount line.amount = line.untax_amount or line.amount voucher_line_pool.write(cr, uid, [line.id], {'amount':line.amount, 'untax_amount':line.untax_amount}) if not voucher.tax_id: self.write(cr, uid, [voucher.id], {'amount':voucher_amount, 'tax_amount':0.0}) continue tax = [tax_pool.browse(cr, uid, voucher.tax_id.id, context=context)] partner = partner_pool.browse(cr, uid, voucher.partner_id.id, context=context) or False taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax) tax = tax_pool.browse(cr, uid, taxes, context=context) total = voucher_amount total_tax = 0.0 if not tax[0].price_include: for line in voucher.line_ids: for tax_line in tax_pool.compute_all(cr, uid, 
tax, line.amount, 1).get('taxes', []): total_tax += tax_line.get('amount', 0.0) total += total_tax else: for line in voucher.line_ids: line_total = 0.0 line_tax = 0.0 for tax_line in tax_pool.compute_all(cr, uid, tax, line.untax_amount or line.amount, 1).get('taxes', []): line_tax += tax_line.get('amount', 0.0) line_total += tax_line.get('price_unit') total_tax += line_tax untax_amount = line.untax_amount or line.amount voucher_line_pool.write(cr, uid, [line.id], {'amount':line_total, 'untax_amount':untax_amount}) self.write(cr, uid, [voucher.id], {'amount':total, 'tax_amount':total_tax}) return True def onchange_price(self, cr, uid, ids, line_ids, tax_id, partner_id=False, context=None): context = context or {} tax_pool = self.pool.get('account.tax') partner_pool = self.pool.get('res.partner') position_pool = self.pool.get('account.fiscal.position') if not line_ids: line_ids = [] res = { 'tax_amount': False, 'amount': False, } voucher_total = 0.0 # resolve the list of commands into a list of dicts line_ids = self.resolve_2many_commands(cr, uid, 'line_ids', line_ids, ['amount'], context) total_tax = 0.0 for line in line_ids: line_amount = 0.0 line_amount = line.get('amount',0.0) if tax_id: tax = [tax_pool.browse(cr, uid, tax_id, context=context)] if partner_id: partner = partner_pool.browse(cr, uid, partner_id, context=context) or False taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax) tax = tax_pool.browse(cr, uid, taxes, context=context) if not tax[0].price_include: for tax_line in tax_pool.compute_all(cr, uid, tax, line_amount, 1).get('taxes', []): total_tax += tax_line.get('amount') voucher_total += line_amount total = voucher_total + total_tax res.update({ 'amount': total or voucher_total, 'tax_amount': total_tax }) return { 'value': res } def onchange_term_id(self, cr, uid, ids, term_id, amount): term_pool = self.pool.get('account.payment.term') terms = False due_date = False default = {'date_due':False} if term_id and amount: terms = term_pool.compute(cr, uid, term_id, amount) if terms: due_date = terms[-1][0] default.update({ 'date_due':due_date }) return {'value':default} def onchange_journal_voucher(self, cr, uid, ids, line_ids=False, tax_id=False, price=0.0, partner_id=False, journal_id=False, ttype=False, company_id=False, context=None): """price Returns a dict that contains new values and context @param partner_id: latest value from user input for field partner_id @param args: other arguments @param context: context arguments, like lang, time zone @return: Returns a dict which contains new values, and context """ default = { 'value':{}, } if not partner_id or not journal_id: return default partner_pool = self.pool.get('res.partner') journal_pool = self.pool.get('account.journal') journal = journal_pool.browse(cr, uid, journal_id, context=context) partner = partner_pool.browse(cr, uid, partner_id, context=context) account_id = False tr_type = False if journal.type in ('sale','sale_refund'): account_id = partner.property_account_receivable.id tr_type = 'sale' elif journal.type in ('purchase', 'purchase_refund','expense'): account_id = partner.property_account_payable.id tr_type = 'purchase' else: if not journal.default_credit_account_id or not journal.default_debit_account_id: raise osv.except_osv(_('Error!'), _('Please define default credit/debit accounts on the journal "%s".') % (journal.name)) if ttype in ('sale', 'receipt'): account_id = journal.default_debit_account_id.id elif ttype in ('purchase', 'payment'): account_id = 
journal.default_credit_account_id.id else: account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id tr_type = 'receipt' default['value']['account_id'] = account_id default['value']['type'] = ttype or tr_type vals = self.onchange_journal(cr, uid, ids, journal_id, line_ids, tax_id, partner_id, time.strftime('%Y-%m-%d'), price, ttype, company_id, context) default['value'].update(vals.get('value')) return default def onchange_rate(self, cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=None): res = {'value': {'paid_amount_in_company_currency': amount, 'currency_help_label': self._get_currency_help_label(cr, uid, currency_id, rate, payment_rate_currency_id, context=context)}} if rate and amount and currency_id: company_currency = self.pool.get('res.company').browse(cr, uid, company_id, context=context).currency_id #context should contain the date, the payment currency and the payment rate specified on the voucher amount_in_company_currency = self.pool.get('res.currency').compute(cr, uid, currency_id, company_currency.id, amount, context=context) res['value']['paid_amount_in_company_currency'] = amount_in_company_currency return res def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None): if context is None: context = {} ctx = context.copy() ctx.update({'date': date}) #read the voucher rate with the right date in the context currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate'] ctx.update({ 'voucher_special_currency': payment_rate_currency_id, 'voucher_special_currency_rate': rate * voucher_rate}) res = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx) vals = self.onchange_rate(cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx) for key in vals.keys(): res[key].update(vals[key]) return res def recompute_payment_rate(self, cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=None): if context is None: context = {} #on change of the journal, we need to set also the default value for payment_rate and payment_rate_currency_id currency_obj = self.pool.get('res.currency') journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context) company_id = journal.company_id.id payment_rate = 1.0 currency_id = currency_id or journal.company_id.currency_id.id payment_rate_currency_id = currency_id ctx = context.copy() ctx.update({'date': date}) o2m_to_loop = False if ttype == 'receipt': o2m_to_loop = 'line_cr_ids' elif ttype == 'payment': o2m_to_loop = 'line_dr_ids' if o2m_to_loop and 'value' in vals and o2m_to_loop in vals['value']: for voucher_line in vals['value'][o2m_to_loop]: if not isinstance(voucher_line, dict): continue if voucher_line['currency_id'] != currency_id: # we take as default value for the payment_rate_currency_id, the currency of the first invoice that # is not in the voucher currency payment_rate_currency_id = voucher_line['currency_id'] tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate payment_rate = tmp / currency_obj.browse(cr, uid, currency_id, context=ctx).rate break vals['value'].update({ 'payment_rate': payment_rate, 'currency_id': currency_id, 'payment_rate_currency_id': 
payment_rate_currency_id }) #read the voucher rate with the right date in the context voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate'] ctx.update({ 'voucher_special_currency_rate': payment_rate * voucher_rate, 'voucher_special_currency': payment_rate_currency_id}) res = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx) for key in res.keys(): vals[key].update(res[key]) return vals def basic_onchange_partner(self, cr, uid, ids, partner_id, journal_id, ttype, context=None): partner_pool = self.pool.get('res.partner') journal_pool = self.pool.get('account.journal') res = {'value': {'account_id': False}} if not partner_id or not journal_id: return res journal = journal_pool.browse(cr, uid, journal_id, context=context) partner = partner_pool.browse(cr, uid, partner_id, context=context) account_id = False if journal.type in ('sale','sale_refund'): account_id = partner.property_account_receivable.id elif journal.type in ('purchase', 'purchase_refund','expense'): account_id = partner.property_account_payable.id elif ttype in ('sale', 'receipt'): account_id = journal.default_debit_account_id.id elif ttype in ('purchase', 'payment'): account_id = journal.default_credit_account_id.id else: account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id res['value']['account_id'] = account_id return res def onchange_partner_id(self, cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=None): if not journal_id: return {} if context is None: context = {} #TODO: comment me and use me directly in the sales/purchases views res = self.basic_onchange_partner(cr, uid, ids, partner_id, journal_id, ttype, context=context) if ttype in ['sale', 'purchase']: return res ctx = context.copy() # not passing the payment_rate currency and the payment_rate in the context but it's ok because they are reset in recompute_payment_rate ctx.update({'date': date}) vals = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx) vals2 = self.recompute_payment_rate(cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=context) for key in vals.keys(): res[key].update(vals[key]) for key in vals2.keys(): res[key].update(vals2[key]) #TODO: can probably be removed now #TODO: onchange_partner_id() should not returns [pre_line, line_dr_ids, payment_rate...] for type sale, and not # [pre_line, line_cr_ids, payment_rate...] for type purchase. # We should definitively split account.voucher object in two and make distinct on_change functions. 
In the # meanwhile, bellow lines must be there because the fields aren't present in the view, what crashes if the # onchange returns a value for them if ttype == 'sale': del(res['value']['line_dr_ids']) del(res['value']['pre_line']) del(res['value']['payment_rate']) elif ttype == 'purchase': del(res['value']['line_cr_ids']) del(res['value']['pre_line']) del(res['value']['payment_rate']) return res def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, date, context=None): """ Returns a dict that contains new values and context @param partner_id: latest value from user input for field partner_id @param args: other arguments @param context: context arguments, like lang, time zone @return: Returns a dict which contains new values, and context """ def _remove_noise_in_o2m(): """if the line is partially reconciled, then we must pay attention to display it only once and in the good o2m. This function returns True if the line is considered as noise and should not be displayed """ if line.reconcile_partial_id: if currency_id == line.currency_id.id: if line.amount_residual_currency <= 0: return True else: if line.amount_residual <= 0: return True return False if context is None: context = {} context_multi_currency = context.copy() currency_pool = self.pool.get('res.currency') move_line_pool = self.pool.get('account.move.line') partner_pool = self.pool.get('res.partner') journal_pool = self.pool.get('account.journal') line_pool = self.pool.get('account.voucher.line') #set default values default = { 'value': {'line_dr_ids': [], 'line_cr_ids': [], 'pre_line': False}, } # drop existing lines line_ids = ids and line_pool.search(cr, uid, [('voucher_id', '=', ids[0])]) for line in line_pool.browse(cr, uid, line_ids, context=context): if line.type == 'cr': default['value']['line_cr_ids'].append((2, line.id)) else: default['value']['line_dr_ids'].append((2, line.id)) if not partner_id or not journal_id: return default journal = journal_pool.browse(cr, uid, journal_id, context=context) partner = partner_pool.browse(cr, uid, partner_id, context=context) currency_id = currency_id or journal.company_id.currency_id.id total_credit = 0.0 total_debit = 0.0 account_type = None if context.get('account_id'): account_type = self.pool['account.account'].browse(cr, uid, context['account_id'], context=context).type if ttype == 'payment': if not account_type: account_type = 'payable' total_debit = price or 0.0 else: total_credit = price or 0.0 if not account_type: account_type = 'receivable' if not context.get('move_line_ids', False): ids = move_line_pool.search(cr, uid, [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner_id)], context=context) else: ids = context['move_line_ids'] invoice_id = context.get('invoice_id', False) company_currency = journal.company_id.currency_id.id move_lines_found = [] #order the lines by most old first ids.reverse() account_move_lines = move_line_pool.browse(cr, uid, ids, context=context) #compute the total debit/credit and look for a matching open amount or invoice for line in account_move_lines: if _remove_noise_in_o2m(): continue if invoice_id: if line.invoice.id == invoice_id: #if the invoice linked to the voucher line is equal to the invoice_id in context #then we assign the amount on that line, whatever the other voucher lines move_lines_found.append(line.id) elif currency_id == company_currency: #otherwise treatments is the same but with other field names if 
line.amount_residual == price: #if the amount residual is equal the amount voucher, we assign it to that voucher #line, whatever the other voucher lines move_lines_found.append(line.id) break #otherwise we will split the voucher amount on each line (by most old first) total_credit += line.credit or 0.0 total_debit += line.debit or 0.0 elif currency_id == line.currency_id.id: if line.amount_residual_currency == price: move_lines_found.append(line.id) break total_credit += line.credit and line.amount_currency or 0.0 total_debit += line.debit and line.amount_currency or 0.0 remaining_amount = price #voucher line creation for line in account_move_lines: if _remove_noise_in_o2m(): continue if line.currency_id and currency_id == line.currency_id.id: amount_original = abs(line.amount_currency) amount_unreconciled = abs(line.amount_residual_currency) else: #always use the amount booked in the company currency as the basis of the conversion into the voucher currency amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or line.debit or 0.0, context=context_multi_currency) amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual), context=context_multi_currency) line_currency_id = line.currency_id and line.currency_id.id or company_currency rs = { 'name':line.move_id.name, 'type': line.credit and 'dr' or 'cr', 'move_line_id':line.id, 'account_id':line.account_id.id, 'amount_original': amount_original, 'amount': (line.id in move_lines_found) and min(abs(remaining_amount), amount_unreconciled) or 0.0, 'date_original':line.date, 'date_due':line.date_maturity, 'amount_unreconciled': amount_unreconciled, 'currency_id': line_currency_id, } remaining_amount -= rs['amount'] #in case a corresponding move_line hasn't been found, we now try to assign the voucher amount #on existing invoices: we split voucher amount by most old first, but only for lines in the same currency if not move_lines_found: if currency_id == line_currency_id: if line.credit: amount = min(amount_unreconciled, abs(total_debit)) rs['amount'] = amount total_debit -= amount else: amount = min(amount_unreconciled, abs(total_credit)) rs['amount'] = amount total_credit -= amount if rs['amount_unreconciled'] == rs['amount']: rs['reconcile'] = True if rs['type'] == 'cr': default['value']['line_cr_ids'].append(rs) else: default['value']['line_dr_ids'].append(rs) if len(default['value']['line_cr_ids']) > 0: default['value']['pre_line'] = 1 elif len(default['value']['line_dr_ids']) > 0: default['value']['pre_line'] = 1 default['value']['writeoff_amount'] = self._compute_writeoff_amount(cr, uid, default['value']['line_dr_ids'], default['value']['line_cr_ids'], price, ttype) return default def onchange_payment_rate_currency(self, cr, uid, ids, currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=None): if context is None: context = {} res = {'value': {}} if currency_id: #set the default payment rate of the voucher and compute the paid amount in company currency ctx = context.copy() ctx.update({'date': date}) #read the voucher rate with the right date in the context voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate'] ctx.update({ 'voucher_special_currency_rate': payment_rate * voucher_rate, 'voucher_special_currency': payment_rate_currency_id}) vals = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx) for key 
in vals.keys(): res[key].update(vals[key]) return res def onchange_date(self, cr, uid, ids, date, currency_id, payment_rate_currency_id, amount, company_id, context=None): """ @param date: latest value from user input for field date @param args: other arguments @param context: context arguments, like lang, time zone @return: Returns a dict which contains new values, and context """ if context is None: context ={} res = {'value': {}} #set the period of the voucher period_pool = self.pool.get('account.period') currency_obj = self.pool.get('res.currency') ctx = context.copy() ctx.update({'company_id': company_id, 'account_period_prefer_normal': True}) voucher_currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id pids = period_pool.find(cr, uid, date, context=ctx) if pids: res['value'].update({'period_id':pids[0]}) if payment_rate_currency_id: ctx.update({'date': date}) payment_rate = 1.0 if payment_rate_currency_id != currency_id: tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate payment_rate = tmp / currency_obj.browse(cr, uid, voucher_currency_id, context=ctx).rate vals = self.onchange_payment_rate_currency(cr, uid, ids, voucher_currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=context) vals['value'].update({'payment_rate': payment_rate}) for key in vals.keys(): res[key].update(vals[key]) return res def onchange_journal(self, cr, uid, ids, journal_id, line_ids, tax_id, partner_id, date, amount, ttype, company_id, context=None): if context is None: context = {} if not journal_id: return False journal_pool = self.pool.get('account.journal') journal = journal_pool.browse(cr, uid, journal_id, context=context) if ttype in ('sale', 'receipt'): account_id = journal.default_debit_account_id elif ttype in ('purchase', 'payment'): account_id = journal.default_credit_account_id else: account_id = journal.default_credit_account_id or journal.default_debit_account_id tax_id = False if account_id and account_id.tax_ids: tax_id = account_id.tax_ids[0].id vals = {'value':{} } if ttype in ('sale', 'purchase'): vals = self.onchange_price(cr, uid, ids, line_ids, tax_id, partner_id, context) vals['value'].update({'tax_id':tax_id,'amount': amount}) currency_id = False if journal.currency: currency_id = journal.currency.id else: currency_id = journal.company_id.currency_id.id period_ids = self.pool['account.period'].find(cr, uid, dt=date, context=dict(context, company_id=company_id)) vals['value'].update({ 'currency_id': currency_id, 'payment_rate_currency_id': currency_id, 'period_id': period_ids and period_ids[0] or False }) #in case we want to register the payment directly from an invoice, it's confusing to allow to switch the journal #without seeing that the amount is expressed in the journal currency, and not in the invoice currency. So to avoid #this common mistake, we simply reset the amount to 0 if the currency is not the invoice currency. if context.get('payment_expected_currency') and currency_id != context.get('payment_expected_currency'): vals['value']['amount'] = 0 amount = 0 if partner_id: res = self.onchange_partner_id(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context) for key in res.keys(): vals[key].update(res[key]) return vals def onchange_company(self, cr, uid, ids, partner_id, journal_id, currency_id, company_id, context=None): """ If the company changes, check that the journal is in the right company. If not, fetch a new journal. 
""" journal_pool = self.pool['account.journal'] journal = journal_pool.browse(cr, uid, journal_id, context=context) if journal.company_id.id != company_id: # can not guess type of journal, better remove it return {'value': {'journal_id': False}} return {} def button_proforma_voucher(self, cr, uid, ids, context=None): self.signal_workflow(cr, uid, ids, 'proforma_voucher') return {'type': 'ir.actions.act_window_close'} def proforma_voucher(self, cr, uid, ids, context=None): self.action_move_line_create(cr, uid, ids, context=context) return True def action_cancel_draft(self, cr, uid, ids, context=None): self.create_workflow(cr, uid, ids) self.write(cr, uid, ids, {'state':'draft'}) return True def cancel_voucher(self, cr, uid, ids, context=None): reconcile_pool = self.pool.get('account.move.reconcile') move_pool = self.pool.get('account.move') move_line_pool = self.pool.get('account.move.line') for voucher in self.browse(cr, uid, ids, context=context): # refresh to make sure you don't unlink an already removed move voucher.refresh() for line in voucher.move_ids: # refresh to make sure you don't unreconcile an already unreconciled entry line.refresh() if line.reconcile_id: move_lines = [move_line.id for move_line in line.reconcile_id.line_id] move_lines.remove(line.id) reconcile_pool.unlink(cr, uid, [line.reconcile_id.id]) if len(move_lines) >= 2: move_line_pool.reconcile_partial(cr, uid, move_lines, 'auto',context=context) if voucher.move_id: move_pool.button_cancel(cr, uid, [voucher.move_id.id]) move_pool.unlink(cr, uid, [voucher.move_id.id]) res = { 'state':'cancel', 'move_id':False, } self.write(cr, uid, ids, res) return True def unlink(self, cr, uid, ids, context=None): for t in self.read(cr, uid, ids, ['state'], context=context): if t['state'] not in ('draft', 'cancel'): raise osv.except_osv(_('Invalid Action!'), _('Cannot delete voucher(s) which are already opened or paid.')) return super(account_voucher, self).unlink(cr, uid, ids, context=context) def onchange_payment(self, cr, uid, ids, pay_now, journal_id, partner_id, ttype='sale'): res = {} if not partner_id: return res res = {} partner_pool = self.pool.get('res.partner') journal_pool = self.pool.get('account.journal') if pay_now == 'pay_later': partner = partner_pool.browse(cr, uid, partner_id) journal = journal_pool.browse(cr, uid, journal_id) if journal.type in ('sale','sale_refund'): account_id = partner.property_account_receivable.id elif journal.type in ('purchase', 'purchase_refund','expense'): account_id = partner.property_account_payable.id elif ttype in ('sale', 'receipt'): account_id = journal.default_debit_account_id.id elif ttype in ('purchase', 'payment'): account_id = journal.default_credit_account_id.id else: account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id if account_id: res['account_id'] = account_id return {'value':res} def _sel_context(self, cr, uid, voucher_id, context=None): """ Select the context to use accordingly if it needs to be multicurrency or not. :param voucher_id: Id of the actual voucher :return: The returned context will be the same as given in parameter if the voucher currency is the same than the company currency, otherwise it's a copy of the parameter with an extra key 'date' containing the date of the voucher. 
:rtype: dict """ company_currency = self._get_company_currency(cr, uid, voucher_id, context) current_currency = self._get_current_currency(cr, uid, voucher_id, context) if current_currency <> company_currency: context_multi_currency = context.copy() voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context) context_multi_currency.update({'date': voucher.date}) return context_multi_currency return context def first_move_line_get(self, cr, uid, voucher_id, move_id, company_currency, current_currency, context=None): ''' Return a dict to be use to create the first account move line of given voucher. :param voucher_id: Id of voucher what we are creating account_move. :param move_id: Id of account move where this line will be added. :param company_currency: id of currency of the company to which the voucher belong :param current_currency: id of currency of the voucher :return: mapping between fieldname and value of account move line to create :rtype: dict ''' voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context) debit = credit = 0.0 # TODO: is there any other alternative then the voucher type ?? # ANSWER: We can have payment and receipt "In Advance". # TODO: Make this logic available. # -for sale, purchase we have but for the payment and receipt we do not have as based on the bank/cash journal we can not know its payment or receipt if voucher.type in ('purchase', 'payment'): credit = voucher.paid_amount_in_company_currency elif voucher.type in ('sale', 'receipt'): debit = voucher.paid_amount_in_company_currency if debit < 0: credit = -debit; debit = 0.0 if credit < 0: debit = -credit; credit = 0.0 sign = debit - credit < 0 and -1 or 1 #set the first line of the voucher move_line = { 'name': voucher.name or '/', 'debit': debit, 'credit': credit, 'account_id': voucher.account_id.id, 'move_id': move_id, 'journal_id': voucher.journal_id.id, 'period_id': voucher.period_id.id, 'partner_id': voucher.partner_id.id, 'currency_id': company_currency <> current_currency and current_currency or False, 'amount_currency': (sign * abs(voucher.amount) # amount < 0 for refunds if company_currency != current_currency else 0.0), 'date': voucher.date, 'date_maturity': voucher.date_due } return move_line def account_move_get(self, cr, uid, voucher_id, context=None): ''' This method prepare the creation of the account move related to the given voucher. :param voucher_id: Id of voucher for which we are creating account_move. 
:return: mapping between fieldname and value of the account move to create :rtype: dict ''' seq_obj = self.pool.get('ir.sequence') voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context) if voucher.number: name = voucher.number elif voucher.journal_id.sequence_id: if not voucher.journal_id.sequence_id.active: raise osv.except_osv(_('Configuration Error !'), _('Please activate the sequence of selected journal !')) c = dict(context) c.update({'fiscalyear_id': voucher.period_id.fiscalyear_id.id}) name = seq_obj.next_by_id(cr, uid, voucher.journal_id.sequence_id.id, context=c) else: raise osv.except_osv(_('Error!'), _('Please define a sequence on the journal.')) if not voucher.reference: ref = name.replace('/','') else: ref = voucher.reference move = { 'name': name, 'journal_id': voucher.journal_id.id, 'narration': voucher.narration, 'date': voucher.date, 'ref': ref, 'period_id': voucher.period_id.id, } return move def _get_exchange_lines(self, cr, uid, line, move_id, amount_residual, company_currency, current_currency, context=None): ''' Prepare the two lines in company currency due to the currency rate difference. :param line: browse record of the voucher.line for which we want to create the currency rate difference accounting entries :param move_id: Account move where the move lines will be added. :param amount_residual: Amount to be posted. :param company_currency: id of the currency of the company to which the voucher belongs :param current_currency: id of the currency of the voucher :return: the account move line and its counterpart to create, depicted as a mapping between fieldname and value :rtype: tuple of dict ''' if amount_residual > 0: account_id = line.voucher_id.company_id.expense_currency_exchange_account_id if not account_id: model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form') msg = _("You should configure the 'Loss Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.") raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel')) else: account_id = line.voucher_id.company_id.income_currency_exchange_account_id if not account_id: model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form') msg = _("You should configure the 'Gain Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.") raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel')) # Even if the amount_currency is never filled, we need to pass the foreign currency because otherwise # the receivable/payable account may have a secondary currency, which renders this field mandatory if line.account_id.currency_id: account_currency_id = line.account_id.currency_id.id else: account_currency_id = company_currency <> current_currency and current_currency or False move_line = { 'journal_id': line.voucher_id.journal_id.id, 'period_id': line.voucher_id.period_id.id, 'name': _('change')+': '+(line.name or '/'), 'account_id': line.account_id.id, 'move_id': move_id, 'partner_id': line.voucher_id.partner_id.id, 'currency_id': account_currency_id, 'amount_currency': 0.0, 'quantity': 1, 'credit': amount_residual > 0 and amount_residual or 0.0, 'debit': amount_residual < 0 and -amount_residual or 0.0, 'date': line.voucher_id.date, } move_line_counterpart = { 'journal_id': line.voucher_id.journal_id.id, 'period_id': 
line.voucher_id.period_id.id, 'name': _('change')+': '+(line.name or '/'), 'account_id': account_id.id, 'move_id': move_id, 'amount_currency': 0.0, 'partner_id': line.voucher_id.partner_id.id, 'currency_id': account_currency_id, 'quantity': 1, 'debit': amount_residual > 0 and amount_residual or 0.0, 'credit': amount_residual < 0 and -amount_residual or 0.0, 'date': line.voucher_id.date, } return (move_line, move_line_counterpart) def _convert_amount(self, cr, uid, amount, voucher_id, context=None): ''' This function converts the given amount into the company currency. It uses either the rate on the voucher (if the payment_rate_currency_id is relevant) or the rate encoded in the system. :param amount: float. The amount to convert :param voucher_id: id of the voucher on which we want the conversion :param context: the context to use for the conversion. It may contain the key 'date' set to the voucher date field in order to select the right rate to use. :return: the amount in the currency of the voucher's company :rtype: float ''' if context is None: context = {} currency_obj = self.pool.get('res.currency') voucher = self.browse(cr, uid, voucher_id, context=context) return currency_obj.compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, amount, context=context) def voucher_move_line_create(self, cr, uid, voucher_id, line_total, move_id, company_currency, current_currency, context=None): ''' Create one account move line, on the given account move, per voucher line where the amount is not 0.0. It returns a tuple (tot_line, rec_lst_ids) where tot_line is the total difference between debit and credit and rec_lst_ids is a list of lists of ids to be reconciled. :param voucher_id: Id of the voucher we are working with :param line_total: Amount of the first line, which corresponds to the amount we should split among all voucher lines. :param move_id: Account move where those lines will be added. 
:param company_currency: id of currency of the company to which the voucher belong :param current_currency: id of currency of the voucher :return: Tuple build as (remaining amount not allocated on voucher lines, list of account_move_line created in this method) :rtype: tuple(float, list of int) ''' if context is None: context = {} move_line_obj = self.pool.get('account.move.line') currency_obj = self.pool.get('res.currency') tax_obj = self.pool.get('account.tax') tot_line = line_total rec_lst_ids = [] date = self.read(cr, uid, [voucher_id], ['date'], context=context)[0]['date'] ctx = context.copy() ctx.update({'date': date}) voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context=ctx) voucher_currency = voucher.journal_id.currency or voucher.company_id.currency_id ctx.update({ 'voucher_special_currency_rate': voucher_currency.rate * voucher.payment_rate , 'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,}) prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account') for line in voucher.line_ids: #create one move line per voucher line where amount is not 0.0 # AND (second part of the clause) only if the original move line was not having debit = credit = 0 (which is a legal value) if not line.amount and not (line.move_line_id and not float_compare(line.move_line_id.debit, line.move_line_id.credit, precision_digits=prec) and not float_compare(line.move_line_id.debit, 0.0, precision_digits=prec)): continue # convert the amount set on the voucher line into the currency of the voucher's company # this calls res_curreny.compute() with the right context, so that it will take either the rate on the voucher if it is relevant or will use the default behaviour amount = self._convert_amount(cr, uid, line.untax_amount or line.amount, voucher.id, context=ctx) # if the amount encoded in voucher is equal to the amount unreconciled, we need to compute the # currency rate difference if line.amount == line.amount_unreconciled: if not line.move_line_id: raise osv.except_osv(_('Wrong voucher line'),_("The invoice you are willing to pay is not valid anymore.")) sign = line.type =='dr' and -1 or 1 currency_rate_difference = sign * (line.move_line_id.amount_residual - amount) else: currency_rate_difference = 0.0 move_line = { 'journal_id': voucher.journal_id.id, 'period_id': voucher.period_id.id, 'name': line.name or '/', 'account_id': line.account_id.id, 'move_id': move_id, 'partner_id': voucher.partner_id.id, 'currency_id': line.move_line_id and (company_currency <> line.move_line_id.currency_id.id and line.move_line_id.currency_id.id) or False, 'analytic_account_id': line.account_analytic_id and line.account_analytic_id.id or False, 'quantity': 1, 'credit': 0.0, 'debit': 0.0, 'date': voucher.date } if amount < 0: amount = -amount if line.type == 'dr': line.type = 'cr' else: line.type = 'dr' if (line.type=='dr'): tot_line += amount move_line['debit'] = amount else: tot_line -= amount move_line['credit'] = amount if voucher.tax_id and voucher.type in ('sale', 'purchase'): move_line.update({ 'account_tax_id': voucher.tax_id.id, }) # compute the amount in foreign currency foreign_currency_diff = 0.0 amount_currency = False if line.move_line_id: # We want to set it on the account move line as soon as the original line had a foreign currency if line.move_line_id.currency_id and line.move_line_id.currency_id.id != company_currency: # we compute the amount in that foreign currency. 
if line.move_line_id.currency_id.id == current_currency: # if the voucher and the voucher line share the same currency, there is no computation to do sign = (move_line['debit'] - move_line['credit']) < 0 and -1 or 1 amount_currency = sign * (line.amount) else: # if the rate is specified on the voucher, it will be used thanks to the special keys in the context # otherwise we use the rates of the system amount_currency = currency_obj.compute(cr, uid, company_currency, line.move_line_id.currency_id.id, move_line['debit']-move_line['credit'], context=ctx) if line.amount == line.amount_unreconciled: foreign_currency_diff = line.move_line_id.amount_residual_currency - abs(amount_currency) move_line['amount_currency'] = amount_currency voucher_line = move_line_obj.create(cr, uid, move_line) rec_ids = [voucher_line, line.move_line_id.id] if not currency_obj.is_zero(cr, uid, voucher.company_id.currency_id, currency_rate_difference): # Change difference entry in company currency exch_lines = self._get_exchange_lines(cr, uid, line, move_id, currency_rate_difference, company_currency, current_currency, context=context) new_id = move_line_obj.create(cr, uid, exch_lines[0],context) move_line_obj.create(cr, uid, exch_lines[1], context) rec_ids.append(new_id) if line.move_line_id and line.move_line_id.currency_id and not currency_obj.is_zero(cr, uid, line.move_line_id.currency_id, foreign_currency_diff): # Change difference entry in voucher currency move_line_foreign_currency = { 'journal_id': line.voucher_id.journal_id.id, 'period_id': line.voucher_id.period_id.id, 'name': _('change')+': '+(line.name or '/'), 'account_id': line.account_id.id, 'move_id': move_id, 'partner_id': line.voucher_id.partner_id.id, 'currency_id': line.move_line_id.currency_id.id, 'amount_currency': -1 * foreign_currency_diff, 'quantity': 1, 'credit': 0.0, 'debit': 0.0, 'date': line.voucher_id.date, } new_id = move_line_obj.create(cr, uid, move_line_foreign_currency, context=context) rec_ids.append(new_id) if line.move_line_id.id: rec_lst_ids.append(rec_ids) return (tot_line, rec_lst_ids) def writeoff_move_line_get(self, cr, uid, voucher_id, line_total, move_id, name, company_currency, current_currency, context=None): ''' Set a dict to be use to create the writeoff move line. :param voucher_id: Id of voucher what we are creating account_move. :param line_total: Amount remaining to be allocated on lines. :param move_id: Id of account move where this line will be added. :param name: Description of account move line. 
:param company_currency: id of currency of the company to which the voucher belong :param current_currency: id of currency of the voucher :return: mapping between fieldname and value of account move line to create :rtype: dict ''' currency_obj = self.pool.get('res.currency') move_line = {} voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context) current_currency_obj = voucher.currency_id or voucher.journal_id.company_id.currency_id if not currency_obj.is_zero(cr, uid, current_currency_obj, line_total): diff = line_total account_id = False write_off_name = '' if voucher.payment_option == 'with_writeoff': account_id = voucher.writeoff_acc_id.id write_off_name = voucher.comment elif voucher.partner_id: if voucher.type in ('sale', 'receipt'): account_id = voucher.partner_id.property_account_receivable.id else: account_id = voucher.partner_id.property_account_payable.id else: # fallback on account of voucher account_id = voucher.account_id.id sign = voucher.type == 'payment' and -1 or 1 move_line = { 'name': write_off_name or name, 'account_id': account_id, 'move_id': move_id, 'partner_id': voucher.partner_id.id, 'date': voucher.date, 'credit': diff > 0 and diff or 0.0, 'debit': diff < 0 and -diff or 0.0, 'amount_currency': company_currency <> current_currency and (sign * -1 * voucher.writeoff_amount) or 0.0, 'currency_id': company_currency <> current_currency and current_currency or False, 'analytic_account_id': voucher.analytic_id and voucher.analytic_id.id or False, } return move_line def _get_company_currency(self, cr, uid, voucher_id, context=None): ''' Get the currency of the actual company. :param voucher_id: Id of the voucher what i want to obtain company currency. :return: currency id of the company of the voucher :rtype: int ''' return self.pool.get('account.voucher').browse(cr,uid,voucher_id,context).journal_id.company_id.currency_id.id def _get_current_currency(self, cr, uid, voucher_id, context=None): ''' Get the currency of the voucher. :param voucher_id: Id of the voucher what i want to obtain current currency. :return: currency id of the voucher :rtype: int ''' voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context) return voucher.currency_id.id or self._get_company_currency(cr,uid,voucher.id,context) def action_move_line_create(self, cr, uid, ids, context=None): ''' Confirm the vouchers given in ids and create the journal entries for each of them ''' if context is None: context = {} move_pool = self.pool.get('account.move') move_line_pool = self.pool.get('account.move.line') for voucher in self.browse(cr, uid, ids, context=context): local_context = dict(context, force_company=voucher.journal_id.company_id.id) if voucher.move_id: continue company_currency = self._get_company_currency(cr, uid, voucher.id, context) current_currency = self._get_current_currency(cr, uid, voucher.id, context) # we select the context to use accordingly if it's a multicurrency case or not context = self._sel_context(cr, uid, voucher.id, context) # But for the operations made by _convert_amount, we always need to give the date in the context ctx = context.copy() ctx.update({'date': voucher.date}) # Create the account move record. 
move_id = move_pool.create(cr, uid, self.account_move_get(cr, uid, voucher.id, context=context), context=context) # Get the name of the account_move just created name = move_pool.browse(cr, uid, move_id, context=context).name # Create the first line of the voucher move_line_id = move_line_pool.create(cr, uid, self.first_move_line_get(cr,uid,voucher.id, move_id, company_currency, current_currency, local_context), local_context) move_line_brw = move_line_pool.browse(cr, uid, move_line_id, context=context) line_total = move_line_brw.debit - move_line_brw.credit rec_list_ids = [] if voucher.type == 'sale': line_total = line_total - self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx) elif voucher.type == 'purchase': line_total = line_total + self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx) # Create one move line per voucher line where amount is not 0.0 line_total, rec_list_ids = self.voucher_move_line_create(cr, uid, voucher.id, line_total, move_id, company_currency, current_currency, context) # Create the writeoff line if needed ml_writeoff = self.writeoff_move_line_get(cr, uid, voucher.id, line_total, move_id, name, company_currency, current_currency, local_context) if ml_writeoff: move_line_pool.create(cr, uid, ml_writeoff, local_context) # We post the voucher. self.write(cr, uid, [voucher.id], { 'move_id': move_id, 'state': 'posted', 'number': name, }) if voucher.journal_id.entry_posted: move_pool.post(cr, uid, [move_id], context={}) # We automatically reconcile the account move lines. reconcile = False for rec_ids in rec_list_ids: if len(rec_ids) >= 2: reconcile = move_line_pool.reconcile_partial(cr, uid, rec_ids, writeoff_acc_id=voucher.writeoff_acc_id.id, writeoff_period_id=voucher.period_id.id, writeoff_journal_id=voucher.journal_id.id) return True class account_voucher_line(osv.osv): _name = 'account.voucher.line' _description = 'Voucher Lines' _order = "move_line_id" # If the payment is in the same currency than the invoice, we keep the same amount # Otherwise, we compute from invoice currency to payment currency def _compute_balance(self, cr, uid, ids, name, args, context=None): currency_pool = self.pool.get('res.currency') rs_data = {} for line in self.browse(cr, uid, ids, context=context): ctx = context.copy() ctx.update({'date': line.voucher_id.date}) voucher_rate = self.pool.get('res.currency').read(cr, uid, line.voucher_id.currency_id.id, ['rate'], context=ctx)['rate'] ctx.update({ 'voucher_special_currency': line.voucher_id.payment_rate_currency_id and line.voucher_id.payment_rate_currency_id.id or False, 'voucher_special_currency_rate': line.voucher_id.payment_rate * voucher_rate}) res = {} company_currency = line.voucher_id.journal_id.company_id.currency_id.id voucher_currency = line.voucher_id.currency_id and line.voucher_id.currency_id.id or company_currency move_line = line.move_line_id or False if not move_line: res['amount_original'] = 0.0 res['amount_unreconciled'] = 0.0 elif move_line.currency_id and voucher_currency==move_line.currency_id.id: res['amount_original'] = abs(move_line.amount_currency) res['amount_unreconciled'] = abs(move_line.amount_residual_currency) else: #always use the amount booked in the company currency as the basis of the conversion into the voucher currency res['amount_original'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, move_line.credit or move_line.debit or 0.0, context=ctx) res['amount_unreconciled'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, 
abs(move_line.amount_residual), context=ctx) rs_data[line.id] = res return rs_data def _currency_id(self, cr, uid, ids, name, args, context=None): ''' This function returns the currency id of a voucher line. It's either the currency of the associated move line (if any) or the currency of the voucher or the company currency. ''' res = {} for line in self.browse(cr, uid, ids, context=context): move_line = line.move_line_id if move_line: res[line.id] = move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id else: res[line.id] = line.voucher_id.currency_id and line.voucher_id.currency_id.id or line.voucher_id.company_id.currency_id.id return res _columns = { 'voucher_id':fields.many2one('account.voucher', 'Voucher', required=1, ondelete='cascade'), 'name':fields.char('Description',), 'account_id':fields.many2one('account.account','Account', required=True), 'partner_id':fields.related('voucher_id', 'partner_id', type='many2one', relation='res.partner', string='Partner'), 'untax_amount':fields.float('Untax Amount'), 'amount':fields.float('Amount', digits_compute=dp.get_precision('Account')), 'reconcile': fields.boolean('Full Reconcile'), 'type':fields.selection([('dr','Debit'),('cr','Credit')], 'Dr/Cr'), 'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'), 'move_line_id': fields.many2one('account.move.line', 'Journal Item', copy=False), 'date_original': fields.related('move_line_id','date', type='date', relation='account.move.line', string='Date', readonly=1), 'date_due': fields.related('move_line_id','date_maturity', type='date', relation='account.move.line', string='Due Date', readonly=1), 'amount_original': fields.function(_compute_balance, multi='dc', type='float', string='Original Amount', store=True, digits_compute=dp.get_precision('Account')), 'amount_unreconciled': fields.function(_compute_balance, multi='dc', type='float', string='Open Balance', store=True, digits_compute=dp.get_precision('Account')), 'company_id': fields.related('voucher_id','company_id', relation='res.company', type='many2one', string='Company', store=True, readonly=True), 'currency_id': fields.function(_currency_id, string='Currency', type='many2one', relation='res.currency', readonly=True), } _defaults = { 'name': '', } def onchange_reconcile(self, cr, uid, ids, reconcile, amount, amount_unreconciled, context=None): vals = {'amount': 0.0} if reconcile: vals = { 'amount': amount_unreconciled} return {'value': vals} def onchange_amount(self, cr, uid, ids, amount, amount_unreconciled, context=None): vals = {} if amount: vals['reconcile'] = (amount == amount_unreconciled) return {'value': vals} def onchange_move_line_id(self, cr, user, ids, move_line_id, context=None): """ Returns a dict that contains new values and context @param move_line_id: latest value from user input for field move_line_id @param args: other arguments @param context: context arguments, like lang, time zone @return: Returns a dict which contains new values, and context """ res = {} move_line_pool = self.pool.get('account.move.line') if move_line_id: move_line = move_line_pool.browse(cr, user, move_line_id, context=context) if move_line.credit: ttype = 'dr' else: ttype = 'cr' res.update({ 'account_id': move_line.account_id.id, 'type': ttype, 'currency_id': move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id, }) return { 'value':res, } def default_get(self, cr, user, fields_list, context=None): """ Returns default values for fields @param 
fields_list: list of fields, for which default values are required to be read @param context: context arguments, like lang, time zone @return: Returns a dict that contains default values for fields """ if context is None: context = {} journal_id = context.get('journal_id', False) partner_id = context.get('partner_id', False) journal_pool = self.pool.get('account.journal') partner_pool = self.pool.get('res.partner') values = super(account_voucher_line, self).default_get(cr, user, fields_list, context=context) if (not journal_id) or ('account_id' not in fields_list): return values journal = journal_pool.browse(cr, user, journal_id, context=context) account_id = False ttype = 'cr' if journal.type in ('sale', 'sale_refund'): account_id = journal.default_credit_account_id and journal.default_credit_account_id.id or False ttype = 'cr' elif journal.type in ('purchase', 'expense', 'purchase_refund'): account_id = journal.default_debit_account_id and journal.default_debit_account_id.id or False ttype = 'dr' elif partner_id: partner = partner_pool.browse(cr, user, partner_id, context=context) if context.get('type') == 'payment': ttype = 'dr' account_id = partner.property_account_payable.id elif context.get('type') == 'receipt': account_id = partner.property_account_receivable.id values.update({ 'account_id':account_id, 'type':ttype }) return values # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
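The onchange logic at the top of this record allocates a payment across a partner's open journal items: it first looks for a single line whose residual exactly matches the paid amount, and otherwise spreads the amount over the open lines, oldest first. A minimal standalone sketch of that strategy, with illustrative names only (no OpenERP API involved):

def allocate_payment(amount, open_lines):
    """open_lines: list of (line_id, date, residual) tuples, residual > 0."""
    # Exact-match pass: assign everything to one line whose residual
    # equals the paid amount, mirroring the amount_residual == price check.
    for line_id, _date, residual in open_lines:
        if residual == amount:
            return {line_id: amount}
    # Fallback: spread the amount over the lines, oldest first.
    allocation = {}
    remaining = amount
    for line_id, _date, residual in sorted(open_lines, key=lambda l: l[1]):
        if remaining <= 0.0:
            break
        take = min(remaining, residual)
        allocation[line_id] = take
        remaining -= take
    return allocation

# 150.0 paid against residuals of 100.0 (older) and 80.0 (newer):
# the older line absorbs 100.0, the newer one the remaining 50.0.
print(allocate_payment(150.0, [(1, '2014-01-01', 100.0),
                               (2, '2014-02-01', 80.0)]))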
chouseknecht/ansible
lib/ansible/module_utils/network/junos/facts/vlans/vlans.py
21
3564
# # -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The junos vlans fact class It is in this file the configuration is collected from the device for a given resource, parsed, and the facts tree is populated based on the configuration. """ from __future__ import absolute_import, division, print_function __metaclass__ = type from copy import deepcopy from ansible.module_utils._text import to_bytes from ansible.module_utils.network.common import utils from ansible.module_utils.network.junos.argspec.vlans.vlans import VlansArgs from ansible.module_utils.network.junos.utils.utils import get_resource_config from ansible.module_utils.six import string_types try: from lxml import etree HAS_LXML = True except ImportError: HAS_LXML = False class VlansFacts(object): """ The junos vlans fact class """ def __init__(self, module, subspec='config', options='options'): self._module = module self.argument_spec = VlansArgs.argument_spec spec = deepcopy(self.argument_spec) if subspec: if options: facts_argument_spec = spec[subspec][options] else: facts_argument_spec = spec[subspec] else: facts_argument_spec = spec self.generated_spec = utils.generate_dict(facts_argument_spec) def populate_facts(self, connection, ansible_facts, data=None): """ Populate the facts for vlans :param connection: the device connection :param ansible_facts: Facts dictionary :param data: previously collected conf :rtype: dictionary :returns: facts """ if not HAS_LXML: self._module.fail_json(msg='lxml is not installed.') if not data: config_filter = """ <configuration> <vlans> </vlans> </configuration> """ data = get_resource_config(connection, config_filter=config_filter) if isinstance(data, string_types): data = etree.fromstring(to_bytes(data, errors='surrogate_then_replace')) resources = data.xpath('configuration/vlans/vlan') objs = [] for resource in resources: if resource is not None: obj = self.render_config(self.generated_spec, resource) if obj: objs.append(obj) facts = {} if objs: facts['vlans'] = [] params = utils.validate_config(self.argument_spec, {'config': objs}) for cfg in params['config']: facts['vlans'].append(utils.remove_empties(cfg)) ansible_facts['ansible_network_resources'].update(facts) return ansible_facts def render_config(self, spec, conf): """ Render config as dictionary structure and delete keys from spec for null values :param spec: The facts tree, generated from the argspec :param conf: The configuration :rtype: dictionary :returns: The generated config """ config = deepcopy(spec) config['name'] = utils.get_xml_conf_arg(conf, 'name') config['vlan_id'] = utils.get_xml_conf_arg(conf, 'vlan-id') config['description'] = utils.get_xml_conf_arg(conf, 'description') return utils.remove_empties(config)
gpl-3.0
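The render_config() method above reduces each <vlan> element to a plain dict and strips empty keys. The same parsing step can be reproduced with lxml alone; the XML below is a made-up sample, and render_vlans is a hypothetical stand-in for the module's spec-driven version:

from lxml import etree

SAMPLE = b"""
<configuration>
  <vlans>
    <vlan><name>vlan-office</name><vlan-id>100</vlan-id></vlan>
    <vlan><name>vlan-lab</name><vlan-id>200</vlan-id><description>lab net</description></vlan>
  </vlans>
</configuration>
"""

def render_vlans(data):
    root = etree.fromstring(data)
    vlans = []
    for vlan in root.xpath('vlans/vlan'):
        cfg = {
            'name': vlan.findtext('name'),
            'vlan_id': vlan.findtext('vlan-id'),
            'description': vlan.findtext('description'),
        }
        # equivalent of utils.remove_empties(): drop keys with no value
        vlans.append({k: v for k, v in cfg.items() if v})
    return vlans

print(render_vlans(SAMPLE))
# [{'name': 'vlan-office', 'vlan_id': '100'},
#  {'name': 'vlan-lab', 'vlan_id': '200', 'description': 'lab net'}]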
caseydavenport/calico-containers
tests/st/calicoctl/test_autodetection.py
2
4864
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nose.plugins.attrib import attr from tests.st.test_base import TestBase from tests.st.utils.docker_host import DockerHost from tests.st.utils.utils import ETCD_CA, ETCD_CERT, \ ETCD_KEY, ETCD_HOSTNAME_SSL, ETCD_SCHEME, get_ip from tests.st.utils.exceptions import CommandExecError if ETCD_SCHEME == "https": ADDITIONAL_DOCKER_OPTIONS = "--cluster-store=etcd://%s:2379 " \ "--cluster-store-opt kv.cacertfile=%s " \ "--cluster-store-opt kv.certfile=%s " \ "--cluster-store-opt kv.keyfile=%s " % \ (ETCD_HOSTNAME_SSL, ETCD_CA, ETCD_CERT, ETCD_KEY) else: ADDITIONAL_DOCKER_OPTIONS = "--cluster-store=etcd://%s:2379 " % \ get_ip() class TestAutodetection(TestBase): @attr('slow') def test_autodetection(self): """ Test using different IP autodetection methods. We run a multi-host test for this to test explicit selection of "first-found" and also "interface" and "can-reach" detection methods. """ with DockerHost('host1', additional_docker_options=ADDITIONAL_DOCKER_OPTIONS, start_calico=False) as host1, \ DockerHost('host2', additional_docker_options=ADDITIONAL_DOCKER_OPTIONS, start_calico=False) as host2, \ DockerHost('host3', additional_docker_options=ADDITIONAL_DOCKER_OPTIONS, start_calico=False) as host3: # Start the node on host1 using first-found auto-detection # method. host1.start_calico_node( "--ip=autodetect --ip-autodetection-method=first-found") # Attempt to start the node on host2 using can-reach auto-detection # method using a bogus DNS name. This should fail. try: host2.start_calico_node( "--ip=autodetect --ip-autodetection-method=can-reach=XXX.YYY.ZZZ.XXX") except CommandExecError: pass else: raise AssertionError("Command expected to fail but did not") # Start the node on host2 using can-reach auto-detection method # using the IP address of host1. This should succeed. host2.start_calico_node( "--ip=autodetect --ip-autodetection-method=can-reach=" + host1.ip) # Attempt to start the node on host3 using interface auto-detection # method using a bogus interface name. This should fail. try: host3.start_calico_node( "--ip=autodetect --ip-autodetection-method=interface=BogusInterface") except CommandExecError: pass else: raise AssertionError("Command expected to fail but did not") # Start the node on host3 using interface auto-detection method # using a valid interface name (eth0). This should succeed. host3.start_calico_node( "--ip=autodetect --ip-autodetection-method=interface=eth0") # Create a network and a workload on each host. 
network1 = host1.create_network("subnet1") workload_host1 = host1.create_workload("workload1", network=network1) workload_host2 = host2.create_workload("workload2", network=network1) workload_host3 = host3.create_workload("workload3", network=network1) # Allow network to converge self.assert_true(workload_host1.check_can_ping(workload_host3.ip, retries=10)) # Check connectivity in both directions self.assert_ip_connectivity(workload_list=[workload_host1, workload_host2, workload_host3], ip_pass_list=[workload_host1.ip, workload_host2.ip, workload_host3.ip])
apache-2.0
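The test above repeats the same try/except/else block twice to assert that calico/node fails to start with a bogus autodetection argument. A small generic helper expressing that pattern (hypothetical; not part of the test utilities shown here):

def assert_raises(exc_type, func, *args, **kwargs):
    """Fail unless func(*args, **kwargs) raises exc_type."""
    try:
        func(*args, **kwargs)
    except exc_type:
        return
    raise AssertionError("Command expected to fail but did not")

# Each expected-failure block in the test would then collapse to one call:
# assert_raises(CommandExecError, host2.start_calico_node,
#               "--ip=autodetect --ip-autodetection-method=can-reach=XXX.YYY.ZZZ.XXX")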
hyperized/ansible
lib/ansible/modules/network/f5/bigip_ucs_fetch.py
38
18824
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_ucs_fetch short_description: Fetches a UCS file from remote nodes description: - This module is used for fetching UCS files from remote machines and storing them locally in a file tree, organized by hostname. Note that this module is written to transfer UCS files that might not be present, so a missing remote UCS won't be an error unless fail_on_missing is set to 'yes'. version_added: 2.5 options: backup: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. type: bool default: no create_on_missing: description: - Creates the UCS based on the value of C(src) if the file does not already exist on the remote system. type: bool default: yes dest: description: - A directory to save the UCS file into. type: path required: True encryption_password: description: - Password to use to encrypt the UCS file if desired. type: str fail_on_missing: description: - Make the module fail if the UCS file on the remote system is missing. type: bool default: no force: description: - If C(no), the file will only be transferred if the destination does not exist. type: bool default: yes src: description: - The name of the UCS file to create on the remote server for downloading type: str notes: - BIG-IP provides no way to get a checksum of the UCS files on the system via any interface except, perhaps, logging in directly to the box (which would not support appliance mode). Therefore, the best this module can do is check for the existence of the file on disk; no check-summing. - If you are using this module with either Ansible Tower or Ansible AWX, you should be aware of how these Ansible products execute jobs in restricted environments. More information can be found here https://clouddocs.f5.com/products/orchestration/ansible/devel/usage/module-usage-with-tower.html extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) - Wojciech Wypior (@wojtek0806) ''' EXAMPLES = r''' - name: Download a new UCS bigip_ucs_fetch: src: cs_backup.ucs dest: /tmp/cs_backup.ucs provider: server: lb.mydomain.com user: admin password: secret delegate_to: localhost ''' RETURN = r''' checksum: description: The SHA1 checksum of the downloaded file returned: success or changed type: str sample: 7b46bbe4f8ebfee64761b5313855618f64c64109 dest: description: Location on the ansible host that the UCS was saved to returned: success type: str sample: /path/to/file.txt src: description: - Name of the UCS file on the remote BIG-IP to download. 
If not specified, then this will be a randomly generated filename returned: changed type: str sample: cs_backup.ucs backup_file: description: Name of backup file created returned: changed and if backup=yes type: str sample: /path/to/file.txt.2015-02-12@22:09~ gid: description: Group id of the UCS file, after execution returned: success type: int sample: 100 group: description: Group of the UCS file, after execution returned: success type: str sample: httpd owner: description: Owner of the UCS file, after execution returned: success type: str sample: httpd uid: description: Owner id of the UCS file, after execution returned: success type: int sample: 100 md5sum: description: The MD5 checksum of the downloaded file returned: changed or success type: str sample: 96cacab4c259c4598727d7cf2ceb3b45 mode: description: Permissions of the target UCS, after execution returned: success type: str sample: 0644 size: description: Size of the target UCS, after execution returned: success type: int sample: 1220 ''' import os import re import tempfile from ansible.module_utils.basic import AnsibleModule from distutils.version import LooseVersion try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import transform_name from library.module_utils.network.f5.icontrol import download_file from library.module_utils.network.f5.icontrol import tmos_version except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import transform_name from ansible.module_utils.network.f5.icontrol import download_file from ansible.module_utils.network.f5.icontrol import tmos_version class Parameters(AnsibleF5Parameters): updatables = [] returnables = [ 'dest', 'src', 'md5sum', 'checksum', 'backup_file'] api_attributes = [] api_map = {} @property def options(self): result = [] if self.passphrase: result.append(dict( passphrase=self.want.passphrase )) return result @property def src(self): if self._values['src'] is not None: return self._values['src'] result = next(tempfile._get_candidate_names()) + '.ucs' self._values['src'] = result return result @property def fulldest(self): result = None if os.path.isdir(self.dest): result = os.path.join(self.dest, self.src) else: if os.path.exists(os.path.dirname(self.dest)): result = self.dest else: try: # os.path.exists() can return false in some # circumstances where the directory does not have # the execute bit for the current user set, in # which case the stat() call will raise an OSError os.stat(os.path.dirname(result)) except OSError as e: if "permission denied" in str(e).lower(): raise F5ModuleError( "Destination directory {0} is not accessible".format(os.path.dirname(result)) ) raise F5ModuleError( "Destination directory {0} does not exist".format(os.path.dirname(result)) ) if not os.access(os.path.dirname(result), os.W_OK): raise F5ModuleError( "Destination {0} not writable".format(os.path.dirname(result)) ) return result class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, 
returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): pass class ReportableChanges(Changes): pass class ModuleManager(object): def __init__(self, *args, **kwargs): self.kwargs = kwargs self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) def exec_module(self): if self.is_version_v1(): manager = self.get_manager('v1') else: manager = self.get_manager('v2') return manager.exec_module() def get_manager(self, type): if type == 'v1': return V1Manager(**self.kwargs) elif type == 'v2': return V2Manager(**self.kwargs) def is_version_v1(self): """Checks to see if the TMOS version is less than 12.1.0 Versions prior to 12.1.0 have a bug which prevents the REST API from properly listing any UCS files when you query the /mgmt/tm/sys/ucs endpoint. Therefore you need to do everything through tmsh over REST. :return: bool """ version = tmos_version(self.client) if LooseVersion(version) < LooseVersion('12.1.0'): return True else: return False class BaseManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = Parameters(params=self.module.params) self.changes = UsableChanges() def exec_module(self): result = dict() self.present() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=True)) return result def present(self): if self.exists(): self.update() else: self.create() def update(self): if os.path.exists(self.want.fulldest): if not self.want.force: raise F5ModuleError( "File '{0}' already exists".format(self.want.fulldest) ) self.execute() def _get_backup_file(self): return self.module.backup_local(self.want.fulldest) def execute(self): try: if self.want.backup: if os.path.exists(self.want.fulldest): backup_file = self._get_backup_file() self.changes.update({'backup_file': backup_file}) self.download() except IOError: raise F5ModuleError( "Failed to copy: {0} to {1}".format(self.want.src, self.want.fulldest) ) self._set_checksum() self._set_md5sum() file_args = self.module.load_file_common_arguments(self.module.params) return self.module.set_fs_attributes_if_different(file_args, True) def _set_checksum(self): try: result = self.module.sha1(self.want.fulldest) self.want.update({'checksum': result}) except ValueError: pass def _set_md5sum(self): try: result = self.module.md5(self.want.fulldest) self.want.update({'md5sum': result}) except ValueError: pass def create(self): if self.want.fail_on_missing: raise F5ModuleError( "UCS '{0}' was not found".format(self.want.src) ) if not self.want.create_on_missing: raise F5ModuleError( "UCS '{0}' was not found".format(self.want.src) ) if self.module.check_mode: return True if self.want.create_on_missing: self.create_on_device() self.execute() return True def create_on_device(self): if self.want.passphrase: params = dict( command='save', name=self.want.src, options=[{'passphrase': self.want.encryption_password}] ) else: params = dict( command='save', name=self.want.src, ) uri = "https://{0}:{1}/mgmt/tm/sys/ucs".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) 
def download(self): self.download_from_device(self.want.dest) if os.path.exists(self.want.dest): return True raise F5ModuleError( "Failed to download the remote file" ) class V1Manager(BaseManager): def __init__(self, *args, **kwargs): super(V1Manager, self).__init__(**kwargs) self.remote_dir = '/var/config/rest/madm' def read_current(self): result = None output = self.read_current_from_device() if 'commandResult' in output: result = self._read_ucs_files_from_output(output['commandResult']) return result def read_current_from_device(self): params = dict( command='run', utilCmdArgs='-c "tmsh list sys ucs"' ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return response def _read_ucs_files_from_output(self, output): search = re.compile(r'filename\s+(.*)').search lines = output.split("\n") result = [m.group(1) for m in map(search, lines) if m] return result def exists(self): collection = self.read_current() base = os.path.basename(self.want.src) if any(base == os.path.basename(x) for x in collection): return True return False def download_from_device(self, dest): url = 'https://{0}:{1}/mgmt/shared/file-transfer/madm/{2}'.format( self.client.provider['server'], self.client.provider['server_port'], self.want.filename ) try: download_file(self.client, url, dest) except F5ModuleError: raise F5ModuleError( "Failed to download the file." ) if os.path.exists(self.want.dest): return True return False def _move_to_download(self): move_path = '/var/local/ucs/{0} {1}/{0}'.format( self.want.filename, self.remote_dir ) params = dict( command='run', utilCmdArgs=move_path ) uri = "https://{0}:{1}/mgmt/tm/util/unix-mv/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() if 'commandResult' in response: if 'cannot stat' in response['commandResult']: raise F5ModuleError(response['commandResult']) except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return True class V2Manager(BaseManager): def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/ucs".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return response def read_current(self): collection = self.read_current_from_device() if 'items' not in collection: return [] resources = collection['items'] result = [x['apiRawValues']['filename'] for x in resources] return result def exists(self): collection = self.read_current() base = os.path.basename(self.want.src) if any(base == os.path.basename(x) for x in collection): return True return False def download_from_device(self, dest): url = 'https://{0}:{1}/mgmt/shared/file-transfer/ucs-downloads/{2}'.format( self.client.provider['server'], self.client.provider['server_port'], 
self.want.src ) try: download_file(self.client, url, dest) except F5ModuleError: raise F5ModuleError( "Failed to download the file." ) if os.path.exists(self.want.dest): return True return False class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( backup=dict( default='no', type='bool' ), create_on_missing=dict( default='yes', type='bool' ), encryption_password=dict(no_log=True), dest=dict( required=True, type='path' ), force=dict( default='yes', type='bool' ), fail_on_missing=dict( default='no', type='bool' ), src=dict() ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.add_file_common_args = True def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, add_file_common_args=spec.add_file_common_args ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
gpl-3.0
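is_version_v1() above gates the whole module on whether TMOS predates 12.1.0, whose REST API cannot list UCS files. The comparison itself is plain distutils version ordering; a minimal reproduction:

from distutils.version import LooseVersion

def needs_v1_workaround(tmos_version):
    """True for any TMOS release older than 12.1.0."""
    return LooseVersion(tmos_version) < LooseVersion('12.1.0')

assert needs_v1_workaround('11.6.1')
assert needs_v1_workaround('12.0.0')
assert not needs_v1_workaround('12.1.0')
assert not needs_v1_workaround('13.1.1')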
muminoff/savollar
savollar/pipelines.py
1
2093
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html from scrapy.exceptions import DropItem from scrapy.conf import settings from scrapy import log from elasticsearch import Elasticsearch from uuid import uuid1 from savollar.models import SavolModel class ElasticSearchIndexPipeline(object): def process_item(self, item, spider): es = Elasticsearch([ {"host": settings["ELASTICSEARCH_HOST"]}, ]) valid = True for data in item: if not item[data]: raise DropItem("Missing %s of item from %s" %(data, item["link"])) if valid: es.index( index=settings["ELASTICSEARCH_INDEX"], doc_type="info", id=str(uuid1()), body=dict(item) ) log.msg("Item indexed to ElasticSearch database %s:%s" % (settings["ELASTICSEARCH_HOST"], settings["ELASTICSEARCH_PORT"]), level=log.DEBUG, spider=spider) return item class CassandraExportPipleline(object): def process_item(self, item, spider): valid = True for data in item: if not item[data]: raise DropItem("Missing %s of item from %s" %(data, item["link"])) if valid: model = SavolModel() model.title = item["title"] model.question = item["question"] model.answer = item["answer"] model.author = item["author"] model.permalink = item["permalink"] model.year = int(item["year"]) model.month = int(item["month"]) model.date = int(item["date"]) model.tags = item["title"].split() model.save() log.msg("Item exported to Cassandra database %s/%s" % (settings["CASSANDRA_HOST"], settings["CASSANDRA_KEYSPACE"]), level=log.DEBUG, spider=spider) return item
apache-2.0
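Both pipelines above guard the export with the same emptiness check over the item's fields (iterating a Scrapy Item yields its populated field names, so the value must be looked up explicitly). Factored into one reusable function, as a sketch with a hypothetical name:

from scrapy.exceptions import DropItem

def validate_item(item):
    """Raise DropItem if any populated field of the scraped item is empty."""
    for field in item:
        if not item[field]:
            raise DropItem("Missing %s of item from %s"
                           % (field, item.get("link", "<unknown>")))
    return item

# Each process_item() could then start with: item = validate_item(item)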
thnee/ansible
lib/ansible/modules/cloud/vmware/vmware_guest_custom_attributes.py
31
8326
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: vmware_guest_custom_attributes
short_description: Manage custom attributes from VMware for the given virtual machine
description:
    - This module can be used to add, remove and update custom attributes for the given virtual machine.
version_added: 2.7
author:
    - Jimmy Conner (@cigamit)
    - Abhijeet Kasurde (@Akasurde)
notes:
    - Tested on vSphere 6.5
requirements:
    - "python >= 2.6"
    - PyVmomi
options:
    name:
        description:
        - Name of the virtual machine to work with.
        - This is a required parameter if C(uuid) or C(moid) is not supplied.
        type: str
    state:
        description:
        - The action to take.
        - If set to C(present), then the custom attribute is added or updated.
        - If set to C(absent), then the custom attribute is removed.
        default: 'present'
        choices: ['present', 'absent']
        type: str
    uuid:
        description:
        - UUID of the virtual machine to manage if known. This is VMware's unique identifier.
        - This is a required parameter if C(name) or C(moid) is not supplied.
        type: str
    moid:
        description:
        - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
        - This is required if C(name) or C(uuid) is not supplied.
        version_added: '2.9'
        type: str
    use_instance_uuid:
        description:
        - Whether to use the VMware instance UUID rather than the BIOS UUID.
        default: no
        type: bool
        version_added: '2.8'
    folder:
        description:
        - Absolute path to find an existing guest.
        - This is a required parameter if C(name) is supplied and multiple virtual machines with the same name are found.
        type: str
    datacenter:
        description:
        - Datacenter name where the virtual machine is located.
        required: True
        type: str
    attributes:
        description:
        - A list of name and value pairs of custom attributes to manage.
        - The value of a custom attribute is not required and will be ignored if C(state) is set to C(absent).
        default: []
        type: list
extends_documentation_fragment: vmware.documentation
'''

EXAMPLES = '''
- name: Add virtual machine custom attributes
  vmware_guest_custom_attributes:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    uuid: 421e4592-c069-924d-ce20-7e7533fab926
    state: present
    attributes:
      - name: MyAttribute
        value: MyValue
  delegate_to: localhost
  register: attributes

- name: Add multiple virtual machine custom attributes
  vmware_guest_custom_attributes:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    uuid: 421e4592-c069-924d-ce20-7e7533fab926
    state: present
    attributes:
      - name: MyAttribute
        value: MyValue
      - name: MyAttribute2
        value: MyValue2
  delegate_to: localhost
  register: attributes

- name: Remove virtual machine attribute
  vmware_guest_custom_attributes:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    uuid: 421e4592-c069-924d-ce20-7e7533fab926
    state: absent
    attributes:
      - name: MyAttribute
  delegate_to: localhost
  register: attributes

- name: Remove virtual machine attribute using Virtual Machine MoID
  vmware_guest_custom_attributes:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    moid: vm-42
    state: absent
    attributes:
      - name: MyAttribute
  delegate_to: localhost
  register: attributes
'''

RETURN = """
custom_attributes:
    description: metadata about the virtual machine attributes
    returned: always
    type: dict
    sample: {
        "mycustom": "my_custom_value",
        "mycustom_2": "my_custom_value_2",
        "sample_1": "sample_1_value",
        "sample_2": "sample_2_value",
        "sample_3": "sample_3_value"
    }
"""

try:
    from pyVmomi import vim
except ImportError:
    pass

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec


class VmAttributeManager(PyVmomi):
    def __init__(self, module):
        super(VmAttributeManager, self).__init__(module)

    def set_custom_field(self, vm, user_fields):
        result_fields = dict()
        change_list = list()
        changed = False

        for field in user_fields:
            field_key = self.check_exists(field['name'])
            found = False
            field_value = field.get('value', '')

            # Pair each global custom-field definition with this VM's current
            # value via their shared integer key.
            for k, v in [(x.name, y.value) for x in self.custom_field_mgr
                         for y in vm.customValue if x.key == y.key]:
                if k == field['name']:
                    found = True
                    if v != field_value:
                        if not self.module.check_mode:
                            self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
                        result_fields[k] = field_value
                        change_list.append(True)

            if not found and field_value != "":
                # The attribute is not set on this VM yet; create the field
                # definition first if it does not exist anywhere in vCenter.
                if not field_key and not self.module.check_mode:
                    field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'], moType=vim.VirtualMachine)
                change_list.append(True)
                if not self.module.check_mode:
                    self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
                result_fields[field['name']] = field_value

        if any(change_list):
            changed = True

        return {'changed': changed, 'failed': False, 'custom_attributes': result_fields}

    def check_exists(self, field):
        for x in self.custom_field_mgr:
            if x.name == field:
                return x
        return False


def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str'),
        name=dict(type='str'),
        folder=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        state=dict(type='str', default='present',
                   choices=['absent', 'present']),
        attributes=dict(
            type='list',
            default=[],
            options=dict(
                name=dict(type='str', required=True),
                value=dict(type='str'),
            )
        ),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
    )

    if module.params.get('folder'):
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = VmAttributeManager(module)
    results = {'changed': False, 'failed': False, 'instance': dict()}

    # Check if the virtual machine exists before continuing
    vm = pyv.get_vm()

    if vm:
        # virtual machine already exists
        if module.params['state'] == "present":
            results = pyv.set_custom_field(vm, module.params['attributes'])
        elif module.params['state'] == "absent":
            # For 'absent', attributes carry no values, so the same call
            # clears each named attribute's value on the virtual machine.
            results = pyv.set_custom_field(vm, module.params['attributes'])
        module.exit_json(**results)
    else:
        # virtual machine does not exist
        vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
        module.fail_json(msg="Unable to manage custom attributes for non-existing"
                             " virtual machine %s" % vm_id)


if __name__ == '__main__':
    main()
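# A minimal standalone sketch (illustrative only, not part of the module above):
# the heart of set_custom_field() is a join between vCenter's global custom-field
# definitions and a VM's per-instance values on their shared integer key. The
# namedtuples below are hypothetical stand-ins for customFieldsManager.field and
# vm.customValue entries.
from collections import namedtuple

FieldDef = namedtuple('FieldDef', ['key', 'name'])    # definition, global to vCenter
FieldVal = namedtuple('FieldVal', ['key', 'value'])   # value, set on a single VM

defs = [FieldDef(101, 'MyAttribute'), FieldDef(102, 'Owner')]
vals = [FieldVal(101, 'MyValue')]

# The same pairing the module performs before deciding whether SetField() is needed:
current = {d.name: v.value for d in defs for v in vals if d.key == v.key}
assert current == {'MyAttribute': 'MyValue'}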
gpl-3.0
shanot/imp
modules/rmf/examples/link.py
2
1236
## \example rmf/link.py
# This example is like module/rmf/pdb.py except that instead of creating a
# new hierarchy from the rmf file, it simply links the existing hierarchy
# to the file. This mechanism can be used for loading multiple
# conformations for scoring or other analysis without having to set up
# restraints and things each time.

from __future__ import print_function
import IMP.atom
import IMP.rmf
import RMF
import sys

IMP.setup_from_argv(sys.argv, "link")

m = IMP.Model()

# Create a new IMP.atom.Hierarchy from the contents of the pdb file
h = IMP.atom.read_pdb(IMP.rmf.get_example_path("simple.pdb"), m)

tfn = "link.rmf"

print("File name is", tfn)

# open the file, clearing any existing contents
rh = RMF.create_rmf_file(tfn)

# add the hierarchy to the file
IMP.rmf.add_hierarchies(rh, [h])

# add the current configuration to the file as frame 0
IMP.rmf.save_frame(rh)

# close the file
del rh

# reopen it, don't clear the file when opening it
rh = RMF.open_rmf_file_read_only(tfn)

# link to the existing pdb hierarchy
IMP.rmf.link_hierarchies(rh, [h])

# load the same coordinates in, ok, that is not very exciting
IMP.rmf.load_frame(rh, RMF.FrameID(0))

print("Try running rmf_display or rmf_show on", tfn)
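# A possible extension of this example (not part of the original): once linked,
# every frame in the file can be loaded in turn, which is the "multiple
# conformations" workflow mentioned at the top. get_number_of_frames() is
# assumed here to be the frame-count accessor on the RMF file handle.
for i in range(rh.get_number_of_frames()):
    IMP.rmf.load_frame(rh, RMF.FrameID(i))
    # ...score or otherwise analyze h for this conformation...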
gpl-3.0
karesansui/karesansui
bin/restart_network.py
1
4392
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#

import os
import sys
import logging
from optparse import OptionParser

from ksscommand import KssCommand, KssCommandException, KssCommandOptException

import __cmd__

try:
    import karesansui
    from karesansui import __version__
    from karesansui.lib.virt.virt import KaresansuiVirtConnection, KaresansuiVirtException
    from karesansui.lib.const import NETWORK_IFCONFIG_COMMAND, NETWORK_BRCTL_COMMAND
    from karesansui.lib.utils import load_locale
    from karesansui.lib.utils import execute_command
except ImportError, e:
    print >>sys.stderr, "[Error] some packages not found. - %s" % e
    sys.exit(1)

_ = load_locale()

usage = '%prog [options]'

def getopts():
    optp = OptionParser(usage=usage, version=__version__)
    optp.add_option('-n', '--name', dest='name', help=_('Network name'))
    optp.add_option('-f', '--force', dest='force', action="store_true", help=_('Do everything to bring up network'))
    return optp.parse_args()

def chkopts(opts):
    if not opts.name:
        raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name')

class RestartNetwork(KssCommand):

    def process(self):
        (opts, args) = getopts()
        chkopts(opts)
        self.up_progress(10)

        conn = KaresansuiVirtConnection(readonly=False)
        try:
            active_networks = conn.list_active_network()
            inactive_networks = conn.list_inactive_network()
            if not (opts.name in active_networks or opts.name in inactive_networks):
                raise KssCommandException('Could not find the specified network. - net=%s' % (opts.name))

            self.up_progress(10)
            try:
                conn.stop_network(opts.name)
            except KaresansuiVirtException, e:
                if opts.force is not True:
                    raise KssCommandException('Could not stop the specified network. - net=%s' % (opts.name))

            self.up_progress(20)
            try:
                conn.start_network(opts.name)
            except KaresansuiVirtException, e:
                if opts.force is not True:
                    raise KssCommandException('Could not start the specified network. - net=%s' % (opts.name))

                # try to bring down existing bridge
                kvn = conn.search_kvn_networks(opts.name)[0]
                bridge_name = None
                try:
                    bridge_name = kvn.get_info()['bridge']['name']
                except KeyError:
                    # some network definitions carry no bridge information
                    pass

                if bridge_name is not None:
                    ret, res = execute_command([NETWORK_IFCONFIG_COMMAND, bridge_name, 'down'])
                    ret, res = execute_command([NETWORK_BRCTL_COMMAND, 'delbr', bridge_name])

                # try again
                conn.start_network(opts.name)

            self.up_progress(10)
            if not (opts.name in conn.list_active_network()):
                raise KssCommandException('Failed to start network. - net=%s' % (opts.name))

            self.logger.info('Restarted network. - net=%s' % (opts.name))
            print >>sys.stdout, _('Restarted network. - net=%s') % (opts.name)

            return True
        finally:
            conn.close()

if __name__ == "__main__":
    target = RestartNetwork()
    sys.exit(target.run())
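# Illustrative usage (assumed; Karesansui normally drives this command through
# its own job wrapper), based on the optparse options defined above:
#
#   restart_network.py -n default
#   restart_network.py -n default --force    # also tear down a stale bridge
#
# The control flow above is a stop/start-with-fallback pattern; a minimal
# standalone sketch of it, with hypothetical stop/start/cleanup callables:
def _restart_with_fallback(stop, start, cleanup, force=False):
    try:
        stop()
    except Exception:
        if not force:
            raise           # without --force, a failed stop aborts the restart
    try:
        start()
    except Exception:
        if not force:
            raise
        cleanup()           # e.g. bring the stale bridge down and delete it
        start()             # then try once more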
mit
PwnArt1st/searx
tests/unit/engines/test_youtube_api.py
13
3848
from collections import defaultdict
import mock
from searx.engines import youtube_api
from searx.testing import SearxTestCase


class TestYoutubeAPIEngine(SearxTestCase):

    def test_request(self):
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 0
        dicto['language'] = 'fr_FR'
        params = youtube_api.request(query, dicto)
        self.assertTrue('url' in params)
        self.assertTrue(query in params['url'])
        self.assertIn('googleapis.com', params['url'])
        self.assertIn('youtube', params['url'])
        self.assertIn('fr', params['url'])

        dicto['language'] = 'all'
        params = youtube_api.request(query, dicto)
        self.assertFalse('fr' in params['url'])

    def test_response(self):
        self.assertRaises(AttributeError, youtube_api.response, None)
        self.assertRaises(AttributeError, youtube_api.response, [])
        self.assertRaises(AttributeError, youtube_api.response, '')
        self.assertRaises(AttributeError, youtube_api.response, '[]')

        response = mock.Mock(text='{}')
        self.assertEqual(youtube_api.response(response), [])

        response = mock.Mock(text='{"data": []}')
        self.assertEqual(youtube_api.response(response), [])

        json = """
        {
         "kind": "youtube#searchListResponse",
         "etag": "xmg9xJZuZD438sF4hb-VcBBREXc/YJQDcTBCDcaBvl-sRZJoXdvy1ME",
         "nextPageToken": "CAUQAA",
         "pageInfo": {
          "totalResults": 1000000,
          "resultsPerPage": 20
         },
         "items": [
          {
           "kind": "youtube#searchResult",
           "etag": "xmg9xJZuZD438sF4hb-VcBBREXc/IbLO64BMhbHIgWLwLw7MDYe7Hs4",
           "id": {
            "kind": "youtube#video",
            "videoId": "DIVZCPfAOeM"
           },
           "snippet": {
            "publishedAt": "2015-05-29T22:41:04.000Z",
            "channelId": "UCNodmx1ERIjKqvcJLtdzH5Q",
            "title": "Title",
            "description": "Description",
            "thumbnails": {
             "default": {
              "url": "https://i.ytimg.com/vi/DIVZCPfAOeM/default.jpg"
             },
             "medium": {
              "url": "https://i.ytimg.com/vi/DIVZCPfAOeM/mqdefault.jpg"
             },
             "high": {
              "url": "https://i.ytimg.com/vi/DIVZCPfAOeM/hqdefault.jpg"
             }
            },
            "channelTitle": "MinecraftUniverse",
            "liveBroadcastContent": "none"
           }
          }
         ]
        }
        """
        response = mock.Mock(text=json)
        results = youtube_api.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['title'], 'Title')
        self.assertEqual(results[0]['url'], 'https://www.youtube.com/watch?v=DIVZCPfAOeM')
        self.assertEqual(results[0]['content'], 'Description')
        self.assertEqual(results[0]['thumbnail'], 'https://i.ytimg.com/vi/DIVZCPfAOeM/hqdefault.jpg')
        self.assertTrue('DIVZCPfAOeM' in results[0]['embedded'])

        json = """
        {
         "kind": "youtube#searchListResponse",
         "etag": "xmg9xJZuZD438sF4hb-VcBBREXc/YJQDcTBCDcaBvl-sRZJoXdvy1ME",
         "nextPageToken": "CAUQAA",
         "pageInfo": {
          "totalResults": 1000000,
          "resultsPerPage": 20
         }
        }
        """
        response = mock.Mock(text=json)
        results = youtube_api.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 0)

        json = """
        {"toto":{"entry":[] } }
        """
        response = mock.Mock(text=json)
        results = youtube_api.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 0)
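# Note (assumed invocation; searx normally runs its suite through the project's
# own test runner): these cases can also be executed directly with the standard
# library's unittest discovery, e.g.
#
#   python -m unittest tests.unit.engines.test_youtube_api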
agpl-3.0
aquarimeter/aquarimeter
lib/python2.7/site-packages/pkg_resources.py
134
99605
""" Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ import sys import os import time import re import imp import zipfile import zipimport import warnings import stat import functools import pkgutil import token import symbol import operator import platform from pkgutil import get_importer try: from urlparse import urlparse, urlunparse except ImportError: from urllib.parse import urlparse, urlunparse try: frozenset except NameError: from sets import ImmutableSet as frozenset try: basestring next = lambda o: o.next() from cStringIO import StringIO as BytesIO except NameError: basestring = str from io import BytesIO def execfile(fn, globs=None, locs=None): if globs is None: globs = globals() if locs is None: locs = globs exec(compile(open(fn).read(), fn, 'exec'), globs, locs) # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split # Avoid try/except due to potential problems with delayed import mechanisms. if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": import importlib._bootstrap as importlib_bootstrap else: importlib_bootstrap = None try: import parser except ImportError: pass def _bypass_ensure_directory(name, mode=0x1FF): # 0777 # Sandbox-bypassing version of ensure_directory() if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(name) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, mode) _state_vars = {} def _declare_state(vartype, **kw): g = globals() for name, val in kw.items(): g[name] = val _state_vars[name] = vartype def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_'+v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_'+_state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. 
""" plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: pass # not Mac OS X return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra', 'ExtractionError', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__+repr(self.args) class VersionConflict(ResolutionError): """An already-installed version conflicts with the requested version""" class DistributionNotFound(ResolutionError): """A requested distribution was not found""" class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = sys.version[:3] EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. 
""" _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq,Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macosx_vers(_cache=[]): if not _cache: import platform version = platform.mac_ver()[0] # fallback for MacPorts if version == '': import plistlib plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macosx_arch(machine): return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. """ try: # Python 2.7 or >=3.2 from sysconfig import get_platform except ImportError: from distutils.util import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), _macosx_arch(machine)) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") get_platform = get_build_platform # XXX backward compat def compatible_platforms(provided,required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. """ if provided is None or required is None or provided==required: return True # easy case # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": #import warnings #warnings.warn("Mac eggs should be rebuilt to " # "use the macosx designation instead of darwin.", # category=DeprecationWarning) return True return False # egg isn't macosx or legacy darwin # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? 
if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) run_main = run_script # backward compatibility def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist,basestring): dist = Requirement.parse(dist) if isinstance(dist,Requirement): dist = get_provider(dist) if not isinstance(dist,Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? (like ``os.path.isdir()``)""" def metadata_listdir(name): """List of metadata names in the directory (like ``os.listdir()``)""" def run_script(script_name, namespace): """Execute the named script in the supplied namespace dictionary""" class IResourceProvider(IMetadataProvider): """An object that provides access to package resources""" def get_resource_filename(manager, resource_name): """Return a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_stream(manager, resource_name): """Return a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_string(manager, resource_name): """Return a string containing the contents of `resource_name` `manager` must be an ``IResourceManager``""" def has_resource(resource_name): """Does the package contain the named resource?""" def resource_isdir(resource_name): """Is the named resource a directory? (like ``os.path.isdir()``)""" def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet(object): """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): """Create working set from list of path entries (default=sys.path)""" self.entries = [] self.entry_keys = {} self.by_key = {} self.callbacks = [] if entries is None: entries = sys.path for entry in entries: self.add_entry(entry) def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. 
(This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self,dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. """ dist = self.by_key.get(req.key) if dist is not None and dist not in req: raise VersionConflict(dist,req) # XXX add more info else: return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). """ for dist in self: entries = dist.get_entry_map(group) if name is None: for ep in entries.values(): yield ep elif name in entries: yield entries[name] def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key]=1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry,[]) keys2 = self.entry_keys.setdefault(dist.location,[]) if not replace and dist.key in self.by_key: return # ignore hidden distros self.by_key[dist.key] = dist if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve(self, requirements, env=None, installer=None, replace_conflicting=False): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. 
`installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. """ requirements = list(requirements)[::-1] # set up the stack processed = {} # set of processed requirements best = {} # key -> dist to_activate = [] while requirements: req = requirements.pop(0) # process dependencies breadth-first if req in processed: # Ignore cyclic or redundant dependencies continue dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and workingset to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match(req, ws, installer) if dist is None: #msg = ("The '%s' distribution was not found on this " # "system, and is required by this application.") #raise DistributionNotFound(msg % req) # unfortunately, zc.buildout uses a str(err) # to get the name of the distribution here.. raise DistributionNotFound(req) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency raise VersionConflict(dist,req) # XXX put more info here requirements.extend(dist.requires(req.extras)[::-1]) processed[req] = True return to_activate # return list of distros to activate def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) map(working_set.add, distributions) # add plugins+libs to sys.path print 'Could not load', errors # display errors The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance. 
""" plugin_projects = list(plugin_env) plugin_projects.sort() # scan project names in alphabetic order error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) list(map(shadow_set.add, self)) # put all our entries in shadow_set for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError: v = sys.exc_info()[1] error_info[dist] = v # save error info if fallback: continue # try the next older version of project else: break # give up on this project, keep going else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. """ needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback): """Invoke `callback` for all distributions (including existing ones)""" if callback in self.callbacks: return self.callbacks.append(callback) for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class Environment(object): """Searchable snapshot of distributions on a search path""" def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. """ self._distmap = {} self._cache = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. 
""" return (self.python is None or dist.py_version is None or dist.py_version==self.python) \ and compatible_platforms(dist.platform,self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self,project_name): """Return a newest-to-oldest list of distributions for `project_name` """ try: return self._cache[project_name] except KeyError: project_name = project_name.lower() if project_name not in self._distmap: return [] if project_name not in self._cache: dists = self._cache[project_name] = self._distmap[project_name] _sort_dists(dists) return self._cache[project_name] def add(self,dist): """Add `dist` if we ``can_add()`` it and it isn't already added""" if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key,[]) if dist not in dists: dists.append(dist) if dist.key in self._cache: _sort_dists(self._cache[dist.key]) def best_match(self, req, working_set, installer=None): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ dist = working_set.find(req) if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist return self.obtain(req, installer) # try and download/install def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other,Distribution): self.add(other) elif isinstance(other,Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new AvailableDistributions = Environment # XXX backward compatibility class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() err = ExtractionError("""Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: %s The Python egg cache directory is currently set to: %s Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. """ % (old_exc, cache_path) ) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. 
`archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name+'-tmp', *names) try: _bypass_ensure_directory(target_path) except: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ("%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." % path) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # 0555, 07777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) 
""" if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """Determine the default cache location This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the "Application Data" directory. On all other systems, it's "~/.python-eggs". """ try: return os.environ['PYTHON_EGG_CACHE'] except KeyError: pass if os.name!='nt': return os.path.expanduser('~/.python-eggs') app_data = 'Application Data' # XXX this may be locale-specific! app_homes = [ (('APPDATA',), None), # best option, should be locale-safe (('USERPROFILE',), app_data), (('HOMEDRIVE','HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), (('WINDIR',), app_data), # 95/98/ME ] for keys, subdir in app_homes: dirname = '' for key in keys: if key in os.environ: dirname = os.path.join(dirname, os.environ[key]) else: break else: if subdir: dirname = os.path.join(dirname,subdir) return os.path.join(dirname, 'Python-Eggs') else: raise RuntimeError( "Please set the PYTHON_EGG_CACHE enviroment variable" ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """Convert an arbitrary string to a standard version string Spaces become dots, and all other non-alphanumeric characters become dashes, with runs of multiple dashes condensed to a single dash. """ version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-','_') class MarkerEvaluation(object): values = { 'os_name': lambda: os.name, 'sys_platform': lambda: sys.platform, 'python_full_version': lambda: sys.version.split()[0], 'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]), 'platform_version': platform.version, 'platform_machine': platform.machine, 'python_implementation': platform.python_implementation, } @classmethod def is_invalid_marker(cls, text): """ Validate text as a PEP 426 environment marker; return an exception if invalid or False otherwise. """ try: cls.evaluate_marker(text) except SyntaxError: return cls.normalize_exception(sys.exc_info()[1]) return False @staticmethod def normalize_exception(exc): """ Given a SyntaxError from a marker evaluation, normalize the error message: - Remove indications of filename and line number. - Replace platform-specific error messages with standard error messages. 
""" subs = { 'unexpected EOF while parsing': 'invalid syntax', 'parenthesis is never closed': 'invalid syntax', } exc.filename = None exc.lineno = None exc.msg = subs.get(exc.msg, exc.msg) return exc @classmethod def and_test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)]) @classmethod def test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)]) @classmethod def atom(cls, nodelist): t = nodelist[1][0] if t == token.LPAR: if nodelist[2][0] == token.RPAR: raise SyntaxError("Empty parentheses") return cls.interpret(nodelist[2]) raise SyntaxError("Language feature not supported in environment markers") @classmethod def comparison(cls, nodelist): if len(nodelist)>4: raise SyntaxError("Chained comparison not allowed in environment markers") comp = nodelist[2][1] cop = comp[1] if comp[0] == token.NAME: if len(nodelist[2]) == 3: if cop == 'not': cop = 'not in' else: cop = 'is not' try: cop = cls.get_op(cop) except KeyError: raise SyntaxError(repr(cop)+" operator not allowed in environment markers") return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3])) @classmethod def get_op(cls, op): ops = { symbol.test: cls.test, symbol.and_test: cls.and_test, symbol.atom: cls.atom, symbol.comparison: cls.comparison, 'not in': lambda x, y: x not in y, 'in': lambda x, y: x in y, '==': operator.eq, '!=': operator.ne, } if hasattr(symbol, 'or_test'): ops[symbol.or_test] = cls.test return ops[op] @classmethod def evaluate_marker(cls, text, extra=None): """ Evaluate a PEP 426 environment marker on CPython 2.4+. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'parser' module, which is not implemented on Jython and has been superseded by the 'ast' module in Python 2.6 and later. """ return cls.interpret(parser.expr(text).totuple(1)[1]) @classmethod def _markerlib_evaluate(cls, text): """ Evaluate a PEP 426 environment marker using markerlib. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. """ import _markerlib # markerlib implements Metadata 1.2 (PEP 345) environment markers. # Translate the variables to Metadata 2.0 (PEP 426). env = _markerlib.default_environment() for key in env.keys(): new_key = key.replace('.', '_') env[new_key] = env.pop(key) try: result = _markerlib.interpret(text, env) except NameError: e = sys.exc_info()[1] raise SyntaxError(e.args[0]) return result if 'parser' not in globals(): # Fall back to less-complete _markerlib implementation if 'parser' module # is not available. 
evaluate_marker = _markerlib_evaluate @classmethod def interpret(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] try: op = cls.get_op(nodelist[0]) except KeyError: raise SyntaxError("Comparison or logical expression expected") return op(nodelist) @classmethod def evaluate(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] kind = nodelist[0] name = nodelist[1] if kind==token.NAME: try: op = cls.values[name] except KeyError: raise SyntaxError("Unknown name %r" % name) return op() if kind==token.STRING: s = nodelist[1] if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \ or '\\' in s: raise SyntaxError( "Only plain strings allowed in environment markers") return s[1:-1] raise SyntaxError("Language feature not supported in environment markers") invalid_marker = MarkerEvaluation.is_invalid_marker evaluate_marker = MarkerEvaluation.evaluate_marker class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def has_metadata(self, name): return self.egg_info and self._has(self._fn(self.egg_info,name)) if sys.version_info <= (3,): def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info,name)) else: def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info,name)).decode("utf-8") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self,resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self,name): return self.egg_info and self._isdir(self._fn(self.egg_info,name)) def resource_listdir(self,resource_name): return self._listdir(self._fn(self.module_path,resource_name)) def metadata_listdir(self,name): if self.egg_info: return self._listdir(self._fn(self.egg_info,name)) return [] def run_script(self,script_name,namespace): script = 'scripts/'+script_name if not self.has_metadata(script): raise ResolutionError("No script named %r" % script_name) script_text = self.get_metadata(script).replace('\r\n','\n') script_text = script_text.replace('\r','\n') script_filename = self._fn(self.egg_info,script) namespace['__file__'] = script_filename if os.path.exists(script_filename): execfile(script_filename, namespace, namespace) else: from linecache import cache cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) script_code = compile(script_text,script_filename,'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): if resource_name: 
return os.path.join(base, *resource_name.split('/')) return base def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self,module): NullProvider.__init__(self,module) self._setup_prefix() def _setup_prefix(self): # we assume here that our metadata may be nested inside a "basket" # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None while path!=old: if path.lower().endswith('.egg'): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path break old = path path, base = os.path.split(path) class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self,path): return os.path.isdir(path) def _listdir(self,path): return os.listdir(path) def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): stream = open(path, 'rb') try: return stream.read() finally: stream.close() register_loader_type(type(None), DefaultProvider) if importlib_bootstrap is not None: register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider) class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" _isdir = _has = lambda self,path: False _get = lambda self,path: '' _listdir = lambda self,path: [] module_path = None def __init__(self): pass empty_provider = EmptyProvider() def build_zipmanifest(path): """ This builds a similar dictionary to the zipimport directory caches. However instead of tuples, ZipInfo objects are stored. The translation of the tuple is as follows: * [0] - zipinfo.filename on stock pythons this needs "/" --> os.sep on pypy it is the same (one reason why distribute did work in some cases on pypy and win32). 
* [1] - zipinfo.compress_type * [2] - zipinfo.compress_size * [3] - zipinfo.file_size * [4] - len(utf-8 encoding of filename) if zipinfo & 0x800 len(ascii encoding of filename) otherwise * [5] - (zipinfo.date_time[0] - 1980) << 9 | zipinfo.date_time[1] << 5 | zipinfo.date_time[2] * [6] - (zipinfo.date_time[3] - 1980) << 11 | zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2) * [7] - zipinfo.CRC """ zipinfo = dict() zfile = zipfile.ZipFile(path) #Got ZipFile has not __exit__ on python 3.1 try: for zitem in zfile.namelist(): zpath = zitem.replace('/', os.sep) zipinfo[zpath] = zfile.getinfo(zitem) assert zipinfo[zpath] is not None finally: zfile.close() return zipinfo class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None def __init__(self, module): EggProvider.__init__(self,module) self.zipinfo = build_zipmanifest(self.loader.archive) self.zip_pre = self.loader.archive+os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath,self.zip_pre) ) def _parts(self,zip_path): # Convert a zipfile subpath into an egg-relative path part list fspath = self.zip_pre+zip_path # pseudo-fs path if fspath.startswith(self.egg_root+os.sep): return fspath[len(self.egg_root)+1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath,self.egg_root) ) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size date_time = zip_stat.date_time + (0, 0, -1) # ymdhms+wday, yday, dst #1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) return os.path.dirname(last) # return the extracted directory name timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp,timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path elif os.name=='nt': # Windows, del old file and retry unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: manager.extraction_error() # report a user-friendly error return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size!=size or stat.st_mtime!=timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) f = open(file_path, 'rb') file_contents = f.read() f.close() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self,fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self,fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self,resource_name): return self._zipinfo_name(self._fn(self.egg_root,resource_name)) def _resource_to_zip(self,resource_name): return self._zipinfo_name(self._fn(self.module_path,resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and metadata requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. 
""" def __init__(self,path): self.path = path def has_metadata(self,name): return name=='PKG-INFO' def get_metadata(self,name): if name=='PKG-INFO': f = open(self.path,'rU') metadata = f.read() f.close() return metadata raise KeyError("No metadata except PKG-INFO is available") def get_metadata_lines(self,name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir,project_name=dist_name,metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zipinfo = build_zipmanifest(importer.archive) self.zip_pre = importer.archive+os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders = {}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
""" if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: return # don't yield nested distros for subitem in metadata.resource_listdir('/'): if subitem.endswith('.egg'): subpath = os.path.join(path_item, subitem) for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): yield dist register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object,find_nothing) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if os.path.isdir(path_item) and os.access(path_item, os.R_OK): if path_item.lower().endswith('.egg'): # unpacked egg yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item,'EGG-INFO') ) ) else: # scan for .egg and .egg-info in directory for entry in os.listdir(path_item): lower = entry.lower() if lower.endswith('.egg-info') or lower.endswith('.dist-info'): fullpath = os.path.join(path_item, entry) if os.path.isdir(fullpath): # egg-info directory, allow getting metadata metadata = PathMetadata(path_item, fullpath) else: metadata = FileMetadata(fullpath) yield Distribution.from_location( path_item,entry,metadata,precedence=DEVELOP_DIST ) elif not only and lower.endswith('.egg'): for dist in find_distributions(os.path.join(path_item, entry)): yield dist elif not only and lower.endswith('.egg-link'): entry_file = open(os.path.join(path_item, entry)) try: entry_lines = entry_file.readlines() finally: entry_file.close() for line in entry_lines: if not line.strip(): continue for item in find_distributions(os.path.join(path_item,line.rstrip())): yield item break register_finder(pkgutil.ImpImporter,find_on_path) if importlib_bootstrap is not None: register_finder(importlib_bootstrap.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer,path_entry,moduleName,module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. 
""" _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = imp.new_module(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module,'__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) for path_item in path: if path_item not in module.__path__: module.__path__.append(path_item) return subpath def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' in packageName: parent = '.'.join(packageName.split('.')[:-1]) declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError: raise TypeError("Not a package:", parent) # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent,[]).append(packageName) _namespace_packages.setdefault(packageName,[]) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" imp.acquire_lock() try: for package in _namespace_packages.get(parent,()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath,package) finally: imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item)==normalized: break else: # Only return the path if it's not already there return subpath register_namespace_handler(pkgutil.ImpImporter,file_ns_handler) register_namespace_handler(zipimport.zipimporter,file_ns_handler) if importlib_bootstrap is not None: register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object,null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(filename)) def _normalize_cached(filename,_cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = normalize_path(filename) return result def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a ``basestring`` or sequence""" if isinstance(strs,basestring): for s in strs.splitlines(): s = s.strip() if s and not s.startswith('#'): # skip blank lines/comments yield s else: for ss in strs: for s in 
yield_lines(ss): yield s LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info COMMA = re.compile(r"\s*,").match # comma between items OBRACKET = re.compile(r"\s*\[").match CBRACKET = re.compile(r"\s*\]").match MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r"(?P<name>[^-]+)" r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?", re.VERBOSE | re.IGNORECASE ).match component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get def _parse_version_parts(s): for part in component_re.split(s): part = replace(part,part) if not part or part=='.': continue if part[:1] in '0123456789': yield part.zfill(8) # pad for numeric comparison else: yield '*'+part yield '*final' # ensure that alpha/beta/candidate are before final def parse_version(s): """Convert a version string to a chronologically-sortable key This is a rough cross between distutils' StrictVersion and LooseVersion; if you give it versions that would work with StrictVersion, then it behaves the same; otherwise it acts like a slightly-smarter LooseVersion. It is *possible* to create pathological version coding schemes that will fool this parser, but they should be very rare in practice. The returned value will be a tuple of strings. Numeric portions of the version are padded to 8 digits so they will compare numerically, but without relying on how numbers compare relative to strings. Dots are dropped, but dashes are retained. Trailing zeros between alpha segments or dashes are suppressed, so that e.g. "2.4.0" is considered the same as "2.4". Alphanumeric parts are lower-cased. The algorithm assumes that strings like "-" and any alpha string that alphabetically follows "final" represents a "patch level". So, "2.4-1" is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is considered newer than "2.4-1", which in turn is newer than "2.4". Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that come before "final" alphabetically) are assumed to be pre-release versions, so that the version "2.4" is considered newer than "2.4a1". Finally, to handle miscellaneous cases, the strings "pre", "preview", and "rc" are treated as if they were "c", i.e. as though they were release candidates, and therefore are not as new as a version string that does not contain them, and "dev" is replaced with an '@' so that it sorts lower than than any other pre-release tag. 
""" parts = [] for part in _parse_version_parts(s.lower()): if part.startswith('*'): if part<'*final': # remove '-' before a prerelease tag while parts and parts[-1]=='*final-': parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1]=='00000000': parts.pop() parts.append(part) return tuple(parts) class EntryPoint(object): """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, env=None, installer=None): if require: self.require(env, installer) entry = __import__(self.module_name, globals(),globals(), ['__name__']) for attr in self.attrs: try: entry = getattr(entry,attr) except AttributeError: raise ImportError("%r has no %r attribute" % (entry,attr)) return entry def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) list(map(working_set.add, working_set.resolve(self.dist.requires(self.extras),env,installer))) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1,extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ try: attrs = extras = () name,value = src.split('=',1) if '[' in value: value,extras = value.split('[',1) req = Requirement.parse("x["+extras) if req.specs: raise ValueError extras = req.extras if ':' in value: value,attrs = value.split(':',1) if not MODULE(attrs.rstrip()): raise ValueError attrs = attrs.rstrip().split('.') except ValueError: raise ValueError( "EntryPoint must be in 'name=module:attrs [extras]' format", src ) else: return cls(name.strip(), value.strip(), attrs, extras, dist) @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name]=ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data,dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _remove_md5_fragment(location): if not location: return '' parsed = urlparse(location) if parsed[-1].startswith('md5='): return urlunparse(parsed[:-1] + ('',)) return location class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__(self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = 
safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls,location,basename,metadata=None,**kw): project_name, version, py_version, platform = [None]*4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: # .dist-info gets much metadata differently match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name','ver','pyver','plat' ) cls = _distributionImpl[ext.lower()] return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw ) hashcmp = property( lambda self: ( getattr(self,'parsed_version',()), self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version, self.platform ) ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): try: return self._parsed_version except AttributeError: self._parsed_version = pv = parse_version(self.version) return pv @property def version(self): try: return self._version except AttributeError: for line in self._get_metadata(self.PKG_INFO): if line.lower().startswith('version:'): self._version = safe_version(line.split(':',1)[1].strip()) return self._version else: raise ValueError( "Missing 'Version:' header and/or %s file" % self.PKG_INFO, self ) @property def _dep_map(self): try: return self.__dep_map except AttributeError: dm = self.__dep_map = {None: []} for name in 'requires.txt', 'depends.txt': for extra,reqs in split_sections(self._get_metadata(name)): if extra: if ':' in extra: extra, marker = extra.split(':',1) if invalid_marker(marker): reqs=[] # XXX warn elif not evaluate_marker(marker): reqs=[] extra = safe_extra(extra) or None dm.setdefault(extra,[]).extend(parse_requirements(reqs)) return dm def requires(self,extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None,())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps def _get_metadata(self,name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def activate(self,path=None): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path) if path is sys.path: fixup_namespace_packages(self.location) list(map(declare_namespace, self._get_metadata('namespace_packages.txt'))) def egg_name(self): """Return what this distribution's standard .egg filename 
should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-'+self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self,self.location) else: return str(self) def __str__(self): try: version = getattr(self,'version',None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name,version) def __getattr__(self,attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) @classmethod def from_filename(cls,filename,metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" return Requirement.parse('%s==%s' % (self.project_name, self.version)) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group,name) if ep is None: raise ImportError("Entry point %r not found" % ((group,name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group,{}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) def insert_on(self, path, loc = None): """Insert self.location in path before its nearest parent directory""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath= [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item==nloc: break elif item==bdir and self.precedence==EGG_DIST: # if it's an .egg, give it precedence over its directory if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while 1: try: np = npath.index(nloc, p+1) except ValueError: break else: del npath[np], path[np] p = np # ha! 
return def check_version_conflict(self): if self.key=='setuptools': return # ignore the inevitable setuptools self-conflicts :( nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for "+repr(self)) return False return True def clone(self,**kw): """Copy this distribution, substituting in any changed keyword args""" for attr in ( 'project_name', 'version', 'py_version', 'platform', 'location', 'precedence' ): kw.setdefault(attr, getattr(self,attr,None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class DistInfoDistribution(Distribution): """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: from email.parser import Parser self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO)) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _preparse_requirement(self, requires_dist): """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') Split environment marker, add == prefix to version specifiers as necessary, and remove parenthesis. 
""" parts = requires_dist.split(';', 1) + [''] distvers = parts[0].strip() mark = parts[1].strip() distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers) distvers = distvers.replace('(', '').replace(')', '') return (distvers, mark) def _compute_dependencies(self): """Recompute this distribution's dependencies.""" from _markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: distvers, mark = self._preparse_requirement(req) parsed = next(parse_requirements(distvers)) parsed.marker_fn = compile_marker(mark) reqs.append(parsed) def reqs_for_extra(extra): for req in reqs: if req.marker_fn(override={'extra':extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: extra = safe_extra(extra.strip()) dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': Distribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args,**kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass from warnings import warn warn(stacklevel = level+1, *args, **kw) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` must be an instance of ``basestring``, or a (possibly-nested) iterable thereof. """ # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) def scan_list(ITEM,TERMINATOR,line,p,groups,item_name): items = [] while not TERMINATOR(line,p): if CONTINUE(line,p): try: line = next(lines) p = 0 except StopIteration: raise ValueError( "\\ must not appear on the last nonblank line" ) match = ITEM(line,p) if not match: raise ValueError("Expected "+item_name+" in",line,"at",line[p:]) items.append(match.group(*groups)) p = match.end() match = COMMA(line,p) if match: p = match.end() # skip the comma elif not TERMINATOR(line,p): raise ValueError( "Expected ',' or end-of-list in",line,"at",line[p:] ) match = TERMINATOR(line,p) if match: p = match.end() # skip the terminator, if any return line, p, items for line in lines: match = DISTRO(line) if not match: raise ValueError("Missing distribution spec", line) project_name = match.group(1) p = match.end() extras = [] match = OBRACKET(line,p) if match: p = match.end() line, p, extras = scan_list( DISTRO, CBRACKET, line, p, (1,), "'extra' name" ) line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec") specs = [(op,safe_version(val)) for op,val in specs] yield Requirement(project_name, specs, extras) def _sort_dists(dists): tmp = [(dist.hashcmp,dist) for dist in dists] tmp.sort() dists[::-1] = [d for hc,d in tmp] class Requirement: def __init__(self, project_name, specs, extras): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" self.unsafe_name, project_name = project_name, safe_name(project_name) self.project_name, self.key = project_name, project_name.lower() index = [(parse_version(v),state_machine[op],op,v) for op,v in specs] index.sort() self.specs = [(op,ver) for parsed,trans,op,ver in index] self.index, self.extras = index, tuple(map(safe_extra,extras)) self.hashCmp = ( self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]), frozenset(self.extras) ) self.__hash = hash(self.hashCmp) def 
__str__(self): specs = ','.join([''.join(s) for s in self.specs]) extras = ','.join(self.extras) if extras: extras = '[%s]' % extras return '%s%s%s' % (self.project_name, extras, specs) def __eq__(self,other): return isinstance(other,Requirement) and self.hashCmp==other.hashCmp def __contains__(self,item): if isinstance(item,Distribution): if item.key != self.key: return False if self.index: item = item.parsed_version # only get if we need it elif isinstance(item,basestring): item = parse_version(item) last = None compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1 for parsed,trans,op,ver in self.index: action = trans[compare(item,parsed)] # Indexing: 0, 1, -1 if action=='F': return False elif action=='T': return True elif action=='+': last = True elif action=='-' or last is None: last = False if last is None: last = True # no rules encountered return last def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): reqs = list(parse_requirements(s)) if reqs: if len(reqs)==1: return reqs[0] raise ValueError("Expected only one requirement", s) raise ValueError("No requirements found", s) state_machine = { # =>< '<': '--T', '<=': 'T-T', '>': 'F+F', '>=': 'T+F', '==': 'T..', '!=': 'F++', } def _get_mro(cls): """Get an mro for a type or classic class""" if not isinstance(cls,type): class cls(cls,object): pass return cls.__mro__[1:] return cls.__mro__ def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" for t in _get_mro(getattr(ob, '__class__', type(ob))): if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def split_sections(s): """Split a string or iterable thereof into (section,content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args,**kw): from tempfile import mkstemp old_open = os.open try: os.open = os_open # temporarily bypass sandboxing return mkstemp(*args,**kw) finally: os.open = old_open # and then put it back # Set up global resource manager (deliberately not state-saved) _manager = ResourceManager() def _initialize(g): for name in dir(_manager): if not name.startswith('_'): g[name] = getattr(_manager, name) _initialize(globals()) # Prepare the master working set and make the ``require()`` API available _declare_state('object', working_set = WorkingSet()) try: # Does the main program list any requirements? 
from __main__ import __requires__ except ImportError: pass # No: just use the default working set based on sys.path else: # Yes: ensure the requirements are met, by prefixing sys.path if necessary try: working_set.require(__requires__) except VersionConflict: # try it without defaults already on sys.path working_set = WorkingSet([]) # by starting with an empty path for dist in working_set.resolve( parse_requirements(__requires__), Environment() ): working_set.add(dist) for entry in sys.path: # add any missing entries from sys.path if entry not in working_set.entries: working_set.add_entry(entry) sys.path[:] = working_set.entries # then copy back to sys.path require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script run_main = run_script # backward compatibility # Activate all distributions already on sys.path, and ensure that # all distributions added to the working set in the future (e.g. by # calling ``require()``) will get activated as well. add_activation_listener(lambda dist: dist.activate()) working_set.entries=[] list(map(working_set.add_entry,sys.path)) # match order
apache-2.0
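A note on the pkg_resources code above: its version-ordering and entry-point grammar are easiest to grasp from a few concrete values. The following is a minimal sketch, assuming the module above is importable as pkg_resources; the package and entry-point names are made up for illustration.

from pkg_resources import parse_version, EntryPoint, Requirement

# parse_version() pads numeric parts to 8 digits and appends '*final', so the
# resulting tuples sort chronologically: trailing zeros are dropped and
# pre-release tags ('a', 'b', 'rc', ...) sort before the release itself.
assert parse_version('2.4.0') == parse_version('2.4')
assert parse_version('2.4a1') < parse_version('2.4')    # pre-release
assert parse_version('2.4-1') > parse_version('2.4')    # '-' marks a patch level
assert parse_version('2.4.1') > parse_version('2.4-1')

# EntryPoint.parse() handles the 'name = module:attrs [extras]' syntax and
# round-trips through str().
ep = EntryPoint.parse('main = mypkg.cli:run [extra1,extra2]')
assert str(ep) == 'main = mypkg.cli:run [extra1,extra2]'

# Requirement.__contains__ accepts bare version strings directly.
req = Requirement.parse('mypkg>=1.0,!=1.5')
assert '1.2' in req and '1.5' not in req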
aleonliao/depot_tools
download_from_google_storage.py
15
20248
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Download files from Google Storage based on SHA1 sums.""" import hashlib import optparse import os import Queue import re import shutil import stat import sys import tarfile import threading import time import subprocess2 GSUTIL_DEFAULT_PATH = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'gsutil.py') # Maps sys.platform to what we actually want to call them. PLATFORM_MAPPING = { 'cygwin': 'win', 'darwin': 'mac', 'linux2': 'linux', 'win32': 'win', } class FileNotFoundError(IOError): pass class InvalidFileError(IOError): pass class InvalidPlatformError(Exception): pass def GetNormalizedPlatform(): """Returns the result of sys.platform accounting for cygwin. Under cygwin, this will always return "win32" like the native Python.""" if sys.platform == 'cygwin': return 'win32' return sys.platform # Common utilities class Gsutil(object): """Call gsutil with some predefined settings. This is a convenience object, and is also immutable.""" def __init__(self, path, boto_path=None, timeout=None, version='4.15'): if not os.path.exists(path): raise FileNotFoundError('GSUtil not found in %s' % path) self.path = path self.timeout = timeout self.boto_path = boto_path self.version = version def get_sub_env(self): env = os.environ.copy() if self.boto_path == os.devnull: env['AWS_CREDENTIAL_FILE'] = '' env['BOTO_CONFIG'] = '' elif self.boto_path: env['AWS_CREDENTIAL_FILE'] = self.boto_path env['BOTO_CONFIG'] = self.boto_path return env def call(self, *args): cmd = [sys.executable, self.path, '--force-version', self.version] cmd.extend(args) return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout) def check_call(self, *args): cmd = [sys.executable, self.path, '--force-version', self.version] cmd.extend(args) ((out, err), code) = subprocess2.communicate( cmd, stdout=subprocess2.PIPE, stderr=subprocess2.PIPE, env=self.get_sub_env(), timeout=self.timeout) # Parse output. status_code_match = re.search('status=([0-9]+)', err) if status_code_match: return (int(status_code_match.group(1)), out, err) if ('You are attempting to access protected data with ' 'no configured credentials.' in err): return (403, out, err) if 'matched no objects' in err: return (404, out, err) return (code, out, err) def check_platform(target): """Checks if any parent directory of target matches (win|mac|linux).""" assert os.path.isabs(target) root, target_name = os.path.split(target) if not target_name: return None if target_name in ('linux', 'mac', 'win'): return target_name return check_platform(root) def get_sha1(filename): sha1 = hashlib.sha1() with open(filename, 'rb') as f: while True: # Read in 1mb chunks, so it doesn't all have to be loaded into memory. chunk = f.read(1024*1024) if not chunk: break sha1.update(chunk) return sha1.hexdigest() # Download-specific code starts here def enumerate_work_queue(input_filename, work_queue, directory, recursive, ignore_errors, output, sha1_file, auto_platform): if sha1_file: if not os.path.exists(input_filename): if not ignore_errors: raise FileNotFoundError('%s not found.' % input_filename) print >> sys.stderr, '%s not found.' 
% input_filename with open(input_filename, 'rb') as f: sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip()) if sha1_match: work_queue.put((sha1_match.groups(1)[0], output)) return 1 if not ignore_errors: raise InvalidFileError('No sha1 sum found in %s.' % input_filename) print >> sys.stderr, 'No sha1 sum found in %s.' % input_filename return 0 if not directory: work_queue.put((input_filename, output)) return 1 work_queue_size = 0 for root, dirs, files in os.walk(input_filename): if not recursive: for item in dirs[:]: dirs.remove(item) else: for exclude in ['.svn', '.git']: if exclude in dirs: dirs.remove(exclude) for filename in files: full_path = os.path.join(root, filename) if full_path.endswith('.sha1'): if auto_platform: # Skip if the platform does not match. target_platform = check_platform(os.path.abspath(full_path)) if not target_platform: err = ('--auto_platform passed in but no platform name found in ' 'the path of %s' % full_path) if not ignore_errors: raise InvalidFileError(err) print >> sys.stderr, err continue current_platform = PLATFORM_MAPPING[sys.platform] if current_platform != target_platform: continue with open(full_path, 'rb') as f: sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip()) if sha1_match: work_queue.put( (sha1_match.groups(1)[0], full_path.replace('.sha1', ''))) work_queue_size += 1 else: if not ignore_errors: raise InvalidFileError('No sha1 sum found in %s.' % filename) print >> sys.stderr, 'No sha1 sum found in %s.' % filename return work_queue_size def _validate_tar_file(tar, prefix): def _validate(tarinfo): """Returns false if the tarinfo is something we explicitly forbid.""" if tarinfo.issym() or tarinfo.islnk(): return False if '..' in tarinfo.name or not tarinfo.name.startswith(prefix): return False return True return all(map(_validate, tar.getmembers())) def _downloader_worker_thread(thread_num, q, force, base_url, gsutil, out_q, ret_codes, verbose, extract, delete=True): while True: input_sha1_sum, output_filename = q.get() if input_sha1_sum is None: return if os.path.exists(output_filename) and not force: if get_sha1(output_filename) == input_sha1_sum: if verbose: out_q.put( '%d> File %s exists and SHA1 matches. Skipping.' % ( thread_num, output_filename)) continue # Check if file exists. file_url = '%s/%s' % (base_url, input_sha1_sum) (code, _, err) = gsutil.check_call('ls', file_url) if code != 0: if code == 404: out_q.put('%d> File %s for %s does not exist, skipping.' % ( thread_num, file_url, output_filename)) ret_codes.put((1, 'File %s for %s does not exist.' % ( file_url, output_filename))) else: # Other error, probably auth related (bad ~/.boto, etc). out_q.put('%d> Failed to fetch file %s for %s, skipping. [Err: %s]' % ( thread_num, file_url, output_filename, err)) ret_codes.put((1, 'Failed to fetch file %s for %s. [Err: %s]' % ( file_url, output_filename, err))) continue # Fetch the file. out_q.put('%d> Downloading %s...' % (thread_num, output_filename)) try: if delete: os.remove(output_filename) # Delete the file if it exists already. except OSError: if os.path.exists(output_filename): out_q.put('%d> Warning: deleting %s failed.' % ( thread_num, output_filename)) code, _, err = gsutil.check_call('cp', file_url, output_filename) if code != 0: out_q.put('%d> %s' % (thread_num, err)) ret_codes.put((code, err)) continue remote_sha1 = get_sha1(output_filename) if remote_sha1 != input_sha1_sum: msg = ('%d> ERROR remote sha1 (%s) does not match expected sha1 (%s).' 
% (thread_num, remote_sha1, input_sha1_sum)) out_q.put(msg) ret_codes.put((20, msg)) continue if extract: if (not tarfile.is_tarfile(output_filename) or not output_filename.endswith('.tar.gz')): out_q.put('%d> Error: %s is not a tar.gz archive.' % ( thread_num, output_filename)) ret_codes.put((1, '%s is not a tar.gz archive.' % (output_filename))) continue with tarfile.open(output_filename, 'r:gz') as tar: dirname = os.path.dirname(os.path.abspath(output_filename)) extract_dir = output_filename[0:len(output_filename)-7] if not _validate_tar_file(tar, os.path.basename(extract_dir)): out_q.put('%d> Error: %s contains files outside %s.' % ( thread_num, output_filename, extract_dir)) ret_codes.put((1, '%s contains invalid entries.' % (output_filename))) continue if os.path.exists(extract_dir): try: shutil.rmtree(extract_dir) out_q.put('%d> Removed %s...' % (thread_num, extract_dir)) except OSError: out_q.put('%d> Warning: Can\'t delete: %s' % ( thread_num, extract_dir)) ret_codes.put((1, 'Can\'t delete %s.' % (extract_dir))) continue out_q.put('%d> Extracting %d entries from %s to %s' % (thread_num, len(tar.getmembers()),output_filename, extract_dir)) tar.extractall(path=dirname) # Set executable bit. if sys.platform == 'cygwin': # Under cygwin, mark all files as executable. The executable flag in # Google Storage will not be set when uploading from Windows, so if # this script is running under cygwin and we're downloading an # executable, it will be unrunnable from inside cygwin without this. st = os.stat(output_filename) os.chmod(output_filename, st.st_mode | stat.S_IEXEC) elif sys.platform != 'win32': # On non-Windows platforms, key off of the custom header # "x-goog-meta-executable". code, out, _ = gsutil.check_call('stat', file_url) if code != 0: out_q.put('%d> %s' % (thread_num, err)) ret_codes.put((code, err)) elif re.search(r'executable:\s*1', out): st = os.stat(output_filename) os.chmod(output_filename, st.st_mode | stat.S_IEXEC) def printer_worker(output_queue): while True: line = output_queue.get() # Its plausible we want to print empty lines. if line is None: break print line def download_from_google_storage( input_filename, base_url, gsutil, num_threads, directory, recursive, force, output, ignore_errors, sha1_file, verbose, auto_platform, extract): # Start up all the worker threads. all_threads = [] download_start = time.time() stdout_queue = Queue.Queue() work_queue = Queue.Queue() ret_codes = Queue.Queue() ret_codes.put((0, None)) for thread_num in range(num_threads): t = threading.Thread( target=_downloader_worker_thread, args=[thread_num, work_queue, force, base_url, gsutil, stdout_queue, ret_codes, verbose, extract]) t.daemon = True t.start() all_threads.append(t) printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue]) printer_thread.daemon = True printer_thread.start() # Enumerate our work queue. work_queue_size = enumerate_work_queue( input_filename, work_queue, directory, recursive, ignore_errors, output, sha1_file, auto_platform) for _ in all_threads: work_queue.put((None, None)) # Used to tell worker threads to stop. # Wait for all downloads to finish. for t in all_threads: t.join() stdout_queue.put(None) printer_thread.join() # See if we ran into any errors. max_ret_code = 0 for ret_code, message in ret_codes.queue: max_ret_code = max(ret_code, max_ret_code) if message: print >> sys.stderr, message if verbose and not max_ret_code: print 'Success!' 
if verbose: print 'Downloading %d files took %1f second(s)' % ( work_queue_size, time.time() - download_start) return max_ret_code def main(args): usage = ('usage: %prog [options] target\n' 'Target must be:\n' ' (default) a sha1 sum ([A-Za-z0-9]{40}).\n' ' (-s or --sha1_file) a .sha1 file, containing a sha1 sum on ' 'the first line.\n' ' (-d or --directory) A directory to scan for .sha1 files.') parser = optparse.OptionParser(usage) parser.add_option('-o', '--output', help='Specify the output file name. Defaults to: ' '(a) Given a SHA1 hash, the name is the SHA1 hash. ' '(b) Given a .sha1 file or directory, the name will ' 'match (.*).sha1.') parser.add_option('-b', '--bucket', help='Google Storage bucket to fetch from.') parser.add_option('-e', '--boto', help='Specify a custom boto file.') parser.add_option('-c', '--no_resume', action='store_true', help='Resume download if file is partially downloaded.') parser.add_option('-f', '--force', action='store_true', help='Force download even if local file exists.') parser.add_option('-i', '--ignore_errors', action='store_true', help='Don\'t throw error if we find an invalid .sha1 file.') parser.add_option('-r', '--recursive', action='store_true', help='Scan folders recursively for .sha1 files. ' 'Must be used with -d/--directory') parser.add_option('-t', '--num_threads', default=1, type='int', help='Number of downloader threads to run.') parser.add_option('-d', '--directory', action='store_true', help='The target is a directory. ' 'Cannot be used with -s/--sha1_file.') parser.add_option('-s', '--sha1_file', action='store_true', help='The target is a file containing a sha1 sum. ' 'Cannot be used with -d/--directory.') parser.add_option('-g', '--config', action='store_true', help='Alias for "gsutil config". Run this if you want ' 'to initialize your saved Google Storage ' 'credentials. This will create a read-only ' 'credentials file in ~/.boto.depot_tools.') parser.add_option('-n', '--no_auth', action='store_true', help='Skip auth checking. Use if it\'s known that the ' 'target bucket is a public bucket.') parser.add_option('-p', '--platform', help='A regular expression that is compared against ' 'Python\'s sys.platform. If this option is specified, ' 'the download will happen only if there is a match.') parser.add_option('-a', '--auto_platform', action='store_true', help='Detects if any parent folder of the target matches ' '(linux|mac|win). If so, the script will only ' 'process files that are in the paths that ' 'that matches the current platform.') parser.add_option('-u', '--extract', action='store_true', help='Extract a downloaded tar.gz file. ' 'Leaves the tar.gz file around for sha1 verification' 'If a directory with the same name as the tar.gz ' 'file already exists, is deleted (to get a ' 'clean state in case of update.)') parser.add_option('-v', '--verbose', action='store_true', default=True, help='DEPRECATED: Defaults to True. Use --no-verbose ' 'to suppress.') parser.add_option('-q', '--quiet', action='store_false', dest='verbose', help='Suppresses diagnostic and progress information.') (options, args) = parser.parse_args() # Make sure we should run at all based on platform matching. if options.platform: if options.auto_platform: parser.error('--platform can not be specified with --auto_platform') if not re.match(options.platform, GetNormalizedPlatform()): if options.verbose: print('The current platform doesn\'t match "%s", skipping.' % options.platform) return 0 # Set the boto file to /dev/null if we don't need auth. 
if options.no_auth: if (set(('http_proxy', 'https_proxy')).intersection( env.lower() for env in os.environ) and 'NO_AUTH_BOTO_CONFIG' not in os.environ): print >> sys.stderr, ('NOTICE: You have PROXY values set in your ' 'environment, but gsutil in depot_tools does not ' '(yet) obey them.') print >> sys.stderr, ('Also, --no_auth prevents the normal BOTO_CONFIG ' 'environment variable from being used.') print >> sys.stderr, ('To use a proxy in this situation, please supply ' 'those settings in a .boto file pointed to by ' 'the NO_AUTH_BOTO_CONFIG environment var.') options.boto = os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull) # Make sure gsutil exists where we expect it to. if os.path.exists(GSUTIL_DEFAULT_PATH): gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto) else: parser.error('gsutil not found in %s, bad depot_tools checkout?' % GSUTIL_DEFAULT_PATH) # Passing in -g/--config will run our copy of GSUtil, then quit. if options.config: print '===Note from depot_tools===' print 'If you do not have a project ID, enter "0" when asked for one.' print '===End note from depot_tools===' print return gsutil.call('config') if not args: parser.error('Missing target.') if len(args) > 1: parser.error('Too many targets.') if not options.bucket: parser.error('Missing bucket. Specify bucket with --bucket.') if options.sha1_file and options.directory: parser.error('Both --directory and --sha1_file are specified, ' 'can only specify one.') if options.recursive and not options.directory: parser.error('--recursive specified but --directory not specified.') if options.output and options.directory: parser.error('--directory is specified, so --output has no effect.') if (not (options.sha1_file or options.directory) and options.auto_platform): parser.error('--auto_platform must be specified with either ' '--sha1_file or --directory') input_filename = args[0] # Set output filename if not specified. if not options.output and not options.directory: if not options.sha1_file: # Target is a sha1 sum, so output filename would also be the sha1 sum. options.output = input_filename elif options.sha1_file: # Target is a .sha1 file. if not input_filename.endswith('.sha1'): parser.error('--sha1_file is specified, but the input filename ' 'does not end with .sha1, and no --output is specified. ' 'Either make sure the input filename has a .sha1 ' 'extension, or specify --output.') options.output = input_filename[:-5] else: parser.error('Unreachable state.') # Check if output file already exists. if not options.directory and not options.force and not options.no_resume: if os.path.exists(options.output): parser.error('Output file %s exists and --no_resume is specified.' % options.output) base_url = 'gs://%s' % options.bucket return download_from_google_storage( input_filename, base_url, gsutil, options.num_threads, options.directory, options.recursive, options.force, options.output, options.ignore_errors, options.sha1_file, options.verbose, options.auto_platform, options.extract) if __name__ == '__main__': sys.exit(main(sys.argv))
bsd-3-clause
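The whole contract of download_from_google_storage.py above hinges on `.sha1` companion files: the object name in the bucket is the SHA1 hex digest of the file's contents, and a `<name>.sha1` file holds that digest on its first line. A minimal sketch of producing such a file, mirroring the chunked get_sha1() defined in the script; the file and bucket names here are hypothetical.

import hashlib

def get_sha1(filename):
    sha1 = hashlib.sha1()
    with open(filename, 'rb') as f:
        while True:
            chunk = f.read(1024 * 1024)  # 1 MB chunks keep memory bounded
            if not chunk:
                break
            sha1.update(chunk)
    return sha1.hexdigest()

# Write the companion file that -s/--sha1_file consumes; the script rstrip()s
# the first 1024 bytes, so a trailing newline is fine.
with open('example.bin.sha1', 'w') as f:
    f.write(get_sha1('example.bin') + '\n')

# Fetch it back later with something like:
#   download_from_google_storage.py --bucket my-bucket -s example.bin.sha1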
qnzhou/ThingiverseCrawler
thingiverse_crawler.py
1
9320
#!/usr/bin/env python

import argparse
import datetime
import os
import os.path
import requests
import re
import time
import urllib
import urlparse

from subprocess import check_call

def utc_mktime(utc_tuple):
    """Returns number of seconds elapsed since epoch

    Note that no timezones are taken into consideration.

    utc tuple must be: (year, month, day, hour, minute, second)
    """
    if len(utc_tuple) == 6:
        utc_tuple += (0, 0, 0)
    return time.mktime(utc_tuple) - time.mktime((1970, 1, 1, 0, 0, 0, 0, 0, 0))

def datetime_to_timestamp(dt):
    """Converts a datetime object to UTC timestamp"""
    return int(utc_mktime(dt.timetuple()))

def parse_thing_ids(text):
    pattern = "thing:(\d{5,7})"
    matched = re.findall(pattern, text)
    return [int(val) for val in matched]

def parse_file_ids(text):
    pattern = "download:(\d{5,7})"
    matched = re.findall(pattern, text)
    return [int(val) for val in matched]

known_licenses = [
    ("Creative Commons - Attribution",
        re.compile("http://creativecommons.org/licenses/by/\d(.\d)?/")),
    ("Creative Commons - Attribution - Share Alike",
        re.compile("http://creativecommons.org/licenses/by-sa/\d(.\d)?/")),
    ("Creative Commons - Attribution - No Derivatives",
        re.compile("http://creativecommons.org/licenses/by-nd/\d(.\d)?/")),
    ("Creative Commons - Attribution - Non-Commercial",
        re.compile("http://creativecommons.org/licenses/by-nc/\d(.\d)?/")),
    ("Attribution - Non-Commercial - Share Alike",
        re.compile("http://creativecommons.org/licenses/by-nc-sa/\d(.\d)?/")),
    ("Attribution - Non-Commercial - No Derivatives",
        re.compile("http://creativecommons.org/licenses/by-nc-nd/\d(.\d)?/")),
    ("Creative Commons - Public Domain Dedication",
        re.compile("http://creativecommons.org/publicdomain/zero/\d(.\d)?/")),
    ("GNU - GPL",
        re.compile("http://creativecommons.org/licenses/GPL/\d(.\d)?/")),
    ("GNU - LGPL",
        re.compile("http://creativecommons.org/licenses/LGPL/\d(.\d)?/")),
    ("BSD License",
        re.compile("http://creativecommons.org/licenses/BSD/")),
    ("Nokia",
        re.compile("http://www.developer.nokia.com/Terms_and_conditions/3d-printing.xhtml")),
    ("Public Domain",
        re.compile("http://creativecommons.org/licenses/publicdomain/")),
]

def parse_license(text):
    for name, pattern in known_licenses:
        if pattern.search(text):
            return name
    return "unknown_license"

def crawl_thing_ids(N, end_date=None):
    """ This method extracts N things that were uploaded to thingiverse.com
    before end_date.  If end_date is None, use today's date.
    """
    baseurl = "http://www.thingiverse.com/search/recent/things/page:{}?q=&start_date=&stop_date={}&search_mode=advanced&description=&username=&tags=&license="
    if end_date is None:
        end_date = datetime.datetime.utcnow()  # documented default: today's date
    end_date = datetime_to_timestamp(end_date)
    thing_ids = set()
    for i in range(N/12 + 1):
        url = baseurl.format(i, end_date)
        r = requests.get(url)
        assert(r.status_code == 200)
        thing_ids.update(parse_thing_ids(r.text))
        if len(thing_ids) > N:
            break
        # Sleep a bit to avoid being mistaken as DoS.
        time.sleep(0.5)
    return thing_ids

def crawl_things(N, output_dir, term=None, category=None, source=None, organize=False):
    #baseurl = "http://www.thingiverse.com/newest/page:{}"
    #baseurl = "http://www.thingiverse.com/explore/popular/page:{}"
    key = None
    if term is None:
        assert(source is not None)
        url_prefix = "http://www.thingiverse.com/explore/{}/".format(source)
        if category is None:
            baseurl = url_prefix + "page:{}"
        else:
            baseurl = url_prefix + urllib.quote_plus(category) + "/page:{}"
        key = category
    else:
        baseurl = "http://www.thingiverse.com/search/page:{}?type=things&q=" + urllib.quote_plus(term)
        key = term

    thing_ids = set()
    file_ids = set()
    records = []
    num_files = 0
    page = 0
    previous_path = ''
    while True:
        url = baseurl.format(page+1)
        contents = get_url(url)
        page += 1

        # If the previous url ends up being the same as the old one, we should
        # stop as there are no more pages.
        current_path = urlparse.urlparse(contents.url).path
        if previous_path == current_path:
            return records
        else:
            previous_path = current_path

        for thing_id in parse_thing_ids(contents.text):
            if thing_id in thing_ids:
                continue
            print("thing id: {}".format(thing_id))
            thing_ids.add(thing_id)
            license, thing_files = get_thing(thing_id)
            for file_id in thing_files:
                if file_id in file_ids:
                    continue
                file_ids.add(file_id)
                print("    file id: {}".format(file_id))
                result = download_file(file_id, thing_id, output_dir, organize)
                if result is None:
                    continue
                filename, link = result
                if filename is not None:
                    records.append((thing_id, file_id, filename, license, link))
                if N is not None and len(records) >= N:
                    return records
        # Sleep a bit to avoid being mistaken as DoS.
        time.sleep(0.5)
        save_records(records, key)

def get_thing(thing_id):
    base_url = "http://www.thingiverse.com/{}:{}"
    url = base_url.format("thing", thing_id)
    contents = get_url(url).text
    license = parse_license(contents)
    return license, parse_file_ids(contents)

def get_url(url, time_out=600):
    r = requests.get(url)
    sleep_time = 1.0
    while r.status_code != 200:
        print("sleep {}s".format(sleep_time))
        print(url)
        time.sleep(sleep_time)
        r = requests.get(url)
        sleep_time += 2
        if (sleep_time > time_out):
            # We have slept for over 10 minutes; the page probably does
            # not exist.
            break

    if r.status_code != 200:
        print("failed to retrieve {}".format(url))
    else:
        return r

def get_download_link(file_id):
    base_url = "https://www.thingiverse.com/{}:{}"
    url = base_url.format("download", file_id)
    r = requests.head(url)
    link = r.headers.get("Location", None)
    if link is not None:
        __, ext = os.path.splitext(link)
        if ext.lower() not in [".stl", ".obj", ".ply", ".off"]:
            return None
    return link

def download_file(file_id, thing_id, output_dir, organize):
    link = get_download_link(file_id)
    if link is None:
        return None
    __, ext = os.path.splitext(link)
    output_file = "{}{}".format(file_id, ext.lower())
    if organize:
        output_file = os.path.join(str(thing_id), output_file)
    output_file = os.path.join(output_dir, output_file)
    # Make sure the per-thing folder exists before wget writes into it.
    if not os.path.isdir(os.path.dirname(output_file)):
        os.makedirs(os.path.dirname(output_file))
    command = "wget -q --tries=20 --waitretry 20 -O {} {}".format(output_file, link)
    check_call(command.split())  # actually fetch the file with wget
    return output_file, link

def save_records(records, key=None):
    # Enforce kebab case file name
    output_name = re.sub('(\w) (\w)', r'\1-\2', key).lower() + "-" if key else ""
    output_name += "summary"
    with open(output_name + ".csv", 'w') as fout:
        fout.write("thing_id, file_id, file, license, link\n")
        for entry in records:
            fout.write(",".join([str(val) for val in entry]) + "\n")

def parse_args():
    parser = argparse.ArgumentParser(
        description="Crawl data from thingiverse",
        epilog="Written by Qingnan Zhou <qnzhou at gmail dot com> Modified by Mike Gleason")
    parser.add_argument("--output-dir", "-o", help="output directories",
                        default=".")
    parser.add_argument("--number", "-n", type=int,
                        help="how many files to crawl", default=None)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--search-term", "-s", type=str, default=None,
                       help="term to search for")
    group.add_argument("--category", "-c", type=str, default=None,
                       help="category to search for")
    parser.add_argument('--organize', dest='organized', default=False,
                        action='store_true',
                        help="organize files by their main category")
    parser.add_argument("--source",
                        choices=("newest", "featured", "popular", "verified",
                                 "made-things", "derivatives", "customizable",
                                 "random-things", "firehose"),
                        default="featured")
    return parser

def main():
    parser = parse_args()
    args = parser.parse_args()
    if args.number is None and (args.search_term is None and args.category is None):
        parser.error('Number or Search/Category Term required')
    output_dir = args.output_dir
    records = crawl_things(
        args.number,
        output_dir,
        args.search_term,
        args.category,
        args.source,
        args.organized)
    if args.search_term:
        save_records(records, args.search_term)
    elif args.category:
        save_records(records, args.category)
    else:
        save_records(records)

if __name__ == "__main__":
    main()
mit
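The get_url() helper above polls with a linearly growing sleep until the page answers 200 or a timeout budget is exhausted. A minimal, self-contained sketch of the same pattern; function and parameter names here are illustrative, not part of the crawler itself:

import time

import requests


def fetch_with_backoff(url, time_out=600):
    """Retry a GET with linearly growing sleeps until 200 or time_out."""
    r = requests.get(url)
    sleep_time = 1.0
    while r.status_code != 200 and sleep_time <= time_out:
        time.sleep(sleep_time)
        sleep_time += 2
        r = requests.get(url)
    return r if r.status_code == 200 else None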
ryankurte/mbed-os
tools/host_tests/tcpecho_server_loop.py
73
1349
""" mbed SDK Copyright (c) 2011-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Be sure that the tools directory is in the search path import sys from os.path import join, abspath, dirname ROOT = abspath(join(dirname(__file__), "..", "..")) sys.path.insert(0, ROOT) from mbed_settings import LOCALHOST from SocketServer import BaseRequestHandler, TCPServer class TCP_EchoHandler(BaseRequestHandler): def handle(self): print "\nHandle connection from:", self.client_address while True: data = self.request.recv(1024) if not data: break self.request.sendall(data) self.request.close() print "socket closed" if __name__ == '__main__': server = TCPServer((LOCALHOST, 7), TCP_EchoHandler) print "listening for connections on:", (LOCALHOST, 7) server.serve_forever()
apache-2.0
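The echo server above can be exercised with a short client. This sketch assumes the server is running locally and that binding port 7 was permitted (low ports usually require elevated privileges):

import socket

# Connect to the echo service started above and round-trip one payload.
conn = socket.create_connection(('127.0.0.1', 7))
conn.sendall(b'hello mbed')
assert conn.recv(1024) == b'hello mbed'
conn.close()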
PyBossa/pybossa
pybossa/model/counter.py
2
1787
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2017 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA.  If not, see <http://www.gnu.org/licenses/>.

from sqlalchemy import Integer
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.dialects.postgresql import TIMESTAMP

from pybossa.core import db
from pybossa.model import DomainObject, make_timestamp


class Counter(db.Model, DomainObject):
    '''A Counter lists the number of task runs for a given Task.'''

    __tablename__ = 'counter'

    #: Counter.ID
    id = Column(Integer, primary_key=True)
    #: UTC timestamp when the counter was created.
    created = Column(TIMESTAMP, default=make_timestamp)
    #: Project.ID that this counter is associated with.
    project_id = Column(Integer, ForeignKey('project.id',
                                            ondelete='CASCADE'),
                        nullable=False)
    #: Task.ID that this counter is associated with.
    task_id = Column(Integer, ForeignKey('task.id',
                                         ondelete='CASCADE'),
                     nullable=False)
    #: Number of task_runs for this task.
    n_task_runs = Column(Integer, default=0, nullable=False)
agpl-3.0
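Hypothetical usage of the Counter model above, assuming a configured PYBOSSA application context; db.session is the Flask-SQLAlchemy session exposed via pybossa.core, and the ids are placeholders:

from pybossa.core import db
from pybossa.model.counter import Counter

# Create a counter row for one task and persist it (illustrative ids).
counter = Counter(project_id=1, task_id=42, n_task_runs=0)
db.session.add(counter)
db.session.commit()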
pplatek/odoo
addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/logreport.py
386
1736
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import tempfile

LOG_DEBUG = 'debug'
LOG_INFO = 'info'
LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'

_logger = logging.getLogger(__name__)


def log_detail(self):
    import os
    logfile_name = os.path.join(tempfile.gettempdir(),
                                "openerp_report_designer.log")
    hdlr = logging.FileHandler(logfile_name)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    _logger.addHandler(hdlr)
    _logger.setLevel(logging.INFO)


class Logger(object):
    def log_write(self, name, level, msg):
        getattr(_logger, level)(msg)

    def shutdown(self):
        logging.shutdown()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
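A hedged usage sketch for the module above, assuming it is importable as logreport; note that the self parameter of log_detail() is unused, so any placeholder value works:

from logreport import LOG_INFO, Logger, log_detail

# Attach the temp-file handler, then route one message through the wrapper.
log_detail(None)
Logger().log_write('designer', LOG_INFO, 'report rendering started')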
arpitn30/open-event-orga-server
tests/unittests/api/test_custom_fields.py
9
3404
import unittest

from app import current_app as app
from app.api.helpers.custom_fields import Color, Email, Uri, \
    ImageUri, DateTime, Integer, Float, ChoiceString, Upload
from tests.unittests.utils import OpenEventTestCase


class TestCustomFieldsValidation(OpenEventTestCase):
    """
    Test the validation methods of custom fields
    """
    def _test_common(self, field):
        field.required = False
        self.assertTrue(field.validate(None))
        field.required = True
        self.assertFalse(field.validate(None))
        if field.__schema_type__ != 'string':
            self.assertFalse(field.validate(''))

    def test_color_field(self):
        field = Color()
        self._test_common(field)
        self.assertFalse(field.validate('randomnothing'))
        self.assertTrue(field.validate('black'))
        self.assertTrue(field.validate('#44ff3b'))

    def test_email_field(self):
        field = Email()
        self._test_common(field)
        self.assertFalse(field.validate('website.com'))
        self.assertTrue(field.validate('email@gmail.com'))

    def test_uri_field(self):
        field = Uri()
        self._test_common(field)
        self.assertFalse(field.validate('somestring'))
        self.assertFalse(field.validate('website.com'))
        self.assertFalse(field.validate('www.website.com'))
        self.assertFalse(field.validate('http://bazooka'))
        self.assertTrue(field.validate('http://localhost/file'))
        self.assertTrue(field.validate('http://website.com'))
        self.assertTrue(field.validate('ftp://domain.com/blah'))

    def test_image_uri_field(self):
        field = ImageUri()
        self._test_common(field)
        # same as uri field, not many tests needed
        self.assertFalse(field.validate('imgur.com/image.png'))
        self.assertTrue(field.validate('http://imgur.com/image.png'))

    def test_datetime_field(self):
        field = DateTime()
        self._test_common(field)
        self.assertTrue(field.validate('2014-12-31 23:11:44'))
        self.assertTrue(field.validate('2014-12-31T23:11:44'))
        self.assertFalse(field.validate('2014-31-12T23:11:44'))
        self.assertFalse(field.validate('2014-12-32'))
        self.assertFalse(field.validate('2014-06-30 12:00'))

    def test_integer_field(self):
        field = Integer()
        self._test_common(field)
        self.assertTrue(field.validate(0))
        self.assertFalse(field.validate(-2323.23))
        self.assertFalse(field.validate(2323.23))

    def test_float_field(self):
        field = Float()
        self._test_common(field)
        self.assertTrue(field.validate(92))

    def test_choice_string_field(self):
        field = ChoiceString(choice_list=['a', 'b', 'c'])
        self._test_common(field)
        self.assertTrue(field.validate('a'))
        self.assertFalse(field.validate('d'))
        self.assertFalse(field.validate('ab'))

    def test_upload_field(self):
        with app.test_request_context():
            field = Upload()
            self._test_common(field)
            link = '/static/1'
            self.assertTrue(field.validate(link))
            z = field.format(link)
            self.assertNotEqual(link, z)
            self.assertTrue(field.validate(z), msg=z)
            self.assertEqual('http://site.co/1',
                             field.format('http://site.co/1'))


if __name__ == '__main__':
    unittest.main()
gpl-3.0
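The validator contract these tests exercise (a validate() method, a required flag, and a __schema_type__ marker) can be summarized with a minimal field of our own. PositiveInteger below is illustrative and not part of the open-event codebase:

class PositiveInteger(object):
    """Minimal custom field following the validate()/required contract."""
    __schema_type__ = 'integer'

    def __init__(self):
        self.required = True

    def validate(self, value):
        # None is acceptable only when the field is optional.
        if value is None:
            return not self.required
        return isinstance(value, int) and not isinstance(value, bool) \
            and value >= 0


field = PositiveInteger()
assert field.validate(3)
assert not field.validate(None)
assert not field.validate(-1)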
marbu/pylatest
tests/xdocutils/test_utils.py
1
6663
# -*- coding: utf8 -*-

"""
Tests of helper functions from pylatest.xdocutils.utils module.
"""

# Copyright (C) 2018 Martin Bukatovič <martin.bukatovic@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import textwrap

from docutils.core import publish_doctree
import pytest

from pylatest.xdocutils.core import pylatest_publish_parts
from pylatest.xdocutils.readers import NoDocInfoReader
from pylatest.xdocutils.utils import get_field_list
from pylatest.xdocutils.utils import get_testcase_id
from pylatest.xdocutils.utils import get_testcase_requirements


def _publish(source):
    """
    Parse rst source string into doctree.
    """
    doctree = publish_doctree(
        source=source,
        reader=NoDocInfoReader(),
        parser_name='restructuredtext',)
    return doctree


def test_get_field_list_null(empty_doctree):
    assert get_field_list(empty_doctree) == None


def test_get_field_list_missing():
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        There is no field list.

        Description
        ===========

        Nothing here as well.
        '''))
    assert get_field_list(doctree) == None


def test_get_field_list_present():
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :id: FOO-122
        :author: joe.foo@example.com
        :component: foo
        '''))
    assert get_field_list(doctree) is not None


def test_get_testcase_id_null(empty_doctree):
    assert get_testcase_id(empty_doctree) == None


def test_get_testcase_id():
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :id: FOO-122
        :author: joe.foo@example.com
        :component: foo
        '''))
    assert get_testcase_id(doctree) == "FOO-122"


#
# requirements
#

def test_get_testcase_requirements_null(empty_doctree):
    assert get_testcase_requirements(empty_doctree) == []


REQUIREMENT_FIELD_NAMES = ["requirement", "requirements"]


@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single(field_name):
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :{}: FOO-212
        '''.format(field_name)))
    assert get_testcase_requirements(doctree) == ["FOO-212"]


@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single_empty(field_name):
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :{}:
        '''.format(field_name)))
    assert get_testcase_requirements(doctree) == []


def test_get_testcase_requirements_many():
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :requirement: FOO-212
        :requirement: FOO-232
        :component: foo
        '''))
    assert get_testcase_requirements(doctree) == ["FOO-212", "FOO-232"]


@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_single(field_name):
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :{}:
          - FOO-212
        '''.format(field_name)))
    assert get_testcase_requirements(doctree) == ["FOO-212"]


@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many(field_name):
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :{}:
          - FOO-212
          - FOO-232
        '''.format(field_name)))
    assert get_testcase_requirements(doctree) == ["FOO-212", "FOO-232"]


@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many_someemptyitems(field_name):
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :{}:
          -
          - FOO-132
          -
        :requirement: FOO-130
        '''.format(field_name)))
    assert get_testcase_requirements(doctree) == ["FOO-132", "FOO-130"]


@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many_onlyemptyitems(field_name):
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :{}:
          -
          -
        '''.format(field_name)))
    assert get_testcase_requirements(doctree) == []


def test_get_testcase_requirements_many_list_many():
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :requirement: FOO-012
        :requirement: FOO-032
        :requirements:
          - FOO-212
          - FOO-232
        '''))
    assert get_testcase_requirements(doctree) == [
        "FOO-012", "FOO-032", "FOO-212", "FOO-232"]


@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single_url_link(field_name):
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :requirement: https://example.com
        '''.format(field_name)))
    results = get_testcase_requirements(doctree)
    assert len(results) == 1
    # check that we get actual rst node for a link (reference node)
    assert results[0].tagname == "reference"
    assert results[0].astext() == "https://example.com"


def test_get_testcase_requirements_many_list_url_link():
    doctree = _publish(textwrap.dedent('''\
        Test Foo
        ********

        :author: joe.foo@example.com
        :component: foo
        :requirements:
          - https://example.com/foo
          - https://example.com/bar
        '''))
    results = get_testcase_requirements(doctree)
    assert len(results) == 2
    # check that we get actual rst node for a link (reference node)
    assert results[0].tagname == "reference"
    assert results[1].tagname == "reference"
    # and expected content
    assert results[0].astext() == "https://example.com/foo"
    assert results[1].astext() == "https://example.com/bar"
gpl-3.0
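Outside of pytest, the helpers under test can be driven directly; a sketch reusing _publish() from the module above, with illustrative field values:

import textwrap

# Parse a small test case and query its metadata.
doctree = _publish(textwrap.dedent('''\
    Test Foo
    ********

    :id: FOO-7
    :requirement: FOO-1
    '''))
print(get_testcase_id(doctree))            # FOO-7
print(get_testcase_requirements(doctree))  # ['FOO-1']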
openshift/openshift-tools
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/openshift_health_checker/test/etcd_volume_test.py
55
3964
import pytest

from openshift_checks.etcd_volume import EtcdVolume
from openshift_checks import OpenShiftCheckException


@pytest.mark.parametrize('ansible_mounts,extra_words', [
    ([], ['none']),  # empty ansible_mounts
    ([{'mount': '/mnt'}], ['/mnt']),  # missing relevant mount paths
])
def test_cannot_determine_available_disk(ansible_mounts, extra_words):
    task_vars = dict(
        ansible_mounts=ansible_mounts,
    )

    with pytest.raises(OpenShiftCheckException) as excinfo:
        EtcdVolume(fake_execute_module, task_vars).run()

    for word in ['Unable to determine mount point'] + extra_words:
        assert word in str(excinfo.value)


@pytest.mark.parametrize('size_limit,ansible_mounts', [
    (
        # if no size limit is specified, expect max usage
        # limit to default to 90% of size_total
        None,
        [{
            'mount': '/',
            'size_available': 40 * 10**9,
            'size_total': 80 * 10**9
        }],
    ),
    (
        1,
        [{
            'mount': '/',
            'size_available': 30 * 10**9,
            'size_total': 30 * 10**9,
        }],
    ),
    (
        20000000000,
        [{
            'mount': '/',
            'size_available': 20 * 10**9,
            'size_total': 40 * 10**9,
        }],
    ),
    (
        5000000000,
        [{
            # not enough space on / ...
            'mount': '/',
            'size_available': 0,
            'size_total': 0,
        }, {
            # not enough space on /var/lib ...
            'mount': '/var/lib',
            'size_available': 2 * 10**9,
            'size_total': 21 * 10**9,
        }, {
            # ... but enough on /var/lib/etcd
            'mount': '/var/lib/etcd',
            'size_available': 36 * 10**9,
            'size_total': 40 * 10**9
        }],
    )
])
def test_succeeds_with_recommended_disk_space(size_limit, ansible_mounts):
    task_vars = dict(
        etcd_device_usage_threshold_percent=size_limit,
        ansible_mounts=ansible_mounts,
    )

    if task_vars["etcd_device_usage_threshold_percent"] is None:
        task_vars.pop("etcd_device_usage_threshold_percent")

    result = EtcdVolume(fake_execute_module, task_vars).run()

    assert not result.get('failed', False)


@pytest.mark.parametrize('size_limit_percent,ansible_mounts,extra_words', [
    (
        # if no size limit is specified, expect max usage
        # limit to default to 90% of size_total
        None,
        [{
            'mount': '/',
            'size_available': 1 * 10**9,
            'size_total': 100 * 10**9,
        }],
        ['99.0%'],
    ),
    (
        70.0,
        [{
            'mount': '/',
            'size_available': 1 * 10**6,
            'size_total': 5 * 10**9,
        }],
        ['100.0%'],
    ),
    (
        40.0,
        [{
            'mount': '/',
            'size_available': 2 * 10**9,
            'size_total': 6 * 10**9,
        }],
        ['66.7%'],
    ),
    (
        None,
        [{
            # enough space on /var ...
            'mount': '/var',
            'size_available': 20 * 10**9,
            'size_total': 20 * 10**9,
        }, {
            # .. but not enough on /var/lib
            'mount': '/var/lib',
            'size_available': 1 * 10**9,
            'size_total': 20 * 10**9,
        }],
        ['95.0%'],
    ),
])
def test_fails_with_insufficient_disk_space(size_limit_percent, ansible_mounts,
                                            extra_words):
    task_vars = dict(
        etcd_device_usage_threshold_percent=size_limit_percent,
        ansible_mounts=ansible_mounts,
    )

    if task_vars["etcd_device_usage_threshold_percent"] is None:
        task_vars.pop("etcd_device_usage_threshold_percent")

    result = EtcdVolume(fake_execute_module, task_vars).run()

    assert result['failed']
    for word in extra_words:
        assert word in result['msg']


def fake_execute_module(*args):
    raise AssertionError('this function should not be called')
apache-2.0
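The same check can be driven outside of pytest by mirroring the task_vars shape used above; a hedged sketch, assuming the test module's names are in scope:

# Run the EtcdVolume check directly against a healthy-looking mount table.
task_vars = dict(
    etcd_device_usage_threshold_percent=90.0,
    ansible_mounts=[{'mount': '/var/lib/etcd',
                     'size_available': 36 * 10**9,
                     'size_total': 40 * 10**9}],
)
result = EtcdVolume(fake_execute_module, task_vars).run()
assert not result.get('failed', False)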
opavader/fabric
tests/test_state.py
44
1109
from nose.tools import eq_

from fabric.state import _AliasDict


def test_dict_aliasing():
    """
    Assigning values to aliases updates aliased keys
    """
    ad = _AliasDict(
        {'bar': False, 'biz': True, 'baz': False},
        aliases={'foo': ['bar', 'biz', 'baz']}
    )
    # Before
    eq_(ad['bar'], False)
    eq_(ad['biz'], True)
    eq_(ad['baz'], False)
    # Change
    ad['foo'] = True
    # After
    eq_(ad['bar'], True)
    eq_(ad['biz'], True)
    eq_(ad['baz'], True)


def test_nested_dict_aliasing():
    """
    Aliases can be nested
    """
    ad = _AliasDict(
        {'bar': False, 'biz': True},
        aliases={'foo': ['bar', 'nested'], 'nested': ['biz']}
    )
    # Before
    eq_(ad['bar'], False)
    eq_(ad['biz'], True)
    # Change
    ad['foo'] = True
    # After
    eq_(ad['bar'], True)
    eq_(ad['biz'], True)


def test_dict_alias_expansion():
    """
    Alias expansion
    """
    ad = _AliasDict(
        {'bar': False, 'biz': True},
        aliases={'foo': ['bar', 'nested'], 'nested': ['biz']}
    )
    eq_(ad.expand_aliases(['foo']), ['bar', 'biz'])
bsd-2-clause
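The behaviour under test reduces to a small usage pattern; key names here are illustrative:

from fabric.state import _AliasDict

# 'silence' is not a real key; writing to it fans out to both targets.
ad = _AliasDict({'warn': False, 'quiet': False},
                aliases={'silence': ['warn', 'quiet']})
ad['silence'] = True
assert ad['warn'] and ad['quiet']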
cubicova17/annet
venv/lib/python2.7/site-packages/django/contrib/gis/gdal/feature.py
219
4255
# The GDAL C library, OGR exception, and the Field object
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType

# ctypes function prototypes
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api

from django.utils.encoding import force_bytes, force_text
from django.utils import six
from django.utils.six.moves import xrange

# For more information, see the OGR C API source code:
#  http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.


class Feature(GDALBase):
    """
    This class wraps an OGR Feature and needs to be instantiated
    from a Layer object.
    """

    #### Python 'magic' routines ####
    def __init__(self, feat, layer):
        """
        Initializes Feature from a pointer and its Layer object.
        """
        if not feat:
            raise OGRException('Cannot create OGR Feature, invalid pointer given.')
        self.ptr = feat
        self._layer = layer

    def __del__(self):
        "Releases a reference to this object."
        if self._ptr:
            capi.destroy_feature(self._ptr)

    def __getitem__(self, index):
        """
        Gets the Field object at the specified index, which may be either
        an integer or the Field's string label.  Note that the Field object
        is not the field's _value_ -- use the `get` method instead to
        retrieve the value (e.g. an integer) instead of a Field instance.
        """
        if isinstance(index, six.string_types):
            i = self.index(index)
        else:
            if index < 0 or index > self.num_fields:
                raise OGRIndexError('index out of range')
            i = index
        return Field(self, i)

    def __iter__(self):
        "Iterates over each field in the Feature."
        for i in xrange(self.num_fields):
            yield self[i]

    def __len__(self):
        "Returns the count of fields in this feature."
        return self.num_fields

    def __str__(self):
        "The string name of the feature."
        return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)

    def __eq__(self, other):
        "Does equivalence testing on the features."
        return bool(capi.feature_equal(self.ptr, other._ptr))

    #### Feature Properties ####
    @property
    def encoding(self):
        return self._layer._ds.encoding

    @property
    def fid(self):
        "Returns the feature identifier."
        return capi.get_fid(self.ptr)

    @property
    def layer_name(self):
        "Returns the name of the layer for the feature."
        name = capi.get_feat_name(self._layer._ldefn)
        return force_text(name, self.encoding, strings_only=True)

    @property
    def num_fields(self):
        "Returns the number of fields in the Feature."
        return capi.get_feat_field_count(self.ptr)

    @property
    def fields(self):
        "Returns a list of fields in the Feature."
        return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
                for i in xrange(self.num_fields)]

    @property
    def geom(self):
        "Returns the OGR Geometry for this Feature."
        # Retrieving the geometry pointer for the feature.
        geom_ptr = capi.get_feat_geom_ref(self.ptr)
        return OGRGeometry(geom_api.clone_geom(geom_ptr))

    @property
    def geom_type(self):
        "Returns the OGR Geometry Type for this Feature."
        return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))

    #### Feature Methods ####
    def get(self, field):
        """
        Returns the value of the field, instead of an instance of the Field
        object.  May take a string of the field name or a Field object as
        parameters.
        """
        field_name = getattr(field, 'name', field)
        return self[field_name].value

    def index(self, field_name):
        "Returns the index of the given field name."
        i = capi.get_field_index(self.ptr, force_bytes(field_name))
        if i < 0:
            raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
        return i
mit
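Hypothetical usage of the Feature class above; DataSource is the usual GDAL entry point that yields Layer and Feature objects, and the shapefile path and field name here are placeholders:

from django.contrib.gis.gdal import DataSource

ds = DataSource('/path/to/cities.shp')  # placeholder path
layer = ds[0]
for feat in layer:  # each item is a Feature as defined above
    print(feat.fid, feat.get('NAME'), feat.geom.geom_type)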
Ali-aqrabawi/ezclinic
lib/django/contrib/staticfiles/management/commands/findstatic.py
106
1745
from __future__ import unicode_literals

import os

from django.contrib.staticfiles import finders
from django.core.management.base import LabelCommand
from django.utils.encoding import force_text


class Command(LabelCommand):
    help = "Finds the absolute paths for the given static file(s)."
    label = 'staticfile'

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--first', action='store_false', dest='all',
            default=True,
            help="Only return the first match for each static file.",
        )

    def handle_label(self, path, **options):
        verbosity = options['verbosity']
        result = finders.find(path, all=options['all'])
        path = force_text(path)
        if verbosity >= 2:
            searched_locations = (
                "\nLooking in the following locations:\n  %s" %
                "\n  ".join(force_text(location)
                            for location in finders.searched_locations)
            )
        else:
            searched_locations = ''
        if result:
            if not isinstance(result, (list, tuple)):
                result = [result]
            result = (force_text(os.path.realpath(path)) for path in result)
            if verbosity >= 1:
                file_list = '\n  '.join(result)
                return ("Found '%s' here:\n  %s%s" %
                        (path, file_list, searched_locations))
            else:
                return '\n'.join(result)
        else:
            message = ["No matching file found for '%s'." % path]
            if verbosity >= 2:
                message.append(searched_locations)
            if verbosity >= 1:
                self.stderr.write('\n'.join(message))
mit
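A hedged sketch of invoking the command programmatically; this should be equivalent to `python manage.py findstatic css/base.css --first`, assuming call_command accepts the option by its dest name in this Django version:

from django.core.management import call_command

# 'all=False' corresponds to the --first flag declared above.
output = call_command('findstatic', 'css/base.css', all=False, verbosity=2)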
vshtanko/scikit-learn
examples/cluster/plot_agglomerative_clustering_metrics.py
402
4492
""" Agglomerative clustering with different metrics =============================================== Demonstrates the effect of different metrics on the hierarchical clustering. The example is engineered to show the effect of the choice of different metrics. It is applied to waveforms, which can be seen as high-dimensional vector. Indeed, the difference between metrics is usually more pronounced in high dimension (in particular for euclidean and cityblock). We generate data from three groups of waveforms. Two of the waveforms (waveform 1 and waveform 2) are proportional one to the other. The cosine distance is invariant to a scaling of the data, as a result, it cannot distinguish these two waveforms. Thus even with no noise, clustering using this distance will not separate out waveform 1 and 2. We add observation noise to these waveforms. We generate very sparse noise: only 6% of the time points contain noise. As a result, the l1 norm of this noise (ie "cityblock" distance) is much smaller than it's l2 norm ("euclidean" distance). This can be seen on the inter-class distance matrices: the values on the diagonal, that characterize the spread of the class, are much bigger for the Euclidean distance than for the cityblock distance. When we apply clustering to the data, we find that the clustering reflects what was in the distance matrices. Indeed, for the Euclidean distance, the classes are ill-separated because of the noise, and thus the clustering does not separate the waveforms. For the cityblock distance, the separation is good and the waveform classes are recovered. Finally, the cosine distance does not separate at all waveform 1 and 2, thus the clustering puts them in the same cluster. """ # Author: Gael Varoquaux # License: BSD 3-Clause or CC-0 import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import pairwise_distances np.random.seed(0) # Generate waveform data n_features = 2000 t = np.pi * np.linspace(0, 1, n_features) def sqr(x): return np.sign(np.cos(x)) X = list() y = list() for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]): for _ in range(30): phase_noise = .01 * np.random.normal() amplitude_noise = .04 * np.random.normal() additional_noise = 1 - 2 * np.random.rand(n_features) # Make the noise sparse additional_noise[np.abs(additional_noise) < .997] = 0 X.append(12 * ((a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise))) + additional_noise)) y.append(i) X = np.array(X) y = np.array(y) n_clusters = 3 labels = ('Waveform 1', 'Waveform 2', 'Waveform 3') # Plot the ground-truth labelling plt.figure() plt.axes([0, 0, 1, 1]) for l, c, n in zip(range(n_clusters), 'rgb', labels): lines = plt.plot(X[y == l].T, c=c, alpha=.5) lines[0].set_label(n) plt.legend(loc='best') plt.axis('tight') plt.axis('off') plt.suptitle("Ground truth", size=20) # Plot the distances for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): avg_dist = np.zeros((n_clusters, n_clusters)) plt.figure(figsize=(5, 4.5)) for i in range(n_clusters): for j in range(n_clusters): avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j], metric=metric).mean() avg_dist /= avg_dist.max() for i in range(n_clusters): for j in range(n_clusters): plt.text(i, j, '%5.3f' % avg_dist[i, j], verticalalignment='center', horizontalalignment='center') plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2, vmin=0) plt.xticks(range(n_clusters), labels, rotation=45) plt.yticks(range(n_clusters), labels) plt.colorbar() 
plt.suptitle("Interclass %s distances" % metric, size=18) plt.tight_layout() # Plot clustering results for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): model = AgglomerativeClustering(n_clusters=n_clusters, linkage="average", affinity=metric) model.fit(X) plt.figure() plt.axes([0, 0, 1, 1]) for l, c in zip(np.arange(model.n_clusters), 'rgbk'): plt.plot(X[model.labels_ == l].T, c=c, alpha=.5) plt.axis('tight') plt.axis('off') plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20) plt.show()
bsd-3-clause
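The clustering call at the heart of the example, in isolation on random data; note that affinity is the parameter name in this scikit-learn vintage (later releases renamed it to metric):

import numpy as np
from sklearn.cluster import AgglomerativeClustering

# Fit average-linkage clustering under the cityblock (l1) distance.
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
model = AgglomerativeClustering(n_clusters=3, linkage='average',
                                affinity='cityblock').fit(X)
print(model.labels_)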
mostafa8026/MyObjectListView
docs/conf.py
2
5224
# -*- coding: utf-8 -*-
#
# ObjectListView documentation build configuration file, created by
# sphinx-quickstart on Sun May 18 14:41:14 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.

import sys, os

# sys.path.append(os.path.abspath(".."))

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath("sphinxext"))

# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General substitutions.
project = 'ObjectListView'
copyright = '2006-2015, Phillip Piper'

# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '2.9'
# The full version, including alpha/beta/rc tags.
release = '2.9.0'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'c#'


# Options for HTML output
# -----------------------

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'master.css'

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_use_modindex = False

# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'ObjectListViewDoc'


# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = "a4"

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '11pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
    ('index', 'ObjectListView.tex', 'ObjectListView Documentation',
     'Phillip Piper', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
gpl-3.0
2014c2g3/0623exam
static/Brython3.1.1-20150328-091302/Lib/threading_1.py
730
45641
"""Thread module emulating a subset of Java's threading model.""" import sys as _sys import _thread from time import sleep as _sleep try: from time import monotonic as _time except ImportError: from time import time as _time from traceback import format_exc as _format_exc from _weakrefset import WeakSet # Note regarding PEP 8 compliant names # This threading model was originally inspired by Java, and inherited # the convention of camelCase function and method names from that # language. Those original names are not in any imminent danger of # being deprecated (even for Py3k),so this module provides them as an # alias for the PEP 8 compliant names # Note that using the new PEP 8 compliant names facilitates substitution # with the multiprocessing module, which doesn't provide the old # Java inspired names. __all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier', 'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size'] # Rename some stuff so "from threading import *" is safe _start_new_thread = _thread.start_new_thread _allocate_lock = _thread.allocate_lock get_ident = _thread.get_ident ThreadError = _thread.error try: _CRLock = _thread.RLock except AttributeError: _CRLock = None TIMEOUT_MAX = _thread.TIMEOUT_MAX del _thread # Support for profile and trace hooks _profile_hook = None _trace_hook = None def setprofile(func): """Set a profile function for all threads started from the threading module. The func will be passed to sys.setprofile() for each thread, before its run() method is called. """ global _profile_hook _profile_hook = func def settrace(func): """Set a trace function for all threads started from the threading module. The func will be passed to sys.settrace() for each thread, before its run() method is called. """ global _trace_hook _trace_hook = func # Synchronization classes Lock = _allocate_lock def RLock(*args, **kwargs): """Factory function that returns a new reentrant lock. A reentrant lock must be released by the thread that acquired it. Once a thread has acquired a reentrant lock, the same thread may acquire it again without blocking; the thread must release it once for each time it has acquired it. """ if _CRLock is None: return _PyRLock(*args, **kwargs) return _CRLock(*args, **kwargs) class _RLock: """This class implements reentrant lock objects. A reentrant lock must be released by the thread that acquired it. Once a thread has acquired a reentrant lock, the same thread may acquire it again without blocking; the thread must release it once for each time it has acquired it. """ def __init__(self): self._block = _allocate_lock() self._owner = None self._count = 0 def __repr__(self): owner = self._owner try: owner = _active[owner].name except KeyError: pass return "<%s owner=%r count=%d>" % ( self.__class__.__name__, owner, self._count) def acquire(self, blocking=True, timeout=-1): """Acquire a lock, blocking or non-blocking. When invoked without arguments: if this thread already owns the lock, increment the recursion level by one, and return immediately. Otherwise, if another thread owns the lock, block until the lock is unlocked. Once the lock is unlocked (not owned by any thread), then grab ownership, set the recursion level to one, and return. If more than one thread is blocked waiting until the lock is unlocked, only one at a time will be able to grab ownership of the lock. There is no return value in this case. 
When invoked with the blocking argument set to true, do the same thing as when called without arguments, and return true. When invoked with the blocking argument set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. When invoked with the floating-point timeout argument set to a positive value, block for at most the number of seconds specified by timeout and as long as the lock cannot be acquired. Return true if the lock has been acquired, false if the timeout has elapsed. """ me = get_ident() if self._owner == me: self._count = self._count + 1 return 1 rc = self._block.acquire(blocking, timeout) if rc: self._owner = me self._count = 1 return rc __enter__ = acquire def release(self): """Release a lock, decrementing the recursion level. If after the decrement it is zero, reset the lock to unlocked (not owned by any thread), and if any other threads are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. If after the decrement the recursion level is still nonzero, the lock remains locked and owned by the calling thread. Only call this method when the calling thread owns the lock. A RuntimeError is raised if this method is called when the lock is unlocked. There is no return value. """ if self._owner != get_ident(): raise RuntimeError("cannot release un-acquired lock") self._count = count = self._count - 1 if not count: self._owner = None self._block.release() def __exit__(self, t, v, tb): self.release() # Internal methods used by condition variables def _acquire_restore(self, state): self._block.acquire() self._count, self._owner = state def _release_save(self): if self._count == 0: raise RuntimeError("cannot release un-acquired lock") count = self._count self._count = 0 owner = self._owner self._owner = None self._block.release() return (count, owner) def _is_owned(self): return self._owner == get_ident() _PyRLock = _RLock class Condition: """Class that implements a condition variable. A condition variable allows one or more threads to wait until they are notified by another thread. If the lock argument is given and not None, it must be a Lock or RLock object, and it is used as the underlying lock. Otherwise, a new RLock object is created and used as the underlying lock. """ def __init__(self, lock=None): if lock is None: lock = RLock() self._lock = lock # Export the lock's acquire() and release() methods self.acquire = lock.acquire self.release = lock.release # If the lock defines _release_save() and/or _acquire_restore(), # these override the default implementations (which just call # release() and acquire() on the lock). Ditto for _is_owned(). try: self._release_save = lock._release_save except AttributeError: pass try: self._acquire_restore = lock._acquire_restore except AttributeError: pass try: self._is_owned = lock._is_owned except AttributeError: pass self._waiters = [] def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __repr__(self): return "<Condition(%s, %d)>" % (self._lock, len(self._waiters)) def _release_save(self): self._lock.release() # No state to save def _acquire_restore(self, x): self._lock.acquire() # Ignore saved state def _is_owned(self): # Return True if lock is owned by current_thread. # This method is called only if __lock doesn't have _is_owned(). 
if self._lock.acquire(0): self._lock.release() return False else: return True def wait(self, timeout=None): """Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notify_all() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired. """ if not self._is_owned(): raise RuntimeError("cannot wait on un-acquired lock") waiter = _allocate_lock() waiter.acquire() self._waiters.append(waiter) saved_state = self._release_save() try: # restore state no matter what (e.g., KeyboardInterrupt) if timeout is None: waiter.acquire() gotit = True else: if timeout > 0: gotit = waiter.acquire(True, timeout) else: gotit = waiter.acquire(False) if not gotit: try: self._waiters.remove(waiter) except ValueError: pass return gotit finally: self._acquire_restore(saved_state) def wait_for(self, predicate, timeout=None): """Wait until a condition evaluates to True. predicate should be a callable which result will be interpreted as a boolean value. A timeout may be provided giving the maximum time to wait. """ endtime = None waittime = timeout result = predicate() while not result: if waittime is not None: if endtime is None: endtime = _time() + waittime else: waittime = endtime - _time() if waittime <= 0: break self.wait(waittime) result = predicate() return result def notify(self, n=1): """Wake up one or more threads waiting on this condition, if any. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method wakes up at most n of the threads waiting for the condition variable; it is a no-op if no threads are waiting. """ if not self._is_owned(): raise RuntimeError("cannot notify on un-acquired lock") __waiters = self._waiters waiters = __waiters[:n] if not waiters: return for waiter in waiters: waiter.release() try: __waiters.remove(waiter) except ValueError: pass def notify_all(self): """Wake up all threads waiting on this condition. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. """ self.notify(len(self._waiters)) notifyAll = notify_all class Semaphore: """This class implements semaphore objects. Semaphores manage a counter representing the number of release() calls minus the number of acquire() calls, plus an initial value. The acquire() method blocks if necessary until it can return without making the counter negative. If not given, value defaults to 1. 
""" # After Tim Peters' semaphore class, but not quite the same (no maximum) def __init__(self, value=1): if value < 0: raise ValueError("semaphore initial value must be >= 0") self._cond = Condition(Lock()) self._value = value def acquire(self, blocking=True, timeout=None): """Acquire a semaphore, decrementing the internal counter by one. When invoked without arguments: if the internal counter is larger than zero on entry, decrement it by one and return immediately. If it is zero on entry, block, waiting until some other thread has called release() to make it larger than zero. This is done with proper interlocking so that if multiple acquire() calls are blocked, release() will wake exactly one of them up. The implementation may pick one at random, so the order in which blocked threads are awakened should not be relied on. There is no return value in this case. When invoked with blocking set to true, do the same thing as when called without arguments, and return true. When invoked with blocking set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. When invoked with a timeout other than None, it will block for at most timeout seconds. If acquire does not complete successfully in that interval, return false. Return true otherwise. """ if not blocking and timeout is not None: raise ValueError("can't specify timeout for non-blocking acquire") rc = False endtime = None with self._cond: while self._value == 0: if not blocking: break if timeout is not None: if endtime is None: endtime = _time() + timeout else: timeout = endtime - _time() if timeout <= 0: break self._cond.wait(timeout) else: self._value = self._value - 1 rc = True return rc __enter__ = acquire def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. """ with self._cond: self._value = self._value + 1 self._cond.notify() def __exit__(self, t, v, tb): self.release() class BoundedSemaphore(Semaphore): """Implements a bounded semaphore. A bounded semaphore checks to make sure its current value doesn't exceed its initial value. If it does, ValueError is raised. In most situations semaphores are used to guard resources with limited capacity. If the semaphore is released too many times it's a sign of a bug. If not given, value defaults to 1. Like regular semaphores, bounded semaphores manage a counter representing the number of release() calls minus the number of acquire() calls, plus an initial value. The acquire() method blocks if necessary until it can return without making the counter negative. If not given, value defaults to 1. """ def __init__(self, value=1): Semaphore.__init__(self, value) self._initial_value = value def release(self): """Release a semaphore, incrementing the internal counter by one. When the counter is zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. If the number of releases exceeds the number of acquires, raise a ValueError. """ with self._cond: if self._value >= self._initial_value: raise ValueError("Semaphore released too many times") self._value += 1 self._cond.notify() class Event: """Class implementing event objects. Events manage a flag that can be set to true with the set() method and reset to false with the clear() method. The wait() method blocks until the flag is true. 
The flag is initially false. """ # After Tim Peters' event class (without is_posted()) def __init__(self): self._cond = Condition(Lock()) self._flag = False def _reset_internal_locks(self): # private! called by Thread._reset_internal_locks by _after_fork() self._cond.__init__() def is_set(self): """Return true if and only if the internal flag is true.""" return self._flag isSet = is_set def set(self): """Set the internal flag to true. All threads waiting for it to become true are awakened. Threads that call wait() once the flag is true will not block at all. """ self._cond.acquire() try: self._flag = True self._cond.notify_all() finally: self._cond.release() def clear(self): """Reset the internal flag to false. Subsequently, threads calling wait() will block until set() is called to set the internal flag to true again. """ self._cond.acquire() try: self._flag = False finally: self._cond.release() def wait(self, timeout=None): """Block until the internal flag is true. If the internal flag is true on entry, return immediately. Otherwise, block until another thread calls set() to set the flag to true, or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). This method returns the internal flag on exit, so it will always return True except if a timeout is given and the operation times out. """ self._cond.acquire() try: signaled = self._flag if not signaled: signaled = self._cond.wait(timeout) return signaled finally: self._cond.release() # A barrier class. Inspired in part by the pthread_barrier_* api and # the CyclicBarrier class from Java. See # http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and # http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ # CyclicBarrier.html # for information. # We maintain two main states, 'filling' and 'draining' enabling the barrier # to be cyclic. Threads are not allowed into it until it has fully drained # since the previous cycle. In addition, a 'resetting' state exists which is # similar to 'draining' except that threads leave with a BrokenBarrierError, # and a 'broken' state in which all threads get the exception. class Barrier: """Implements a Barrier. Useful for synchronizing a fixed number of threads at known synchronization points. Threads block on 'wait()' and are simultaneously once they have all made that call. """ def __init__(self, parties, action=None, timeout=None): """Create a barrier, initialised to 'parties' threads. 'action' is a callable which, when supplied, will be called by one of the threads after they have all entered the barrier and just prior to releasing them all. If a 'timeout' is provided, it is uses as the default for all subsequent 'wait()' calls. """ self._cond = Condition(Lock()) self._action = action self._timeout = timeout self._parties = parties self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken self._count = 0 def wait(self, timeout=None): """Wait for the barrier. When the specified number of threads have started waiting, they are all simultaneously awoken. If an 'action' was provided for the barrier, one of the threads will have executed that callback prior to returning. Returns an individual index number from 0 to 'parties-1'. """ if timeout is None: timeout = self._timeout with self._cond: self._enter() # Block while the barrier drains. 
index = self._count self._count += 1 try: if index + 1 == self._parties: # We release the barrier self._release() else: # We wait until someone releases us self._wait(timeout) return index finally: self._count -= 1 # Wake up any threads waiting for barrier to drain. self._exit() # Block until the barrier is ready for us, or raise an exception # if it is broken. def _enter(self): while self._state in (-1, 1): # It is draining or resetting, wait until done self._cond.wait() #see if the barrier is in a broken state if self._state < 0: raise BrokenBarrierError assert self._state == 0 # Optionally run the 'action' and release the threads waiting # in the barrier. def _release(self): try: if self._action: self._action() # enter draining state self._state = 1 self._cond.notify_all() except: #an exception during the _action handler. Break and reraise self._break() raise # Wait in the barrier until we are relased. Raise an exception # if the barrier is reset or broken. def _wait(self, timeout): if not self._cond.wait_for(lambda : self._state != 0, timeout): #timed out. Break the barrier self._break() raise BrokenBarrierError if self._state < 0: raise BrokenBarrierError assert self._state == 1 # If we are the last thread to exit the barrier, signal any threads # waiting for the barrier to drain. def _exit(self): if self._count == 0: if self._state in (-1, 1): #resetting or draining self._state = 0 self._cond.notify_all() def reset(self): """Reset the barrier to the initial state. Any threads currently waiting will get the BrokenBarrier exception raised. """ with self._cond: if self._count > 0: if self._state == 0: #reset the barrier, waking up threads self._state = -1 elif self._state == -2: #was broken, set it to reset state #which clears when the last thread exits self._state = -1 else: self._state = 0 self._cond.notify_all() def abort(self): """Place the barrier into a 'broken' state. Useful in case of error. Any currently waiting threads and threads attempting to 'wait()' will have BrokenBarrierError raised. """ with self._cond: self._break() def _break(self): # An internal error was detected. The barrier is set to # a broken state all parties awakened. self._state = -2 self._cond.notify_all() @property def parties(self): """Return the number of threads required to trip the barrier.""" return self._parties @property def n_waiting(self): """Return the number of threads currently waiting at the barrier.""" # We don't need synchronization here since this is an ephemeral result # anyway. It returns the correct value in the steady state. if self._state == 0: return self._count return 0 @property def broken(self): """Return True if the barrier is in a broken state.""" return self._state == -2 # exception raised by the Barrier class class BrokenBarrierError(RuntimeError): pass # Helper to generate new thread names _counter = 0 def _newname(template="Thread-%d"): global _counter _counter = _counter + 1 return template % _counter # Active thread administration _active_limbo_lock = _allocate_lock() _active = {} # maps thread id to Thread object _limbo = {} # For debug and leak testing _dangling = WeakSet() # Main class for threads class Thread: """A class that represents a thread of control. This class can be safely subclassed in a limited fashion. There are two ways to specify the activity: by passing a callable object to the constructor, or by overriding the run() method in a subclass. 
""" __initialized = False # Need to store a reference to sys.exc_info for printing # out exceptions when a thread tries to use a global var. during interp. # shutdown and thus raises an exception about trying to perform some # operation on/with a NoneType __exc_info = _sys.exc_info # Keep sys.exc_clear too to clear the exception just before # allowing .join() to return. #XXX __exc_clear = _sys.exc_clear def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None): """This constructor should always be called with keyword arguments. Arguments are: *group* should be None; reserved for future extension when a ThreadGroup class is implemented. *target* is the callable object to be invoked by the run() method. Defaults to None, meaning nothing is called. *name* is the thread name. By default, a unique name is constructed of the form "Thread-N" where N is a small decimal number. *args* is the argument tuple for the target invocation. Defaults to (). *kwargs* is a dictionary of keyword arguments for the target invocation. Defaults to {}. If a subclass overrides the constructor, it must make sure to invoke the base class constructor (Thread.__init__()) before doing anything else to the thread. """ assert group is None, "group argument must be None for now" if kwargs is None: kwargs = {} self._target = target self._name = str(name or _newname()) self._args = args self._kwargs = kwargs if daemon is not None: self._daemonic = daemon else: self._daemonic = current_thread().daemon self._ident = None self._started = Event() self._stopped = False self._block = Condition(Lock()) self._initialized = True # sys.stderr is not stored in the class like # sys.exc_info since it can be changed between instances self._stderr = _sys.stderr _dangling.add(self) def _reset_internal_locks(self): # private! Called by _after_fork() to reset our internal locks as # they may be in an invalid state leading to a deadlock or crash. if hasattr(self, '_block'): # DummyThread deletes _block self._block.__init__() self._started._reset_internal_locks() def __repr__(self): assert self._initialized, "Thread.__init__() was not called" status = "initial" if self._started.is_set(): status = "started" if self._stopped: status = "stopped" if self._daemonic: status += " daemon" if self._ident is not None: status += " %s" % self._ident return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status) def start(self): """Start the thread's activity. It must be called at most once per thread object. It arranges for the object's run() method to be invoked in a separate thread of control. This method will raise a RuntimeError if called more than once on the same thread object. """ if not self._initialized: raise RuntimeError("thread.__init__() not called") if self._started.is_set(): raise RuntimeError("threads can only be started once") with _active_limbo_lock: _limbo[self] = self try: _start_new_thread(self._bootstrap, ()) except Exception: with _active_limbo_lock: del _limbo[self] raise self._started.wait() def run(self): """Method representing the thread's activity. You may override this method in a subclass. The standard run() method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the args and kwargs arguments, respectively. 
""" try: if self._target: self._target(*self._args, **self._kwargs) finally: # Avoid a refcycle if the thread is running a function with # an argument that has a member that points to the thread. del self._target, self._args, self._kwargs def _bootstrap(self): # Wrapper around the real bootstrap code that ignores # exceptions during interpreter cleanup. Those typically # happen when a daemon thread wakes up at an unfortunate # moment, finds the world around it destroyed, and raises some # random exception *** while trying to report the exception in # _bootstrap_inner() below ***. Those random exceptions # don't help anybody, and they confuse users, so we suppress # them. We suppress them only when it appears that the world # indeed has already been destroyed, so that exceptions in # _bootstrap_inner() during normal business hours are properly # reported. Also, we only suppress them for daemonic threads; # if a non-daemonic encounters this, something else is wrong. try: self._bootstrap_inner() except: if self._daemonic and _sys is None: return raise def _set_ident(self): self._ident = get_ident() def _bootstrap_inner(self): try: self._set_ident() self._started.set() with _active_limbo_lock: _active[self._ident] = self del _limbo[self] if _trace_hook: _sys.settrace(_trace_hook) if _profile_hook: _sys.setprofile(_profile_hook) try: self.run() except SystemExit: pass except: # If sys.stderr is no more (most likely from interpreter # shutdown) use self._stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined since the creation of # self. if _sys: _sys.stderr.write("Exception in thread %s:\n%s\n" % (self.name, _format_exc())) else: # Do the best job possible w/o a huge amt. of code to # approximate a traceback (code ideas from # Lib/traceback.py) exc_type, exc_value, exc_tb = self._exc_info() try: print(( "Exception in thread " + self.name + " (most likely raised during interpreter shutdown):"), file=self._stderr) print(( "Traceback (most recent call last):"), file=self._stderr) while exc_tb: print(( ' File "%s", line %s, in %s' % (exc_tb.tb_frame.f_code.co_filename, exc_tb.tb_lineno, exc_tb.tb_frame.f_code.co_name)), file=self._stderr) exc_tb = exc_tb.tb_next print(("%s: %s" % (exc_type, exc_value)), file=self._stderr) # Make sure that exc_tb gets deleted since it is a memory # hog; deleting everything else is just for thoroughness finally: del exc_type, exc_value, exc_tb finally: # Prevent a race in # test_threading.test_no_refcycle_through_target when # the exception keeps the target alive past when we # assert that it's dead. #XXX self.__exc_clear() pass finally: with _active_limbo_lock: self._stop() try: # We don't call self._delete() because it also # grabs _active_limbo_lock. del _active[get_ident()] except: pass def _stop(self): self._block.acquire() self._stopped = True self._block.notify_all() self._block.release() def _delete(self): "Remove current thread from the dict of currently running threads." # Notes about running with _dummy_thread: # # Must take care to not raise an exception if _dummy_thread is being # used (and thus this module is being used as an instance of # dummy_threading). _dummy_thread.get_ident() always returns -1 since # there is only one thread if _dummy_thread is being used. Thus # len(_active) is always <= 1 here, and any Thread instance created # overwrites the (if any) thread currently registered in _active. # # An instance of _MainThread is always created by 'threading'. 
This # gets overwritten the instant an instance of Thread is created; both # threads return -1 from _dummy_thread.get_ident() and thus have the # same key in the dict. So when the _MainThread instance created by # 'threading' tries to clean itself up when atexit calls this method # it gets a KeyError if another Thread instance was created. # # This all means that KeyError from trying to delete something from # _active if dummy_threading is being used is a red herring. But # since it isn't if dummy_threading is *not* being used then don't # hide the exception. try: with _active_limbo_lock: del _active[get_ident()] # There must not be any python code between the previous line # and after the lock is released. Otherwise a tracing function # could try to acquire the lock again in the same thread, (in # current_thread()), and would block. except KeyError: if 'dummy_threading' not in _sys.modules: raise def join(self, timeout=None): """Wait until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates -- either normally or through an unhandled exception or until the optional timeout occurs. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). As join() always returns None, you must call isAlive() after join() to decide whether a timeout happened -- if the thread is still alive, the join() call timed out. When the timeout argument is not present or None, the operation will block until the thread terminates. A thread can be join()ed many times. join() raises a RuntimeError if an attempt is made to join the current thread as that would cause a deadlock. It is also an error to join() a thread before it has been started and attempts to do so raises the same exception. """ if not self._initialized: raise RuntimeError("Thread.__init__() not called") if not self._started.is_set(): raise RuntimeError("cannot join thread before it is started") if self is current_thread(): raise RuntimeError("cannot join current thread") self._block.acquire() try: if timeout is None: while not self._stopped: self._block.wait() else: deadline = _time() + timeout while not self._stopped: delay = deadline - _time() if delay <= 0: break self._block.wait(delay) finally: self._block.release() @property def name(self): """A string used for identification purposes only. It has no semantics. Multiple threads may be given the same name. The initial name is set by the constructor. """ assert self._initialized, "Thread.__init__() not called" return self._name @name.setter def name(self, name): assert self._initialized, "Thread.__init__() not called" self._name = str(name) @property def ident(self): """Thread identifier of this thread or None if it has not been started. This is a nonzero integer. See the thread.get_ident() function. Thread identifiers may be recycled when a thread exits and another thread is created. The identifier is available even after the thread has exited. """ assert self._initialized, "Thread.__init__() not called" return self._ident def is_alive(self): """Return whether the thread is alive. This method returns True just before the run() method starts until just after the run() method terminates. The module function enumerate() returns a list of all alive threads. 
""" assert self._initialized, "Thread.__init__() not called" return self._started.is_set() and not self._stopped isAlive = is_alive @property def daemon(self): """A boolean value indicating whether this thread is a daemon thread. This must be set before start() is called, otherwise RuntimeError is raised. Its initial value is inherited from the creating thread; the main thread is not a daemon thread and therefore all threads created in the main thread default to daemon = False. The entire Python program exits when no alive non-daemon threads are left. """ assert self._initialized, "Thread.__init__() not called" return self._daemonic @daemon.setter def daemon(self, daemonic): if not self._initialized: raise RuntimeError("Thread.__init__() not called") if self._started.is_set(): raise RuntimeError("cannot set daemon status of active thread"); self._daemonic = daemonic def isDaemon(self): return self.daemon def setDaemon(self, daemonic): self.daemon = daemonic def getName(self): return self.name def setName(self, name): self.name = name # The timer class was contributed by Itamar Shtull-Trauring class Timer(Thread): """Call a function after a specified number of seconds: t = Timer(30.0, f, args=None, kwargs=None) t.start() t.cancel() # stop the timer's action if it's still waiting """ def __init__(self, interval, function, args=None, kwargs=None): Thread.__init__(self) self.interval = interval self.function = function self.args = args if args is not None else [] self.kwargs = kwargs if kwargs is not None else {} self.finished = Event() def cancel(self): """Stop the timer if it hasn't finished yet.""" self.finished.set() def run(self): self.finished.wait(self.interval) if not self.finished.is_set(): self.function(*self.args, **self.kwargs) self.finished.set() # Special thread class to represent the main thread # This is garbage collected through an exit handler class _MainThread(Thread): def __init__(self): Thread.__init__(self, name="MainThread", daemon=False) self._started.set() self._set_ident() with _active_limbo_lock: _active[self._ident] = self def _exitfunc(self): self._stop() t = _pickSomeNonDaemonThread() while t: t.join() t = _pickSomeNonDaemonThread() self._delete() def _pickSomeNonDaemonThread(): for t in enumerate(): if not t.daemon and t.is_alive(): return t return None # Dummy thread class to represent threads not started here. # These aren't garbage collected when they die, nor can they be waited for. # If they invoke anything in threading.py that calls current_thread(), they # leave an entry in the _active dict forever after. # Their purpose is to return *something* from current_thread(). # They are marked as daemon threads so we won't wait for them # when we exit (conform previous semantics). class _DummyThread(Thread): def __init__(self): Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True) # Thread._block consumes an OS-level locking primitive, which # can never be used by a _DummyThread. Since a _DummyThread # instance is immortal, that's bad, so release this resource. del self._block self._started.set() self._set_ident() with _active_limbo_lock: _active[self._ident] = self def _stop(self): pass def join(self, timeout=None): assert False, "cannot join a dummy thread" # Global API functions def current_thread(): """Return the current Thread object, corresponding to the caller's thread of control. If the caller's thread of control was not created through the threading module, a dummy thread object with limited functionality is returned. 
""" try: return _active[get_ident()] except KeyError: return _DummyThread() currentThread = current_thread def active_count(): """Return the number of Thread objects currently alive. The returned count is equal to the length of the list returned by enumerate(). """ with _active_limbo_lock: return len(_active) + len(_limbo) activeCount = active_count def _enumerate(): # Same as enumerate(), but without the lock. Internal use only. return list(_active.values()) + list(_limbo.values()) def enumerate(): """Return a list of all Thread objects currently alive. The list includes daemonic threads, dummy thread objects created by current_thread(), and the main thread. It excludes terminated threads and threads that have not yet been started. """ with _active_limbo_lock: return list(_active.values()) + list(_limbo.values()) from _thread import stack_size # Create the main thread object, # and make it available for the interpreter # (Py_Main) as threading._shutdown. _shutdown = _MainThread()._exitfunc # get thread-local implementation, either from the thread # module, or from the python fallback try: from _thread import _local as local except ImportError: from _threading_local import local def _after_fork(): # This function is called by Python/ceval.c:PyEval_ReInitThreads which # is called from PyOS_AfterFork. Here we cleanup threading module state # that should not exist after a fork. # Reset _active_limbo_lock, in case we forked while the lock was held # by another (non-forked) thread. http://bugs.python.org/issue874900 global _active_limbo_lock _active_limbo_lock = _allocate_lock() # fork() only copied the current thread; clear references to others. new_active = {} current = current_thread() with _active_limbo_lock: for thread in _enumerate(): # Any lock/condition variable may be currently locked or in an # invalid state, so we reinitialize them. thread._reset_internal_locks() if thread is current: # There is only one active thread. We reset the ident to # its new value since it can have changed. ident = get_ident() thread._ident = ident new_active[ident] = thread else: # All the others are already stopped. thread._stop() _limbo.clear() _active.clear() _active.update(new_active) assert len(_active) == 1
gpl-3.0
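A minimal usage sketch for the Thread API in the record above (plain standard-library calls; the worker function, thread names, and counts are illustrative):

import threading

def worker(n):
    # target callable that Thread.run() invokes in the new thread of control
    print("processing item", n)

threads = [threading.Thread(target=worker, args=(i,), name="worker-%d" % i)
           for i in range(4)]
for t in threads:
    t.daemon = False  # must be set before start(); inherited from the creating thread by default
    t.start()         # a second start() on the same object raises RuntimeError
for t in threads:
    t.join()          # returns None; check is_alive() afterwards when passing a timeout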
havard024/prego
venv/lib/python2.7/site-packages/django/middleware/csrf.py
20
8862
""" Cross Site Request Forgery Middleware. This module provides a middleware that implements protection against request forgeries from other sites. """ from __future__ import unicode_literals import hashlib import logging import re import random from django.conf import settings from django.core.urlresolvers import get_callable from django.utils.cache import patch_vary_headers from django.utils.encoding import force_text from django.utils.http import same_origin from django.utils.crypto import constant_time_compare, get_random_string logger = logging.getLogger('django.request') REASON_NO_REFERER = "Referer checking failed - no Referer." REASON_BAD_REFERER = "Referer checking failed - %s does not match %s." REASON_NO_CSRF_COOKIE = "CSRF cookie not set." REASON_BAD_TOKEN = "CSRF token missing or incorrect." CSRF_KEY_LENGTH = 32 def _get_failure_view(): """ Returns the view to be used for CSRF rejections """ return get_callable(settings.CSRF_FAILURE_VIEW) def _get_new_csrf_key(): return get_random_string(CSRF_KEY_LENGTH) def get_token(request): """ Returns the CSRF token required for a POST form. The token is an alphanumeric value. A side effect of calling this function is to make the csrf_protect decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie' header to the outgoing response. For this reason, you may need to use this function lazily, as is done by the csrf context processor. """ request.META["CSRF_COOKIE_USED"] = True return request.META.get("CSRF_COOKIE", None) def _sanitize_token(token): # Allow only alphanum if len(token) > CSRF_KEY_LENGTH: return _get_new_csrf_key() token = re.sub('[^a-zA-Z0-9]+', '', force_text(token)) if token == "": # In case the cookie has been truncated to nothing at some point. return _get_new_csrf_key() return token class CsrfViewMiddleware(object): """ Middleware that requires a present and correct csrfmiddlewaretoken for POST requests that have a CSRF cookie, and sets an outgoing CSRF cookie. This middleware should be used in conjunction with the csrf_token template tag. """ # The _accept and _reject methods currently only exist for the sake of the # requires_csrf_token decorator. def _accept(self, request): # Avoid checking the request twice by adding a custom attribute to # request. This will be relevant when both decorator and middleware # are used. request.csrf_processing_done = True return None def _reject(self, request, reason): return _get_failure_view()(request, reason=reason) def process_view(self, request, callback, callback_args, callback_kwargs): if getattr(request, 'csrf_processing_done', False): return None try: csrf_token = _sanitize_token( request.COOKIES[settings.CSRF_COOKIE_NAME]) # Use same token next time request.META['CSRF_COOKIE'] = csrf_token except KeyError: csrf_token = None # Generate token and store it in the request, so it's # available to the view. request.META["CSRF_COOKIE"] = _get_new_csrf_key() # Wait until request.META["CSRF_COOKIE"] has been manipulated before # bailing out, so that get_token still works if getattr(callback, 'csrf_exempt', False): return None # Assume that anything not defined as 'safe' by RFC2616 needs protection if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'): if getattr(request, '_dont_enforce_csrf_checks', False): # Mechanism to turn off CSRF checks for test suite. # It comes after the creation of CSRF cookies, so that # everything else continues to work exactly the same # (e.g. cookies are sent, etc.), but before any # branches that call reject(). 
return self._accept(request) if request.is_secure(): # Suppose user visits http://example.com/ # An active network attacker (man-in-the-middle, MITM) sends a # POST form that targets https://example.com/detonate-bomb/ and # submits it via JavaScript. # # The attacker will need to provide a CSRF cookie and token, but # that's no problem for a MITM and the session-independent # nonce we're using. So the MITM can circumvent the CSRF # protection. This is true for any HTTP connection, but anyone # using HTTPS expects better! For this reason, for # https://example.com/ we need additional protection that treats # http://example.com/ as completely untrusted. Under HTTPS, # Barth et al. found that the Referer header is missing for # same-domain requests in only about 0.2% of cases or less, so # we can use strict Referer checking. referer = request.META.get('HTTP_REFERER') if referer is None: logger.warning('Forbidden (%s): %s', REASON_NO_REFERER, request.path, extra={ 'status_code': 403, 'request': request, } ) return self._reject(request, REASON_NO_REFERER) # Note that request.get_host() includes the port. good_referer = 'https://%s/' % request.get_host() if not same_origin(referer, good_referer): reason = REASON_BAD_REFERER % (referer, good_referer) logger.warning('Forbidden (%s): %s', reason, request.path, extra={ 'status_code': 403, 'request': request, } ) return self._reject(request, reason) if csrf_token is None: # No CSRF cookie. For POST requests, we insist on a CSRF cookie, # and in this way we can avoid all CSRF attacks, including login # CSRF. logger.warning('Forbidden (%s): %s', REASON_NO_CSRF_COOKIE, request.path, extra={ 'status_code': 403, 'request': request, } ) return self._reject(request, REASON_NO_CSRF_COOKIE) # Check non-cookie token for match. request_csrf_token = "" if request.method == "POST": request_csrf_token = request.POST.get('csrfmiddlewaretoken', '') if request_csrf_token == "": # Fall back to X-CSRFToken, to make things easier for AJAX, # and possible for PUT/DELETE. request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '') if not constant_time_compare(request_csrf_token, csrf_token): logger.warning('Forbidden (%s): %s', REASON_BAD_TOKEN, request.path, extra={ 'status_code': 403, 'request': request, } ) return self._reject(request, REASON_BAD_TOKEN) return self._accept(request) def process_response(self, request, response): if getattr(response, 'csrf_processing_done', False): return response # If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was # never called, probaby because a request middleware returned a response # (for example, contrib.auth redirecting to a login page). if request.META.get("CSRF_COOKIE") is None: return response if not request.META.get("CSRF_COOKIE_USED", False): return response # Set the CSRF cookie even if it's already set, so we renew # the expiry timer. response.set_cookie(settings.CSRF_COOKIE_NAME, request.META["CSRF_COOKIE"], max_age = 60 * 60 * 24 * 7 * 52, domain=settings.CSRF_COOKIE_DOMAIN, path=settings.CSRF_COOKIE_PATH, secure=settings.CSRF_COOKIE_SECURE ) # Content varies with the CSRF cookie, so set the Vary header. patch_vary_headers(response, ('Cookie',)) response.csrf_processing_done = True return response
mit
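A sketch of the client side this middleware expects for non-form POSTs, assuming the fetched page called get_token (e.g. via the csrf context processor) so the cookie was actually set; the URLs are illustrative and the cookie name is the CSRF_COOKIE_NAME default:

import requests

session = requests.Session()
session.get("https://example.com/form/")      # a page that used the token sets the cookie
token = session.cookies.get("csrftoken")      # default settings.CSRF_COOKIE_NAME
resp = session.post(
    "https://example.com/submit/",
    data={"field": "value"},
    headers={
        "X-CSRFToken": token,                 # surfaces as META['HTTP_X_CSRFTOKEN']
        "Referer": "https://example.com/",    # strict referer check applies over HTTPS
    },
)

The X-CSRFToken header is the fallback process_view checks when csrfmiddlewaretoken is absent from request.POST, which is what makes AJAX and PUT/DELETE requests workable.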
gogogo/gogogo-hk
gogogo/models/property.py
1
3233
from google.appengine.ext import db
from django import forms
from django.utils.translation import ugettext_lazy as _


class TransitTypeProperty(db.IntegerProperty):
    """
    Transit Type Property - Storage of transit type
    """

    def __init__(self, *args, **kwargs):
        kwargs["choices"] = range(0, 8)
        db.IntegerProperty.__init__(self, *args, **kwargs)

    def validate(self, value):
        if isinstance(value, basestring):
            value = int(value)
        return super(TransitTypeProperty, self).validate(value)

    def get_form_field(self, **kwargs):
        attrs = {
            'form_class': forms.ChoiceField,
            'choices': TransitTypeProperty.get_choices()
        }
        attrs.update(kwargs)
        return super(TransitTypeProperty, self).get_form_field(**attrs)

    def get_choices():
        ret = [(i, TransitTypeProperty.get_type_name(i)) for i in range(0, 8)]
        return ret
    get_choices = staticmethod(get_choices)

    def get_basic_type_name_list():
        """
        Return a list of basic type names
        """
        ret = [TransitTypeProperty.get_type_name(i) for i in range(0, 8)]
        return ret
    get_basic_type_name_list = staticmethod(get_basic_type_name_list)

    def get_type_name(type):
        if type == 0:
            return _("Tram, Streetcar, Light rail")
        elif type == 1:
            return _("Subway, Metro")  # Any underground rail system within a metropolitan area
        elif type == 2:
            return _("Rail")  # Used for intercity or long-distance travel.
        elif type == 3:
            return _("Bus")
        elif type == 4:
            return _("Ferry")
        elif type == 5:
            return _("Cable car")
        elif type == 6:
            return _("Gondola, Suspended cable car")
        elif type == 7:
            return _("Funicular")
        else:
            return ""
    get_type_name = staticmethod(get_type_name)


class PaymentMethodProperty(db.IntegerProperty):
    """
    Payment Method
    """

    def __init__(self, *args, **kwargs):
        kwargs["choices"] = range(0, 2)
        if "default" not in kwargs:
            kwargs["default"] = 0
        db.IntegerProperty.__init__(self, *args, **kwargs)

    def validate(self, value):
        if isinstance(value, basestring):
            value = int(value)
        return super(PaymentMethodProperty, self).validate(value)

    def get_form_field(self, **kwargs):
        attrs = {
            'form_class': forms.ChoiceField,
            'choices': PaymentMethodProperty.get_choices()
        }
        attrs.update(kwargs)
        return super(PaymentMethodProperty, self).get_form_field(**attrs)

    def get_choices():
        ret = [(i, PaymentMethodProperty.get_type_name(i)) for i in range(0, 2)]
        return ret
    get_choices = staticmethod(get_choices)

    def get_type_name(type):
        if type == 0:
            return _("Fare is paid on board")
        elif type == 1:
            return _("Fare must be paid before boarding")
    get_type_name = staticmethod(get_type_name)
agpl-3.0
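A sketch of how these properties would be declared on an App Engine model, respecting the integer choices they enforce; the Route model and its field names are hypothetical:

from google.appengine.ext import db
# hypothetical import path for the property classes in the record above
from gogogo.models.property import TransitTypeProperty, PaymentMethodProperty

class Route(db.Model):
    # stores the GTFS-style integer code; choices are constrained to range(0, 8)
    type = TransitTypeProperty()
    payment = PaymentMethodProperty()  # defaults to 0, "Fare is paid on board"

route = Route(type=3)  # 3 -> Bus
route.put()            # only meaningful inside an App Engine runtime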
Oliver2213/NVDAYoutube-dl
addon/globalPlugins/nvdaYoutubeDL/lib/xml/dom/xmlbuilder.py
239
12337
"""Implementation of the DOM Level 3 'LS-Load' feature.""" import copy import xml.dom from xml.dom.NodeFilter import NodeFilter __all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"] class Options: """Features object that has variables set for each DOMBuilder feature. The DOMBuilder class uses an instance of this class to pass settings to the ExpatBuilder class. """ # Note that the DOMBuilder class in LoadSave constrains which of these # values can be set using the DOM Level 3 LoadSave feature. namespaces = 1 namespace_declarations = True validation = False external_parameter_entities = True external_general_entities = True external_dtd_subset = True validate_if_schema = False validate = False datatype_normalization = False create_entity_ref_nodes = True entities = True whitespace_in_element_content = True cdata_sections = True comments = True charset_overrides_xml_encoding = True infoset = False supported_mediatypes_only = False errorHandler = None filter = None class DOMBuilder: entityResolver = None errorHandler = None filter = None ACTION_REPLACE = 1 ACTION_APPEND_AS_CHILDREN = 2 ACTION_INSERT_AFTER = 3 ACTION_INSERT_BEFORE = 4 _legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN, ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE) def __init__(self): self._options = Options() def _get_entityResolver(self): return self.entityResolver def _set_entityResolver(self, entityResolver): self.entityResolver = entityResolver def _get_errorHandler(self): return self.errorHandler def _set_errorHandler(self, errorHandler): self.errorHandler = errorHandler def _get_filter(self): return self.filter def _set_filter(self, filter): self.filter = filter def setFeature(self, name, state): if self.supportsFeature(name): state = state and 1 or 0 try: settings = self._settings[(_name_xform(name), state)] except KeyError: raise xml.dom.NotSupportedErr( "unsupported feature: %r" % (name,)) else: for name, value in settings: setattr(self._options, name, value) else: raise xml.dom.NotFoundErr("unknown feature: " + repr(name)) def supportsFeature(self, name): return hasattr(self._options, _name_xform(name)) def canSetFeature(self, name, state): key = (_name_xform(name), state and 1 or 0) return key in self._settings # This dictionary maps from (feature,value) to a list of # (option,value) pairs that should be set on the Options object. # If a (feature,value) setting is not in this dictionary, it is # not supported by the DOMBuilder. 
# _settings = { ("namespace_declarations", 0): [ ("namespace_declarations", 0)], ("namespace_declarations", 1): [ ("namespace_declarations", 1)], ("validation", 0): [ ("validation", 0)], ("external_general_entities", 0): [ ("external_general_entities", 0)], ("external_general_entities", 1): [ ("external_general_entities", 1)], ("external_parameter_entities", 0): [ ("external_parameter_entities", 0)], ("external_parameter_entities", 1): [ ("external_parameter_entities", 1)], ("validate_if_schema", 0): [ ("validate_if_schema", 0)], ("create_entity_ref_nodes", 0): [ ("create_entity_ref_nodes", 0)], ("create_entity_ref_nodes", 1): [ ("create_entity_ref_nodes", 1)], ("entities", 0): [ ("create_entity_ref_nodes", 0), ("entities", 0)], ("entities", 1): [ ("entities", 1)], ("whitespace_in_element_content", 0): [ ("whitespace_in_element_content", 0)], ("whitespace_in_element_content", 1): [ ("whitespace_in_element_content", 1)], ("cdata_sections", 0): [ ("cdata_sections", 0)], ("cdata_sections", 1): [ ("cdata_sections", 1)], ("comments", 0): [ ("comments", 0)], ("comments", 1): [ ("comments", 1)], ("charset_overrides_xml_encoding", 0): [ ("charset_overrides_xml_encoding", 0)], ("charset_overrides_xml_encoding", 1): [ ("charset_overrides_xml_encoding", 1)], ("infoset", 0): [], ("infoset", 1): [ ("namespace_declarations", 0), ("validate_if_schema", 0), ("create_entity_ref_nodes", 0), ("entities", 0), ("cdata_sections", 0), ("datatype_normalization", 1), ("whitespace_in_element_content", 1), ("comments", 1), ("charset_overrides_xml_encoding", 1)], ("supported_mediatypes_only", 0): [ ("supported_mediatypes_only", 0)], ("namespaces", 0): [ ("namespaces", 0)], ("namespaces", 1): [ ("namespaces", 1)], } def getFeature(self, name): xname = _name_xform(name) try: return getattr(self._options, xname) except AttributeError: if name == "infoset": options = self._options return (options.datatype_normalization and options.whitespace_in_element_content and options.comments and options.charset_overrides_xml_encoding and not (options.namespace_declarations or options.validate_if_schema or options.create_entity_ref_nodes or options.entities or options.cdata_sections)) raise xml.dom.NotFoundErr("feature %s not known" % repr(name)) def parseURI(self, uri): if self.entityResolver: input = self.entityResolver.resolveEntity(None, uri) else: input = DOMEntityResolver().resolveEntity(None, uri) return self.parse(input) def parse(self, input): options = copy.copy(self._options) options.filter = self.filter options.errorHandler = self.errorHandler fp = input.byteStream if fp is None and options.systemId: import urllib2 fp = urllib2.urlopen(input.systemId) return self._parse_bytestream(fp, options) def parseWithContext(self, input, cnode, action): if action not in self._legal_actions: raise ValueError("not a legal action") raise NotImplementedError("Haven't written this yet...") def _parse_bytestream(self, stream, options): import xml.dom.expatbuilder builder = xml.dom.expatbuilder.makeBuilder(options) return builder.parseFile(stream) def _name_xform(name): return name.lower().replace('-', '_') class DOMEntityResolver(object): __slots__ = '_opener', def resolveEntity(self, publicId, systemId): assert systemId is not None source = DOMInputSource() source.publicId = publicId source.systemId = systemId source.byteStream = self._get_opener().open(systemId) # determine the encoding if the transport provided it source.encoding = self._guess_media_encoding(source) # determine the base URI is we can import posixpath, urlparse parts = 
urlparse.urlparse(systemId) scheme, netloc, path, params, query, fragment = parts # XXX should we check the scheme here as well? if path and not path.endswith("/"): path = posixpath.dirname(path) + "/" parts = scheme, netloc, path, params, query, fragment source.baseURI = urlparse.urlunparse(parts) return source def _get_opener(self): try: return self._opener except AttributeError: self._opener = self._create_opener() return self._opener def _create_opener(self): import urllib2 return urllib2.build_opener() def _guess_media_encoding(self, source): info = source.byteStream.info() if "Content-Type" in info: for param in info.getplist(): if param.startswith("charset="): return param.split("=", 1)[1].lower() class DOMInputSource(object): __slots__ = ('byteStream', 'characterStream', 'stringData', 'encoding', 'publicId', 'systemId', 'baseURI') def __init__(self): self.byteStream = None self.characterStream = None self.stringData = None self.encoding = None self.publicId = None self.systemId = None self.baseURI = None def _get_byteStream(self): return self.byteStream def _set_byteStream(self, byteStream): self.byteStream = byteStream def _get_characterStream(self): return self.characterStream def _set_characterStream(self, characterStream): self.characterStream = characterStream def _get_stringData(self): return self.stringData def _set_stringData(self, data): self.stringData = data def _get_encoding(self): return self.encoding def _set_encoding(self, encoding): self.encoding = encoding def _get_publicId(self): return self.publicId def _set_publicId(self, publicId): self.publicId = publicId def _get_systemId(self): return self.systemId def _set_systemId(self, systemId): self.systemId = systemId def _get_baseURI(self): return self.baseURI def _set_baseURI(self, uri): self.baseURI = uri class DOMBuilderFilter: """Element filter which can be used to tailor construction of a DOM instance. """ # There's really no need for this class; concrete implementations # should just implement the endElement() and startElement() # methods as appropriate. Using this makes it easy to only # implement one of them. FILTER_ACCEPT = 1 FILTER_REJECT = 2 FILTER_SKIP = 3 FILTER_INTERRUPT = 4 whatToShow = NodeFilter.SHOW_ALL def _get_whatToShow(self): return self.whatToShow def acceptNode(self, element): return self.FILTER_ACCEPT def startContainer(self, element): return self.FILTER_ACCEPT del NodeFilter class DocumentLS: """Mixin to create documents that conform to the load/save spec.""" async = False def _get_async(self): return False def _set_async(self, async): if async: raise xml.dom.NotSupportedErr( "asynchronous document loading is not supported") def abort(self): # What does it mean to "clear" a document? Does the # documentElement disappear? 
raise NotImplementedError( "haven't figured out what this means yet") def load(self, uri): raise NotImplementedError("haven't written this yet") def loadXML(self, source): raise NotImplementedError("haven't written this yet") def saveXML(self, snode): if snode is None: snode = self elif snode.ownerDocument is not self: raise xml.dom.WrongDocumentErr() return snode.toxml() class DOMImplementationLS: MODE_SYNCHRONOUS = 1 MODE_ASYNCHRONOUS = 2 def createDOMBuilder(self, mode, schemaType): if schemaType is not None: raise xml.dom.NotSupportedErr( "schemaType not yet supported") if mode == self.MODE_SYNCHRONOUS: return DOMBuilder() if mode == self.MODE_ASYNCHRONOUS: raise xml.dom.NotSupportedErr( "asynchronous builders are not supported") raise ValueError("unknown value for mode") def createDOMWriter(self): raise NotImplementedError( "the writer interface hasn't been written yet!") def createDOMInputSource(self): return DOMInputSource()
gpl-2.0
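A usage sketch for the DOMBuilder feature machinery above; the document URL is illustrative:

import xml.dom.xmlbuilder

builder = xml.dom.xmlbuilder.DOMBuilder()
# Feature names are normalized by _name_xform(), so hyphenated or
# mixed-case names map onto the Options attributes.
if builder.canSetFeature("entities", 0):
    builder.setFeature("entities", 0)  # per _settings, also turns off create_entity_ref_nodes
doc = builder.parseURI("http://example.com/sample.xml")  # illustrative URL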
funkring/fdoo
addons/fetchmail/res_config.py
437
5234
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class fetchmail_config_settings(osv.osv_memory): """ This wizard can be inherited in conjunction with 'res.config.settings', in order to define fields that configure a fetchmail server. It relies on the following convention on the object:: class my_config_settings(osv.osv_memory): _name = 'my.settings' _inherit = ['res.config.settings', 'fetchmail.config.settings'] _columns = { 'fetchmail_stuff': fields.boolean(..., fetchmail_model='my.stuff', fetchmail_name='Incoming Stuff'), } def configure_fetchmail_stuff(self, cr, uid, ids, context=None): return self.configure_fetchmail(cr, uid, 'fetchmail_stuff', context) and in the form view:: <field name="fetchmail_stuff"/> <button type="object" name="configure_fetchmail_stuff"/> The method ``get_default_fetchmail`` determines the value of all fields that start with 'fetchmail_'. It looks up fetchmail server configurations that match the given model name (``fetchmail_model``) and are active. The button action ``configure_fetchmail_stuff`` is caught by the object, and calls automatically the method ``configure_fetchmail``; it opens the fetchmail server configuration form for the corresponding field. 
""" _name = 'fetchmail.config.settings' def get_default_fetchmail(self, cr, uid, fields, context=None): """ determine the value of all fields like 'fetchmail_XXX' """ ir_model = self.pool.get('ir.model') fetchmail_server = self.pool.get('fetchmail.server') fetchmail_fields = [f for f in fields if f.startswith('fetchmail_')] res = {} for f in fetchmail_fields: model_name = self._columns[f].fetchmail_model model_id = ir_model.search(cr, uid, [('model', '=', model_name)])[0] server_ids = fetchmail_server.search(cr, uid, [('object_id', '=', model_id), ('state', '=', 'done')]) res[f] = bool(server_ids) return res def set_fetchmail(self, cr, uid, ids, context=None): """ deactivate fetchmail servers for all fields like 'fetchmail_XXX' that are False """ config = self.browse(cr, uid, ids[0], context) fetchmail_fields = [f for f in self._columns if f.startswith('fetchmail_')] # determine which models should not have active fetchmail servers, and # deactivate all active servers for those models models = [self._columns[f].fetchmail_model for f in fetchmail_fields if not config[f]] if models: fetchmail_server = self.pool.get('fetchmail.server') server_ids = fetchmail_server.search(cr, uid, [('object_id.model', 'in', models), ('state', '=', 'done')]) fetchmail_server.set_draft(cr, uid, server_ids, context) def configure_fetchmail(self, cr, uid, field, context=None): """ open the form view of the fetchmail.server to configure """ action = { 'type': 'ir.actions.act_window', 'res_model': 'fetchmail.server', 'view_mode': 'form', 'target': 'current', } model_name = self._columns[field].fetchmail_model model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)])[0] server_ids = self.pool.get('fetchmail.server').search(cr, uid, [('object_id', '=', model_id)]) if server_ids: action['res_id'] = server_ids[0] else: action['context'] = { 'default_name': self._columns[field].fetchmail_name, 'default_object_id': model_id, } return action def __getattr__(self, name): """ catch calls to 'configure_fetchmail_XXX' """ if name.startswith('configure_fetchmail_'): return (lambda cr, uid, ids, context=None: self.configure_fetchmail(cr, uid, name[10:], context)) return super(fetchmail_config_settings, self).__getattr__(name) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
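Following the convention spelled out in the docstring above, a hypothetical settings wizard wiring a fetchmail server to another model might look like this (the model and field names are invented for illustration):

from openerp.osv import fields, osv

class crm_config_settings(osv.osv_memory):
    _name = 'crm.settings'
    _inherit = ['res.config.settings', 'fetchmail.config.settings']
    _columns = {
        # fetchmail_model/fetchmail_name are read by get_default_fetchmail()
        # and configure_fetchmail()
        'fetchmail_lead': fields.boolean('Create leads from incoming mail',
            fetchmail_model='crm.lead', fetchmail_name='Incoming Leads'),
    }

A form button named configure_fetchmail_lead is then caught by __getattr__ and routed to configure_fetchmail(), which opens the matching fetchmail.server form.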
shubhdev/openedx
common/djangoapps/heartbeat/views.py
199
1440
from xmodule.modulestore.django import modulestore
from dogapi import dog_stats_api
from util.json_request import JsonResponse
from django.db import connection
from django.db.utils import DatabaseError
from xmodule.exceptions import HeartbeatFailure


@dog_stats_api.timed('edxapp.heartbeat')
def heartbeat(request):
    """
    Simple view that a loadbalancer can check to verify that the app is up.

    Returns a json doc of service id: status or message. If the status for any
    service is anything other than True, it returns HTTP code 503 (Service
    Unavailable); otherwise, it returns 200.
    """
    # This refactoring merely delegates to the default modulestore (which, if it's a mixed
    # modulestore, will delegate to all configured modulestores) and a quick test of sql.
    # A later refactoring may allow any service to register itself as participating in the
    # heartbeat. It's important that all implementations do as little as possible but give
    # a sound determination that they are ready.
    try:
        output = modulestore().heartbeat()
    except HeartbeatFailure as fail:
        return JsonResponse({fail.service: unicode(fail)}, status=503)

    cursor = connection.cursor()
    try:
        cursor.execute("SELECT CURRENT_DATE")
        cursor.fetchone()
        output['SQL'] = True
    except DatabaseError as fail:
        return JsonResponse({'SQL': unicode(fail)}, status=503)

    return JsonResponse(output)
agpl-3.0
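A sketch of how a monitor might poll this endpoint; the host is illustrative, and both the healthy and unhealthy branches return a JSON body:

import requests

resp = requests.get("https://lms.example.com/heartbeat")  # illustrative host
payload = resp.json()  # e.g. {"default": true, "SQL": true} when healthy
if resp.status_code == 503:
    # one service reported a failure message instead of True
    print("unhealthy:", payload)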
ryfeus/lambda-packs
Tensorflow_LightGBM_Scipy_nightly/source/scipy/__config__.py
2
1289
# This file is generated by /tmp/pip-6gjs2vkw-build/-c
# It contains system_info results at the time of building this package.
__all__ = ["get_info", "show"]

openblas_lapack_info = {'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_opt_info = {'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_mkl_info = {}
openblas_info = {'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info = {'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}

def get_info(name):
    g = globals()
    return g.get(name, g.get(name + "_info", {}))

def show():
    for name, info_dict in globals().items():
        if name[0] == "_" or type(info_dict) is not type({}):
            continue
        print(name + ":")
        if not info_dict:
            print("  NOT AVAILABLE")
        for k, v in info_dict.items():
            v = str(v)
            if k == "sources" and len(v) > 200:
                v = v[:60] + " ...\n... " + v[-60:]
            print("    %s = %s" % (k, v))
mit
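The lookup helper appends the _info suffix itself, so either name form works; a short usage sketch:

from scipy.__config__ import get_info, show

show()                        # dumps every *_info dict, or "NOT AVAILABLE" if empty
blas = get_info("blas_opt")   # resolved to the blas_opt_info dict above
print(blas.get("libraries"))  # ['openblas', 'openblas'] in this particular build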
franky88/emperioanimesta
env/Lib/site-packages/django/db/migrations/migration.py
123
8324
from __future__ import unicode_literals from django.db.transaction import atomic from django.utils.encoding import python_2_unicode_compatible from .exceptions import IrreversibleError @python_2_unicode_compatible class Migration(object): """ The base class for all migrations. Migration files will import this from django.db.migrations.Migration and subclass it as a class called Migration. It will have one or more of the following attributes: - operations: A list of Operation instances, probably from django.db.migrations.operations - dependencies: A list of tuples of (app_path, migration_name) - run_before: A list of tuples of (app_path, migration_name) - replaces: A list of migration_names Note that all migrations come out of migrations and into the Loader or Graph as instances, having been initialized with their app label and name. """ # Operations to apply during this migration, in order. operations = [] # Other migrations that should be run before this migration. # Should be a list of (app, migration_name). dependencies = [] # Other migrations that should be run after this one (i.e. have # this migration added to their dependencies). Useful to make third-party # apps' migrations run after your AUTH_USER replacement, for example. run_before = [] # Migration names in this app that this migration replaces. If this is # non-empty, this migration will only be applied if all these migrations # are not applied. replaces = [] # Is this an initial migration? Initial migrations are skipped on # --fake-initial if the table or fields already exist. If None, check if # the migration has any dependencies to determine if there are dependencies # to tell if db introspection needs to be done. If True, always perform # introspection. If False, never perform introspection. initial = None # Whether to wrap the whole migration in a transaction. Only has an effect # on database backends which support transactional DDL. atomic = True def __init__(self, name, app_label): self.name = name self.app_label = app_label # Copy dependencies & other attrs as we might mutate them at runtime self.operations = list(self.__class__.operations) self.dependencies = list(self.__class__.dependencies) self.run_before = list(self.__class__.run_before) self.replaces = list(self.__class__.replaces) def __eq__(self, other): if not isinstance(other, Migration): return False return (self.name == other.name) and (self.app_label == other.app_label) def __ne__(self, other): return not (self == other) def __repr__(self): return "<Migration %s.%s>" % (self.app_label, self.name) def __str__(self): return "%s.%s" % (self.app_label, self.name) def __hash__(self): return hash("%s.%s" % (self.app_label, self.name)) def mutate_state(self, project_state, preserve=True): """ Takes a ProjectState and returns a new one with the migration's operations applied to it. Preserves the original object state by default and will return a mutated state from a copy. """ new_state = project_state if preserve: new_state = project_state.clone() for operation in self.operations: operation.state_forwards(self.app_label, new_state) return new_state def apply(self, project_state, schema_editor, collect_sql=False): """ Takes a project_state representing all migrations prior to this one and a schema_editor for a live database and applies the migration in a forwards order. Returns the resulting project state for efficient re-use by following Migrations. 
""" for operation in self.operations: # If this operation cannot be represented as SQL, place a comment # there instead if collect_sql: schema_editor.collected_sql.append("--") if not operation.reduces_to_sql: schema_editor.collected_sql.append( "-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:" ) schema_editor.collected_sql.append("-- %s" % operation.describe()) schema_editor.collected_sql.append("--") if not operation.reduces_to_sql: continue # Save the state before the operation has run old_state = project_state.clone() operation.state_forwards(self.app_label, project_state) # Run the operation atomic_operation = operation.atomic or (self.atomic and operation.atomic is not False) if not schema_editor.atomic_migration and atomic_operation: # Force a transaction on a non-transactional-DDL backend or an # atomic operation inside a non-atomic migration. with atomic(schema_editor.connection.alias): operation.database_forwards(self.app_label, schema_editor, old_state, project_state) else: # Normal behaviour operation.database_forwards(self.app_label, schema_editor, old_state, project_state) return project_state def unapply(self, project_state, schema_editor, collect_sql=False): """ Takes a project_state representing all migrations prior to this one and a schema_editor for a live database and applies the migration in a reverse order. The backwards migration process consists of two phases: 1. The intermediate states from right before the first until right after the last operation inside this migration are preserved. 2. The operations are applied in reverse order using the states recorded in step 1. """ # Construct all the intermediate states we need for a reverse migration to_run = [] new_state = project_state # Phase 1 for operation in self.operations: # If it's irreversible, error out if not operation.reversible: raise IrreversibleError("Operation %s in %s is not reversible" % (operation, self)) # Preserve new state from previous run to not tamper the same state # over all operations new_state = new_state.clone() old_state = new_state.clone() operation.state_forwards(self.app_label, new_state) to_run.insert(0, (operation, old_state, new_state)) # Phase 2 for operation, to_state, from_state in to_run: if collect_sql: schema_editor.collected_sql.append("--") if not operation.reduces_to_sql: schema_editor.collected_sql.append( "-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:" ) schema_editor.collected_sql.append("-- %s" % operation.describe()) schema_editor.collected_sql.append("--") if not operation.reduces_to_sql: continue if not schema_editor.connection.features.can_rollback_ddl and operation.atomic: # We're forcing a transaction on a non-transactional-DDL backend with atomic(schema_editor.connection.alias): operation.database_backwards(self.app_label, schema_editor, from_state, to_state) else: # Normal behaviour operation.database_backwards(self.app_label, schema_editor, from_state, to_state) return project_state class SwappableTuple(tuple): """ Subclass of tuple so Django can tell this was originally a swappable dependency when it reads the migration file. """ def __new__(cls, value, setting): self = tuple.__new__(cls, value) self.setting = setting return self def swappable_dependency(value): """ Turns a setting value into a dependency. """ return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
gpl-3.0
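A minimal concrete subclass of the kind this base class expects migration files to define (the app and model names are illustrative):

from django.db import migrations, models

class Migration(migrations.Migration):
    initial = True     # skipped under --fake-initial if the table already exists
    dependencies = []  # list of (app_label, migration_name) tuples
    operations = [
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(primary_key=True)),
                ('title', models.CharField(max_length=100)),
            ],
        ),
    ]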
bmazin/ARCONS-pipeline
examples/Pal2014-J0337/hTestLimit.py
1
8356
#Filename: hTestLimit.py #Author: Matt Strader # #This script opens a list of observed photon phases, import numpy as np import tables import numexpr import matplotlib.pyplot as plt import multiprocessing import functools import time from kuiper.kuiper import kuiper,kuiper_FPP from kuiper.htest import h_test,h_fpp,h_test2 from pulsarUtils import nSigma,plotPulseProfile from histMetrics import kuiperFpp,hTestFpp from inverseTransformSampling import inverseTransformSampler def hTestTrial(iTrial,nPhotons,photonPulseFraction,pulseModel,pulseModelQueryPoints): np.random.seed(int((time.time()+iTrial)*1e6)) modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints) nPulsePhotons = int(np.floor(photonPulseFraction*nPhotons)) nBackgroundPhotons = int(np.ceil((1.-photonPulseFraction) * nPhotons)) simPulsePhotons = modelSampler(nPulsePhotons) #background photons come from a uniform distribution simBackgroundPhotons = np.random.random(nBackgroundPhotons) simPhases = np.append(simPulsePhotons,simBackgroundPhotons) simHDict = h_test2(simPhases) simH,simM,simPval,simFourierCoeffs = simHDict['H'],simHDict['M'],simHDict['fpp'],simHDict['cs'] print '{} - H,M,fpp,sig:'.format(iTrial),simH,simM,simPval return {'H':simH,'M':simM,'fpp':simPval} if __name__=='__main__': path = '/Scratch/dataProcessing/J0337/masterPhotons3.h5' wvlStart = 4000. wvlEnd = 5500. bLoadFromPl = True nPhaseBins = 20 hTestPath = '/Scratch/dataProcessing/J0337/hTestResults_withProfiles_{}-{}.npz'.format(wvlStart,wvlEnd) phaseBinEdges = np.linspace(0.,1.,nPhaseBins+1) if bLoadFromPl: photFile = tables.openFile(path,'r') photTable = photFile.root.photons.photTable phases = photTable.readWhere('(wvlStart < wavelength) & (wavelength < wvlEnd)')['phase'] photFile.close() print 'cut wavelengths to range ({},{})'.format(wvlStart,wvlEnd) nPhotons = len(phases) print nPhotons,'real photons read' observedProfile,_ = np.histogram(phases,bins=phaseBinEdges) observedProfile = 1.0*observedProfile observedProfileErrors = np.sqrt(observedProfile) #Do H-test hDict = h_test2(phases) H,M,pval,fourierCoeffs = hDict['H'],hDict['M'],hDict['fpp'],hDict['cs'] print 'h-test on real data' print 'H,M,fpp:',H,M,pval print nSigma(1-pval),'sigmas' #h_test2 calculates all fourierCoeffs out to 20, but for the fourier model, we only want the ones out to order M, which optimizes the Zm^2 metric truncatedFourierCoeffs = fourierCoeffs[0:M] print 'fourier coeffs:',truncatedFourierCoeffs #for the model, we want the negative modes as well as positve, so add them modelFourierCoeffs = np.concatenate([truncatedFourierCoeffs[::-1],[1.],np.conj(truncatedFourierCoeffs)]) #make array of mode numbers modes = np.arange(-len(truncatedFourierCoeffs),len(truncatedFourierCoeffs)+1) #save so next time we can set bLoadFromPl=False np.savez(hTestPath,H=H,M=M,pval=pval,fourierCoeffs=fourierCoeffs,nPhotons=nPhotons,wvlRange=(wvlStart,wvlEnd),modelFourierCoeffs=modelFourierCoeffs,modes=modes,observedProfile=observedProfile,observedProfileErrors=observedProfileErrors,phaseBinEdges=phaseBinEdges) else: #Load values from previous run, when we had bLoadFromPl=True hTestDict = np.load(hTestPath) H,M,pval,fourierCoeffs,nPhotons,modelFourierCoeffs,modes = hTestDict['H'],hTestDict['M'],hTestDict['pval'],hTestDict['fourierCoeffs'],hTestDict['nPhotons'],hTestDict['modelFourierCoeffs'],hTestDict['modes'] observedProfile,observedProfileErrors,phaseBinEdges = hTestDict['observedProfile'],hTestDict['observedProfileErrors'],hTestDict['phaseBinEdges'] print 'h-test on real data' 
print 'H,M,fpp:',H,M,pval print nSigma(1-pval),'sigmas' #Plot the observed profile fig,ax = plt.subplots(1,1) plotPulseProfile(phaseBinEdges,observedProfile,profileErrors=observedProfileErrors,color='k',plotDoublePulse=False,label='observed',ax=ax) ax.set_ylabel('counts') ax.set_xlabel('phase') ax.set_title('Observed Folded Light Curve {}-{} nm'.format(wvlStart/10.,wvlEnd/10.)) #make as set of x points for the pulse model we'll make #Do NOT include x=0, or the inverted function will have a jump that causes an excess of samples #at phase=0 nSmoothPlotPoints=1000 pulseModelQueryPoints = np.linspace(1./nSmoothPlotPoints,1,nSmoothPlotPoints) def modelProfile(thetas): return np.sum( modelFourierCoeffs * np.exp(2.j*np.pi*modes*thetas[:,np.newaxis]),axis=1) lightCurveModel = np.abs(modelProfile(pulseModelQueryPoints)) #for this test we only want the model to be the pulsed component. We will add a DC offset later pulseModel = lightCurveModel - np.min(lightCurveModel) #initialPhotonPulseFraction = 1.*np.sum(pulseModel) / np.sum(lightCurveModel) photonPulseFraction=15400./nPhotons #skip to previously determined answer print 'photon fraction',photonPulseFraction #get samples with distribution of the modelProfile #modelSampler = inverseTransformSampler(pdf=lightCurveModel,queryPoints=pulseModelQueryPoints) modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints) nTrials = 1 #for each trial run the h test on a set of photon phases with our model profile, and with the pulse fraction specified #we want to make a distribution of H values for this pulse fraction, model, and number of photons #make a function that only takes the trial number (as an identifier) mappableHTestTrial = functools.partial(hTestTrial,pulseModel=pulseModel, pulseModelQueryPoints=pulseModelQueryPoints,nPhotons=nPhotons, photonPulseFraction=photonPulseFraction) pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-3)#leave a few processors for other people outDicts = pool.map(mappableHTestTrial,np.arange(nTrials)) simHs = np.array([out['H'] for out in outDicts]) simPvals = np.array([out['fpp'] for out in outDicts]) #save the resulting list of H vals np.savez('sim3-h-{}.npz'.format(nTrials),simHs=simHs,simPvals=simPvals,pval=pval,H=H,photonPulseFraction=photonPulseFraction,nPhotons=nPhotons) #make a model profile once more for a plot modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints) nPulsePhotons = int(np.floor(photonPulseFraction*nPhotons)) nBackgroundPhotons = int(np.ceil((1.-photonPulseFraction) * nPhotons)) simPulsePhotons = modelSampler(nPulsePhotons) #background photons come from a uniform distribution simBackgroundPhotons = np.random.random(nBackgroundPhotons) #put them together for the full profile simPhases = np.append(simPulsePhotons,simBackgroundPhotons) #make a binned phase profile to plot simProfile,_ = np.histogram(simPhases,bins=phaseBinEdges) simProfileErrors = np.sqrt(simProfile)#assume Poisson errors meanLevel = np.mean(simProfile) fig,ax = plt.subplots(1,1) ax.plot(pulseModelQueryPoints,meanLevel*lightCurveModel,color='r') plotPulseProfile(phaseBinEdges,simProfile,profileErrors=simProfileErrors,color='b',plotDoublePulse=False,label='sim',ax=ax) ax.set_title('Simulated profile') # #plt.show() print '{} trials'.format(len(simHs)) print 'observed fpp:',pval frac = 1.*np.sum(simPvals<pval)/len(simPvals) print 'fraction of trials with H below observed fpp:',frac #hHist,hBinEdges = np.histogram(simHs,bins=100,density=True) 
fppHist,fppBinEdges = np.histogram(simPvals,bins=100,density=True) if nTrials > 1: fig,ax = plt.subplots(1,1) ax.plot(fppBinEdges[0:-1],fppHist,drawstyle='steps-post',color='k') ax.axvline(pval,color='r') ax.set_xlabel('fpp') ax.set_ylabel('frequency') ax.set_title('Distribution of H for model profile') magG = 17.93 sineMagDiff = -2.5*np.log10(photonPulseFraction) print 'SDSS magnitude g: {:.2f}'.format(magG) print 'magnitude difference: {:.2f}'.format(sineMagDiff) print 'limiting g mag: {:.2f}'.format(magG+sineMagDiff) plt.show()
gpl-2.0
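The script leans on inverseTransformSampler, which is not included in this record; a minimal numpy sketch of the same idea follows (the raised-cosine profile is an illustrative stand-in for the fitted pulse model):

import numpy as np

def inverse_transform_sampler(pdf, query_points):
    # Build the empirical CDF on the query grid, then invert it by interpolation.
    cdf = np.cumsum(pdf)
    cdf /= cdf[-1]
    def sampler(n):
        u = np.random.random(n)  # uniform draws in [0, 1)
        return np.interp(u, cdf, query_points)
    return sampler

grid = np.linspace(1e-3, 1.0, 1000)                  # avoid x=0, as the script's comments warn
profile = 1.0 + np.cos(2.0 * np.pi * (grid - 0.5))   # illustrative pulse shape
phases = inverse_transform_sampler(profile, grid)(1000)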
alanquillin/ryu
ryu/contrib/tinyrpc/transports/__init__.py
43
1789
#!/usr/bin/env python
# -*- coding: utf-8 -*-


class ServerTransport(object):
    """Base class for all server transports."""

    def receive_message(self):
        """Receive a message from the transport.

        Blocks until another message has been received. May return a context
        opaque to clients that should be passed on
        :py:func:`~tinyrpc.transport.Transport.send_reply` to identify the
        client later on.

        :return: A tuple consisting of ``(context, message)``.
        """
        raise NotImplementedError()

    def send_reply(self, context, reply):
        """Sends a reply to a client.

        The client is usually identified by passing ``context`` as returned
        from the original
        :py:func:`~tinyrpc.transport.Transport.receive_message` call.

        Messages must be strings; it is up to the sender to convert them
        beforehand. A non-string value raises a :py:exc:`TypeError`.

        :param context: A context returned by
            :py:func:`~tinyrpc.transport.Transport.receive_message`.
        :param reply: A string to send back as the reply.
        """
        raise NotImplementedError


class ClientTransport(object):
    """Base class for all client transports."""

    def send_message(self, message, expect_reply=True):
        """Send a message to the server and possibly receive a reply.

        Sends a message to the connected server.

        Messages must be strings; it is up to the sender to convert them
        beforehand. A non-string value raises a :py:exc:`TypeError`.

        This function will block until one reply has been received.

        :param message: A string to send.
        :return: A string containing the server reply.
        """
        raise NotImplementedError
apache-2.0
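A toy in-memory ServerTransport subclass, just to show the contract; this queue-backed class is a sketch and not part of tinyrpc itself:

try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2, matching the library's vintage

class QueueServerTransport(ServerTransport):
    def __init__(self):
        self.incoming = queue.Queue()  # holds (context, message) tuples
        self.replies = {}

    def receive_message(self):
        return self.incoming.get()     # blocks, as the base class requires

    def send_reply(self, context, reply):
        if not isinstance(reply, str):
            raise TypeError('reply must be a string')
        self.replies[context] = reply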
ImmobilienScout24/moto
tests/test_kms/test_kms.py
9
11046
from __future__ import unicode_literals
import re

import boto.kms
from boto.exception import JSONResponseError
from boto.kms.exceptions import AlreadyExistsException, NotFoundException
import sure  # noqa
from moto import mock_kms
from nose.tools import assert_raises


@mock_kms
def test_create_key():
    conn = boto.kms.connect_to_region("us-west-2")

    key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT')

    key['KeyMetadata']['Description'].should.equal("my key")
    key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT")
    key['KeyMetadata']['Enabled'].should.equal(True)


@mock_kms
def test_describe_key():
    conn = boto.kms.connect_to_region("us-west-2")
    key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT')
    key_id = key['KeyMetadata']['KeyId']

    key = conn.describe_key(key_id)

    key['KeyMetadata']['Description'].should.equal("my key")
    key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT")


@mock_kms
def test_describe_missing_key():
    conn = boto.kms.connect_to_region("us-west-2")
    conn.describe_key.when.called_with("not-a-key").should.throw(JSONResponseError)


@mock_kms
def test_list_keys():
    conn = boto.kms.connect_to_region("us-west-2")

    conn.create_key(policy="my policy", description="my key1", key_usage='ENCRYPT_DECRYPT')
    conn.create_key(policy="my policy", description="my key2", key_usage='ENCRYPT_DECRYPT')

    keys = conn.list_keys()
    keys['Keys'].should.have.length_of(2)


@mock_kms
def test__create_alias__returns_none_if_correct():
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']

    resp = kms.create_alias('alias/my-alias', key_id)

    resp.should.be.none


@mock_kms
def test__create_alias__raises_if_reserved_alias():
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']

    reserved_aliases = [
        'alias/aws/ebs',
        'alias/aws/s3',
        'alias/aws/redshift',
        'alias/aws/rds',
    ]

    for alias_name in reserved_aliases:
        with assert_raises(JSONResponseError) as err:
            kms.create_alias(alias_name, key_id)

        ex = err.exception
        ex.error_message.should.be.none
        ex.error_code.should.equal('NotAuthorizedException')
        ex.body.should.equal({'__type': 'NotAuthorizedException'})
        ex.reason.should.equal('Bad Request')
        ex.status.should.equal(400)


@mock_kms
def test__create_alias__can_create_multiple_aliases_for_same_key_id():
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']

    kms.create_alias('alias/my-alias3', key_id).should.be.none
    kms.create_alias('alias/my-alias4', key_id).should.be.none
    kms.create_alias('alias/my-alias5', key_id).should.be.none


@mock_kms
def test__create_alias__raises_if_wrong_prefix():
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']

    with assert_raises(JSONResponseError) as err:
        kms.create_alias('wrongprefix/my-alias', key_id)

    ex = err.exception
    ex.error_message.should.equal('Invalid identifier')
    ex.error_code.should.equal('ValidationException')
    ex.body.should.equal({'message': 'Invalid identifier', '__type': 'ValidationException'})
    ex.reason.should.equal('Bad Request')
    ex.status.should.equal(400)


@mock_kms
def test__create_alias__raises_if_duplicate():
    region = 'us-west-2'
    kms = boto.kms.connect_to_region(region)
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']
    alias = 'alias/my-alias'

    kms.create_alias(alias, key_id)

    with assert_raises(AlreadyExistsException) as err:
        kms.create_alias(alias, key_id)

    ex = err.exception
    ex.error_message.should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists'
                                  .format(**locals()))
    ex.error_code.should.be.none
    ex.box_usage.should.be.none
    ex.request_id.should.be.none
    ex.body['message'].should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists'
                                    .format(**locals()))
    ex.body['__type'].should.equal('AlreadyExistsException')
    ex.reason.should.equal('Bad Request')
    ex.status.should.equal(400)


@mock_kms
def test__create_alias__raises_if_alias_has_restricted_characters():
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']

    alias_names_with_restricted_characters = [
        'alias/my-alias!',
        'alias/my-alias$',
        'alias/my-alias@',
    ]

    for alias_name in alias_names_with_restricted_characters:
        with assert_raises(JSONResponseError) as err:
            kms.create_alias(alias_name, key_id)
        ex = err.exception
        ex.body['__type'].should.equal('ValidationException')
        ex.body['message'].should.equal("1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals()))
        ex.error_code.should.equal('ValidationException')
        ex.message.should.equal("1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals()))
        ex.reason.should.equal('Bad Request')
        ex.status.should.equal(400)


@mock_kms
def test__create_alias__raises_if_alias_has_colon_character():
    # For some reason, colons are not accepted for an alias,
    # even though they are accepted by regex ^[a-zA-Z0-9:/_-]+$
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']

    alias_names_with_restricted_characters = [
        'alias/my:alias',
    ]

    for alias_name in alias_names_with_restricted_characters:
        with assert_raises(JSONResponseError) as err:
            kms.create_alias(alias_name, key_id)
        ex = err.exception
        ex.body['__type'].should.equal('ValidationException')
        ex.body['message'].should.equal("{alias_name} contains invalid characters for an alias".format(**locals()))
        ex.error_code.should.equal('ValidationException')
        ex.message.should.equal("{alias_name} contains invalid characters for an alias".format(**locals()))
        ex.reason.should.equal('Bad Request')
        ex.status.should.equal(400)


@mock_kms
def test__create_alias__accepted_characters():
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']

    alias_names_with_accepted_characters = [
        'alias/my-alias_/',
        'alias/my_alias-/',
    ]

    for alias_name in alias_names_with_accepted_characters:
        kms.create_alias(alias_name, key_id)


@mock_kms
def test__create_alias__raises_if_target_key_id_is_existing_alias():
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']
    alias = 'alias/my-alias'

    kms.create_alias(alias, key_id)

    with assert_raises(JSONResponseError) as err:
        kms.create_alias(alias, alias)

    ex = err.exception
    ex.body['__type'].should.equal('ValidationException')
    ex.body['message'].should.equal('Aliases must refer to keys. Not aliases')
    ex.error_code.should.equal('ValidationException')
    ex.message.should.equal('Aliases must refer to keys. Not aliases')
    ex.reason.should.equal('Bad Request')
    ex.status.should.equal(400)


@mock_kms
def test__delete_alias():
    kms = boto.connect_kms()
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']
    alias = 'alias/my-alias'

    kms.create_alias(alias, key_id)

    resp = kms.delete_alias(alias)

    resp.should.be.none

    # we can create the alias again, since it has been deleted
    kms.create_alias(alias, key_id)


@mock_kms
def test__delete_alias__raises_if_wrong_prefix():
    kms = boto.connect_kms()

    with assert_raises(JSONResponseError) as err:
        kms.delete_alias('wrongprefix/my-alias')

    ex = err.exception
    ex.body['__type'].should.equal('ValidationException')
    ex.body['message'].should.equal('Invalid identifier')
    ex.error_code.should.equal('ValidationException')
    ex.message.should.equal('Invalid identifier')
    ex.reason.should.equal('Bad Request')
    ex.status.should.equal(400)


@mock_kms
def test__delete_alias__raises_if_alias_is_not_found():
    region = 'us-west-2'
    kms = boto.kms.connect_to_region(region)
    alias_name = 'alias/unexisting-alias'

    with assert_raises(NotFoundException) as err:
        kms.delete_alias(alias_name)

    ex = err.exception
    ex.body['__type'].should.equal('NotFoundException')
    ex.body['message'].should.match(r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals()))
    ex.box_usage.should.be.none
    ex.error_code.should.be.none
    ex.message.should.match(r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals()))
    ex.reason.should.equal('Bad Request')
    ex.request_id.should.be.none
    ex.status.should.equal(400)


@mock_kms
def test__list_aliases():
    region = "eu-west-1"
    kms = boto.kms.connect_to_region(region)
    create_resp = kms.create_key()
    key_id = create_resp['KeyMetadata']['KeyId']
    kms.create_alias('alias/my-alias1', key_id)
    kms.create_alias('alias/my-alias2', key_id)
    kms.create_alias('alias/my-alias3', key_id)

    resp = kms.list_aliases()

    resp['Truncated'].should.be.false

    aliases = resp['Aliases']

    def has_correct_arn(alias_obj):
        alias_name = alias_obj['AliasName']
        alias_arn = alias_obj['AliasArn']
        return re.match(r'arn:aws:kms:{region}:\d{{12}}:{alias_name}'.format(region=region, alias_name=alias_name),
                        alias_arn)

    len([alias for alias in aliases if has_correct_arn(alias) and 'alias/aws/ebs' == alias['AliasName']]).should.equal(1)
    len([alias for alias in aliases if has_correct_arn(alias) and 'alias/aws/rds' == alias['AliasName']]).should.equal(1)
    len([alias for alias in aliases if has_correct_arn(alias) and 'alias/aws/redshift' == alias['AliasName']]).should.equal(1)
    len([alias for alias in aliases if has_correct_arn(alias) and 'alias/aws/s3' == alias['AliasName']]).should.equal(1)
    len([alias for alias in aliases if has_correct_arn(alias) and 'alias/my-alias1' == alias['AliasName']]).should.equal(1)
    len([alias for alias in aliases if has_correct_arn(alias) and 'alias/my-alias2' == alias['AliasName']]).should.equal(1)
    len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == alias['TargetKeyId']]).should.equal(3)

    len(aliases).should.equal(7)
apache-2.0
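# A minimal sketch (not part of the dataset record above): the tests use
# moto's @mock_kms as a decorator, but the same mock also works as a
# context manager, assuming the `moto` and `boto` packages are installed.
# Illustrative only; no real AWS credentials or calls are involved.
import boto.kms
from moto import mock_kms

with mock_kms():
    conn = boto.kms.connect_to_region("us-west-2")
    key = conn.create_key(policy="my policy", description="demo key",
                          key_usage='ENCRYPT_DECRYPT')
    print(key['KeyMetadata']['KeyId'])  # a fake key id served from memory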
DESHRAJ/fjord
vendor/packages/translate-toolkit/translate/misc/multistring.py
29
3583
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

"""Supports a hybrid Unicode string that can also have a list of alternate
strings in the strings attribute"""

from translate.misc import autoencode


class multistring(autoencode.autoencode):

    def __new__(newtype, string=u"", encoding=None, errors=None):
        if isinstance(string, list):
            if not string:
                raise ValueError("multistring must contain at least one string")
            mainstring = string[0]
            newstring = multistring.__new__(newtype, string[0], encoding, errors)
            newstring.strings = [newstring] + [autoencode.autoencode.__new__(autoencode.autoencode, altstring, encoding, errors) for altstring in string[1:]]
        else:
            newstring = autoencode.autoencode.__new__(newtype, string, encoding, errors)
            newstring.strings = [newstring]
        return newstring

    def __init__(self, *args, **kwargs):
        super(multistring, self).__init__()
        if not hasattr(self, "strings"):
            self.strings = []

    def __cmp__(self, otherstring):
        if isinstance(otherstring, multistring):
            parentcompare = cmp(autoencode.autoencode(self), otherstring)
            if parentcompare:
                return parentcompare
            else:
                return cmp(self.strings[1:], otherstring.strings[1:])
        elif isinstance(otherstring, autoencode.autoencode):
            return cmp(autoencode.autoencode(self), otherstring)
        elif isinstance(otherstring, unicode):
            return cmp(unicode(self), otherstring)
        elif isinstance(otherstring, str):
            return cmp(str(self), otherstring)
        elif isinstance(otherstring, list) and otherstring:
            return cmp(self, multistring(otherstring))
        else:
            return cmp(type(self), type(otherstring))

    def __ne__(self, otherstring):
        return self.__cmp__(otherstring) != 0

    def __eq__(self, otherstring):
        return self.__cmp__(otherstring) == 0

    def __repr__(self):
        parts = [autoencode.autoencode.__repr__(self)] + \
                [repr(a) for a in self.strings[1:]]
        return "multistring([" + ",".join(parts) + "])"

    def replace(self, old, new, count=None):
        if count is None:
            newstr = multistring(super(multistring, self) \
                                 .replace(old, new), self.encoding)
        else:
            newstr = multistring(super(multistring, self) \
                                 .replace(old, new, count), self.encoding)
        for s in self.strings[1:]:
            if count is None:
                newstr.strings.append(s.replace(old, new))
            else:
                newstr.strings.append(s.replace(old, new, count))
        return newstr
bsd-3-clause
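# A minimal usage sketch for the multistring class above, assuming the
# Translate Toolkit is importable (Python 2, matching the source). The
# first list element becomes the "main" value; alternates live in .strings.
from translate.misc.multistring import multistring

ms = multistring([u"cow", u"cattle"])
print(ms)          # behaves like the plain unicode string u"cow"
print(ms.strings)  # main string plus alternates, per __new__ above
# replace() maps over the alternates too, returning a new multistring:
print(ms.replace(u"cow", u"bull").strings)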
Saevon/webdnd
shared/utils/debug_toolbars.py
1
1502
import django
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel

import sys


class VersionDebugPanel(DebugPanel):
    '''
    Panel that displays the Django version.
    '''
    name = 'Version'
    has_content = True

    def nav_title(self):
        return _('Versions')

    def nav_subtitle(self):
        return 'Django %s' % django.get_version()

    def url(self):
        return ''

    def title(self):
        return _('Versions')

    def content(self):
        versions = {}
        versions['Web D&D'] = settings.VERSION
        versions['Syncrae'] = settings.SYNCRAE_VERSION

        context = self.context.copy()
        context.update({
            'versions': versions,
            'paths': sys.path,
        })

        return render_to_string('debug_toolbar/panels/versions.html', context)


class SyncraeSpyDebugPanel(DebugPanel):
    '''
    Panel that shows Syncrae Messages
    '''
    name = 'Syncrae'
    has_content = True

    def nav_title(self):
        return _('Syncrae')

    def nav_subtitle(self):
        return ''

    def url(self):
        return ''

    def title(self):
        return _('Syncrae')

    def content(self):
        return render_to_string('debug_syncrae.html', self.context)


class DividerDebugPanel(DebugPanel):
    name = 'Divider'
    has_content = False

    def nav_title(self):
        return ' '
mit
stefco/geco_data
geco_irig_plot.py
1
5662
#!/usr/bin/env python
# (c) Stefan Countryman, 2016-2017

DESC="""Plot an IRIG-B signal read from stdin. Assumes that the timeseries is
a sequence of newline-delimited float literals."""
FAST_CHANNEL_BITRATE = 16384  # for IRIG-B, DuoTone, etc.

# THE REST OF THE IMPORTS ARE AFTER THIS IF STATEMENT.
# Quits immediately on --help or -h flags to skip slow imports when you just
# want to read the help documentation.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description=DESC)
    # TODO: make this -i and --ifo instead of detector.
    parser.add_argument("--detector",
                        help=("the detector; used in the title of the output "
                              "plot"))
    parser.add_argument("-O", "--outfile",
                        help="the filename of the generated plot")
    parser.add_argument("-T", "--timeseries",
                        help="copy from stdin to stdout while reading",
                        action="store_true")
    parser.add_argument("-A", "--actualtime",
                        help=("actual time signal was recorded "
                              "(appears in title)"))
    args = parser.parse_args()

# Force matplotlib to not use any Xwindows backend. NECESSARY ON CLUSTER.
import matplotlib
matplotlib.use('Agg')
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import geco_irig_decode


def read_timeseries_stdin(num_lines, cat_to_stdout=False):
    """Read in newline-delimited numerical data from stdin; don't read more
    than a second worth of data. If cat_to_stdout is True, print data that
    has been read in back to stdout (useful for piped commands)."""
    timeseries = np.zeros(num_lines)
    line = ""
    i = 0
    while i < num_lines:
        line = float(sys.stdin.readline())
        timeseries[i] = line
        if cat_to_stdout:
            print(line)
        i += 1
    return timeseries


def irigb_decoded_title(timeseries, IFO=None, actual_time=None):
    """Get a title for an IRIG-B timeseries plot that includes the decoded
    time in the timeseries itself."""
    # get the detector name
    if IFO is None:
        detector_suffix = ""
    else:
        detector_suffix = " at " + IFO
    # get the actual time of recording, if provided
    if actual_time is None:
        actual_time_str = ""
    else:
        actual_time_str = "\nActual Time: {}".format(actual_time)
    # add title and so on
    try:
        decoded_time = geco_irig_decode.get_date_from_timeseries(timeseries)
        decoded_time_str = decoded_time.strftime('%a %b %d %X %Y')
    except ValueError as e:
        decoded_time_str = "COULD NOT DECODE TIME"
    fmt = "One Second of IRIG-B Signal{}\nDecoded Time: {}{}"
    return fmt.format(detector_suffix, decoded_time_str, actual_time_str)


def irigb_output_filename(outfile=None):
    """Get the output filename for an IRIG-B plot."""
    if outfile is None:
        output_filename = "irigb-plot-made-at-" + str(time.time()) + ".png"
    else:
        output_filename = outfile
    # append .png if not already there
    if output_filename.split(".")[-1] != "png":
        output_filename += ".png"
    return output_filename


def plot_with_zoomed_views(timeseries, title, num_subdivs=5, dt=1.,
                           output_filename=None, overlay=False, linewidth=1):
    """Plot a timeseries and produce num_subdivs subplots that show
    equal-sized subdivisions of the full timeseries data to show details
    (good for high-bitrate timeseries).

    If you want to keep plotting data to the same figure, set 'overlay=True',
    and the current figure will be plotted to."""
    bitrate = int(len(timeseries) / float(dt))
    times = np.linspace(0, 1, num=bitrate, endpoint=False)
    # find max and min values in timeseries; use these to set plot boundaries
    yrange = timeseries.max() - timeseries.min()
    ymax = timeseries.max() + 0.1*yrange
    ymin = timeseries.min() - 0.1*yrange
    if not overlay:
        plt.figure()
    # print("making plot")
    plt.gcf().set_figwidth(7)
    plt.gcf().set_figheight(4+1.2*num_subdivs)  # ~1.2in height per zoomed plot
    # plot the full second on the first row; lines should be black ('k' option).
    plt.subplot(num_subdivs + 1, 1, 1)
    plt.ylim(ymin, ymax)
    plt.plot(times, timeseries, 'k', linewidth=linewidth)
    plt.tick_params(axis='y', labelsize='small')
    # make num_subdivs subplots to better show the full second
    for i in range(num_subdivs):
        # print("making plot " + str(i))
        plt.subplot(num_subdivs+1, 1, i+2)
        plt.ylim(ymin, ymax)
        plt.xlim(float(i)/num_subdivs, (float(i)+1)/num_subdivs)
        start = bitrate*i // num_subdivs
        end = bitrate*(i+1) // num_subdivs
        plt.plot(times[start:end], timeseries[start:end], 'k',
                 linewidth=linewidth)
        plt.tick_params(axis='y', labelsize='small')
    plt.suptitle(title)
    plt.xlabel("Time since start of second [$s$]")
    # print("saving plot")
    plt.subplots_adjust(left=0.125, right=0.9, bottom=0.1, top=0.9,
                        wspace=0.2, hspace=0.5)
    if not (output_filename is None):
        plt.savefig(output_filename)
    return plt


if __name__ == '__main__':
    timeseries = read_timeseries_stdin(FAST_CHANNEL_BITRATE,
                                       cat_to_stdout=args.timeseries)
    title = irigb_decoded_title(timeseries, args.detector, args.actualtime)
    output_filename = irigb_output_filename(args.outfile)
    plot_with_zoomed_views(timeseries, title, num_subdivs=5, dt=1.,
                           output_filename=output_filename)
mit
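# A quick way to exercise plot_with_zoomed_views() above without a real
# IRIG-B stream: feed it one second of synthetic data. A sketch only; it
# assumes the script is importable as geco_irig_plot and that its
# geco_irig_decode dependency is on the path (only needed for decoding).
import numpy as np
import geco_irig_plot

t = np.linspace(0, 1, geco_irig_plot.FAST_CHANNEL_BITRATE, endpoint=False)
fake_signal = np.sin(2 * np.pi * 100 * t)  # a 100 Hz tone instead of IRIG-B
geco_irig_plot.plot_with_zoomed_views(fake_signal, "synthetic demo",
                                      output_filename="demo.png")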
SnappleCap/oh-mainline
vendor/packages/twisted/doc/conch/examples/sshsimpleserver.py
18
3772
#!/usr/bin/env python

# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.cred import portal, checkers
from twisted.conch import error, avatar
from twisted.conch.checkers import SSHPublicKeyDatabase
from twisted.conch.ssh import factory, userauth, connection, keys, session
from twisted.internet import reactor, protocol, defer
from twisted.python import log
from zope.interface import implements
import sys
log.startLogging(sys.stderr)

"""
Example of running another protocol over an SSH channel.
log in with username "user" and password "password".
"""

class ExampleAvatar(avatar.ConchUser):

    def __init__(self, username):
        avatar.ConchUser.__init__(self)
        self.username = username
        self.channelLookup.update({'session':session.SSHSession})

class ExampleRealm:
    implements(portal.IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        return interfaces[0], ExampleAvatar(avatarId), lambda: None

class EchoProtocol(protocol.Protocol):
    """this is our example protocol that we will run over SSH
    """
    def dataReceived(self, data):
        if data == '\r':
            data = '\r\n'
        elif data == '\x03': #^C
            self.transport.loseConnection()
            return
        self.transport.write(data)

publicKey = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJSkbh/C+BR3utDS555mV'

privateKey = """-----BEGIN RSA PRIVATE KEY-----
MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW
4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw
vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb
Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1
xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8
PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2
gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu
DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML
pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP
EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg==
-----END RSA PRIVATE KEY-----"""

class InMemoryPublicKeyChecker(SSHPublicKeyDatabase):

    def checkKey(self, credentials):
        return credentials.username == 'user' and \
            keys.Key.fromString(data=publicKey).blob() == credentials.blob

class ExampleSession:

    def __init__(self, avatar):
        """
        We don't use it, but the adapter is passed the avatar as its first
        argument.
        """

    def getPty(self, term, windowSize, attrs):
        pass

    def execCommand(self, proto, cmd):
        raise Exception("no executing commands")

    def openShell(self, trans):
        ep = EchoProtocol()
        ep.makeConnection(trans)
        trans.makeConnection(session.wrapProtocol(ep))

    def eofReceived(self):
        pass

    def closed(self):
        pass

from twisted.python import components
components.registerAdapter(ExampleSession, ExampleAvatar, session.ISession)

class ExampleFactory(factory.SSHFactory):
    publicKeys = {
        'ssh-rsa': keys.Key.fromString(data=publicKey)
    }
    privateKeys = {
        'ssh-rsa': keys.Key.fromString(data=privateKey)
    }
    services = {
        'ssh-userauth': userauth.SSHUserAuthServer,
        'ssh-connection': connection.SSHConnection
    }

portal = portal.Portal(ExampleRealm())
passwdDB = checkers.InMemoryUsernamePasswordDatabaseDontUse()
passwdDB.addUser('user', 'password')
portal.registerChecker(passwdDB)
portal.registerChecker(InMemoryPublicKeyChecker())
ExampleFactory.portal = portal

if __name__ == '__main__':
    reactor.listenTCP(5022, ExampleFactory())
    reactor.run()
agpl-3.0
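# A hypothetical client for the echo server above (listening on port 5022),
# sketched with paramiko rather than Twisted; paramiko is an assumption on
# my part, not something the example ships with. It logs in with the
# hard-coded "user"/"password" account and round-trips one line.
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('localhost', port=5022, username='user', password='password')
channel = client.invoke_shell()      # triggers ExampleSession.openShell()
channel.send('hello\r')              # the EchoProtocol writes it straight back
print(channel.recv(1024))
client.close()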
TNick/pyl2extra
pyl2extra/datasets/images.py
1
13590
""" Dataset for images and related functionality. This module does not have dependencies inside pyl2extra package, so you can just copy-paste it inside your source tree. To use this dataset prepare a .csv file with targets (integers or real numbers) on first column and file paths on the second column: .. code:: 0,file1.png 1,file2.png Image file paths are relative to current directory (``os.getcwd()``). The images need not be square and can be in any format recognized by the ``Image`` module. Internally, the images are converted to RGB and are made square for you. Use it in a .yaml file like so: .. code:: dataset: &trndataset !obj:pyl2extra.datasets.images.Images { source: 'train.csv', image_size: 128 } The ``image_size`` can be skipped, in which case the size of the images is derived from first image that is provided. By default the class assumes a classification problem (targets are integers). If you need to uset it in a regression problem create it like so: .. code:: dataset: &trndataset !obj:pyl2extra.datasets.images.Images { source: 'train.csv', image_size: 128, regression: True } As the dataset simply wraps the ``DenseDesignMatrix``, parameters like ``rng`` (random number generator), ``preprocessor`` and ``fit_preprocessor`` can be used and will be passed to ``DenseDesignMatrix`` superclass. """ __authors__ = "Nicu Tofan" __copyright__ = "Copyright 2015, Nicu Tofan" __credits__ = ["Nicu Tofan"] __license__ = "3-clause BSD" __maintainer__ = "Nicu Tofan" __email__ = "nicu.tofan@gmail.com" import csv import numpy import os from PIL import Image from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix import theano class Images(DenseDesignMatrix): """ A pylearn2 dataset that loads the images from a list or csv file. Please note that - if you use this dataset and your model has a final Softmax layer you should construct it like so (YAML syntax): .. code:: !obj:pylearn2.models.mlp.Softmax { layer_name: 'y', irange: .0, n_classes: %(classes)d, binary_target_dim: 1 } where ``classes`` is the same number of classes passed to ``Images`` constructor. ``binary_target_dim`` is important and failing to set it constructs the wrong architecture, causing errors like: ValueError: VectorSpace(dim=1, dtype=float32) with total dimension 1 can't format a batch into VectorSpace(dim=X, dtype=float32) because its total dimension is X. Parameters ---------- source: OrderedDict, dict, str, tuple, list This argument provides the input images and (optionally) associated categories. The meaning of the argument depends on the data type: - if ``source`` is a string, it is interpreted to be the path towards a csv file; the file must NOT have a header, first column must contain the targets (classes or values) and second column must contain the paths for the image files; - if ``source`` is a dictionary, the keys must be the paths for image files, ``Image`` instances or numpy arrays and the values must be the classes or values (None or empty string if this instance does not provide the labels); - a tuple or list must have exactly one or two members: first one must be a list or tuple of image paths or Images or numpy arrays, while second one (optional) has the targets (classes as integers or real values). image_size: int, optional The size of the images in the final dataset. All images will be resized to be ``image_size`` x ``image_size`` pixels. 
classes: int, optional If this is a classification problem the parameter should be used to indicate the total number of classes and targets are expected to be integers in the range ``[0; classes-1]``. If this is a regression problem the parameter should be ``None`` and targets are expected to be real numbers. rng: object, optional A random number generator used for picking random \ indices into the design matrix when choosing minibatches. preprocessor: Preprocessor, optional Preprocessor to apply to images. fit_preprocessor: bool, optional Whether preprocessor can fit parameters when applied to training data. """ def __init__(self, source, image_size=None, classes=None, rng=None, preprocessor=None, fit_preprocessor=False): #: preserve original argument for future reference self.source = source #: Number of classes (None for regression) self.classes = classes # all images are loaded in ``ind`` variable ind = _init_input(source) # DenseDesignMatrix expects us to provide a numpy array # we choose to have number of examples on first axis ('b'), # then rows and columns of the image, then the channels # always 3 in our case self.axes = ('b', 0, 1, 'c') if image_size is None: dense_x = None else: dense_x = numpy.zeros(shape=(len(ind), image_size, image_size, 3), dtype='uint8') categories = [] has_targets = False for i, (img, ctg) in enumerate(ind): if isinstance(img, Image.Image): img = numpy.array(img) width = img.shape[1] height = img.shape[0] largest = max(width, height) if image_size is None: # if the user did not specify an image size we determine # the size using the first image that we encounter; this is # usefull if all images are already of required size, # for example image_size = largest dense_x = numpy.zeros(shape=(len(ind), image_size, image_size, 3), dtype='uint8') imgin = img # do we need to enlarge / shrink the image? elif largest != image_size: wpercent = image_size / float(largest) width = int(width * wpercent) height = int(height * wpercent) largest = max(width, height) # inefficient? could use scipy.ndimage.zoom. img_tmp = Image.fromarray(img) img_tmp = img_tmp.resize((width, height), Image.ANTIALIAS) imgin = numpy.array(img_tmp) else: imgin = img delta_x = (largest - width) / 2 delta_y = (largest - height) / 2 delta_x2 = delta_x + width delta_y2 = delta_y + height #print delta_x, delta_y, delta_x2, delta_y2, width, height dense_x[i, delta_y:delta_y2, delta_x:delta_x2, :] = imgin categories.append(ctg) if ctg != '': has_targets = True dense_x = numpy.cast[theano.config.floatX](dense_x) # if we have categories / values convert them to proper format if has_targets: if classes is None: # in regression we expect real values dense_y = numpy.empty(shape=(len(ind), 1), dtype=theano.config.floatX) for i, ctg in enumerate(categories): dense_y[i, 0] = float(ctg) else: # in classification we expect integers dense_y = numpy.empty(shape=(len(ind), 1), dtype=int) for i, ctg in enumerate(categories): dense_y[i, 0] = int(ctg) else: dense_y = None if rng is None: rng = DenseDesignMatrix._default_seed # everything else is handled by the DenseDesignMatrix superclass super(Images, self).__init__(topo_view=dense_x, y=dense_y, axes=self.axes, view_converter=None, preprocessor=preprocessor, fit_preprocessor=fit_preprocessor, X_labels=None, y_labels=classes if has_targets else None) def _init_input(source): """ Homogenize sources. 
""" if isinstance(source, basestring): # this is a csv file that we're going to read result = _load_list(_load_csv(source)) elif isinstance(source, dict): # keys are file names, values are classes result = _load_list(source.items()) elif isinstance(source, (list, tuple)): # one item lists the files, the other lists the classes if len(source) == 1: result = _load_list([(src, None) for src in source[0]]) elif len(source) == 2: if len(source[0]) == len(source[1]): result = _load_list(zip(source[0], source[1])) else: raise ValueError("Lists/tuples provded to Images class " "constructor are expected to have " "same length (%d != %d)" % (len(source[0]), len(source[1]))) else: raise ValueError("Lists/tuples provided to Images class " "constructor are expected to have one " "(images only) or two members (images" " and classes); the input has %d members." % len(source)) else: raise ValueError("Images class expects for its `source` argument " "a file path (string), a dictionary of " "file:class pairs, or a pair of lists (tuples); " "%s is not supported" % str(source.__class__)) return result def _load_csv(csv_path): """ Internal function for loading the content from a .csv file. Parameters ---------- csv_path: str The path towards the .csv file to read. Returns ------- result: list of tuples The method creates a list of tuples that should be passed to `_load_list()`. """ # we're going to accumulate files and categories here result = [] # compute absolute path of the source csv file csv_path = os.path.abspath(csv_path) with open(csv_path, 'rt') as fhand: # the reader is flexible, allowing delimiters # other than comma; quotation can also be customized csvr = csv.reader(fhand, delimiter=',', quotechar='"') # the reader will give us a list for each row of # the source file for row in csvr: # we're going to skip empty rows without warning if len(row) == 0: continue # we could skip the header here, if present; we # could even detect the column index from its # name; but we try to keep the things simple # class/value is always first, file path second result.append((row[1], row[0])) return result def _load_list(srclist): """ Internal function for loading the content from a list. Image files are converted to `numpy.ndarray`; empty classes are normalized to a string of lenghth 0. Parameters ---------- srclist: list of tuples A list of tuples, with first entry in tuple being a string, an Image or `numpy.ndarray` instances and second being classes (None for no class). Returns ------- result: list of tuples The method creates a list of tuples, with first entry in tuple being `numpy.ndarray` instances and second being targets (None for no target) - integer classes (classification) or real values (regression). """ # we're going to accumulate Images and categories here result = [] for img, cls in srclist: if isinstance(img, basestring): imgin = Image.open(img) elif isinstance(img, numpy.ndarray): imgin = Image.fromarray(img) elif isinstance(img, Image.Image): imgin = img elif Image.isImageType(img): imgin = img else: raise ValueError("Valid input for images are strings (a " "path towards a file), pil images " "and numpy arrays; %s is not supported" % str(img.__class__)) if cls is None: cls = '' imgin = imgin.convert('RGB') result.append((numpy.array(imgin), cls)) return result def one_image(image, image_size=None, classes=None, rng=None, preprocessor=None, fit_preprocessor=False): """ Convenience function that creates an Images dataset from a single image. 
Parameters ---------- image: string, image or numpy.ndarray The image to use as source. See :class:`Images` for a description of other parameters. """ return Images(source=((image,),), image_size=image_size, classes=classes, rng=rng, preprocessor=preprocessor, fit_preprocessor=fit_preprocessor)
bsd-3-clause
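# A minimal sketch of the constructor contract documented above: two
# parallel lists (images, targets) for a 2-class problem. Assumes pylearn2
# and pyl2extra (with their theano dependency) are installed; the arrays
# stand in for real image files and are padded/resized to 32x32 RGB.
import numpy
from pyl2extra.datasets.images import Images

imgs = [numpy.zeros((16, 12, 3), dtype='uint8'),
        numpy.ones((8, 20, 3), dtype='uint8') * 255]
dataset = Images(source=(imgs, [0, 1]), image_size=32, classes=2)
print(dataset.get_num_examples())  # 2 examples, integer targets 0 and 1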
shanglt/youtube-dl
youtube_dl/extractor/planetaplay.py
113
1921
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import ExtractorError


class PlanetaPlayIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?planetaplay\.com/\?sng=(?P<id>[0-9]+)'
    _API_URL = 'http://planetaplay.com/action/playlist/?sng={0:}'
    _THUMBNAIL_URL = 'http://planetaplay.com/img/thumb/{thumb:}'
    _TEST = {
        'url': 'http://planetaplay.com/?sng=3586',
        'md5': '9d569dceb7251a4e01355d5aea60f9db',
        'info_dict': {
            'id': '3586',
            'ext': 'flv',
            'title': 'md5:e829428ee28b1deed00de90de49d1da1',
        },
        'skip': 'Not accessible from Travis CI server',
    }

    _SONG_FORMATS = {
        'lq': (0, 'http://www.planetaplay.com/videoplayback/{med_hash:}'),
        'hq': (1, 'http://www.planetaplay.com/videoplayback/hi/{med_hash:}'),
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        response = self._download_json(
            self._API_URL.format(video_id), video_id)['response']
        try:
            data = response.get('data')[0]
        except IndexError:
            raise ExtractorError(
                '%s: failed to get the playlist' % self.IE_NAME, expected=True)

        title = '{song_artists:} - {sng_name:}'.format(**data)
        thumbnail = self._THUMBNAIL_URL.format(**data)

        formats = []
        for format_id, (quality, url_template) in self._SONG_FORMATS.items():
            formats.append({
                'format_id': format_id,
                'url': url_template.format(**data),
                'quality': quality,
                'ext': 'flv',
            })

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
        }
unlicense
ampax/edx-platform-backup
common/djangoapps/external_auth/migrations/0001_initial.py
114
6388
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'ExternalAuthMap'
        db.create_table('external_auth_externalauthmap', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('external_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('external_domain', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('external_credentials', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('external_email', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('external_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True)),
            ('internal_password', self.gf('django.db.models.fields.CharField')(max_length=31, blank=True)),
            ('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('dtsignup', self.gf('django.db.models.fields.DateTimeField')(null=True)),
        ))
        db.send_create_signal('external_auth', ['ExternalAuthMap'])

        # Adding unique constraint on 'ExternalAuthMap', fields ['external_id', 'external_domain']
        db.create_unique('external_auth_externalauthmap', ['external_id', 'external_domain'])

    def backwards(self, orm):
        # Removing unique constraint on 'ExternalAuthMap', fields ['external_id', 'external_domain']
        db.delete_unique('external_auth_externalauthmap', ['external_id', 'external_domain'])

        # Deleting model 'ExternalAuthMap'
        db.delete_table('external_auth_externalauthmap')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'external_auth.externalauthmap': {
            'Meta': {'unique_together': "(('external_id', 'external_domain'),)", 'object_name': 'ExternalAuthMap'},
            'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'dtsignup': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'external_credentials': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'external_domain': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'external_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'external_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'external_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'internal_password': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True'})
        }
    }

    complete_apps = ['external_auth']
agpl-3.0
prasanna08/oppia
scripts/linters/test_files/invalid_urlencode.py
4
1489
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Python file with invalid syntax, used by scripts/linters/
python_linter_test. This file is using urlencode which is not allowed.
"""

from __future__ import absolute_import  # pylint: disable=import-only-modules
from __future__ import unicode_literals  # pylint: disable=import-only-modules

import urllib

import python_utils


class FakeClass(python_utils.OBJECT):
    """This is a fake docstring for invalid syntax purposes."""

    def __init__(self, fake_arg):
        self.fake_arg = fake_arg

    def fake_method(self, source_url, doseq):
        """This doesn't do anything.

        Args:
            source_url: str. The URL.
            doseq: bool. Boolean value.

        Returns:
            urlencode(object): Returns urlencode object.
        """
        # Use of urlencode is not allowed.
        return urllib.urlencode(source_url, doseq=doseq)
apache-2.0
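# For context on the lint fixture above, a sketch of what the flagged call
# actually does in plain Python 2 (Oppia itself routes this through a
# python_utils wrapper instead of calling urllib directly). Illustrative
# only; output ordering may vary since dicts are unordered here.
import urllib

params = {'q': 'oppia', 'tags': ['math', 'music']}
# doseq=True expands list values into repeated key=value pairs,
# e.g. q=oppia&tags=math&tags=music
print(urllib.urlencode(params, doseq=True))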
mahak/cloudify-cli
cloudify_cli/commands/users.py
1
9023
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############

from .. import env
from ..cli import cfy
from ..table import print_data, print_single
from ..utils import handle_client_error

USER_COLUMNS = ['username', 'groups', 'role', 'group_system_roles', 'active',
                'last_login_at', 'is_locked']

GET_DATA_COLUMNS = ['user_tenants', 'group_tenants']

NO_GET_DATA_COLUMNS = ['tenants']

USER_LABELS = {'role': 'system wide role',
               'group_system_roles': 'system wide roles via groups'}


def _format_user(user):
    user_tenants = dict(
        (str(tenant), str(user.user_tenants[tenant]))
        for tenant in user.user_tenants
    )
    group_tenants = dict(
        (str(tenant), dict(
            (str(role),
             [str(group) for group in user.group_tenants[tenant][role]])
            for role in user.group_tenants[tenant]
        ))
        for tenant in user.group_tenants
    )
    user['user_tenants'] = str(user_tenants)[1:-1]
    user['group_tenants'] = str(group_tenants)[1:-1]
    return user


def _format_group_system_roles(user):
    group_system_roles = dict(
        (str(role),
         [str(user_group) for user_group in user['group_system_roles'][role]])
        for role in user['group_system_roles']
    )
    user['group_system_roles'] = str(group_system_roles).strip('{}')
    return user


@cfy.group(name='users')
@cfy.options.common_options
def users():
    """Handle Cloudify users
    """
    if not env.is_initialized():
        env.raise_uninitialized()


@users.command(name='list', short_help='List users [manager only]')
@cfy.options.sort_by('username')
@cfy.options.descending
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.search
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list(sort_by, descending, get_data, search, pagination_offset,
         pagination_size, logger, client):
    """List all users
    """
    logger.info('Listing all users...')
    users_list = client.users.list(
        sort=sort_by,
        is_descending=descending,
        _get_data=get_data,
        _search=search,
        _offset=pagination_offset,
        _size=pagination_size
    )
    total = users_list.metadata.pagination.total
    # copy list
    columns = [] + USER_COLUMNS
    users_list = [_format_group_system_roles(user) for user in users_list]
    if get_data:
        users_list = [_format_user(user) for user in users_list]
        columns += GET_DATA_COLUMNS
    else:
        columns += NO_GET_DATA_COLUMNS
    print_data(columns, users_list, 'Users:', labels=USER_LABELS)
    logger.info('Showing {0} of {1} users'.format(len(users_list), total))


@users.command(name='create', short_help='Create a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.common_options
@cfy.options.security_role
@cfy.options.password
@cfy.options.tenant_name(required=False)
@cfy.options.user_tenant_role(required=False,
                              options_flags=['-l', '--user-tenant-role'])
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=False)
@cfy.pass_logger
def create(username,
           security_role,
           password,
           tenant_name,
           user_tenant_role,
           logger,
           client):
    """Create a new user on the manager

    `USERNAME` is the username of the user
    """
    client.users.create(username, password, security_role)
    logger.info('User `{0}` created with `{1}` security role'.format(
        username, security_role))
    if tenant_name and user_tenant_role:
        client.tenants.add_user(username, tenant_name, user_tenant_role)
        logger.info(
            'User `{0}` added successfully to tenant `{1}` with `{2}` role'
            .format(username, tenant_name, user_tenant_role))


@users.command(name='set-password',
               short_help='Set a new password for a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.password
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def set_password(username, password, logger, client):
    """Set a new password for a user

    `USERNAME` is the username of the user
    """
    logger.info('Setting new password for user {0}...'.format(username))
    client.users.set_password(username, password)
    logger.info('New password set')


@users.command(name='set-role',
               short_help='Set a new role for a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.security_role
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def set_role(username, security_role, logger, client):
    """Set a new role for a user

    `USERNAME` is the username of the user
    """
    logger.info('Setting new role for user {0}...'.format(username))
    client.users.set_role(username, security_role)
    logger.info('New role `{0}` set'.format(security_role))


@users.command(name='get',
               short_help='Get details for a single user [manager only]')
@cfy.argument(
    'username', callback=cfy.validate_name, default=env.get_username())
@cfy.options.common_options
@cfy.options.get_data
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get(username, get_data, logger, client):
    """Get details for a single user

    `USERNAME` is the username of the user. (default: current user)
    """
    logger.info('Getting info for user `{0}`...'.format(username))
    if username == env.get_username():
        user_details = client.users.get_self(_get_data=get_data)
    else:
        user_details = client.users.get(username, _get_data=get_data)
    # copy list
    columns = [] + USER_COLUMNS
    if get_data:
        _format_user(user_details)
        columns += GET_DATA_COLUMNS
    else:
        columns += NO_GET_DATA_COLUMNS
    print_single(columns,
                 user_details,
                 'Requested user info:',
                 labels=USER_LABELS)


@users.command(name='delete', short_help='Delete a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete(username, logger, client):
    """Delete a user

    `USERNAME` is the username of the user
    """
    logger.info('Deleting user `{0}`...'.format(username))
    client.users.delete(username)
    logger.info('User removed')


@users.command(name='activate',
               short_help='Make an inactive user active [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def activate(username, logger, client):
    """Activate a user

    `USERNAME` is the username of the user
    """
    graceful_msg = 'User `{0}` is already active'.format(username)
    logger.info('Activating user `{0}`...'.format(username))
    with handle_client_error(409, graceful_msg, logger):
        client.users.activate(username)
        logger.info('User activated')


@users.command(name='deactivate',
               short_help='Make an active user inactive [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def deactivate(username, logger, client):
    """Deactivate a user

    `USERNAME` is the username of the user
    """
    graceful_msg = 'User `{0}` is already inactive'.format(username)
    logger.info('Deactivating user `{0}`...'.format(username))
    with handle_client_error(409, graceful_msg, logger):
        client.users.deactivate(username)
        logger.info('User deactivated')


@users.command(name='unlock',
               short_help='Unlock a locked user [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def unlock(username, logger, client):
    """Unlock a locked user

    `USERNAME` is the username of the user
    """
    graceful_msg = 'User `{0}` is already unlocked'.format(username)
    logger.info('Unlocking user `{0}`...'.format(username))
    with handle_client_error(409, graceful_msg, logger):
        client.users.unlock(username)
        logger.info('User unlocked')
apache-2.0
Krossom/python-for-android
python3-alpha/python3-src/Lib/encodings/aliases.py
58
15133
""" Encoding Aliases Support This module is used by the encodings package search function to map encodings names to module names. Note that the search function normalizes the encoding names before doing the lookup, so the mapping will have to map normalized encoding names to module names. Contents: The following aliases dictionary contains mappings of all IANA character set names for which the Python core library provides codecs. In addition to these, a few Python specific codec aliases have also been added. """ aliases = { # Please keep this list sorted alphabetically by value ! # ascii codec '646' : 'ascii', 'ansi_x3.4_1968' : 'ascii', 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name 'ansi_x3.4_1986' : 'ascii', 'cp367' : 'ascii', 'csascii' : 'ascii', 'ibm367' : 'ascii', 'iso646_us' : 'ascii', 'iso_646.irv_1991' : 'ascii', 'iso_ir_6' : 'ascii', 'us' : 'ascii', 'us_ascii' : 'ascii', ## base64_codec codec #'base64' : 'base64_codec', #'base_64' : 'base64_codec', # big5 codec 'big5_tw' : 'big5', 'csbig5' : 'big5', # big5hkscs codec 'big5_hkscs' : 'big5hkscs', 'hkscs' : 'big5hkscs', ## bz2_codec codec #'bz2' : 'bz2_codec', # cp037 codec '037' : 'cp037', 'csibm037' : 'cp037', 'ebcdic_cp_ca' : 'cp037', 'ebcdic_cp_nl' : 'cp037', 'ebcdic_cp_us' : 'cp037', 'ebcdic_cp_wt' : 'cp037', 'ibm037' : 'cp037', 'ibm039' : 'cp037', # cp1026 codec '1026' : 'cp1026', 'csibm1026' : 'cp1026', 'ibm1026' : 'cp1026', # cp1140 codec '1140' : 'cp1140', 'ibm1140' : 'cp1140', # cp1250 codec '1250' : 'cp1250', 'windows_1250' : 'cp1250', # cp1251 codec '1251' : 'cp1251', 'windows_1251' : 'cp1251', # cp1252 codec '1252' : 'cp1252', 'windows_1252' : 'cp1252', # cp1253 codec '1253' : 'cp1253', 'windows_1253' : 'cp1253', # cp1254 codec '1254' : 'cp1254', 'windows_1254' : 'cp1254', # cp1255 codec '1255' : 'cp1255', 'windows_1255' : 'cp1255', # cp1256 codec '1256' : 'cp1256', 'windows_1256' : 'cp1256', # cp1257 codec '1257' : 'cp1257', 'windows_1257' : 'cp1257', # cp1258 codec '1258' : 'cp1258', 'windows_1258' : 'cp1258', # cp424 codec '424' : 'cp424', 'csibm424' : 'cp424', 'ebcdic_cp_he' : 'cp424', 'ibm424' : 'cp424', # cp437 codec '437' : 'cp437', 'cspc8codepage437' : 'cp437', 'ibm437' : 'cp437', # cp500 codec '500' : 'cp500', 'csibm500' : 'cp500', 'ebcdic_cp_be' : 'cp500', 'ebcdic_cp_ch' : 'cp500', 'ibm500' : 'cp500', # cp775 codec '775' : 'cp775', 'cspc775baltic' : 'cp775', 'ibm775' : 'cp775', # cp850 codec '850' : 'cp850', 'cspc850multilingual' : 'cp850', 'ibm850' : 'cp850', # cp852 codec '852' : 'cp852', 'cspcp852' : 'cp852', 'ibm852' : 'cp852', # cp855 codec '855' : 'cp855', 'csibm855' : 'cp855', 'ibm855' : 'cp855', # cp857 codec '857' : 'cp857', 'csibm857' : 'cp857', 'ibm857' : 'cp857', # cp858 codec '858' : 'cp858', 'csibm858' : 'cp858', 'ibm858' : 'cp858', # cp860 codec '860' : 'cp860', 'csibm860' : 'cp860', 'ibm860' : 'cp860', # cp861 codec '861' : 'cp861', 'cp_is' : 'cp861', 'csibm861' : 'cp861', 'ibm861' : 'cp861', # cp862 codec '862' : 'cp862', 'cspc862latinhebrew' : 'cp862', 'ibm862' : 'cp862', # cp863 codec '863' : 'cp863', 'csibm863' : 'cp863', 'ibm863' : 'cp863', # cp864 codec '864' : 'cp864', 'csibm864' : 'cp864', 'ibm864' : 'cp864', # cp865 codec '865' : 'cp865', 'csibm865' : 'cp865', 'ibm865' : 'cp865', # cp866 codec '866' : 'cp866', 'csibm866' : 'cp866', 'ibm866' : 'cp866', # cp869 codec '869' : 'cp869', 'cp_gr' : 'cp869', 'csibm869' : 'cp869', 'ibm869' : 'cp869', # cp932 codec '932' : 'cp932', 'ms932' : 'cp932', 'mskanji' : 'cp932', 'ms_kanji' : 'cp932', # cp949 codec '949' : 
'cp949', 'ms949' : 'cp949', 'uhc' : 'cp949', # cp950 codec '950' : 'cp950', 'ms950' : 'cp950', # euc_jis_2004 codec 'jisx0213' : 'euc_jis_2004', 'eucjis2004' : 'euc_jis_2004', 'euc_jis2004' : 'euc_jis_2004', # euc_jisx0213 codec 'eucjisx0213' : 'euc_jisx0213', # euc_jp codec 'eucjp' : 'euc_jp', 'ujis' : 'euc_jp', 'u_jis' : 'euc_jp', # euc_kr codec 'euckr' : 'euc_kr', 'korean' : 'euc_kr', 'ksc5601' : 'euc_kr', 'ks_c_5601' : 'euc_kr', 'ks_c_5601_1987' : 'euc_kr', 'ksx1001' : 'euc_kr', 'ks_x_1001' : 'euc_kr', # gb18030 codec 'gb18030_2000' : 'gb18030', # gb2312 codec 'chinese' : 'gb2312', 'csiso58gb231280' : 'gb2312', 'euc_cn' : 'gb2312', 'euccn' : 'gb2312', 'eucgb2312_cn' : 'gb2312', 'gb2312_1980' : 'gb2312', 'gb2312_80' : 'gb2312', 'iso_ir_58' : 'gb2312', # gbk codec '936' : 'gbk', 'cp936' : 'gbk', 'ms936' : 'gbk', ## hex_codec codec #'hex' : 'hex_codec', # hp_roman8 codec 'roman8' : 'hp_roman8', 'r8' : 'hp_roman8', 'csHPRoman8' : 'hp_roman8', # hz codec 'hzgb' : 'hz', 'hz_gb' : 'hz', 'hz_gb_2312' : 'hz', # iso2022_jp codec 'csiso2022jp' : 'iso2022_jp', 'iso2022jp' : 'iso2022_jp', 'iso_2022_jp' : 'iso2022_jp', # iso2022_jp_1 codec 'iso2022jp_1' : 'iso2022_jp_1', 'iso_2022_jp_1' : 'iso2022_jp_1', # iso2022_jp_2 codec 'iso2022jp_2' : 'iso2022_jp_2', 'iso_2022_jp_2' : 'iso2022_jp_2', # iso2022_jp_2004 codec 'iso_2022_jp_2004' : 'iso2022_jp_2004', 'iso2022jp_2004' : 'iso2022_jp_2004', # iso2022_jp_3 codec 'iso2022jp_3' : 'iso2022_jp_3', 'iso_2022_jp_3' : 'iso2022_jp_3', # iso2022_jp_ext codec 'iso2022jp_ext' : 'iso2022_jp_ext', 'iso_2022_jp_ext' : 'iso2022_jp_ext', # iso2022_kr codec 'csiso2022kr' : 'iso2022_kr', 'iso2022kr' : 'iso2022_kr', 'iso_2022_kr' : 'iso2022_kr', # iso8859_10 codec 'csisolatin6' : 'iso8859_10', 'iso_8859_10' : 'iso8859_10', 'iso_8859_10_1992' : 'iso8859_10', 'iso_ir_157' : 'iso8859_10', 'l6' : 'iso8859_10', 'latin6' : 'iso8859_10', # iso8859_11 codec 'thai' : 'iso8859_11', 'iso_8859_11' : 'iso8859_11', 'iso_8859_11_2001' : 'iso8859_11', # iso8859_13 codec 'iso_8859_13' : 'iso8859_13', 'l7' : 'iso8859_13', 'latin7' : 'iso8859_13', # iso8859_14 codec 'iso_8859_14' : 'iso8859_14', 'iso_8859_14_1998' : 'iso8859_14', 'iso_celtic' : 'iso8859_14', 'iso_ir_199' : 'iso8859_14', 'l8' : 'iso8859_14', 'latin8' : 'iso8859_14', # iso8859_15 codec 'iso_8859_15' : 'iso8859_15', 'l9' : 'iso8859_15', 'latin9' : 'iso8859_15', # iso8859_16 codec 'iso_8859_16' : 'iso8859_16', 'iso_8859_16_2001' : 'iso8859_16', 'iso_ir_226' : 'iso8859_16', 'l10' : 'iso8859_16', 'latin10' : 'iso8859_16', # iso8859_2 codec 'csisolatin2' : 'iso8859_2', 'iso_8859_2' : 'iso8859_2', 'iso_8859_2_1987' : 'iso8859_2', 'iso_ir_101' : 'iso8859_2', 'l2' : 'iso8859_2', 'latin2' : 'iso8859_2', # iso8859_3 codec 'csisolatin3' : 'iso8859_3', 'iso_8859_3' : 'iso8859_3', 'iso_8859_3_1988' : 'iso8859_3', 'iso_ir_109' : 'iso8859_3', 'l3' : 'iso8859_3', 'latin3' : 'iso8859_3', # iso8859_4 codec 'csisolatin4' : 'iso8859_4', 'iso_8859_4' : 'iso8859_4', 'iso_8859_4_1988' : 'iso8859_4', 'iso_ir_110' : 'iso8859_4', 'l4' : 'iso8859_4', 'latin4' : 'iso8859_4', # iso8859_5 codec 'csisolatincyrillic' : 'iso8859_5', 'cyrillic' : 'iso8859_5', 'iso_8859_5' : 'iso8859_5', 'iso_8859_5_1988' : 'iso8859_5', 'iso_ir_144' : 'iso8859_5', # iso8859_6 codec 'arabic' : 'iso8859_6', 'asmo_708' : 'iso8859_6', 'csisolatinarabic' : 'iso8859_6', 'ecma_114' : 'iso8859_6', 'iso_8859_6' : 'iso8859_6', 'iso_8859_6_1987' : 'iso8859_6', 'iso_ir_127' : 'iso8859_6', # iso8859_7 codec 'csisolatingreek' : 'iso8859_7', 'ecma_118' : 'iso8859_7', 'elot_928' : 
'iso8859_7', 'greek' : 'iso8859_7', 'greek8' : 'iso8859_7', 'iso_8859_7' : 'iso8859_7', 'iso_8859_7_1987' : 'iso8859_7', 'iso_ir_126' : 'iso8859_7', # iso8859_8 codec 'csisolatinhebrew' : 'iso8859_8', 'hebrew' : 'iso8859_8', 'iso_8859_8' : 'iso8859_8', 'iso_8859_8_1988' : 'iso8859_8', 'iso_ir_138' : 'iso8859_8', # iso8859_9 codec 'csisolatin5' : 'iso8859_9', 'iso_8859_9' : 'iso8859_9', 'iso_8859_9_1989' : 'iso8859_9', 'iso_ir_148' : 'iso8859_9', 'l5' : 'iso8859_9', 'latin5' : 'iso8859_9', # johab codec 'cp1361' : 'johab', 'ms1361' : 'johab', # koi8_r codec 'cskoi8r' : 'koi8_r', # latin_1 codec # # Note that the latin_1 codec is implemented internally in C and a # lot faster than the charmap codec iso8859_1 which uses the same # encoding. This is why we discourage the use of the iso8859_1 # codec and alias it to latin_1 instead. # '8859' : 'latin_1', 'cp819' : 'latin_1', 'csisolatin1' : 'latin_1', 'ibm819' : 'latin_1', 'iso8859' : 'latin_1', 'iso8859_1' : 'latin_1', 'iso_8859_1' : 'latin_1', 'iso_8859_1_1987' : 'latin_1', 'iso_ir_100' : 'latin_1', 'l1' : 'latin_1', 'latin' : 'latin_1', 'latin1' : 'latin_1', # mac_cyrillic codec 'maccyrillic' : 'mac_cyrillic', # mac_greek codec 'macgreek' : 'mac_greek', # mac_iceland codec 'maciceland' : 'mac_iceland', # mac_latin2 codec 'maccentraleurope' : 'mac_latin2', 'maclatin2' : 'mac_latin2', # mac_roman codec 'macintosh' : 'mac_roman', 'macroman' : 'mac_roman', # mac_turkish codec 'macturkish' : 'mac_turkish', # mbcs codec 'dbcs' : 'mbcs', # ptcp154 codec 'csptcp154' : 'ptcp154', 'pt154' : 'ptcp154', 'cp154' : 'ptcp154', 'cyrillic_asian' : 'ptcp154', ## quopri_codec codec #'quopri' : 'quopri_codec', #'quoted_printable' : 'quopri_codec', #'quotedprintable' : 'quopri_codec', ## rot_13 codec #'rot13' : 'rot_13', # shift_jis codec 'csshiftjis' : 'shift_jis', 'shiftjis' : 'shift_jis', 'sjis' : 'shift_jis', 's_jis' : 'shift_jis', # shift_jis_2004 codec 'shiftjis2004' : 'shift_jis_2004', 'sjis_2004' : 'shift_jis_2004', 's_jis_2004' : 'shift_jis_2004', # shift_jisx0213 codec 'shiftjisx0213' : 'shift_jisx0213', 'sjisx0213' : 'shift_jisx0213', 's_jisx0213' : 'shift_jisx0213', # tactis codec 'tis260' : 'tactis', # tis_620 codec 'tis620' : 'tis_620', 'tis_620_0' : 'tis_620', 'tis_620_2529_0' : 'tis_620', 'tis_620_2529_1' : 'tis_620', 'iso_ir_166' : 'tis_620', # utf_16 codec 'u16' : 'utf_16', 'utf16' : 'utf_16', # utf_16_be codec 'unicodebigunmarked' : 'utf_16_be', 'utf_16be' : 'utf_16_be', # utf_16_le codec 'unicodelittleunmarked' : 'utf_16_le', 'utf_16le' : 'utf_16_le', # utf_32 codec 'u32' : 'utf_32', 'utf32' : 'utf_32', # utf_32_be codec 'utf_32be' : 'utf_32_be', # utf_32_le codec 'utf_32le' : 'utf_32_le', # utf_7 codec 'u7' : 'utf_7', 'utf7' : 'utf_7', 'unicode_1_1_utf_7' : 'utf_7', # utf_8 codec 'u8' : 'utf_8', 'utf' : 'utf_8', 'utf8' : 'utf_8', 'utf8_ucs2' : 'utf_8', 'utf8_ucs4' : 'utf_8', ## uu_codec codec #'uu' : 'uu_codec', ## zlib_codec codec #'zip' : 'zlib_codec', #'zlib' : 'zlib_codec', # temporary mac CJK aliases, will be replaced by proper codecs in 3.1 'x_mac_japanese' : 'shift_jis', 'x_mac_korean' : 'euc_kr', 'x_mac_simp_chinese' : 'gb2312', 'x_mac_trad_chinese' : 'big5', }
apache-2.0
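# The aliases table above is what lets loose spellings resolve to real
# codecs via the encodings package search function. A quick check of a few
# entries, runnable as-is with the standard library:
import codecs

for name in ('u8', 'latin1', 'ms932', 'euc_cn'):
    print(name, '->', codecs.lookup(name).name)
# u8 -> utf-8, latin1 -> iso8859-1, ms932 -> cp932, euc_cn -> gb2312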
tony/flask
tests/test_blueprints.py
143
18147
# -*- coding: utf-8 -*-
"""
    tests.blueprints
    ~~~~~~~~~~~~~~~~

    Blueprints (and currently modules)

    :copyright: (c) 2015 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

import pytest

import flask
from flask._compat import text_type
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound


def test_blueprint_specific_error_handling():
    frontend = flask.Blueprint('frontend', __name__)
    backend = flask.Blueprint('backend', __name__)
    sideend = flask.Blueprint('sideend', __name__)

    @frontend.errorhandler(403)
    def frontend_forbidden(e):
        return 'frontend says no', 403

    @frontend.route('/frontend-no')
    def frontend_no():
        flask.abort(403)

    @backend.errorhandler(403)
    def backend_forbidden(e):
        return 'backend says no', 403

    @backend.route('/backend-no')
    def backend_no():
        flask.abort(403)

    @sideend.route('/what-is-a-sideend')
    def sideend_no():
        flask.abort(403)

    app = flask.Flask(__name__)
    app.register_blueprint(frontend)
    app.register_blueprint(backend)
    app.register_blueprint(sideend)

    @app.errorhandler(403)
    def app_forbidden(e):
        return 'application itself says no', 403

    c = app.test_client()

    assert c.get('/frontend-no').data == b'frontend says no'
    assert c.get('/backend-no').data == b'backend says no'
    assert c.get('/what-is-a-sideend').data == b'application itself says no'


def test_blueprint_specific_user_error_handling():
    class MyDecoratorException(Exception):
        pass

    class MyFunctionException(Exception):
        pass

    blue = flask.Blueprint('blue', __name__)

    @blue.errorhandler(MyDecoratorException)
    def my_decorator_exception_handler(e):
        assert isinstance(e, MyDecoratorException)
        return 'boom'

    def my_function_exception_handler(e):
        assert isinstance(e, MyFunctionException)
        return 'bam'
    blue.register_error_handler(MyFunctionException,
                                my_function_exception_handler)

    @blue.route('/decorator')
    def blue_deco_test():
        raise MyDecoratorException()

    @blue.route('/function')
    def blue_func_test():
        raise MyFunctionException()

    app = flask.Flask(__name__)
    app.register_blueprint(blue)

    c = app.test_client()

    assert c.get('/decorator').data == b'boom'
    assert c.get('/function').data == b'bam'


def test_blueprint_url_definitions():
    bp = flask.Blueprint('test', __name__)

    @bp.route('/foo', defaults={'baz': 42})
    def foo(bar, baz):
        return '%s/%d' % (bar, baz)

    @bp.route('/bar')
    def bar(bar):
        return text_type(bar)

    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
    app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})

    c = app.test_client()
    assert c.get('/1/foo').data == b'23/42'
    assert c.get('/2/foo').data == b'19/42'
    assert c.get('/1/bar').data == b'23'
    assert c.get('/2/bar').data == b'19'


def test_blueprint_url_processors():
    bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')

    @bp.url_defaults
    def add_language_code(endpoint, values):
        values.setdefault('lang_code', flask.g.lang_code)

    @bp.url_value_preprocessor
    def pull_lang_code(endpoint, values):
        flask.g.lang_code = values.pop('lang_code')

    @bp.route('/')
    def index():
        return flask.url_for('.about')

    @bp.route('/about')
    def about():
        return flask.url_for('.index')

    app = flask.Flask(__name__)
    app.register_blueprint(bp)

    c = app.test_client()

    assert c.get('/de/').data == b'/de/about'
    assert c.get('/de/about').data == b'/de/'


def test_templates_and_static(test_apps):
    from blueprintapp import app
    c = app.test_client()

    rv = c.get('/')
    assert rv.data == b'Hello from the Frontend'
    rv = c.get('/admin/')
    assert rv.data == b'Hello from the Admin'
    rv = c.get('/admin/index2')
    assert rv.data == b'Hello from the Admin'
    rv = c.get('/admin/static/test.txt')
    assert rv.data.strip() == b'Admin File'
    rv.close()
    rv = c.get('/admin/static/css/test.css')
    assert rv.data.strip() == b'/* nested file */'
    rv.close()

    # try/finally, in case other tests use this app for Blueprint tests.
    max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
    try:
        expected_max_age = 3600
        if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
            expected_max_age = 7200
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
        rv = c.get('/admin/static/css/test.css')
        cc = parse_cache_control_header(rv.headers['Cache-Control'])
        assert cc.max_age == expected_max_age
        rv.close()
    finally:
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default

    with app.test_request_context():
        assert flask.url_for('admin.static', filename='test.txt') == \
            '/admin/static/test.txt'

    with app.test_request_context():
        try:
            flask.render_template('missing.html')
        except TemplateNotFound as e:
            assert e.name == 'missing.html'
        else:
            assert 0, 'expected exception'

    with flask.Flask(__name__).test_request_context():
        assert flask.render_template('nested/nested.txt') == 'I\'m nested'


def test_default_static_cache_timeout():
    app = flask.Flask(__name__)

    class MyBlueprint(flask.Blueprint):
        def get_send_file_max_age(self, filename):
            return 100

    blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
    app.register_blueprint(blueprint)

    # try/finally, in case other tests use this app for Blueprint tests.
    max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
    try:
        with app.test_request_context():
            unexpected_max_age = 3600
            if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
                unexpected_max_age = 7200
            app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
            rv = blueprint.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 100
            rv.close()
    finally:
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default


def test_templates_list(test_apps):
    from blueprintapp import app
    templates = sorted(app.jinja_env.list_templates())
    assert templates == ['admin/index.html', 'frontend/index.html']


def test_dotted_names():
    frontend = flask.Blueprint('myapp.frontend', __name__)
    backend = flask.Blueprint('myapp.backend', __name__)

    @frontend.route('/fe')
    def frontend_index():
        return flask.url_for('myapp.backend.backend_index')

    @frontend.route('/fe2')
    def frontend_page2():
        return flask.url_for('.frontend_index')

    @backend.route('/be')
    def backend_index():
        return flask.url_for('myapp.frontend.frontend_index')

    app = flask.Flask(__name__)
    app.register_blueprint(frontend)
    app.register_blueprint(backend)

    c = app.test_client()
    assert c.get('/fe').data.strip() == b'/be'
    assert c.get('/fe2').data.strip() == b'/fe'
    assert c.get('/be').data.strip() == b'/fe'


def test_dotted_names_from_app():
    app = flask.Flask(__name__)
    app.testing = True
    test = flask.Blueprint('test', __name__)

    @app.route('/')
    def app_index():
        return flask.url_for('test.index')

    @test.route('/test/')
    def index():
        return flask.url_for('app_index')

    app.register_blueprint(test)

    with app.test_client() as c:
        rv = c.get('/')
        assert rv.data == b'/test/'


def test_empty_url_defaults():
    bp = flask.Blueprint('bp', __name__)

    @bp.route('/', defaults={'page': 1})
    @bp.route('/page/<int:page>')
    def something(page):
        return str(page)

    app = flask.Flask(__name__)
    app.register_blueprint(bp)

    c = app.test_client()
    assert c.get('/').data == b'1'
    assert c.get('/page/2').data == b'2'


def test_route_decorator_custom_endpoint():
    bp = flask.Blueprint('bp', __name__)

    @bp.route('/foo')
    def foo():
        return flask.request.endpoint

    @bp.route('/bar', endpoint='bar')
    def foo_bar():
        return flask.request.endpoint

    @bp.route('/bar/123', endpoint='123')
    def foo_bar_foo():
        return flask.request.endpoint

    @bp.route('/bar/foo')
    def bar_foo():
        return flask.request.endpoint

    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')

    @app.route('/')
    def index():
        return flask.request.endpoint

    c = app.test_client()
    assert c.get('/').data == b'index'
    assert c.get('/py/foo').data == b'bp.foo'
    assert c.get('/py/bar').data == b'bp.bar'
    assert c.get('/py/bar/123').data == b'bp.123'
    assert c.get('/py/bar/foo').data == b'bp.bar_foo'


def test_route_decorator_custom_endpoint_with_dots():
    bp = flask.Blueprint('bp', __name__)

    @bp.route('/foo')
    def foo():
        return flask.request.endpoint

    try:
        @bp.route('/bar', endpoint='bar.bar')
        def foo_bar():
            return flask.request.endpoint
    except AssertionError:
        pass
    else:
        raise AssertionError('expected AssertionError not raised')

    try:
        @bp.route('/bar/123', endpoint='bar.123')
        def foo_bar_foo():
            return flask.request.endpoint
    except AssertionError:
        pass
    else:
        raise AssertionError('expected AssertionError not raised')

    def foo_foo_foo():
        pass

    pytest.raises(
        AssertionError,
        lambda: bp.add_url_rule(
            '/bar/123', endpoint='bar.123', view_func=foo_foo_foo
        )
    )

    pytest.raises(
        AssertionError,
        bp.route('/bar/123', endpoint='bar.123'),
        lambda: None
    )

    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')

    c = app.test_client()
    assert c.get('/py/foo').data == b'bp.foo'
    # The dotted rules didn't actually make it through
    rv = c.get('/py/bar')
    assert rv.status_code == 404
    rv = c.get('/py/bar/123')
    assert rv.status_code == 404


def test_template_filter():
    bp = flask.Blueprint('bp', __name__)

    @bp.app_template_filter()
    def my_reverse(s):
        return s[::-1]
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'my_reverse' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['my_reverse'] == my_reverse
    assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'


def test_add_template_filter():
    bp = flask.Blueprint('bp', __name__)

    def my_reverse(s):
        return s[::-1]
    bp.add_app_template_filter(my_reverse)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'my_reverse' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['my_reverse'] == my_reverse
    assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'


def test_template_filter_with_name():
    bp = flask.Blueprint('bp', __name__)

    @bp.app_template_filter('strrev')
    def my_reverse(s):
        return s[::-1]
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'strrev' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['strrev'] == my_reverse
    assert app.jinja_env.filters['strrev']('abcd') == 'dcba'


def test_add_template_filter_with_name():
    bp = flask.Blueprint('bp', __name__)

    def my_reverse(s):
        return s[::-1]
    bp.add_app_template_filter(my_reverse, 'strrev')
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'strrev' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['strrev'] == my_reverse
    assert app.jinja_env.filters['strrev']('abcd') == 'dcba'


def test_template_filter_with_template():
    bp = flask.Blueprint('bp', __name__)

    @bp.app_template_filter()
    def super_reverse(s):
        return s[::-1]
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')

    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv =
app.test_client().get('/') assert rv.data == b'dcba' def test_template_filter_after_route_with_template(): app = flask.Flask(__name__) @app.route('/') def index(): return flask.render_template('template_filter.html', value='abcd') bp = flask.Blueprint('bp', __name__) @bp.app_template_filter() def super_reverse(s): return s[::-1] app.register_blueprint(bp, url_prefix='/py') rv = app.test_client().get('/') assert rv.data == b'dcba' def test_add_template_filter_with_template(): bp = flask.Blueprint('bp', __name__) def super_reverse(s): return s[::-1] bp.add_app_template_filter(super_reverse) app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') @app.route('/') def index(): return flask.render_template('template_filter.html', value='abcd') rv = app.test_client().get('/') assert rv.data == b'dcba' def test_template_filter_with_name_and_template(): bp = flask.Blueprint('bp', __name__) @bp.app_template_filter('super_reverse') def my_reverse(s): return s[::-1] app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') @app.route('/') def index(): return flask.render_template('template_filter.html', value='abcd') rv = app.test_client().get('/') assert rv.data == b'dcba' def test_add_template_filter_with_name_and_template(): bp = flask.Blueprint('bp', __name__) def my_reverse(s): return s[::-1] bp.add_app_template_filter(my_reverse, 'super_reverse') app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') @app.route('/') def index(): return flask.render_template('template_filter.html', value='abcd') rv = app.test_client().get('/') assert rv.data == b'dcba' def test_template_test(): bp = flask.Blueprint('bp', __name__) @bp.app_template_test() def is_boolean(value): return isinstance(value, bool) app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') assert 'is_boolean' in app.jinja_env.tests.keys() assert app.jinja_env.tests['is_boolean'] == is_boolean assert app.jinja_env.tests['is_boolean'](False) def test_add_template_test(): bp = flask.Blueprint('bp', __name__) def is_boolean(value): return isinstance(value, bool) bp.add_app_template_test(is_boolean) app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') assert 'is_boolean' in app.jinja_env.tests.keys() assert app.jinja_env.tests['is_boolean'] == is_boolean assert app.jinja_env.tests['is_boolean'](False) def test_template_test_with_name(): bp = flask.Blueprint('bp', __name__) @bp.app_template_test('boolean') def is_boolean(value): return isinstance(value, bool) app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') assert 'boolean' in app.jinja_env.tests.keys() assert app.jinja_env.tests['boolean'] == is_boolean assert app.jinja_env.tests['boolean'](False) def test_add_template_test_with_name(): bp = flask.Blueprint('bp', __name__) def is_boolean(value): return isinstance(value, bool) bp.add_app_template_test(is_boolean, 'boolean') app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') assert 'boolean' in app.jinja_env.tests.keys() assert app.jinja_env.tests['boolean'] == is_boolean assert app.jinja_env.tests['boolean'](False) def test_template_test_with_template(): bp = flask.Blueprint('bp', __name__) @bp.app_template_test() def boolean(value): return isinstance(value, bool) app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') @app.route('/') def index(): return flask.render_template('template_test.html', value=False) rv = app.test_client().get('/') assert b'Success!' 
in rv.data def test_template_test_after_route_with_template(): app = flask.Flask(__name__) @app.route('/') def index(): return flask.render_template('template_test.html', value=False) bp = flask.Blueprint('bp', __name__) @bp.app_template_test() def boolean(value): return isinstance(value, bool) app.register_blueprint(bp, url_prefix='/py') rv = app.test_client().get('/') assert b'Success!' in rv.data def test_add_template_test_with_template(): bp = flask.Blueprint('bp', __name__) def boolean(value): return isinstance(value, bool) bp.add_app_template_test(boolean) app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') @app.route('/') def index(): return flask.render_template('template_test.html', value=False) rv = app.test_client().get('/') assert b'Success!' in rv.data def test_template_test_with_name_and_template(): bp = flask.Blueprint('bp', __name__) @bp.app_template_test('boolean') def is_boolean(value): return isinstance(value, bool) app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') @app.route('/') def index(): return flask.render_template('template_test.html', value=False) rv = app.test_client().get('/') assert b'Success!' in rv.data def test_add_template_test_with_name_and_template(): bp = flask.Blueprint('bp', __name__) def is_boolean(value): return isinstance(value, bool) bp.add_app_template_test(is_boolean, 'boolean') app = flask.Flask(__name__) app.register_blueprint(bp, url_prefix='/py') @app.route('/') def index(): return flask.render_template('template_test.html', value=False) rv = app.test_client().get('/') assert b'Success!' in rv.data
bsd-3-clause
jasonseminara/OpenSourceFinal
lib/python3.5/site-packages/pip/_vendor/distlib/markers.py
1261
6282
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""

import ast
import os
import sys
import platform

from .compat import python_implementation, string_types
from .util import in_venv

__all__ = ['interpret']


class Evaluator(object):
    """
    A limited evaluator for Python expressions.
    """

    operators = {
        'eq': lambda x, y: x == y,
        'gt': lambda x, y: x > y,
        'gte': lambda x, y: x >= y,
        'in': lambda x, y: x in y,
        'lt': lambda x, y: x < y,
        'lte': lambda x, y: x <= y,
        'not': lambda x: not x,
        'noteq': lambda x, y: x != y,
        'notin': lambda x, y: x not in y,
    }

    allowed_values = {
        'sys_platform': sys.platform,
        'python_version': '%s.%s' % sys.version_info[:2],
        # parsing sys.platform is not reliable, but there is no other
        # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
        'python_full_version': sys.version.split(' ', 1)[0],
        'os_name': os.name,
        'platform_in_venv': str(in_venv()),
        'platform_release': platform.release(),
        'platform_version': platform.version(),
        'platform_machine': platform.machine(),
        'platform_python_implementation': python_implementation(),
    }

    def __init__(self, context=None):
        """
        Initialise an instance.

        :param context: If specified, names are looked up in this mapping.
        """
        self.context = context or {}
        self.source = None

    def get_fragment(self, offset):
        """
        Get the part of the source which is causing a problem.
        """
        fragment_len = 10
        s = '%r' % (self.source[offset:offset + fragment_len])
        if offset + fragment_len < len(self.source):
            s += '...'
        return s

    def get_handler(self, node_type):
        """
        Get a handler for the specified AST node type.
        """
        return getattr(self, 'do_%s' % node_type, None)

    def evaluate(self, node, filename=None):
        """
        Evaluate a source string or node, using ``filename`` when
        displaying errors.
        """
        if isinstance(node, string_types):
            self.source = node
            kwargs = {'mode': 'eval'}
            if filename:
                kwargs['filename'] = filename
            try:
                node = ast.parse(node, **kwargs)
            except SyntaxError as e:
                s = self.get_fragment(e.offset)
                raise SyntaxError('syntax error %s' % s)
        node_type = node.__class__.__name__.lower()
        handler = self.get_handler(node_type)
        if handler is None:
            if self.source is None:
                s = '(source not available)'
            else:
                s = self.get_fragment(node.col_offset)
            raise SyntaxError("don't know how to evaluate %r %s" % (
                node_type, s))
        return handler(node)

    def get_attr_key(self, node):
        assert isinstance(node, ast.Attribute), 'attribute node expected'
        return '%s.%s' % (node.value.id, node.attr)

    def do_attribute(self, node):
        if not isinstance(node.value, ast.Name):
            valid = False
        else:
            key = self.get_attr_key(node)
            valid = key in self.context or key in self.allowed_values
        if not valid:
            raise SyntaxError('invalid expression: %s' % key)
        if key in self.context:
            result = self.context[key]
        else:
            result = self.allowed_values[key]
        return result

    def do_boolop(self, node):
        result = self.evaluate(node.values[0])
        is_or = node.op.__class__ is ast.Or
        is_and = node.op.__class__ is ast.And
        assert is_or or is_and
        if (is_and and result) or (is_or and not result):
            for n in node.values[1:]:
                result = self.evaluate(n)
                if (is_or and result) or (is_and and not result):
                    break
        return result

    def do_compare(self, node):
        def sanity_check(lhsnode, rhsnode):
            valid = True
            if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
                valid = False
            #elif (isinstance(lhsnode, ast.Attribute)
            #      and isinstance(rhsnode, ast.Attribute)):
            #    klhs = self.get_attr_key(lhsnode)
            #    krhs = self.get_attr_key(rhsnode)
            #    valid = klhs != krhs
            if not valid:
                s = self.get_fragment(node.col_offset)
                raise SyntaxError('Invalid comparison: %s' % s)

        lhsnode = node.left
        lhs = self.evaluate(lhsnode)
        result = True
        for op, rhsnode in zip(node.ops, node.comparators):
            sanity_check(lhsnode, rhsnode)
            op = op.__class__.__name__.lower()
            if op not in self.operators:
                raise SyntaxError('unsupported operation: %r' % op)
            rhs = self.evaluate(rhsnode)
            result = self.operators[op](lhs, rhs)
            if not result:
                break
            lhs = rhs
            lhsnode = rhsnode
        return result

    def do_expression(self, node):
        return self.evaluate(node.body)

    def do_name(self, node):
        valid = False
        if node.id in self.context:
            valid = True
            result = self.context[node.id]
        elif node.id in self.allowed_values:
            valid = True
            result = self.allowed_values[node.id]
        if not valid:
            raise SyntaxError('invalid expression: %s' % node.id)
        return result

    def do_str(self, node):
        return node.s


def interpret(marker, execution_context=None):
    """
    Interpret a marker and return a result depending on environment.

    :param marker: The marker to interpret.
    :type marker: str
    :param execution_context: The context used for name lookup.
    :type execution_context: mapping
    """
    return Evaluator(execution_context).evaluate(marker.strip())
mit
bclau/nova
tools/patch_tox_venv.py
11
1659
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import sys

import install_venv_common as install_venv  # noqa


def first_file(file_list):
    for candidate in file_list:
        if os.path.exists(candidate):
            return candidate


def main(argv):
    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    venv = os.environ['VIRTUAL_ENV']

    pip_requires = first_file([
        os.path.join(root, 'requirements.txt'),
        os.path.join(root, 'tools', 'pip-requires'),
    ])
    test_requires = first_file([
        os.path.join(root, 'test-requirements.txt'),
        os.path.join(root, 'tools', 'test-requires'),
    ])
    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
    project = 'nova'
    install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
                                       py_version, project)
    #NOTE(dprince): For Tox we only run post_process (which patches files, etc)
    install.post_process()


if __name__ == '__main__':
    main(sys.argv)
apache-2.0
nebril/fuel-web
tasklib/tasklib/tests/functional/test_run_exec.py
4
2108
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tasklib.tests import base
from tasklib.utils import STATUS


class TestFunctionalExecTasks(base.BaseFunctionalTest):
    """Each test follows this pattern:

    1. Run the task with the provided name - taskcmd -c conf.yaml run test/test
    2. Check the status of the task
    """

    def test_simple_run(self):
        exit_code, out, err = self.execute(['run', 'exec/simple'])
        self.assertEqual(exit_code, 0)
        exit_code, out, err = self.execute(['status', 'exec/simple'])
        self.assertEqual(out.strip('\n'), STATUS.end.name)
        self.assertEqual(exit_code, 0)

    def test_failed_run(self):
        exit_code, out, err = self.execute(['run', 'exec/fail'])
        self.assertEqual(exit_code, 2)
        exit_code, out, err = self.execute(['status', 'exec/fail'])
        self.assertEqual(out.strip('\n'), STATUS.failed.name)
        self.assertEqual(exit_code, 2)

    def test_error(self):
        exit_code, out, err = self.execute(['run', 'exec/error'])
        self.assertEqual(exit_code, 3)
        exit_code, out, err = self.execute(['status', 'exec/error'])
        self.assertEqual(out.strip('\n'), STATUS.error.name)
        self.assertEqual(exit_code, 3)

    def test_notfound(self):
        exit_code, out, err = self.execute(['run', 'exec/notfound'])
        self.assertEqual(exit_code, 4)
        exit_code, out, err = self.execute(['status', 'exec/notfound'])
        self.assertEqual(out.strip('\n'), STATUS.notfound.name)
        self.assertEqual(exit_code, 4)
apache-2.0
rednach/krill
modules/dummy_poller/module.py
18
4992
#!/usr/bin/python

# -*- coding: utf-8 -*-

# Copyright (C) 2009-2014:
#     Gabes Jean, naparuba@gmail.com
#     Gerhard Lausser, Gerhard.Lausser@consol.de
#     Gregory Starck, g.starck@gmail.com
#     Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.


# This Class is an example of an Scheduler module
# Here for the configuration phase AND running one

import sys
import signal
import time
from Queue import Empty

from shinken.basemodule import BaseModule
from shinken.log import logger

properties = {
    'daemons': ['poller'],
    'type': 'dummy_poller',
    'external': False,
    # To be a real worker module, you must set this
    'worker_capable': True,
}


# called by the plugin manager to get a broker
def get_instance(mod_conf):
    logger.info("[Dummy Poller] Get a Dummy poller module for plugin %s",
                mod_conf.get_name())
    instance = Dummy_poller(mod_conf)
    return instance


# Just print some stuff
class Dummy_poller(BaseModule):
    def __init__(self, mod_conf):
        BaseModule.__init__(self, mod_conf)

    # Called by poller to say 'let's prepare yourself guy'
    def init(self):
        logger.info("[Dummy Poller] Initialization of the dummy poller module")
        self.i_am_dying = False

    # Get new checks if less than nb_checks_max
    # If no new checks got and no check in queue,
    # sleep for 1 sec
    # REF: doc/shinken-action-queues.png (3)
    def get_new_checks(self):
        try:
            while(True):
                logger.debug("[Dummy Poller] I %d wait for a message", self.id)
                msg = self.s.get(block=False)
                if msg is not None:
                    self.checks.append(msg.get_data())
                logger.debug("[Dummy Poller] I, %d, got a message!", self.id)
        except Empty, exp:
            if len(self.checks) == 0:
                time.sleep(1)

    # Launch checks that are in status
    # REF: doc/shinken-action-queues.png (4)
    def launch_new_checks(self):
        # queue
        for chk in self.checks:
            if chk.status == 'queue':
                logger.warning("[Dummy Poller] Dummy (bad) check for %s",
                               str(chk.command))
                chk.exit_status = 2
                chk.get_outputs('All is NOT SO well', 8012)
                chk.status = 'done'
                chk.execution_time = 0.1

    # Check the status of checks
    # if done, return message finished :)
    # REF: doc/shinken-action-queues.png (5)
    def manage_finished_checks(self):
        to_del = []
        for action in self.checks:
            to_del.append(action)
            try:
                self.returns_queue.put(action)
            except IOError, exp:
                logger.info("[Dummy Poller] %d exiting: %s", self.id, exp)
                sys.exit(2)
        for chk in to_del:
            self.checks.remove(chk)

    # id = id of the worker
    # s = Global Queue Master->Slave
    # m = Queue Slave->Master
    # return_queue = queue managed by manager
    # c = Control Queue for the worker
    def work(self, s, returns_queue, c):
        logger.info("[Dummy Poller] Module Dummy started!")
        ## restore default signal handler for the workers:
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        timeout = 1.0
        self.checks = []

        self.returns_queue = returns_queue
        self.s = s
        self.t_each_loop = time.time()
        while True:
            begin = time.time()
            msg = None
            cmsg = None

            # If we are dying (big problem!) we do not
            # take new jobs, we just finished the current one
            if not self.i_am_dying:
                # REF: doc/shinken-action-queues.png (3)
                self.get_new_checks()
                # REF: doc/shinken-action-queues.png (4)
                self.launch_new_checks()
            # REF: doc/shinken-action-queues.png (5)
            self.manage_finished_checks()

            # Now get order from master
            try:
                cmsg = c.get(block=False)
                if cmsg.get_type() == 'Die':
                    logger.info("[Dummy Poller] %d : Dad say we are dying...",
                                self.id)
                    break
            except Exception:
                pass

            timeout -= time.time() - begin
            if timeout < 0:
                timeout = 1.0
agpl-3.0
nicoboss/Floatmotion
OpenGL/GL/ARB/texture_env_add.py
9
1124
'''OpenGL extension ARB.texture_env_add

This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_env_add to provide a more
Python-friendly API

Overview (from the spec)

	New texture environment function ADD is supported with the following
	equation:

		Cv = min(1, Cf + Ct)

	New function may be specified by calling TexEnv with ADD token.

	One possible application is to add a specular highlight texture to
	a Gouraud-shaded primitive to emulate Phong shading, in a single
	pass.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_env_add.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_env_add import *
from OpenGL.raw.GL.ARB.texture_env_add import _EXTENSION_NAME


def glInitTextureEnvAddARB():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)

### END AUTOGENERATED SECTION
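# --- Editor's note: an illustrative sketch, not part of the file above.
# It assumes a current GL context already exists (e.g. created via GLUT or
# pygame) and that the core GL_ADD constant is exposed by the installed
# PyOpenGL; without a live context the extension query cannot succeed.
from OpenGL.GL import glTexEnvf, GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_ADD
from OpenGL.GL.ARB.texture_env_add import glInitTextureEnvAddARB

if glInitTextureEnvAddARB():
    # Select the ADD combine function described above: Cv = min(1, Cf + Ct)
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_ADD)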
agpl-3.0
openstack/heat
heat/engine/update.py
1
12695
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_log import log as logging

from heat.common import exception
from heat.engine import dependencies
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stk_defn
from heat.objects import resource as resource_objects

LOG = logging.getLogger(__name__)


class StackUpdate(object):
    """A Task to perform the update of an existing stack to a new template."""

    def __init__(self, existing_stack, new_stack, previous_stack,
                 rollback=False):
        """Initialise with the existing stack and the new stack."""
        self.existing_stack = existing_stack
        self.new_stack = new_stack
        self.previous_stack = previous_stack
        self.rollback = rollback

        self.existing_snippets = dict((n, r.frozen_definition())
                                      for n, r in self.existing_stack.items()
                                      if n in self.new_stack)

    def __repr__(self):
        if self.rollback:
            return '%s Rollback' % str(self.existing_stack)
        else:
            return '%s Update' % str(self.existing_stack)

    def __call__(self):
        """Return a co-routine that updates the stack."""
        cleanup_prev = scheduler.DependencyTaskGroup(
            self.previous_stack.dependencies,
            self._remove_backup_resource,
            reverse=True)

        def get_error_wait_time(resource):
            return resource.cancel_grace_period()

        updater = scheduler.DependencyTaskGroup(
            self.dependencies(),
            self._resource_update,
            error_wait_time=get_error_wait_time)

        if not self.rollback:
            yield from cleanup_prev()

        try:
            yield from updater()
        finally:
            self.previous_stack.reset_dependencies()

    def _resource_update(self, res):
        if res.name in self.new_stack and self.new_stack[res.name] is res:
            return self._process_new_resource_update(res)
        else:
            return self._process_existing_resource_update(res)

    def _remove_backup_resource(self, prev_res):
        if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
                                  (prev_res.DELETE, prev_res.COMPLETE)):
            LOG.debug("Deleting backup resource %s", prev_res.name)
            yield from prev_res.destroy()

    @staticmethod
    def _exchange_stacks(existing_res, prev_res):
        resource_objects.Resource.exchange_stacks(existing_res.stack.context,
                                                  existing_res.id,
                                                  prev_res.id)
        prev_stack, existing_stack = prev_res.stack, existing_res.stack
        prev_stack.add_resource(existing_res)
        existing_stack.add_resource(prev_res)

    def _create_resource(self, new_res):
        res_name = new_res.name

        # Clean up previous resource
        if res_name in self.previous_stack:
            prev_res = self.previous_stack[res_name]

            if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
                                      (prev_res.DELETE, prev_res.COMPLETE)):
                # Swap in the backup resource if it is in a valid state,
                # instead of creating a new resource
                if prev_res.status == prev_res.COMPLETE:
                    LOG.debug("Swapping in backup Resource %s", res_name)
                    self._exchange_stacks(self.existing_stack[res_name],
                                          prev_res)
                    return

                LOG.debug("Deleting backup Resource %s", res_name)
                yield from prev_res.destroy()

        # Back up existing resource
        if res_name in self.existing_stack:
            LOG.debug("Backing up existing Resource %s", res_name)
            existing_res = self.existing_stack[res_name]
            self.previous_stack.add_resource(existing_res)
            existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)

        self.existing_stack.add_resource(new_res)

        # Save new resource definition to backup stack if it is not
        # present in backup stack template already
        # it allows to resolve all dependencies that existing resource
        # can have if it was copied to backup stack
        if (res_name not in
                self.previous_stack.t[self.previous_stack.t.RESOURCES]):
            LOG.debug("Storing definition of new Resource %s", res_name)
            self.previous_stack.t.add_resource(new_res.t)
            self.previous_stack.t.store(self.previous_stack.context)

        yield from new_res.create()
        self._update_resource_data(new_res)

    def _check_replace_restricted(self, res):
        registry = res.stack.env.registry
        restricted_actions = registry.get_rsrc_restricted_actions(res.name)
        existing_res = self.existing_stack[res.name]
        if 'replace' in restricted_actions:
            ex = exception.ResourceActionRestricted(action='replace')
            failure = exception.ResourceFailure(ex, existing_res,
                                                existing_res.UPDATE)
            existing_res._add_event(existing_res.UPDATE, existing_res.FAILED,
                                    str(ex))
            raise failure

    def _update_resource_data(self, resource):
        # Use the *new* template to determine the attrs to cache
        node_data = resource.node_data(self.new_stack.defn)
        stk_defn.update_resource_data(self.existing_stack.defn,
                                      resource.name, node_data)

        # Also update the new stack's definition with the data, so that
        # following resources can calculate dep_attr values correctly (e.g. if
        # the actual attribute name in a get_attr function also comes from a
        # get_attr function.)
        stk_defn.update_resource_data(self.new_stack.defn,
                                      resource.name, node_data)

    def _process_new_resource_update(self, new_res):
        res_name = new_res.name

        if res_name in self.existing_stack:
            existing_res = self.existing_stack[res_name]
            is_substituted = existing_res.check_is_substituted(type(new_res))
            if type(existing_res) is type(new_res) or is_substituted:
                try:
                    yield from self._update_in_place(existing_res,
                                                     new_res,
                                                     is_substituted)
                except resource.UpdateReplace:
                    pass
                else:
                    # Save updated resource definition to backup stack
                    # cause it allows the backup stack resources to be
                    # synchronized
                    LOG.debug("Storing definition of updated Resource %s",
                              res_name)
                    self.previous_stack.t.add_resource(new_res.t)
                    self.previous_stack.t.store(self.previous_stack.context)
                    self.existing_stack.t.add_resource(new_res.t)
                    self.existing_stack.t.store(self.existing_stack.context)

                    LOG.info("Resource %(res_name)s for stack "
                             "%(stack_name)s updated",
                             {'res_name': res_name,
                              'stack_name': self.existing_stack.name})

                    self._update_resource_data(existing_res)
                    return
            else:
                self._check_replace_restricted(new_res)

        yield from self._create_resource(new_res)

    def _update_in_place(self, existing_res, new_res, is_substituted=False):
        existing_snippet = self.existing_snippets[existing_res.name]
        prev_res = self.previous_stack.get(new_res.name)

        # Note the new resource snippet is resolved in the context
        # of the existing stack (which is the stack being updated)
        # but with the template of the new stack (in case the update
        # is switching template implementations)
        new_snippet = new_res.t.reparse(self.existing_stack.defn,
                                        self.new_stack.t)

        if is_substituted:
            substitute = type(new_res)(existing_res.name,
                                       existing_res.t,
                                       existing_res.stack)
            existing_res.stack.resources[existing_res.name] = substitute
            existing_res = substitute
        existing_res.converge = self.new_stack.converge

        yield from existing_res.update(new_snippet, existing_snippet,
                                       prev_resource=prev_res)

    def _process_existing_resource_update(self, existing_res):
        res_name = existing_res.name

        if res_name in self.previous_stack:
            backup_res = self.previous_stack[res_name]
            yield from self._remove_backup_resource(backup_res)

        if res_name in self.new_stack:
            new_res = self.new_stack[res_name]
            if new_res.state == (new_res.INIT, new_res.COMPLETE):
                # Already updated in-place
                return

        if existing_res.stack is not self.previous_stack:
            yield from existing_res.destroy()

        if res_name not in self.new_stack:
            self.existing_stack.remove_resource(res_name)

    def dependencies(self):
        """Return the Dependencies graph for the update.

        Returns a Dependencies object representing the dependencies between
        update operations to move from an existing stack definition to a new
        one.
        """
        existing_deps = self.existing_stack.dependencies
        new_deps = self.new_stack.dependencies

        def edges():
            # Create/update the new stack's resources in create order
            for e in new_deps.graph().edges():
                yield e
            # Destroy/cleanup the old stack's resources in delete order
            for e in existing_deps.graph(reverse=True).edges():
                yield e
            # Don't cleanup old resources until after they have been replaced
            for name, res in self.existing_stack.items():
                if name in self.new_stack:
                    yield (res, self.new_stack[name])

        return dependencies.Dependencies(edges())

    def preview(self):
        upd_keys = set(self.new_stack.resources.keys())
        cur_keys = set(self.existing_stack.resources.keys())

        common_keys = cur_keys.intersection(upd_keys)
        deleted_keys = cur_keys.difference(upd_keys)
        added_keys = upd_keys.difference(cur_keys)

        updated_keys = []
        replaced_keys = []

        for key in common_keys:
            current_res = self.existing_stack.resources[key]
            updated_res = self.new_stack.resources[key]

            current_props = current_res.frozen_definition().properties(
                current_res.properties_schema, current_res.context)
            updated_props = updated_res.frozen_definition().properties(
                updated_res.properties_schema, updated_res.context)

            # type comparison must match that in _process_new_resource_update
            if type(current_res) is not type(updated_res):
                replaced_keys.append(key)
                continue

            try:
                if current_res.preview_update(updated_res.frozen_definition(),
                                              current_res.frozen_definition(),
                                              updated_props, current_props,
                                              None):
                    updated_keys.append(key)
            except resource.UpdateReplace:
                replaced_keys.append(key)

        return {
            'unchanged': list(set(common_keys).difference(
                set(updated_keys + replaced_keys))),
            'updated': updated_keys,
            'replaced': replaced_keys,
            'added': list(added_keys),
            'deleted': list(deleted_keys),
        }
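# --- Editor's note: a usage sketch, not part of the Heat source above.
# existing_stack, new_stack and previous_stack are hypothetical Stack
# objects that would come from Heat's engine; only preview() is exercised
# here, and the dict shape mirrors the return statement in the code above.
update = StackUpdate(existing_stack, new_stack, previous_stack)
changes = update.preview()
for action in ('unchanged', 'updated', 'replaced', 'added', 'deleted'):
    # each value is a list of resource names
    print(action, changes[action])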
apache-2.0
ally24k/autocomplete
lib/flask/ctx.py
776
14266
# -*- coding: utf-8 -*-
"""
    flask.ctx
    ~~~~~~~~~

    Implements the objects required to keep the context.

    :copyright: (c) 2011 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from __future__ import with_statement

import sys
from functools import update_wrapper

from werkzeug.exceptions import HTTPException

from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped


class _AppCtxGlobals(object):
    """A plain object."""

    def get(self, name, default=None):
        return self.__dict__.get(name, default)

    def __contains__(self, item):
        return item in self.__dict__

    def __iter__(self):
        return iter(self.__dict__)

    def __repr__(self):
        top = _app_ctx_stack.top
        if top is not None:
            return '<flask.g of %r>' % top.app.name
        return object.__repr__(self)


def after_this_request(f):
    """Executes a function after this request.  This is useful to modify
    response objects.  The function is passed the response object and has
    to return the same or a new one.

    Example::

        @app.route('/')
        def index():
            @after_this_request
            def add_header(response):
                response.headers['X-Foo'] = 'Parachute'
                return response
            return 'Hello World!'

    This is more useful if a function other than the view function wants to
    modify a response.  For instance think of a decorator that wants to add
    some headers without converting the return value into a response object.

    .. versionadded:: 0.9
    """
    _request_ctx_stack.top._after_request_functions.append(f)
    return f


def copy_current_request_context(f):
    """A helper function that decorates a function to retain the current
    request context.  This is useful when working with greenlets.  The
    moment the function is decorated a copy of the request context is
    created and then pushed when the function is called.

    Example::

        import gevent
        from flask import copy_current_request_context

        @app.route('/')
        def index():
            @copy_current_request_context
            def do_some_work():
                # do some work here, it can access flask.request like you
                # would otherwise in the view function.
                ...
            gevent.spawn(do_some_work)
            return 'Regular response'

    .. versionadded:: 0.10
    """
    top = _request_ctx_stack.top
    if top is None:
        raise RuntimeError('This decorator can only be used at local scopes '
            'when a request context is on the stack.  For instance within '
            'view functions.')
    reqctx = top.copy()
    def wrapper(*args, **kwargs):
        with reqctx:
            return f(*args, **kwargs)
    return update_wrapper(wrapper, f)


def has_request_context():
    """If you have code that wants to test if a request context is there or
    not this function can be used.  For instance, you may want to take
    advantage of request information if the request object is available,
    but fail silently if it is unavailable.

    ::

        class User(db.Model):

            def __init__(self, username, remote_addr=None):
                self.username = username
                if remote_addr is None and has_request_context():
                    remote_addr = request.remote_addr
                self.remote_addr = remote_addr

    Alternatively you can also just test any of the context bound objects
    (such as :class:`request` or :class:`g` for truthiness)::

        class User(db.Model):

            def __init__(self, username, remote_addr=None):
                self.username = username
                if remote_addr is None and request:
                    remote_addr = request.remote_addr
                self.remote_addr = remote_addr

    .. versionadded:: 0.7
    """
    return _request_ctx_stack.top is not None


def has_app_context():
    """Works like :func:`has_request_context` but for the application
    context.  You can also just do a boolean check on the
    :data:`current_app` object instead.

    .. versionadded:: 0.9
    """
    return _app_ctx_stack.top is not None


class AppContext(object):
    """The application context binds an application object implicitly
    to the current thread or greenlet, similar to how the
    :class:`RequestContext` binds request information.  The application
    context is also implicitly created if a request context is created
    but the application is not on top of the individual application
    context.
    """

    def __init__(self, app):
        self.app = app
        self.url_adapter = app.create_url_adapter(None)
        self.g = app.app_ctx_globals_class()

        # Like request context, app contexts can be pushed multiple times
        # but there a basic "refcount" is enough to track them.
        self._refcnt = 0

    def push(self):
        """Binds the app context to the current context."""
        self._refcnt += 1
        _app_ctx_stack.push(self)
        appcontext_pushed.send(self.app)

    def pop(self, exc=None):
        """Pops the app context."""
        self._refcnt -= 1
        if self._refcnt <= 0:
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_appcontext(exc)
        rv = _app_ctx_stack.pop()
        assert rv is self, 'Popped wrong app context.  (%r instead of %r)' \
            % (rv, self)
        appcontext_popped.send(self.app)

    def __enter__(self):
        self.push()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.pop(exc_value)


class RequestContext(object):
    """The request context contains all request relevant information.  It is
    created at the beginning of the request and pushed to the
    `_request_ctx_stack` and removed at the end of it.  It will create the
    URL adapter and request object for the WSGI environment provided.

    Do not attempt to use this class directly, instead use
    :meth:`~flask.Flask.test_request_context` and
    :meth:`~flask.Flask.request_context` to create this object.

    When the request context is popped, it will evaluate all the
    functions registered on the application for teardown execution
    (:meth:`~flask.Flask.teardown_request`).

    The request context is automatically popped at the end of the request
    for you.  In debug mode the request context is kept around if
    exceptions happen so that interactive debuggers have a chance to
    introspect the data.  With 0.4 this can also be forced for requests
    that did not fail and outside of `DEBUG` mode.  By setting
    ``'flask._preserve_context'`` to `True` on the WSGI environment the
    context will not pop itself at the end of the request.  This is used by
    the :meth:`~flask.Flask.test_client` for example to implement the
    deferred cleanup functionality.

    You might find this helpful for unittests where you need the
    information from the context local around for a little longer.  Make
    sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
    that situation, otherwise your unittests will leak memory.
    """

    def __init__(self, app, environ, request=None):
        self.app = app
        if request is None:
            request = app.request_class(environ)
        self.request = request
        self.url_adapter = app.create_url_adapter(self.request)
        self.flashes = None
        self.session = None

        # Request contexts can be pushed multiple times and interleaved with
        # other request contexts.  Now only if the last level is popped we
        # get rid of them.  Additionally if an application context is missing
        # one is created implicitly so for each level we add this information
        self._implicit_app_ctx_stack = []

        # indicator if the context was preserved.  Next time another context
        # is pushed the preserved context is popped.
        self.preserved = False

        # remembers the exception for pop if there is one in case the context
        # preservation kicks in.
        self._preserved_exc = None

        # Functions that should be executed after the request on the response
        # object.  These will be called before the regular "after_request"
        # functions.
        self._after_request_functions = []

        self.match_request()

        # XXX: Support for deprecated functionality.  This is going away with
        # Flask 1.0
        blueprint = self.request.blueprint
        if blueprint is not None:
            # better safe than sorry, we don't want to break code that
            # already worked
            bp = app.blueprints.get(blueprint)
            if bp is not None and blueprint_is_module(bp):
                self.request._is_old_module = True

    def _get_g(self):
        return _app_ctx_stack.top.g
    def _set_g(self, value):
        _app_ctx_stack.top.g = value
    g = property(_get_g, _set_g)
    del _get_g, _set_g

    def copy(self):
        """Creates a copy of this request context with the same request object.
        This can be used to move a request context to a different greenlet.
        Because the actual request object is the same this cannot be used to
        move a request context to a different thread unless access to the
        request object is locked.

        .. versionadded:: 0.10
        """
        return self.__class__(self.app,
            environ=self.request.environ,
            request=self.request
        )

    def match_request(self):
        """Can be overridden by a subclass to hook into the matching of
        the request.
        """
        try:
            url_rule, self.request.view_args = \
                self.url_adapter.match(return_rule=True)
            self.request.url_rule = url_rule
        except HTTPException as e:
            self.request.routing_exception = e

    def push(self):
        """Binds the request context to the current context."""
        # If an exception occurs in debug mode or if context preservation is
        # activated under exception situations exactly one context stays
        # on the stack.  The rationale is that you want to access that
        # information under debug situations.  However if someone forgets to
        # pop that context again we want to make sure that on the next push
        # it's invalidated, otherwise we run at risk that something leaks
        # memory.  This is usually only a problem in testsuite since this
        # functionality is not active in production environments.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop(top._preserved_exc)

        # Before we push the request context we have to ensure that there
        # is an application context.
        app_ctx = _app_ctx_stack.top
        if app_ctx is None or app_ctx.app != self.app:
            app_ctx = self.app.app_context()
            app_ctx.push()
            self._implicit_app_ctx_stack.append(app_ctx)
        else:
            self._implicit_app_ctx_stack.append(None)

        _request_ctx_stack.push(self)

        # Open the session at the moment that the request context is
        # available.  This allows a custom open_session method to use the
        # request context (e.g. code that access database information
        # stored on `g` instead of the appcontext).
        self.session = self.app.open_session(self.request)
        if self.session is None:
            self.session = self.app.make_null_session()

    def pop(self, exc=None):
        """Pops the request context and unbinds it by doing that.  This will
        also trigger the execution of functions registered by the
        :meth:`~flask.Flask.teardown_request` decorator.

        .. versionchanged:: 0.9
           Added the `exc` argument.
        """
        app_ctx = self._implicit_app_ctx_stack.pop()

        clear_request = False
        if not self._implicit_app_ctx_stack:
            self.preserved = False
            self._preserved_exc = None
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_request(exc)

            # If this interpreter supports clearing the exception information
            # we do that now.  This will only go into effect on Python 2.x,
            # on 3.x it disappears automatically at the end of the exception
            # stack.
            if hasattr(sys, 'exc_clear'):
                sys.exc_clear()

            request_close = getattr(self.request, 'close', None)
            if request_close is not None:
                request_close()
            clear_request = True

        rv = _request_ctx_stack.pop()
        assert rv is self, 'Popped wrong request context.  ' \
            '(%r instead of %r)' % (rv, self)

        # get rid of circular dependencies at the end of the request
        # so that we don't require the GC to be active.
        if clear_request:
            rv.request.environ['werkzeug.request'] = None

        # Get rid of the app as well if necessary.
        if app_ctx is not None:
            app_ctx.pop(exc)

    def auto_pop(self, exc):
        if self.request.environ.get('flask._preserve_context') or \
           (exc is not None and self.app.preserve_context_on_exception):
            self.preserved = True
            self._preserved_exc = exc
        else:
            self.pop(exc)

    def __enter__(self):
        self.push()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # do not pop the request stack if we are in debug mode and an
        # exception happened.  This will allow the debugger to still
        # access the request object in the interactive shell.  Furthermore
        # the context can be force kept alive for the test client.
        # See flask.testing for how this works.
        self.auto_pop(exc_value)

    def __repr__(self):
        return '<%s \'%s\' [%s] of %s>' % (
            self.__class__.__name__,
            self.request.url,
            self.request.method,
            self.app.name,
        )
apache-2.0
ray-project/ray
rllib/env/wrappers/tests/test_exception_wrapper.py
2
1736
import random
import unittest

import gym

from ray.rllib.env.wrappers.exception_wrapper import ResetOnExceptionWrapper, \
    TooManyResetAttemptsException


class TestResetOnExceptionWrapper(unittest.TestCase):
    def test_unstable_env(self):
        class UnstableEnv(gym.Env):
            observation_space = gym.spaces.Discrete(2)
            action_space = gym.spaces.Discrete(2)

            def step(self, action):
                if random.choice([True, False]):
                    raise ValueError("An error from an unstable environment.")
                return self.observation_space.sample(), 0.0, False, {}

            def reset(self):
                return self.observation_space.sample()

        env = UnstableEnv()
        env = ResetOnExceptionWrapper(env)
        try:
            self._run_for_100_steps(env)
        except Exception:
            self.fail()

    def test_very_unstable_env(self):
        class VeryUnstableEnv(gym.Env):
            observation_space = gym.spaces.Discrete(2)
            action_space = gym.spaces.Discrete(2)

            def step(self, action):
                return self.observation_space.sample(), 0.0, False, {}

            def reset(self):
                raise ValueError("An error from a very unstable environment.")

        env = VeryUnstableEnv()
        env = ResetOnExceptionWrapper(env)
        self.assertRaises(TooManyResetAttemptsException,
                          lambda: self._run_for_100_steps(env))

    @staticmethod
    def _run_for_100_steps(env):
        env.reset()
        for _ in range(100):
            env.step(env.action_space.sample())


if __name__ == "__main__":
    import sys

    import pytest

    sys.exit(pytest.main(["-v", __file__]))
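# --- Editor's note: a minimal usage sketch, not part of the test file
# above. It assumes a registered gym environment id ("CartPole-v1") and
# the classic 4-tuple step API used by the tests above.
env = ResetOnExceptionWrapper(gym.make("CartPole-v1"))
obs = env.reset()
for _ in range(10):
    # Exceptions raised inside step()/reset() trigger a reset instead of
    # crashing the rollout; repeated reset failures eventually raise
    # TooManyResetAttemptsException.
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        obs = env.reset()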
apache-2.0
diefenbach/django-lfs
lfs/marketing/models.py
1
1821
# django imports
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext

# lfs imports
from lfs.catalog.models import Product
from lfs.order.models import Order


class Topseller(models.Model):
    """Selected products are in any case among topsellers.
    """
    product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
    position = models.PositiveSmallIntegerField(_(u"Position"), default=1)

    class Meta:
        ordering = ["position"]
        app_label = 'marketing'

    def __str__(self):
        return u"%s (%s)" % (self.product.name, self.position)


class ProductSales(models.Model):
    """Stores total sales per product.
    """
    product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
    sales = models.IntegerField(_(u"sales"), default=0)

    class Meta:
        app_label = 'marketing'


class FeaturedProduct(models.Model):
    """Featured products are manually selected by the shop owner.
    """
    product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
    position = models.PositiveSmallIntegerField(_(u"Position"), default=1)
    active = models.BooleanField(_(u"Active"), default=True)

    class Meta:
        ordering = ["position"]
        app_label = 'marketing'

    def __str__(self):
        return u"%s (%s)" % (self.product.name, self.position)


class OrderRatingMail(models.Model):
    """Saves whether and when a rating mail has been sent for an order.
    """
    order = models.ForeignKey(Order, models.CASCADE, verbose_name=_(u"Order"))
    send_date = models.DateTimeField(auto_now=True)

    def __str__(self):
        return u"%s (%s)" % (self.order.id,
                             self.send_date.strftime(ugettext('DATE_FORMAT')))

    class Meta:
        app_label = 'marketing'
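# --- Editor's note: an ORM usage sketch, not part of the models file
# above. It assumes a configured Django project with this app installed
# and a saved Product instance `p` (hypothetical here).
ts = Topseller.objects.create(product=p, position=1)
print(ts)                       # "<product name> (1)" via __str__ above
top = Topseller.objects.all()   # ordered by position, per Meta.ordering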
bsd-3-clause
JCROM-Android/jcrom_external_chromium_org
third_party/tlslite/tlslite/X509CertChain.py
76
9052
"""Class representing an X.509 certificate chain.""" from utils import cryptomath from X509 import X509 class X509CertChain: """This class represents a chain of X.509 certificates. @type x509List: list @ivar x509List: A list of L{tlslite.X509.X509} instances, starting with the end-entity certificate and with every subsequent certificate certifying the previous. """ def __init__(self, x509List=None): """Create a new X509CertChain. @type x509List: list @param x509List: A list of L{tlslite.X509.X509} instances, starting with the end-entity certificate and with every subsequent certificate certifying the previous. """ if x509List: self.x509List = x509List else: self.x509List = [] def parseChain(self, s): """Parse a PEM-encoded X.509 certificate file chain file. @type s: str @param s: A PEM-encoded (eg: Base64) X.509 certificate file, with every certificate wrapped within "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" tags). Extraneous data outside such tags, such as human readable representations, will be ignored. """ class PEMIterator(object): """Simple iterator over PEM-encoded certificates within a string. @type data: string @ivar data: A string containing PEM-encoded (Base64) certificates, with every certificate wrapped within "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----" tags). Extraneous data outside such tags, such as human readable representations, will be ignored. @type index: integer @ivar index: The current offset within data to begin iterating from. """ _CERTIFICATE_HEADER = "-----BEGIN CERTIFICATE-----" """The PEM encoding block header for X.509 certificates.""" _CERTIFICATE_FOOTER = "-----END CERTIFICATE-----" """The PEM encoding block footer for X.509 certificates.""" def __init__(self, s): self.data = s self.index = 0 def __iter__(self): return self def next(self): """Iterates and returns the next L{tlslite.X509.X509} certificate in data. @rtype tlslite.X509.X509 """ self.index = self.data.find(self._CERTIFICATE_HEADER, self.index) if self.index == -1: raise StopIteration end = self.data.find(self._CERTIFICATE_FOOTER, self.index) if end == -1: raise StopIteration certStr = self.data[self.index+len(self._CERTIFICATE_HEADER) : end] self.index = end + len(self._CERTIFICATE_FOOTER) bytes = cryptomath.base64ToBytes(certStr) return X509().parseBinary(bytes) self.x509List = list(PEMIterator(s)) return self def getNumCerts(self): """Get the number of certificates in this chain. @rtype: int """ return len(self.x509List) def getEndEntityPublicKey(self): """Get the public key from the end-entity certificate. @rtype: L{tlslite.utils.RSAKey.RSAKey} """ if self.getNumCerts() == 0: raise AssertionError() return self.x509List[0].publicKey def getFingerprint(self): """Get the hex-encoded fingerprint of the end-entity certificate. @rtype: str @return: A hex-encoded fingerprint. """ if self.getNumCerts() == 0: raise AssertionError() return self.x509List[0].getFingerprint() def getCommonName(self): """Get the Subject's Common Name from the end-entity certificate. The cryptlib_py module must be installed in order to use this function. @rtype: str or None @return: The CN component of the certificate's subject DN, if present. """ if self.getNumCerts() == 0: raise AssertionError() return self.x509List[0].getCommonName() def validate(self, x509TrustList): """Check the validity of the certificate chain. 
This checks that every certificate in the chain validates with the subsequent one, until some certificate validates with (or is identical to) one of the passed-in root certificates. The cryptlib_py module must be installed in order to use this function. @type x509TrustList: list of L{tlslite.X509.X509} @param x509TrustList: A list of trusted root certificates. The certificate chain must extend to one of these certificates to be considered valid. """ import cryptlib_py c1 = None c2 = None lastC = None rootC = None try: rootFingerprints = [c.getFingerprint() for c in x509TrustList] #Check that every certificate in the chain validates with the #next one for cert1, cert2 in zip(self.x509List, self.x509List[1:]): #If we come upon a root certificate, we're done. if cert1.getFingerprint() in rootFingerprints: return True c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(), cryptlib_py.CRYPT_UNUSED) c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(), cryptlib_py.CRYPT_UNUSED) try: cryptlib_py.cryptCheckCert(c1, c2) except: return False cryptlib_py.cryptDestroyCert(c1) c1 = None cryptlib_py.cryptDestroyCert(c2) c2 = None #If the last certificate is one of the root certificates, we're #done. if self.x509List[-1].getFingerprint() in rootFingerprints: return True #Otherwise, find a root certificate that the last certificate #chains to, and validate them. lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(), cryptlib_py.CRYPT_UNUSED) for rootCert in x509TrustList: rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(), cryptlib_py.CRYPT_UNUSED) if self._checkChaining(lastC, rootC): try: cryptlib_py.cryptCheckCert(lastC, rootC) return True except: return False return False finally: if not (c1 is None): cryptlib_py.cryptDestroyCert(c1) if not (c2 is None): cryptlib_py.cryptDestroyCert(c2) if not (lastC is None): cryptlib_py.cryptDestroyCert(lastC) if not (rootC is None): cryptlib_py.cryptDestroyCert(rootC) def _checkChaining(self, lastC, rootC): import cryptlib_py import array def compareNames(name): try: length = cryptlib_py.cryptGetAttributeString(lastC, name, None) lastName = array.array('B', [0] * length) cryptlib_py.cryptGetAttributeString(lastC, name, lastName) lastName = lastName.tostring() except cryptlib_py.CryptException, e: if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: lastName = None try: length = cryptlib_py.cryptGetAttributeString(rootC, name, None) rootName = array.array('B', [0] * length) cryptlib_py.cryptGetAttributeString(rootC, name, rootName) rootName = rootName.tostring() except cryptlib_py.CryptException, e: if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: rootName = None return lastName == rootName cryptlib_py.cryptSetAttribute(lastC, cryptlib_py.CRYPT_CERTINFO_ISSUERNAME, cryptlib_py.CRYPT_UNUSED) if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME): return False if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME): return False if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME): return False if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME): return False if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME): return False return True
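# --- Editor's note: a usage sketch, not part of the tlslite file above.
# 'chain.pem' is a hypothetical PEM file holding the end-entity
# certificate followed by its intermediates, as parseChain() expects.
chain = X509CertChain().parseChain(open('chain.pem').read())
print(chain.getNumCerts())
print(chain.getFingerprint())         # hex fingerprint of end-entity cert
print(chain.getEndEntityPublicKey())  # tlslite RSAKey instance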
bsd-3-clause
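A minimal usage sketch for the chain class in the record above. The file path `chain.pem` is a hypothetical placeholder, `trusted_roots` stands in for a list of `tlslite.X509.X509` root certificates the caller already holds, and the flat import mirrors the record's own Python 2-era module layout (all of these are assumptions, not part of the record):

# Hypothetical usage sketch (Python 2, matching the record above).
from X509CertChain import X509CertChain

pem_text = open('chain.pem').read()        # hypothetical PEM bundle path

chain = X509CertChain().parseChain(pem_text)   # parseChain returns self
print chain.getNumCerts()                      # certificates found in the PEM
print chain.getFingerprint()                   # end-entity fingerprint (hex)
if chain.validate(trusted_roots):              # requires cryptlib_py installed
    print 'chain extends to a trusted root'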
olologin/scikit-learn
examples/svm/plot_iris.py
225
3252
""" ================================================== Plot different SVM classifiers in the iris dataset ================================================== Comparison of different linear SVM classifiers on a 2D projection of the iris dataset. We only consider the first 2 features of this dataset: - Sepal length - Sepal width This example shows how to plot the decision surface for four SVM classifiers with different kernels. The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly different decision boundaries. This can be a consequence of the following differences: - ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the regular hinge loss. - ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while ``SVC`` uses the One-vs-One multiclass reduction. Both linear models have linear decision boundaries (intersecting hyperplanes) while the non-linear kernel models (polynomial or Gaussian RBF) have more flexible non-linear decision boundaries with shapes that depend on the kind of kernel and its parameters. .. NOTE:: while plotting the decision function of classifiers for toy 2D datasets can help get an intuitive understanding of their respective expressive power, be aware that those intuitions don't always generalize to more realistic high-dimensional problems. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target h = .02 # step size in the mesh # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter svc = svm.SVC(kernel='linear', C=C).fit(X, y) rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y) poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y) lin_svc = svm.LinearSVC(C=C).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel'] for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) plt.show()
bsd-3-clause
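The record above only visualizes decision surfaces; a short sketch of querying the same fitted models may help. This is a minimal example under the same assumptions as the record (first two iris features, unscaled data); the query point `[5.0, 3.5]` is an arbitrary illustrative value:

import numpy as np
from sklearn import svm, datasets

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target

C = 1.0  # SVM regularization parameter, as in the record above
clf = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)

# Classify a hypothetical flower with sepal length 5.0 and sepal width 3.5;
# predict() expects a 2D array of shape (n_samples, n_features).
print(clf.predict(np.array([[5.0, 3.5]])))

# Mean accuracy on the training data (optimistic; use a held-out split or
# cross-validation for an honest estimate).
print(clf.score(X, y))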
40223137/w1717
static/Brython3.1.0-20150301-090019/Lib/unittest/mock.py
739
71473
# mock.py # Test tools for mocking and patching. # Maintained by Michael Foord # Backport for other versions of Python available from # http://pypi.python.org/pypi/mock __all__ = ( 'Mock', 'MagicMock', 'patch', 'sentinel', 'DEFAULT', 'ANY', 'call', 'create_autospec', 'FILTER_DIR', 'NonCallableMock', 'NonCallableMagicMock', 'mock_open', 'PropertyMock', ) __version__ = '1.0' import inspect import pprint import sys from functools import wraps BaseExceptions = (BaseException,) if 'java' in sys.platform: # jython import java BaseExceptions = (BaseException, java.lang.Throwable) FILTER_DIR = True # Workaround for issue #12370 # Without this, the __class__ properties wouldn't be set correctly _safe_super = super def _is_instance_mock(obj): # can't use isinstance on Mock objects because they override __class__ # The base class for all mocks is NonCallableMock return issubclass(type(obj), NonCallableMock) def _is_exception(obj): return ( isinstance(obj, BaseExceptions) or isinstance(obj, type) and issubclass(obj, BaseExceptions) ) class _slotted(object): __slots__ = ['a'] DescriptorTypes = ( type(_slotted.a), property, ) def _getsignature(func, skipfirst, instance=False): if isinstance(func, type) and not instance: try: func = func.__init__ except AttributeError: return skipfirst = True elif not isinstance(func, FunctionTypes): # for classes where instance is True we end up here too try: func = func.__call__ except AttributeError: return try: argspec = inspect.getfullargspec(func) except TypeError: # C function / method, possibly inherited object().__init__ return regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec # instance methods and classmethods need to lose the self argument if getattr(func, '__self__', None) is not None: regargs = regargs[1:] if skipfirst: # this condition and the above one are never both True - why? regargs = regargs[1:] signature = inspect.formatargspec( regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann, formatvalue=lambda value: "") return signature[1:-1], func def _check_signature(func, mock, skipfirst, instance=False): if not _callable(func): return result = _getsignature(func, skipfirst, instance) if result is None: return signature, func = result # can't use self because "self" is common as an argument name # unfortunately even not in the first place src = "lambda _mock_self, %s: None" % signature checksig = eval(src, {}) _copy_func_details(func, checksig) type(mock)._mock_check_sig = checksig def _copy_func_details(func, funcopy): funcopy.__name__ = func.__name__ funcopy.__doc__ = func.__doc__ # we explicitly don't copy func.__dict__ into this copy as it would # expose original attributes that should be mocked funcopy.__module__ = func.__module__ funcopy.__defaults__ = func.__defaults__ funcopy.__kwdefaults__ = func.__kwdefaults__ def _callable(obj): if isinstance(obj, type): return True if getattr(obj, '__call__', None) is not None: return True return False def _is_list(obj): # checks for list or tuples # XXXX badly named! return type(obj) in (list, tuple) def _instance_callable(obj): """Given an object, return True if the object is callable. 
For classes, return True if instances would be callable.""" if not isinstance(obj, type): # already an instance return getattr(obj, '__call__', None) is not None # *could* be broken by a class overriding __mro__ or __dict__ via # a metaclass for base in (obj,) + obj.__mro__: if base.__dict__.get('__call__') is not None: return True return False def _set_signature(mock, original, instance=False): # creates a function with signature (*args, **kwargs) that delegates to a # mock. It still does signature checking by calling a lambda with the same # signature as the original. if not _callable(original): return skipfirst = isinstance(original, type) result = _getsignature(original, skipfirst, instance) if result is None: # was a C function (e.g. object().__init__ ) that can't be mocked return signature, func = result src = "lambda %s: None" % signature checksig = eval(src, {}) _copy_func_details(func, checksig) name = original.__name__ if not name.isidentifier(): name = 'funcopy' context = {'_checksig_': checksig, 'mock': mock} src = """def %s(*args, **kwargs): _checksig_(*args, **kwargs) return mock(*args, **kwargs)""" % name exec (src, context) funcopy = context[name] _setup_func(funcopy, mock) return funcopy def _setup_func(funcopy, mock): funcopy.mock = mock # can't use isinstance with mocks if not _is_instance_mock(mock): return def assert_called_with(*args, **kwargs): return mock.assert_called_with(*args, **kwargs) def assert_called_once_with(*args, **kwargs): return mock.assert_called_once_with(*args, **kwargs) def assert_has_calls(*args, **kwargs): return mock.assert_has_calls(*args, **kwargs) def assert_any_call(*args, **kwargs): return mock.assert_any_call(*args, **kwargs) def reset_mock(): funcopy.method_calls = _CallList() funcopy.mock_calls = _CallList() mock.reset_mock() ret = funcopy.return_value if _is_instance_mock(ret) and not ret is mock: ret.reset_mock() funcopy.called = False funcopy.call_count = 0 funcopy.call_args = None funcopy.call_args_list = _CallList() funcopy.method_calls = _CallList() funcopy.mock_calls = _CallList() funcopy.return_value = mock.return_value funcopy.side_effect = mock.side_effect funcopy._mock_children = mock._mock_children funcopy.assert_called_with = assert_called_with funcopy.assert_called_once_with = assert_called_once_with funcopy.assert_has_calls = assert_has_calls funcopy.assert_any_call = assert_any_call funcopy.reset_mock = reset_mock mock._mock_delegate = funcopy def _is_magic(name): return '__%s__' % name[2:-2] == name class _SentinelObject(object): "A unique, named, sentinel object." 
def __init__(self, name): self.name = name def __repr__(self): return 'sentinel.%s' % self.name class _Sentinel(object): """Access attributes to return a named object, usable as a sentinel.""" def __init__(self): self._sentinels = {} def __getattr__(self, name): if name == '__bases__': # Without this help(unittest.mock) raises an exception raise AttributeError return self._sentinels.setdefault(name, _SentinelObject(name)) sentinel = _Sentinel() DEFAULT = sentinel.DEFAULT _missing = sentinel.MISSING _deleted = sentinel.DELETED def _copy(value): if type(value) in (dict, list, tuple, set): return type(value)(value) return value _allowed_names = set( [ 'return_value', '_mock_return_value', 'side_effect', '_mock_side_effect', '_mock_parent', '_mock_new_parent', '_mock_name', '_mock_new_name' ] ) def _delegating_property(name): _allowed_names.add(name) _the_name = '_mock_' + name def _get(self, name=name, _the_name=_the_name): sig = self._mock_delegate if sig is None: return getattr(self, _the_name) return getattr(sig, name) def _set(self, value, name=name, _the_name=_the_name): sig = self._mock_delegate if sig is None: self.__dict__[_the_name] = value else: setattr(sig, name, value) return property(_get, _set) class _CallList(list): def __contains__(self, value): if not isinstance(value, list): return list.__contains__(self, value) len_value = len(value) len_self = len(self) if len_value > len_self: return False for i in range(0, len_self - len_value + 1): sub_list = self[i:i+len_value] if sub_list == value: return True return False def __repr__(self): return pprint.pformat(list(self)) def _check_and_set_parent(parent, value, name, new_name): if not _is_instance_mock(value): return False if ((value._mock_name or value._mock_new_name) or (value._mock_parent is not None) or (value._mock_new_parent is not None)): return False _parent = parent while _parent is not None: # setting a mock (value) as a child or return value of itself # should not modify the mock if _parent is value: return False _parent = _parent._mock_new_parent if new_name: value._mock_new_parent = parent value._mock_new_name = new_name if name: value._mock_parent = parent value._mock_name = name return True class Base(object): _mock_return_value = DEFAULT _mock_side_effect = None def __init__(self, *args, **kwargs): pass class NonCallableMock(Base): """A non-callable version of `Mock`""" def __new__(cls, *args, **kw): # every instance has its own class # so we can create magic methods on the # class without stomping on other mocks new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__}) instance = object.__new__(new) return instance def __init__( self, spec=None, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, **kwargs ): if _new_parent is None: _new_parent = parent __dict__ = self.__dict__ __dict__['_mock_parent'] = parent __dict__['_mock_name'] = name __dict__['_mock_new_name'] = _new_name __dict__['_mock_new_parent'] = _new_parent if spec_set is not None: spec = spec_set spec_set = True self._mock_add_spec(spec, spec_set) __dict__['_mock_children'] = {} __dict__['_mock_wraps'] = wraps __dict__['_mock_delegate'] = None __dict__['_mock_called'] = False __dict__['_mock_call_args'] = None __dict__['_mock_call_count'] = 0 __dict__['_mock_call_args_list'] = _CallList() __dict__['_mock_mock_calls'] = _CallList() __dict__['method_calls'] = _CallList() if kwargs: self.configure_mock(**kwargs) _safe_super(NonCallableMock, self).__init__( spec, wraps, name, spec_set, parent, 
_spec_state ) def attach_mock(self, mock, attribute): """ Attach a mock as an attribute of this one, replacing its name and parent. Calls to the attached mock will be recorded in the `method_calls` and `mock_calls` attributes of this one.""" mock._mock_parent = None mock._mock_new_parent = None mock._mock_name = '' mock._mock_new_name = None setattr(self, attribute, mock) def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) def _mock_add_spec(self, spec, spec_set): _spec_class = None if spec is not None and not _is_list(spec): if isinstance(spec, type): _spec_class = spec else: _spec_class = _get_class(spec) spec = dir(spec) __dict__ = self.__dict__ __dict__['_spec_class'] = _spec_class __dict__['_spec_set'] = spec_set __dict__['_mock_methods'] = spec def __get_return_value(self): ret = self._mock_return_value if self._mock_delegate is not None: ret = self._mock_delegate.return_value if ret is DEFAULT: ret = self._get_child_mock( _new_parent=self, _new_name='()' ) self.return_value = ret return ret def __set_return_value(self, value): if self._mock_delegate is not None: self._mock_delegate.return_value = value else: self._mock_return_value = value _check_and_set_parent(self, value, None, '()') __return_value_doc = "The value to be returned when the mock is called." return_value = property(__get_return_value, __set_return_value, __return_value_doc) @property def __class__(self): if self._spec_class is None: return type(self) return self._spec_class called = _delegating_property('called') call_count = _delegating_property('call_count') call_args = _delegating_property('call_args') call_args_list = _delegating_property('call_args_list') mock_calls = _delegating_property('mock_calls') def __get_side_effect(self): delegated = self._mock_delegate if delegated is None: return self._mock_side_effect return delegated.side_effect def __set_side_effect(self, value): value = _try_iter(value) delegated = self._mock_delegate if delegated is None: self._mock_side_effect = value else: delegated.side_effect = value side_effect = property(__get_side_effect, __set_side_effect) def reset_mock(self): "Restore the mock object to its initial state." self.called = False self.call_args = None self.call_count = 0 self.mock_calls = _CallList() self.call_args_list = _CallList() self.method_calls = _CallList() for child in self._mock_children.values(): if isinstance(child, _SpecState): continue child.reset_mock() ret = self._mock_return_value if _is_instance_mock(ret) and ret is not self: ret.reset_mock() def configure_mock(self, **kwargs): """Set attributes on the mock through keyword arguments. 
Attributes plus return values and side effects can be set on child mocks using standard dot notation and unpacking a dictionary in the method call: >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} >>> mock.configure_mock(**attrs)""" for arg, val in sorted(kwargs.items(), # we sort on the number of dots so that # attributes are set before we set attributes on # attributes key=lambda entry: entry[0].count('.')): args = arg.split('.') final = args.pop() obj = self for entry in args: obj = getattr(obj, entry) setattr(obj, final, val) def __getattr__(self, name): if name == '_mock_methods': raise AttributeError(name) elif self._mock_methods is not None: if name not in self._mock_methods or name in _all_magics: raise AttributeError("Mock object has no attribute %r" % name) elif _is_magic(name): raise AttributeError(name) result = self._mock_children.get(name) if result is _deleted: raise AttributeError(name) elif result is None: wraps = None if self._mock_wraps is not None: # XXXX should we get the attribute without triggering code # execution? wraps = getattr(self._mock_wraps, name) result = self._get_child_mock( parent=self, name=name, wraps=wraps, _new_name=name, _new_parent=self ) self._mock_children[name] = result elif isinstance(result, _SpecState): result = create_autospec( result.spec, result.spec_set, result.instance, result.parent, result.name ) self._mock_children[name] = result return result def __repr__(self): _name_list = [self._mock_new_name] _parent = self._mock_new_parent last = self dot = '.' if _name_list == ['()']: dot = '' seen = set() while _parent is not None: last = _parent _name_list.append(_parent._mock_new_name + dot) dot = '.' if _parent._mock_new_name == '()': dot = '' _parent = _parent._mock_new_parent # use ids here so as not to call __hash__ on the mocks if id(_parent) in seen: break seen.add(id(_parent)) _name_list = list(reversed(_name_list)) _first = last._mock_name or 'mock' if len(_name_list) > 1: if _name_list[1] not in ('()', '().'): _first += '.' _name_list[0] = _first name = ''.join(_name_list) name_string = '' if name not in ('mock', 'mock.'): name_string = ' name=%r' % name spec_string = '' if self._spec_class is not None: spec_string = ' spec=%r' if self._spec_set: spec_string = ' spec_set=%r' spec_string = spec_string % self._spec_class.__name__ return "<%s%s%s id='%s'>" % ( type(self).__name__, name_string, spec_string, id(self) ) def __dir__(self): """Filter the output of `dir(mock)` to only useful members.""" if not FILTER_DIR: return object.__dir__(self) extras = self._mock_methods or [] from_type = dir(type(self)) from_dict = list(self.__dict__) from_type = [e for e in from_type if not e.startswith('_')] from_dict = [e for e in from_dict if not e.startswith('_') or _is_magic(e)] return sorted(set(extras + from_type + from_dict + list(self._mock_children))) def __setattr__(self, name, value): if name in _allowed_names: # property setters go through here return object.__setattr__(self, name, value) elif (self._spec_set and self._mock_methods is not None and name not in self._mock_methods and name not in self.__dict__): raise AttributeError("Mock object has no attribute '%s'" % name) elif name in _unsupported_magics: msg = 'Attempting to set unsupported magic method %r.' 
% name raise AttributeError(msg) elif name in _all_magics: if self._mock_methods is not None and name not in self._mock_methods: raise AttributeError("Mock object has no attribute '%s'" % name) if not _is_instance_mock(value): setattr(type(self), name, _get_method(name, value)) original = value value = lambda *args, **kw: original(self, *args, **kw) else: # only set _new_name and not name so that mock_calls is tracked # but not method calls _check_and_set_parent(self, value, None, name) setattr(type(self), name, value) self._mock_children[name] = value elif name == '__class__': self._spec_class = value return else: if _check_and_set_parent(self, value, name, name): self._mock_children[name] = value return object.__setattr__(self, name, value) def __delattr__(self, name): if name in _all_magics and name in type(self).__dict__: delattr(type(self), name) if name not in self.__dict__: # for magic methods that are still MagicProxy objects and # not set on the instance itself return if name in self.__dict__: object.__delattr__(self, name) obj = self._mock_children.get(name, _missing) if obj is _deleted: raise AttributeError(name) if obj is not _missing: del self._mock_children[name] self._mock_children[name] = _deleted def _format_mock_call_signature(self, args, kwargs): name = self._mock_name or 'mock' return _format_call_signature(name, args, kwargs) def _format_mock_failure_message(self, args, kwargs): message = 'Expected call: %s\nActual call: %s' expected_string = self._format_mock_call_signature(args, kwargs) call_args = self.call_args if len(call_args) == 3: call_args = call_args[1:] actual_string = self._format_mock_call_signature(*call_args) return message % (expected_string, actual_string) def assert_called_with(_mock_self, *args, **kwargs): """assert that the mock was called with the specified arguments. Raises an AssertionError if the args and keyword args passed in are different to the last call to the mock.""" self = _mock_self if self.call_args is None: expected = self._format_mock_call_signature(args, kwargs) raise AssertionError('Expected call: %s\nNot called' % (expected,)) if self.call_args != (args, kwargs): msg = self._format_mock_failure_message(args, kwargs) raise AssertionError(msg) def assert_called_once_with(_mock_self, *args, **kwargs): """assert that the mock was called exactly once and with the specified arguments.""" self = _mock_self if not self.call_count == 1: msg = ("Expected '%s' to be called once. Called %s times." % (self._mock_name or 'mock', self.call_count)) raise AssertionError(msg) return self.assert_called_with(*args, **kwargs) def assert_has_calls(self, calls, any_order=False): """assert the mock has been called with the specified calls. The `mock_calls` list is checked for the calls. If `any_order` is False (the default) then the calls must be sequential. There can be extra calls before or after the specified calls. If `any_order` is True then the calls can be in any order, but they must all appear in `mock_calls`.""" if not any_order: if calls not in self.mock_calls: raise AssertionError( 'Calls not found.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) return all_calls = list(self.mock_calls) not_found = [] for kall in calls: try: all_calls.remove(kall) except ValueError: not_found.append(kall) if not_found: raise AssertionError( '%r not all found in call list' % (tuple(not_found),) ) def assert_any_call(self, *args, **kwargs): """assert the mock has been called with the specified arguments. 
The assert passes if the mock has *ever* been called, unlike `assert_called_with` and `assert_called_once_with` that only pass if the call is the most recent one.""" kall = call(*args, **kwargs) if kall not in self.call_args_list: expected_string = self._format_mock_call_signature(args, kwargs) raise AssertionError( '%s call not found' % expected_string ) def _get_child_mock(self, **kw): """Create the child mocks for attributes and return value. By default child mocks will be the same type as the parent. Subclasses of Mock may want to override this to customize the way child mocks are made. For non-callable mocks the callable variant will be used (rather than any custom subclass).""" _type = type(self) if not issubclass(_type, CallableMixin): if issubclass(_type, NonCallableMagicMock): klass = MagicMock elif issubclass(_type, NonCallableMock) : klass = Mock else: klass = _type.__mro__[1] return klass(**kw) def _try_iter(obj): if obj is None: return obj if _is_exception(obj): return obj if _callable(obj): return obj try: return iter(obj) except TypeError: # XXXX backwards compatibility # but this will blow up on first call - so maybe we should fail early? return obj class CallableMixin(Base): def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, **kwargs): self.__dict__['_mock_return_value'] = return_value _safe_super(CallableMixin, self).__init__( spec, wraps, name, spec_set, parent, _spec_state, _new_name, _new_parent, **kwargs ) self.side_effect = side_effect def _mock_check_sig(self, *args, **kwargs): # stub method that can be replaced with one with a specific signature pass def __call__(_mock_self, *args, **kwargs): # can't use self in-case a function / method we are mocking uses self # in the signature _mock_self._mock_check_sig(*args, **kwargs) return _mock_self._mock_call(*args, **kwargs) def _mock_call(_mock_self, *args, **kwargs): self = _mock_self self.called = True self.call_count += 1 self.call_args = _Call((args, kwargs), two=True) self.call_args_list.append(_Call((args, kwargs), two=True)) _new_name = self._mock_new_name _new_parent = self._mock_new_parent self.mock_calls.append(_Call(('', args, kwargs))) seen = set() skip_next_dot = _new_name == '()' do_method_calls = self._mock_parent is not None name = self._mock_name while _new_parent is not None: this_mock_call = _Call((_new_name, args, kwargs)) if _new_parent._mock_new_name: dot = '.' if skip_next_dot: dot = '' skip_next_dot = False if _new_parent._mock_new_name == '()': skip_next_dot = True _new_name = _new_parent._mock_new_name + dot + _new_name if do_method_calls: if _new_name == name: this_method_call = this_mock_call else: this_method_call = _Call((name, args, kwargs)) _new_parent.method_calls.append(this_method_call) do_method_calls = _new_parent._mock_parent is not None if do_method_calls: name = _new_parent._mock_name + '.' 
+ name _new_parent.mock_calls.append(this_mock_call) _new_parent = _new_parent._mock_new_parent # use ids here so as not to call __hash__ on the mocks _new_parent_id = id(_new_parent) if _new_parent_id in seen: break seen.add(_new_parent_id) ret_val = DEFAULT effect = self.side_effect if effect is not None: if _is_exception(effect): raise effect if not _callable(effect): result = next(effect) if _is_exception(result): raise result if result is DEFAULT: result = self.return_value return result ret_val = effect(*args, **kwargs) if ret_val is DEFAULT: ret_val = self.return_value if (self._mock_wraps is not None and self._mock_return_value is DEFAULT): return self._mock_wraps(*args, **kwargs) if ret_val is DEFAULT: ret_val = self.return_value return ret_val class Mock(CallableMixin, NonCallableMock): """ Create a new `Mock` object. `Mock` takes several optional arguments that specify the behaviour of the Mock object: * `spec`: This can be either a list of strings or an existing object (a class or instance) that acts as the specification for the mock object. If you pass in an object then a list of strings is formed by calling dir on the object (excluding unsupported magic attributes and methods). Accessing any attribute not in this list will raise an `AttributeError`. If `spec` is an object (rather than a list of strings) then `mock.__class__` returns the class of the spec object. This allows mocks to pass `isinstance` tests. * `spec_set`: A stricter variant of `spec`. If used, attempting to *set* or get an attribute on the mock that isn't on the object passed as `spec_set` will raise an `AttributeError`. * `side_effect`: A function to be called whenever the Mock is called. See the `side_effect` attribute. Useful for raising exceptions or dynamically changing return values. The function is called with the same arguments as the mock, and unless it returns `DEFAULT`, the return value of this function is used as the return value. If `side_effect` is an iterable then each call to the mock will return the next value from the iterable. If any of the members of the iterable are exceptions they will be raised instead of returned. * `return_value`: The value returned when the mock is called. By default this is a new Mock (created on first access). See the `return_value` attribute. * `wraps`: Item for the mock object to wrap. If `wraps` is not None then calling the Mock will pass the call through to the wrapped object (returning the real result). Attribute access on the mock will return a Mock object that wraps the corresponding attribute of the wrapped object (so attempting to access an attribute that doesn't exist will raise an `AttributeError`). If the mock has an explicit `return_value` set then calls are not passed to the wrapped object and the `return_value` is returned instead. * `name`: If the mock has a name then it will be used in the repr of the mock. This can be useful for debugging. The name is propagated to child mocks. Mocks can also be called with arbitrary keyword arguments. These will be used to set attributes on the mock after it is created. 
""" def _dot_lookup(thing, comp, import_path): try: return getattr(thing, comp) except AttributeError: __import__(import_path) return getattr(thing, comp) def _importer(target): components = target.split('.') import_path = components.pop(0) thing = __import__(import_path) for comp in components: import_path += ".%s" % comp thing = _dot_lookup(thing, comp, import_path) return thing def _is_started(patcher): # XXXX horrible return hasattr(patcher, 'is_local') class _patch(object): attribute_name = None _active_patches = set() def __init__( self, getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ): if new_callable is not None: if new is not DEFAULT: raise ValueError( "Cannot use 'new' and 'new_callable' together" ) if autospec is not None: raise ValueError( "Cannot use 'autospec' and 'new_callable' together" ) self.getter = getter self.attribute = attribute self.new = new self.new_callable = new_callable self.spec = spec self.create = create self.has_local = False self.spec_set = spec_set self.autospec = autospec self.kwargs = kwargs self.additional_patchers = [] def copy(self): patcher = _patch( self.getter, self.attribute, self.new, self.spec, self.create, self.spec_set, self.autospec, self.new_callable, self.kwargs ) patcher.attribute_name = self.attribute_name patcher.additional_patchers = [ p.copy() for p in self.additional_patchers ] return patcher def __call__(self, func): if isinstance(func, type): return self.decorate_class(func) return self.decorate_callable(func) def decorate_class(self, klass): for attr in dir(klass): if not attr.startswith(patch.TEST_PREFIX): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue patcher = self.copy() setattr(klass, attr, patcher(attr_value)) return klass def decorate_callable(self, func): if hasattr(func, 'patchings'): func.patchings.append(self) return func @wraps(func) def patched(*args, **keywargs): extra_args = [] entered_patchers = [] exc_info = tuple() try: for patching in patched.patchings: arg = patching.__enter__() entered_patchers.append(patching) if patching.attribute_name is not None: keywargs.update(arg) elif patching.new is DEFAULT: extra_args.append(arg) args += tuple(extra_args) return func(*args, **keywargs) except: if (patching not in entered_patchers and _is_started(patching)): # the patcher may have been started, but an exception # raised whilst entering one of its additional_patchers entered_patchers.append(patching) # Pass the exception to __exit__ exc_info = sys.exc_info() # re-raise the exception raise finally: for patching in reversed(entered_patchers): patching.__exit__(*exc_info) patched.patchings = [self] return patched def get_original(self): target = self.getter() name = self.attribute original = DEFAULT local = False try: original = target.__dict__[name] except (AttributeError, KeyError): original = getattr(target, name, DEFAULT) else: local = True if not self.create and original is DEFAULT: raise AttributeError( "%s does not have the attribute %r" % (target, name) ) return original, local def __enter__(self): """Perform the patch.""" new, spec, spec_set = self.new, self.spec, self.spec_set autospec, kwargs = self.autospec, self.kwargs new_callable = self.new_callable self.target = self.getter() # normalise False to None if spec is False: spec = None if spec_set is False: spec_set = None if autospec is False: autospec = None if spec is not None and autospec is not None: raise TypeError("Can't specify spec and autospec") if ((spec is not None or 
autospec is not None) and spec_set not in (True, None)): raise TypeError("Can't provide explicit spec_set *and* spec or autospec") original, local = self.get_original() if new is DEFAULT and autospec is None: inherit = False if spec is True: # set spec to the object we are replacing spec = original if spec_set is True: spec_set = original spec = None elif spec is not None: if spec_set is True: spec_set = spec spec = None elif spec_set is True: spec_set = original if spec is not None or spec_set is not None: if original is DEFAULT: raise TypeError("Can't use 'spec' with create=True") if isinstance(original, type): # If we're patching out a class and there is a spec inherit = True Klass = MagicMock _kwargs = {} if new_callable is not None: Klass = new_callable elif spec is not None or spec_set is not None: this_spec = spec if spec_set is not None: this_spec = spec_set if _is_list(this_spec): not_callable = '__call__' not in this_spec else: not_callable = not callable(this_spec) if not_callable: Klass = NonCallableMagicMock if spec is not None: _kwargs['spec'] = spec if spec_set is not None: _kwargs['spec_set'] = spec_set # add a name to mocks if (isinstance(Klass, type) and issubclass(Klass, NonCallableMock) and self.attribute): _kwargs['name'] = self.attribute _kwargs.update(kwargs) new = Klass(**_kwargs) if inherit and _is_instance_mock(new): # we can only tell if the instance should be callable if the # spec is not a list this_spec = spec if spec_set is not None: this_spec = spec_set if (not _is_list(this_spec) and not _instance_callable(this_spec)): Klass = NonCallableMagicMock _kwargs.pop('name') new.return_value = Klass(_new_parent=new, _new_name='()', **_kwargs) elif autospec is not None: # spec is ignored, new *must* be default, spec_set is treated # as a boolean. Should we check spec is not None and that spec_set # is a bool? if new is not DEFAULT: raise TypeError( "autospec creates the mock for you. Can't specify " "autospec and new." 
) if original is DEFAULT: raise TypeError("Can't use 'autospec' with create=True") spec_set = bool(spec_set) if autospec is True: autospec = original new = create_autospec(autospec, spec_set=spec_set, _name=self.attribute, **kwargs) elif kwargs: # can't set keyword args when we aren't creating the mock # XXXX If new is a Mock we could call new.configure_mock(**kwargs) raise TypeError("Can't pass kwargs to a mock we aren't creating") new_attr = new self.temp_original = original self.is_local = local setattr(self.target, self.attribute, new_attr) if self.attribute_name is not None: extra_args = {} if self.new is DEFAULT: extra_args[self.attribute_name] = new for patching in self.additional_patchers: arg = patching.__enter__() if patching.new is DEFAULT: extra_args.update(arg) return extra_args return new def __exit__(self, *exc_info): """Undo the patch.""" if not _is_started(self): raise RuntimeError('stop called on unstarted patcher') if self.is_local and self.temp_original is not DEFAULT: setattr(self.target, self.attribute, self.temp_original) else: delattr(self.target, self.attribute) if not self.create and not hasattr(self.target, self.attribute): # needed for proxy objects like django settings setattr(self.target, self.attribute, self.temp_original) del self.temp_original del self.is_local del self.target for patcher in reversed(self.additional_patchers): if _is_started(patcher): patcher.__exit__(*exc_info) def start(self): """Activate a patch, returning any created mock.""" result = self.__enter__() self._active_patches.add(self) return result def stop(self): """Stop an active patch.""" self._active_patches.discard(self) return self.__exit__() def _get_target(target): try: target, attribute = target.rsplit('.', 1) except (TypeError, ValueError): raise TypeError("Need a valid target to patch. You supplied: %r" % (target,)) getter = lambda: _importer(target) return getter, attribute def _patch_object( target, attribute, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs ): """ patch the named member (`attribute`) on an object (`target`) with a mock object. `patch.object` can be used as a decorator, class decorator or a context manager. Arguments `new`, `spec`, `create`, `spec_set`, `autospec` and `new_callable` have the same meaning as for `patch`. Like `patch`, `patch.object` takes arbitrary keyword arguments for configuring the mock object it creates. When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ getter = lambda: target return _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ) def _patch_multiple(target, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs): """Perform multiple patches in a single call. It takes the object to be patched (either as an object or a string to fetch the object by importing) and keyword arguments for the patches:: with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): ... Use `DEFAULT` as the value if you want `patch.multiple` to create mocks for you. In this case the created mocks are passed into a decorated function by keyword, and a dictionary is returned when `patch.multiple` is used as a context manager. `patch.multiple` can be used as a decorator, class decorator or a context manager. The arguments `spec`, `spec_set`, `create`, `autospec` and `new_callable` have the same meaning as for `patch`. 
These arguments will be applied to *all* patches done by `patch.multiple`. When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ if type(target) is str: getter = lambda: _importer(target) else: getter = lambda: target if not kwargs: raise ValueError( 'Must supply at least one keyword argument with patch.multiple' ) # need to wrap in a list for python 3, where items is a view items = list(kwargs.items()) attribute, new = items[0] patcher = _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, {} ) patcher.attribute_name = attribute for attribute, new in items[1:]: this_patcher = _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, {} ) this_patcher.attribute_name = attribute patcher.additional_patchers.append(this_patcher) return patcher def patch( target, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs ): """ `patch` acts as a function decorator, class decorator or a context manager. Inside the body of the function or with statement, the `target` is patched with a `new` object. When the function/with statement exits the patch is undone. If `new` is omitted, then the target is replaced with a `MagicMock`. If `patch` is used as a decorator and `new` is omitted, the created mock is passed in as an extra argument to the decorated function. If `patch` is used as a context manager the created mock is returned by the context manager. `target` should be a string in the form `'package.module.ClassName'`. The `target` is imported and the specified object replaced with the `new` object, so the `target` must be importable from the environment you are calling `patch` from. The target is imported when the decorated function is executed, not at decoration time. The `spec` and `spec_set` keyword arguments are passed to the `MagicMock` if patch is creating one for you. In addition you can pass `spec=True` or `spec_set=True`, which causes patch to pass in the object being mocked as the spec/spec_set object. `new_callable` allows you to specify a different class, or callable object, that will be called to create the `new` object. By default `MagicMock` is used. A more powerful form of `spec` is `autospec`. If you set `autospec=True` then the mock with be created with a spec from the object being replaced. All attributes of the mock will also have the spec of the corresponding attribute of the object being replaced. Methods and functions being mocked will have their arguments checked and will raise a `TypeError` if they are called with the wrong signature. For mocks replacing a class, their return value (the 'instance') will have the same spec as the class. Instead of `autospec=True` you can pass `autospec=some_object` to use an arbitrary object as the spec instead of the one being replaced. By default `patch` will fail to replace attributes that don't exist. If you pass in `create=True`, and the attribute doesn't exist, patch will create the attribute for you when the patched function is called, and delete it again afterwards. This is useful for writing tests against attributes that your production code creates at runtime. It is off by default because it can be dangerous. With it switched on you can write passing tests against APIs that don't actually exist! Patch can be used as a `TestCase` class decorator. It works by decorating each test method in the class. This reduces the boilerplate code when your test methods share a common patchings set. 
`patch` finds tests by looking for method names that start with `patch.TEST_PREFIX`. By default this is `test`, which matches the way `unittest` finds tests. You can specify an alternative prefix by setting `patch.TEST_PREFIX`. Patch can be used as a context manager, with the with statement. Here the patching applies to the indented block after the with statement. If you use "as" then the patched object will be bound to the name after the "as"; very useful if `patch` is creating a mock object for you. `patch` takes arbitrary keyword arguments. These will be passed to the `Mock` (or `new_callable`) on construction. `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are available for alternate use-cases. """ getter, attribute = _get_target(target) return _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ) class _patch_dict(object): """ Patch a dictionary, or dictionary like object, and restore the dictionary to its original state after the test. `in_dict` can be a dictionary or a mapping like container. If it is a mapping then it must at least support getting, setting and deleting items plus iterating over keys. `in_dict` can also be a string specifying the name of the dictionary, which will then be fetched by importing it. `values` can be a dictionary of values to set in the dictionary. `values` can also be an iterable of `(key, value)` pairs. If `clear` is True then the dictionary will be cleared before the new values are set. `patch.dict` can also be called with arbitrary keyword arguments to set values in the dictionary:: with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): ... `patch.dict` can be used as a context manager, decorator or class decorator. When used as a class decorator `patch.dict` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ def __init__(self, in_dict, values=(), clear=False, **kwargs): if isinstance(in_dict, str): in_dict = _importer(in_dict) self.in_dict = in_dict # support any argument supported by dict(...) 
constructor self.values = dict(values) self.values.update(kwargs) self.clear = clear self._original = None def __call__(self, f): if isinstance(f, type): return self.decorate_class(f) @wraps(f) def _inner(*args, **kw): self._patch_dict() try: return f(*args, **kw) finally: self._unpatch_dict() return _inner def decorate_class(self, klass): for attr in dir(klass): attr_value = getattr(klass, attr) if (attr.startswith(patch.TEST_PREFIX) and hasattr(attr_value, "__call__")): decorator = _patch_dict(self.in_dict, self.values, self.clear) decorated = decorator(attr_value) setattr(klass, attr, decorated) return klass def __enter__(self): """Patch the dict.""" self._patch_dict() def _patch_dict(self): values = self.values in_dict = self.in_dict clear = self.clear try: original = in_dict.copy() except AttributeError: # dict like object with no copy method # must support iteration over keys original = {} for key in in_dict: original[key] = in_dict[key] self._original = original if clear: _clear_dict(in_dict) try: in_dict.update(values) except AttributeError: # dict like object with no update method for key in values: in_dict[key] = values[key] def _unpatch_dict(self): in_dict = self.in_dict original = self._original _clear_dict(in_dict) try: in_dict.update(original) except AttributeError: for key in original: in_dict[key] = original[key] def __exit__(self, *args): """Unpatch the dict.""" self._unpatch_dict() return False start = __enter__ stop = __exit__ def _clear_dict(in_dict): try: in_dict.clear() except AttributeError: keys = list(in_dict) for key in keys: del in_dict[key] def _patch_stopall(): """Stop all active patches.""" for patch in list(_patch._active_patches): patch.stop() patch.object = _patch_object patch.dict = _patch_dict patch.multiple = _patch_multiple patch.stopall = _patch_stopall patch.TEST_PREFIX = 'test' magic_methods = ( "lt le gt ge eq ne " "getitem setitem delitem " "len contains iter " "hash str sizeof " "enter exit " "divmod neg pos abs invert " "complex int float index " "trunc floor ceil " "bool next " ) numerics = "add sub mul div floordiv mod lshift rshift and xor or pow " inplace = ' '.join('i%s' % n for n in numerics.split()) right = ' '.join('r%s' % n for n in numerics.split()) # not including __prepare__, __instancecheck__, __subclasscheck__ # (as they are metaclass methods) # __del__ is not supported at all as it causes problems if it exists _non_defaults = set('__%s__' % method for method in [ 'get', 'set', 'delete', 'reversed', 'missing', 'reduce', 'reduce_ex', 'getinitargs', 'getnewargs', 'getstate', 'setstate', 'getformat', 'setformat', 'repr', 'dir', 'subclasses', 'format', ]) def _get_method(name, func): "Turns a callable object (like a mock) into a real function" def method(self, *args, **kw): return func(self, *args, **kw) method.__name__ = name return method _magics = set( '__%s__' % method for method in ' '.join([magic_methods, numerics, inplace, right]).split() ) _all_magics = _magics | _non_defaults _unsupported_magics = set([ '__getattr__', '__setattr__', '__init__', '__new__', '__prepare__' '__instancecheck__', '__subclasscheck__', '__del__' ]) _calculate_return_value = { '__hash__': lambda self: object.__hash__(self), '__str__': lambda self: object.__str__(self), '__sizeof__': lambda self: object.__sizeof__(self), } _return_values = { '__lt__': NotImplemented, '__gt__': NotImplemented, '__le__': NotImplemented, '__ge__': NotImplemented, '__int__': 1, '__contains__': False, '__len__': 0, '__exit__': False, '__complex__': 1j, '__float__': 1.0, 
'__bool__': True, '__index__': 1, } def _get_eq(self): def __eq__(other): ret_val = self.__eq__._mock_return_value if ret_val is not DEFAULT: return ret_val return self is other return __eq__ def _get_ne(self): def __ne__(other): if self.__ne__._mock_return_value is not DEFAULT: return DEFAULT return self is not other return __ne__ def _get_iter(self): def __iter__(): ret_val = self.__iter__._mock_return_value if ret_val is DEFAULT: return iter([]) # if ret_val was already an iterator, then calling iter on it should # return the iterator unchanged return iter(ret_val) return __iter__ _side_effect_methods = { '__eq__': _get_eq, '__ne__': _get_ne, '__iter__': _get_iter, } def _set_return_value(mock, method, name): fixed = _return_values.get(name, DEFAULT) if fixed is not DEFAULT: method.return_value = fixed return return_calulator = _calculate_return_value.get(name) if return_calulator is not None: try: return_value = return_calulator(mock) except AttributeError: # XXXX why do we return AttributeError here? # set it as a side_effect instead? return_value = AttributeError(name) method.return_value = return_value return side_effector = _side_effect_methods.get(name) if side_effector is not None: method.side_effect = side_effector(mock) class MagicMixin(object): def __init__(self, *args, **kw): _safe_super(MagicMixin, self).__init__(*args, **kw) self._mock_set_magics() def _mock_set_magics(self): these_magics = _magics if self._mock_methods is not None: these_magics = _magics.intersection(self._mock_methods) remove_magics = set() remove_magics = _magics - these_magics for entry in remove_magics: if entry in type(self).__dict__: # remove unneeded magic methods delattr(self, entry) # don't overwrite existing attributes if called a second time these_magics = these_magics - set(type(self).__dict__) _type = type(self) for entry in these_magics: setattr(_type, entry, MagicProxy(entry, self)) class NonCallableMagicMock(MagicMixin, NonCallableMock): """A version of `MagicMock` that isn't callable.""" def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) self._mock_set_magics() class MagicMock(MagicMixin, Mock): """ MagicMock is a subclass of Mock with default implementations of most of the magic methods. You can use MagicMock without having to configure the magic methods yourself. If you use the `spec` or `spec_set` arguments then *only* magic methods that exist in the spec will be created. Attributes and the return value of a `MagicMock` will also be `MagicMocks`. """ def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. 
If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) self._mock_set_magics() class MagicProxy(object): def __init__(self, name, parent): self.name = name self.parent = parent def __call__(self, *args, **kwargs): m = self.create_mock() return m(*args, **kwargs) def create_mock(self): entry = self.name parent = self.parent m = parent._get_child_mock(name=entry, _new_name=entry, _new_parent=parent) setattr(parent, entry, m) _set_return_value(parent, m, entry) return m def __get__(self, obj, _type=None): return self.create_mock() class _ANY(object): "A helper object that compares equal to everything." def __eq__(self, other): return True def __ne__(self, other): return False def __repr__(self): return '<ANY>' ANY = _ANY() def _format_call_signature(name, args, kwargs): message = '%s(%%s)' % name formatted_args = '' args_string = ', '.join([repr(arg) for arg in args]) kwargs_string = ', '.join([ '%s=%r' % (key, value) for key, value in kwargs.items() ]) if args_string: formatted_args = args_string if kwargs_string: if formatted_args: formatted_args += ', ' formatted_args += kwargs_string return message % formatted_args class _Call(tuple): """ A tuple for holding the results of a call to a mock, either in the form `(args, kwargs)` or `(name, args, kwargs)`. If args or kwargs are empty then a call tuple will compare equal to a tuple without those values. This makes comparisons less verbose:: _Call(('name', (), {})) == ('name',) _Call(('name', (1,), {})) == ('name', (1,)) _Call(((), {'a': 'b'})) == ({'a': 'b'},) The `_Call` object provides a useful shortcut for comparing with call:: _Call(((1, 2), {'a': 3})) == call(1, 2, a=3) _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3) If the _Call has no name then it will match any name. """ def __new__(cls, value=(), name=None, parent=None, two=False, from_kall=True): name = '' args = () kwargs = {} _len = len(value) if _len == 3: name, args, kwargs = value elif _len == 2: first, second = value if isinstance(first, str): name = first if isinstance(second, tuple): args = second else: kwargs = second else: args, kwargs = first, second elif _len == 1: value, = value if isinstance(value, str): name = value elif isinstance(value, tuple): args = value else: kwargs = value if two: return tuple.__new__(cls, (args, kwargs)) return tuple.__new__(cls, (name, args, kwargs)) def __init__(self, value=(), name=None, parent=None, two=False, from_kall=True): self.name = name self.parent = parent self.from_kall = from_kall def __eq__(self, other): if other is ANY: return True try: len_other = len(other) except TypeError: return False self_name = '' if len(self) == 2: self_args, self_kwargs = self else: self_name, self_args, self_kwargs = self other_name = '' if len_other == 0: other_args, other_kwargs = (), {} elif len_other == 3: other_name, other_args, other_kwargs = other elif len_other == 1: value, = other if isinstance(value, tuple): other_args = value other_kwargs = {} elif isinstance(value, str): other_name = value other_args, other_kwargs = (), {} else: other_args = () other_kwargs = value else: # len 2 # could be (name, args) or (name, kwargs) or (args, kwargs) first, second = other if isinstance(first, str): other_name = first if isinstance(second, tuple): other_args, other_kwargs = second, {} else: other_args, other_kwargs = (), second else: other_args, other_kwargs = first, second if self_name and other_name != self_name: return False # this order is important for ANY to work! 
return (other_args, other_kwargs) == (self_args, self_kwargs) def __ne__(self, other): return not self.__eq__(other) def __call__(self, *args, **kwargs): if self.name is None: return _Call(('', args, kwargs), name='()') name = self.name + '()' return _Call((self.name, args, kwargs), name=name, parent=self) def __getattr__(self, attr): if self.name is None: return _Call(name=attr, from_kall=False) name = '%s.%s' % (self.name, attr) return _Call(name=name, parent=self, from_kall=False) def __repr__(self): if not self.from_kall: name = self.name or 'call' if name.startswith('()'): name = 'call%s' % name return name if len(self) == 2: name = 'call' args, kwargs = self else: name, args, kwargs = self if not name: name = 'call' elif not name.startswith('()'): name = 'call.%s' % name else: name = 'call%s' % name return _format_call_signature(name, args, kwargs) def call_list(self): """For a call object that represents multiple calls, `call_list` returns a list of all the intermediate calls as well as the final call.""" vals = [] thing = self while thing is not None: if thing.from_kall: vals.append(thing) thing = thing.parent return _CallList(reversed(vals)) call = _Call(from_kall=False) def create_autospec(spec, spec_set=False, instance=False, _parent=None, _name=None, **kwargs): """Create a mock object using another object as a spec. Attributes on the mock will use the corresponding attribute on the `spec` object as their spec. Functions or methods being mocked will have their arguments checked to check that they are called with the correct signature. If `spec_set` is True then attempting to set attributes that don't exist on the spec object will raise an `AttributeError`. If a class is used as a spec then the return value of the mock (the instance of the class) will have the same spec. You can use a class as the spec for an instance object by passing `instance=True`. The returned mock will only be callable if instances of the mock are callable. 
`create_autospec` also takes arbitrary keyword arguments that are passed to the constructor of the created mock.""" if _is_list(spec): # can't pass a list instance to the mock constructor as it will be # interpreted as a list of strings spec = type(spec) is_type = isinstance(spec, type) _kwargs = {'spec': spec} if spec_set: _kwargs = {'spec_set': spec} elif spec is None: # None we mock with a normal mock without a spec _kwargs = {} _kwargs.update(kwargs) Klass = MagicMock if type(spec) in DescriptorTypes: # descriptors don't have a spec # because we don't know what type they return _kwargs = {} elif not _callable(spec): Klass = NonCallableMagicMock elif is_type and instance and not _instance_callable(spec): Klass = NonCallableMagicMock _new_name = _name if _parent is None: # for a top level object no _new_name should be set _new_name = '' mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, name=_name, **_kwargs) if isinstance(spec, FunctionTypes): # should only happen at the top level because we don't # recurse for functions mock = _set_signature(mock, spec) else: _check_signature(spec, mock, is_type, instance) if _parent is not None and not instance: _parent._mock_children[_name] = mock if is_type and not instance and 'return_value' not in kwargs: mock.return_value = create_autospec(spec, spec_set, instance=True, _name='()', _parent=mock) for entry in dir(spec): if _is_magic(entry): # MagicMock already does the useful magic methods for us continue # XXXX do we need a better way of getting attributes without # triggering code execution (?) Probably not - we need the actual # object to mock it so we would rather trigger a property than mock # the property descriptor. Likewise we want to mock out dynamically # provided attributes. # XXXX what about attributes that raise exceptions other than # AttributeError on being fetched? # we could be resilient against it, or catch and propagate the # exception when the attribute is fetched from the mock try: original = getattr(spec, entry) except AttributeError: continue kwargs = {'spec': original} if spec_set: kwargs = {'spec_set': original} if not isinstance(original, FunctionTypes): new = _SpecState(original, spec_set, mock, entry, instance) mock._mock_children[entry] = new else: parent = mock if isinstance(spec, FunctionTypes): parent = mock.mock new = MagicMock(parent=parent, name=entry, _new_name=entry, _new_parent=parent, **kwargs) mock._mock_children[entry] = new skipfirst = _must_skip(spec, entry, is_type) _check_signature(original, new, skipfirst=skipfirst) # so functions created with _set_signature become instance attributes, # *plus* their underlying mock exists in _mock_children of the parent # mock. Adding to _mock_children may be unnecessary where we are also # setting as an instance attribute? 
if isinstance(new, FunctionTypes): setattr(mock, entry, new) return mock def _must_skip(spec, entry, is_type): if not isinstance(spec, type): if entry in getattr(spec, '__dict__', {}): # instance attribute - shouldn't skip return False spec = spec.__class__ for klass in spec.__mro__: result = klass.__dict__.get(entry, DEFAULT) if result is DEFAULT: continue if isinstance(result, (staticmethod, classmethod)): return False return is_type # shouldn't get here unless function is a dynamically provided attribute # XXXX untested behaviour return is_type def _get_class(obj): try: return obj.__class__ except AttributeError: # it is possible for objects to have no __class__ return type(obj) class _SpecState(object): def __init__(self, spec, spec_set=False, parent=None, name=None, ids=None, instance=False): self.spec = spec self.ids = ids self.spec_set = spec_set self.parent = parent self.instance = instance self.name = name FunctionTypes = ( # python function type(create_autospec), # instance method type(ANY.__eq__), ) file_spec = None def mock_open(mock=None, read_data=''): """ A helper function to create a mock to replace the use of `open`. It works for `open` called directly or used as a context manager. The `mock` argument is the mock object to configure. If `None` (the default) then a `MagicMock` will be created for you, with the API limited to methods or attributes available on standard file handles. `read_data` is a string for the `read` method of the file handle to return. This is an empty string by default. """ global file_spec if file_spec is None: import _io file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) if mock is None: mock = MagicMock(name='open', spec=open) handle = MagicMock(spec=file_spec) handle.write.return_value = None handle.__enter__.return_value = handle handle.read.return_value = read_data mock.return_value = handle return mock class PropertyMock(Mock): """ A mock intended to be used as a property, or other descriptor, on a class. `PropertyMock` provides `__get__` and `__set__` methods so you can specify a return value when it is fetched. Fetching a `PropertyMock` instance from an object calls the mock, with no args. Setting it calls the mock with the value being set. """ def _get_child_mock(self, **kwargs): return MagicMock(**kwargs) def __get__(self, obj, obj_type): return self() def __set__(self, obj, val): self(val)
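For orientation, a minimal usage sketch of the two helpers defined above, `create_autospec` and `mock_open`; the `Greeter` class and `example.txt` name are hypothetical, and the snippet assumes the module is importable under its usual name `mock`:

import mock  # assumption: this file is importable as the `mock` package

class Greeter(object):
    def greet(self, name):
        return "hello " + name

# create_autospec: the mock enforces Greeter.greet's real signature
g = mock.create_autospec(Greeter, instance=True)
g.greet("world")
g.greet.assert_called_once_with("world")

# mock_open: a stand-in for the builtin open(), preloaded with read data
m = mock.mock_open(read_data="line1")
with mock.patch("builtins.open", m):  # Python 3 name; "__builtin__.open" on Python 2
    with open("example.txt") as f:
        assert f.read() == "line1"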
gpl-3.0
ehashman/oh-mainline
vendor/packages/Pygments/pygments/lexers/dalvik.py
364
3442
# -*- coding: utf-8 -*- """ pygments.lexers.dalvik ~~~~~~~~~~~~~~~~~~~~~~ Pygments lexers for Dalvik VM-related languages. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, bygroups from pygments.token import Keyword, Text, Comment, Name, String, Number, \ Punctuation __all__ = ['SmaliLexer'] class SmaliLexer(RegexLexer): """ For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly code. *New in Pygments 1.6.* """ name = 'Smali' aliases = ['smali'] filenames = ['*.smali'] mimetypes = ['text/smali'] tokens = { 'root': [ include('comment'), include('label'), include('field'), include('method'), include('class'), include('directive'), include('access-modifier'), include('instruction'), include('literal'), include('punctuation'), include('type'), include('whitespace') ], 'directive': [ (r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|' r'enum|method|registers|locals|array-data|packed-switch|' r'sparse-switch|catchall|catch|line|parameter|local|prologue|' r'epilogue|source)', Keyword), (r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|' 'packed-switch|sparse-switch|parameter|local)', Keyword), (r'^[ \t]*\.restart local', Keyword), ], 'access-modifier': [ (r'(public|private|protected|static|final|synchronized|bridge|' r'varargs|native|abstract|strictfp|synthetic|constructor|' r'declared-synchronized|interface|enum|annotation|volatile|' r'transient)', Keyword), ], 'whitespace': [ (r'\n', Text), (r'\s+', Text), ], 'instruction': [ (r'\b[vp]\d+\b', Name.Builtin), # registers (r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions ], 'literal': [ (r'".*"', String), (r'0x[0-9A-Fa-f]+t?', Number.Hex), (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'[0-9]+L?', Number.Integer), ], 'field': [ (r'(\$?\b)([A-Za-z0-9_$]*)(:)', bygroups(Punctuation, Name.Variable, Punctuation)), ], 'method': [ (r'<(?:cl)?init>', Name.Function), # constructor (r'(\$?\b)([A-Za-z0-9_$]*)(\()', bygroups(Punctuation, Name.Function, Punctuation)), ], 'label': [ (r':[A-Za-z0-9_]+', Name.Label), ], 'class': [ # class names in the form Lcom/namespace/ClassName; # I only want to color the ClassName part, so the namespace part is # treated as 'Text' (r'(L)((?:[A-Za-z0-9_$]+/)*)([A-Za-z0-9_$]+)(;)', bygroups(Keyword.Type, Text, Name.Class, Text)), ], 'punctuation': [ (r'->', Punctuation), (r'[{},\(\):=\.-]', Punctuation), ], 'type': [ (r'[ZBSCIJFDV\[]+', Keyword.Type), ], 'comment': [ (r'#.*?\n', Comment), ], }
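A short sketch of running the lexer above through the standard Pygments pipeline; the Smali snippet is invented for illustration:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.dalvik import SmaliLexer

code = '''\
.class public Lcom/example/Foo;
.super Ljava/lang/Object;

.method public constructor <init>()V
    .registers 1
    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
    return-void
.end method
'''

print(highlight(code, SmaliLexer(), TerminalFormatter()))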
agpl-3.0
rain2o/collective.pfg.skiplogic
setup.py
1
1050
from setuptools import setup, find_packages import os version = '0.1' setup(name='collective.pfg.skiplogic', version=version, description="Adds skip logic capabilities to ploneformgen forms", long_description=open("README.txt").read() + "\n" + open(os.path.join("docs", "HISTORY.txt")).read(), # Get more strings from # http://pypi.python.org/pypi?:action=list_classifiers classifiers=[ "Framework :: Plone", "Programming Language :: Python", ], keywords='', author='', author_email='', url='http://svn.plone.org/svn/collective/', license='GPL', packages=find_packages(exclude=['ez_setup']), namespace_packages=['collective', 'collective.pfg'], include_package_data=True, zip_safe=False, install_requires=[ 'setuptools', # -*- Extra requirements: -*- ], entry_points=""" # -*- Entry points: -*- [z3c.autoinclude.plugin] target = plone """, )
gpl-2.0
adviti/melange
thirdparty/google_appengine/lib/django_1_2/tests/regressiontests/middleware_exceptions/tests.py
51
1441
import sys

from django.test import TestCase
from django.core.signals import got_request_exception


class TestException(Exception):
    pass


class TestMiddleware(object):
    def process_request(self, request):
        raise TestException('Test Exception')


class MiddlewareExceptionTest(TestCase):
    def setUp(self):
        self.exceptions = []
        got_request_exception.connect(self._on_request_exception)
        self.client.handler.load_middleware()

    def tearDown(self):
        got_request_exception.disconnect(self._on_request_exception)
        self.exceptions = []

    def _on_request_exception(self, sender, request, **kwargs):
        self.exceptions.append(sys.exc_info())

    def test_process_request(self):
        self.client.handler._request_middleware.insert(0, TestMiddleware().process_request)
        try:
            response = self.client.get('/')
        except TestException, e:
            # The test client intentionally re-raises any exceptions raised
            # during request handling, so the actual check that the exception
            # was properly handled relies on the got_request_exception signal
            # being sent.
            pass
        except Exception, e:
            self.fail("Unexpected exception: %s" % e)
        self.assertEquals(len(self.exceptions), 1)
        exception, value, tb = self.exceptions[0]
        self.assertEquals(value.args, ('Test Exception', ))
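The `got_request_exception` signal exercised by this test is also useful outside of tests, e.g. for custom error logging; a hedged sketch (the logger name is arbitrary):

import logging
import sys

from django.core.signals import got_request_exception

log = logging.getLogger('request.errors')

def _log_unhandled(sender, request, **kwargs):
    # the signal fires while sys.exc_info() still holds the exception
    log.error('unhandled exception on %s', request.path,
              exc_info=sys.exc_info())

got_request_exception.connect(_log_unhandled)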
apache-2.0
yasserglez/tagfs
packages/tagfs/contrib/django/db/backends/postgresql/introspection.py
9
3694
from django.db.backends import BaseDatabaseIntrospection class DatabaseIntrospection(BaseDatabaseIntrospection): # Maps type codes to Django Field types. data_types_reverse = { 16: 'BooleanField', 21: 'SmallIntegerField', 23: 'IntegerField', 25: 'TextField', 700: 'FloatField', 701: 'FloatField', 869: 'IPAddressField', 1043: 'CharField', 1082: 'DateField', 1083: 'TimeField', 1114: 'DateTimeField', 1184: 'DateTimeField', 1266: 'TimeField', 1700: 'DecimalField', } def get_table_list(self, cursor): "Returns a list of table names in the current database." cursor.execute(""" SELECT c.relname FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r', 'v', '') AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid)""") return [row[0] for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): "Returns a description of the table, with the DB-API cursor.description interface." cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) return cursor.description def get_relations(self, cursor, table_name): """ Returns a dictionary of {field_index: (field_index_other_table, other_table)} representing all relationships to the given table. Indexes are 0-based. """ cursor.execute(""" SELECT con.conkey, con.confkey, c2.relname FROM pg_constraint con, pg_class c1, pg_class c2 WHERE c1.oid = con.conrelid AND c2.oid = con.confrelid AND c1.relname = %s AND con.contype = 'f'""", [table_name]) relations = {} for row in cursor.fetchall(): try: # row[0] and row[1] are like "{2}", so strip the curly braces. relations[int(row[0][1:-1]) - 1] = (int(row[1][1:-1]) - 1, row[2]) except ValueError: continue return relations def get_indexes(self, cursor, table_name): """ Returns a dictionary of fieldname -> infodict for the given table, where each infodict is in the format: {'primary_key': boolean representing whether it's the primary key, 'unique': boolean representing whether it's a unique index} """ # This query retrieves each index on the given table, including the # first associated field name cursor.execute(""" SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index idx, pg_catalog.pg_attribute attr WHERE c.oid = idx.indrelid AND idx.indexrelid = c2.oid AND attr.attrelid = c.oid AND attr.attnum = idx.indkey[0] AND c.relname = %s""", [table_name]) indexes = {} for row in cursor.fetchall(): # row[1] (idx.indkey) is stored in the DB as an array. It comes out as # a string of space-separated integers. This designates the field # indexes (1-based) of the fields that have indexes on the table. # Here, we skip any indexes across multiple fields. if ' ' in row[1]: continue indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]} return indexes
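These introspection hooks are normally reached through a connection object rather than instantiated directly; a sketch under that assumption:

from django.db import connection

cursor = connection.cursor()
for table in connection.introspection.get_table_list(cursor):
    description = connection.introspection.get_table_description(cursor, table)
    print([column[0] for column in description])  # column names per table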
mit
Enchufa2/ns-3-dev-git
examples/tutorial/first.py
102
2128
# /* # * This program is free software; you can redistribute it and/or modify # * it under the terms of the GNU General Public License version 2 as # * published by the Free Software Foundation; # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. # * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # */ import ns.applications import ns.core import ns.internet import ns.network import ns.point_to_point ns.core.LogComponentEnable("UdpEchoClientApplication", ns.core.LOG_LEVEL_INFO) ns.core.LogComponentEnable("UdpEchoServerApplication", ns.core.LOG_LEVEL_INFO) nodes = ns.network.NodeContainer() nodes.Create(2) pointToPoint = ns.point_to_point.PointToPointHelper() pointToPoint.SetDeviceAttribute("DataRate", ns.core.StringValue("5Mbps")) pointToPoint.SetChannelAttribute("Delay", ns.core.StringValue("2ms")) devices = pointToPoint.Install(nodes) stack = ns.internet.InternetStackHelper() stack.Install(nodes) address = ns.internet.Ipv4AddressHelper() address.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0")) interfaces = address.Assign(devices) echoServer = ns.applications.UdpEchoServerHelper(9) serverApps = echoServer.Install(nodes.Get(1)) serverApps.Start(ns.core.Seconds(1.0)) serverApps.Stop(ns.core.Seconds(10.0)) echoClient = ns.applications.UdpEchoClientHelper(interfaces.GetAddress(1), 9) echoClient.SetAttribute("MaxPackets", ns.core.UintegerValue(1)) echoClient.SetAttribute("Interval", ns.core.TimeValue(ns.core.Seconds(1.0))) echoClient.SetAttribute("PacketSize", ns.core.UintegerValue(1024)) clientApps = echoClient.Install(nodes.Get(0)) clientApps.Start(ns.core.Seconds(2.0)) clientApps.Stop(ns.core.Seconds(10.0)) ns.core.Simulator.Run() ns.core.Simulator.Destroy()
gpl-2.0
kaushik94/boto
boto/sdb/db/test_db.py
33
5431
import logging import time from datetime import datetime from boto.sdb.db.model import Model from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty from boto.sdb.db.property import DateTimeProperty, FloatProperty, ReferenceProperty from boto.sdb.db.property import PasswordProperty, ListProperty, MapProperty from boto.exception import SDBPersistenceError logging.basicConfig() log = logging.getLogger('test_db') log.setLevel(logging.DEBUG) _objects = {} # # This will eventually be moved to the boto.tests module and become a real unit test # but for now it will live here. It shows examples of each of the Property types in # use and tests the basic operations. # class TestBasic(Model): name = StringProperty() size = IntegerProperty() foo = BooleanProperty() date = DateTimeProperty() class TestFloat(Model): name = StringProperty() value = FloatProperty() class TestRequired(Model): req = StringProperty(required=True, default='foo') class TestReference(Model): ref = ReferenceProperty(reference_class=TestBasic, collection_name='refs') class TestSubClass(TestBasic): answer = IntegerProperty() class TestPassword(Model): password = PasswordProperty() class TestList(Model): name = StringProperty() nums = ListProperty(int) class TestMap(Model): name = StringProperty() map = MapProperty() class TestListReference(Model): name = StringProperty() basics = ListProperty(TestBasic) class TestAutoNow(Model): create_date = DateTimeProperty(auto_now_add=True) modified_date = DateTimeProperty(auto_now=True) class TestUnique(Model): name = StringProperty(unique=True) def test_basic(): global _objects t = TestBasic() t.name = 'simple' t.size = -42 t.foo = True t.date = datetime.now() log.debug('saving object') t.put() _objects['test_basic_t'] = t time.sleep(5) log.debug('now try retrieving it') tt = TestBasic.get_by_id(t.id) _objects['test_basic_tt'] = tt assert tt.id == t.id l = TestBasic.get_by_id([t.id]) assert len(l) == 1 assert l[0].id == t.id assert t.size == tt.size assert t.foo == tt.foo assert t.name == tt.name #assert t.date == tt.date return t def test_float(): global _objects t = TestFloat() t.name = 'float object' t.value = 98.6 log.debug('saving object') t.save() _objects['test_float_t'] = t time.sleep(5) log.debug('now try retrieving it') tt = TestFloat.get_by_id(t.id) _objects['test_float_tt'] = tt assert tt.id == t.id assert tt.name == t.name assert tt.value == t.value return t def test_required(): global _objects t = TestRequired() _objects['test_required_t'] = t t.put() return t def test_reference(t=None): global _objects if not t: t = test_basic() tt = TestReference() tt.ref = t tt.put() time.sleep(10) tt = TestReference.get_by_id(tt.id) _objects['test_reference_tt'] = tt assert tt.ref.id == t.id for o in t.refs: log.debug(o) def test_subclass(): global _objects t = TestSubClass() _objects['test_subclass_t'] = t t.name = 'a subclass' t.size = -489 t.save() def test_password(): global _objects t = TestPassword() _objects['test_password_t'] = t t.password = "foo" t.save() time.sleep(5) # Make sure it stored ok tt = TestPassword.get_by_id(t.id) _objects['test_password_tt'] = tt #Testing password equality assert tt.password == "foo" #Testing password not stored as string assert str(tt.password) != "foo" def test_list(): global _objects t = TestList() _objects['test_list_t'] = t t.name = 'a list of ints' t.nums = [1, 2, 3, 4, 5] t.put() tt = TestList.get_by_id(t.id) _objects['test_list_tt'] = tt assert tt.name == t.name for n in tt.nums: assert isinstance(n, int) def 
test_list_reference(): global _objects t = TestBasic() t.put() _objects['test_list_ref_t'] = t tt = TestListReference() tt.name = "foo" tt.basics = [t] tt.put() time.sleep(5) _objects['test_list_ref_tt'] = tt ttt = TestListReference.get_by_id(tt.id) assert ttt.basics[0].id == t.id def test_unique(): global _objects t = TestUnique() name = 'foo' + str(int(time.time())) t.name = name t.put() _objects['test_unique_t'] = t time.sleep(10) tt = TestUnique() _objects['test_unique_tt'] = tt tt.name = name try: tt.put() assert False except(SDBPersistenceError): pass def test_datetime(): global _objects t = TestAutoNow() t.put() _objects['test_datetime_t'] = t time.sleep(5) tt = TestAutoNow.get_by_id(t.id) assert tt.create_date.timetuple() == t.create_date.timetuple() def test(): log.info('test_basic') t1 = test_basic() log.info('test_required') test_required() log.info('test_reference') test_reference(t1) log.info('test_subclass') test_subclass() log.info('test_password') test_password() log.info('test_list') test_list() log.info('test_list_reference') test_list_reference() log.info("test_datetime") test_datetime() log.info('test_unique') test_unique() if __name__ == "__main__": test()
mit
benregn/itu-courses
itu/pipelines.py
1
1027
import pymongo

from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log


class MongoDBPipeline(object):
    def __init__(self):
        connection = pymongo.Connection(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT'])
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        # here we only check that no field value is null/empty,
        # but we could do any crazy validation we want
        for field in item:
            if not item[field]:
                raise DropItem(
                    "Missing %s course from %s" % (field, item['url']))
        self.collection.insert(dict(item))
        log.msg("Item written to MongoDB database %s/%s" %
                (settings['MONGODB_DB'], settings['MONGODB_COLLECTION']),
                level=log.DEBUG, spider=spider)
        return item
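The pipeline above pulls its connection details from Scrapy's settings; a sketch of the matching `settings.py` entries (the values are examples only):

ITEM_PIPELINES = ['itu.pipelines.MongoDBPipeline']

MONGODB_SERVER = 'localhost'
MONGODB_PORT = 27017
MONGODB_DB = 'itu'
MONGODB_COLLECTION = 'courses'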
mit
mnip91/proactive-component-monitoring
dev/scripts/perf/perf_graph.py
12
2516
#!/usr/bin/env python

import sys
import os
import string
import re

import numpy as np
import matplotlib.pyplot as plt


def main():
    if len(sys.argv) < 2:
        sys.exit("usage: %s <result-dir> [test-name-regex ...]" % sys.argv[0])
    dir = sys.argv[1]
    if len(sys.argv) == 2:
        dict = create_dict(dir)
        draw_graph(dict, 'all')
    else:
        for i in range(2, len(sys.argv)):
            dict = create_dict(dir, sys.argv[i])
            draw_graph(dict, sys.argv[i])


def create_dict(rootdir, match='.*'):
    pattern = re.compile(match)
    dict = {}
    for branch in os.listdir(rootdir):
        branch_dict = {}
        for test in os.listdir(os.path.join(rootdir, branch)):
            if pattern.match(test):
                file = open(os.path.join(rootdir, branch, test))
                str = file.readline()
                str = str.strip()
                start = str.find("=")
                if start != -1:
                    branch_dict[test] = round(string.atof(str[start+1:]), 2)
                else:
                    branch_dict[test] = -1.
        dict[branch] = branch_dict
    return dict


def get_all_test_name(dict):
    for branch in dict:
        return dict[branch].keys()


def get_branches(dict):
    return dict.keys()


def compare_by_branch(dict):
    def local_print(test, d):
        print test
        for t in d:
            print "\t" + t + "\t" + str(d[t])
        print

    for test in get_all_test_name(dict):
        local_dict = {}
        for branch in dict:
            local_dict[branch] = dict[branch][test]
        local_print(test, local_dict)


### Unused ###
def short_test_name(long_name):
    return long_name[long_name.rfind('.Test')+5:]


def draw_graph(dict, title):
    def autolabel(rects):
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x()+rect.get_width()/2., 1.05*height,
                    '%d' % int(height), ha='center', va='bottom')

    def set_legend(bars, branches):
        bs = ()
        for bar in bars:
            bs = bs + (bar[0],)
        ax.legend(bs, branches)

    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'b']
    branches = get_branches(dict)
    all_tests = get_all_test_name(dict)
    N = len(all_tests)
    ind = np.arange(N)
    width = 0.35

    fig = plt.figure()
    ax = fig.add_subplot(111)

    data_sets = []
    for branch in branches:
        data = ()
        for test in all_tests:
            data = data + (dict[branch].get(test, 0),)
        data_sets.append(data)

    bars = []
    counter = 0
    for data in data_sets:
        bar = ax.bar(ind + (counter*width), data, width, color=colors[counter])
        bars.append(bar)
        counter += 1

    # add some labels
    ax.set_ylabel('Perf')
    ax.set_title('Branch perf comparison for ' + title)
    ax.set_xticks(ind+width)
    ax.set_xticklabels(map(short_test_name, all_tests))

    set_legend(bars, branches)
    for bar in bars:
        autolabel(bar)
    plt.savefig(title + ".png")


if __name__ == "__main__":
    main()
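For reference, a sketch of the on-disk layout the script expects and a direct call into the helpers above (directory and test names are hypothetical):

# results/<branch>/<test> files each hold a line like "result=12.34";
# the value after '=' is what gets plotted per branch.
d = create_dict('results', match='.*TestPing.*')
compare_by_branch(d)
draw_graph(d, '.*TestPing.*')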
agpl-3.0
HonzaKral/django
django/apps/config.py
121
8077
import os from importlib import import_module from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured from django.utils._os import upath from django.utils.module_loading import module_has_submodule MODELS_MODULE_NAME = 'models' class AppConfig(object): """ Class representing a Django application and its configuration. """ def __init__(self, app_name, app_module): # Full Python path to the application eg. 'django.contrib.admin'. self.name = app_name # Root module for the application eg. <module 'django.contrib.admin' # from 'django/contrib/admin/__init__.pyc'>. self.module = app_module # The following attributes could be defined at the class level in a # subclass, hence the test-and-set pattern. # Last component of the Python path to the application eg. 'admin'. # This value must be unique across a Django project. if not hasattr(self, 'label'): self.label = app_name.rpartition(".")[2] # Human-readable name for the application eg. "Admin". if not hasattr(self, 'verbose_name'): self.verbose_name = self.label.title() # Filesystem path to the application directory eg. # u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on # Python 2 and a str on Python 3. if not hasattr(self, 'path'): self.path = self._path_from_module(app_module) # Module containing models eg. <module 'django.contrib.admin.models' # from 'django/contrib/admin/models.pyc'>. Set by import_models(). # None if the application doesn't have a models module. self.models_module = None # Mapping of lower case model names to model classes. Initially set to # None to prevent accidental access before import_models() runs. self.models = None def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.label) def _path_from_module(self, module): """Attempt to determine app's filesystem path from its module.""" # See #21874 for extended discussion of the behavior of this method in # various cases. # Convert paths to list because Python 3.3 _NamespacePath does not # support indexing. paths = list(getattr(module, '__path__', [])) if len(paths) != 1: filename = getattr(module, '__file__', None) if filename is not None: paths = [os.path.dirname(filename)] if len(paths) > 1: raise ImproperlyConfigured( "The app module %r has multiple filesystem locations (%r); " "you must configure this app with an AppConfig subclass " "with a 'path' class attribute." % (module, paths)) elif not paths: raise ImproperlyConfigured( "The app module %r has no filesystem location, " "you must configure this app with an AppConfig subclass " "with a 'path' class attribute." % (module,)) return upath(paths[0]) @classmethod def create(cls, entry): """ Factory that creates an app config from an entry in INSTALLED_APPS. """ try: # If import_module succeeds, entry is a path to an app module, # which may specify an app config class with default_app_config. # Otherwise, entry is a path to an app config class or an error. module = import_module(entry) except ImportError: # Track that importing as an app module failed. If importing as an # app config class fails too, we'll trigger the ImportError again. module = None mod_path, _, cls_name = entry.rpartition('.') # Raise the original exception when entry cannot be a path to an # app config class. if not mod_path: raise else: try: # If this works, the app module specifies an app config class. entry = module.default_app_config except AttributeError: # Otherwise, it simply uses the default app config class. 
return cls(entry, module) else: mod_path, _, cls_name = entry.rpartition('.') # If we're reaching this point, we must attempt to load the app config # class located at <mod_path>.<cls_name> mod = import_module(mod_path) try: cls = getattr(mod, cls_name) except AttributeError: if module is None: # If importing as an app module failed, that error probably # contains the most informative traceback. Trigger it again. import_module(entry) else: raise # Check for obvious errors. (This check prevents duck typing, but # it could be removed if it became a problem in practice.) if not issubclass(cls, AppConfig): raise ImproperlyConfigured( "'%s' isn't a subclass of AppConfig." % entry) # Obtain app name here rather than in AppClass.__init__ to keep # all error checking for entries in INSTALLED_APPS in one place. try: app_name = cls.name except AttributeError: raise ImproperlyConfigured( "'%s' must supply a name attribute." % entry) # Ensure app_name points to a valid module. app_module = import_module(app_name) # Entry is a path to an app config class. return cls(app_name, app_module) def check_models_ready(self): """ Raises an exception if models haven't been imported yet. """ if self.models is None: raise AppRegistryNotReady( "Models for app '%s' haven't been imported yet." % self.label) def get_model(self, model_name): """ Returns the model with the given case-insensitive model_name. Raises LookupError if no model exists with this name. """ self.check_models_ready() try: return self.models[model_name.lower()] except KeyError: raise LookupError( "App '%s' doesn't have a '%s' model." % (self.label, model_name)) def get_models(self, include_auto_created=False, include_deferred=False, include_swapped=False): """ Returns an iterable of models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models created to satisfy deferred attribute queries, - models that have been swapped out. Set the corresponding keyword argument to True to include such models. Keyword arguments aren't documented; they're a private API. """ self.check_models_ready() for model in self.models.values(): if model._deferred and not include_deferred: continue if model._meta.auto_created and not include_auto_created: continue if model._meta.swapped and not include_swapped: continue yield model def import_models(self, all_models): # Dictionary of models for this app, primarily maintained in the # 'all_models' attribute of the Apps this AppConfig is attached to. # Injected as a parameter because it gets populated when models are # imported, which might happen before populate() imports models. self.models = all_models if module_has_submodule(self.module, MODELS_MODULE_NAME): models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME) self.models_module = import_module(models_module_name) def ready(self): """ Override this method in subclasses to run code when Django starts. """
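A typical `AppConfig` subclass as consumed by `AppConfig.create()` above, wired up through the `default_app_config` convention; the app and class names are illustrative:

# polls/apps.py
from django.apps import AppConfig

class PollsConfig(AppConfig):
    name = 'polls'                  # dotted path used in INSTALLED_APPS
    verbose_name = 'Opinion polls'

    def ready(self):
        # runs once the app registry is fully populated
        from . import signals  # noqa

# polls/__init__.py
default_app_config = 'polls.apps.PollsConfig'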
bsd-3-clause
robertnishihara/ray
streaming/python/tests/test_word_count.py
1
1689
import os import ray from ray.streaming import StreamingContext def test_word_count(): ray.init(_load_code_from_local=True) ctx = StreamingContext.Builder() \ .build() ctx.read_text_file(__file__) \ .set_parallelism(1) \ .flat_map(lambda x: x.split()) \ .map(lambda x: (x, 1)) \ .key_by(lambda x: x[0]) \ .reduce(lambda old_value, new_value: (old_value[0], old_value[1] + new_value[1])) \ .filter(lambda x: "ray" not in x) \ .sink(lambda x: print("result", x)) ctx.submit("word_count") import time time.sleep(3) ray.shutdown() def test_simple_word_count(): ray.init(_load_code_from_local=True) ctx = StreamingContext.Builder() \ .build() sink_file = "/tmp/ray_streaming_test_simple_word_count.txt" if os.path.exists(sink_file): os.remove(sink_file) def sink_func(x): with open(sink_file, "a") as f: line = "{}:{},".format(x[0], x[1]) print("sink_func", line) f.write(line) ctx.from_values("a", "b", "c") \ .set_parallelism(1) \ .flat_map(lambda x: [x, x]) \ .map(lambda x: (x, 1)) \ .key_by(lambda x: x[0]) \ .reduce(lambda old_value, new_value: (old_value[0], old_value[1] + new_value[1])) \ .sink(sink_func) ctx.submit("word_count") import time time.sleep(3) ray.shutdown() with open(sink_file, "r") as f: result = f.read() assert "a:2" in result assert "b:2" in result assert "c:2" in result if __name__ == "__main__": test_word_count() test_simple_word_count()
apache-2.0
letolab/airy
airy/utils/cache.py
1
9676
""" This module contains helper functions for controlling caching. It does so by managing the "Vary" header of responses. It includes functions to patch the header of response objects directly and decorators that change functions to do that header-patching themselves. For information on the Vary header, see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44 Essentially, the "Vary" HTTP header defines which headers a cache should take into account when building its cache key. Requests with the same path but different header content for headers named in "Vary" need to get different cache keys to prevent delivery of wrong content. An example: i18n middleware would need to distinguish caches by the "Accept-language" header. """ import re import time from airy.core.conf import settings from airy.core.cache import get_cache from airy.utils.encoding import smart_str, iri_to_uri from airy.utils.http import http_date from airy.utils.hashcompat import md5_constructor from airy.utils.translation import get_language from airy.http import HttpRequest cc_delim_re = re.compile(r'\s*,\s*') def patch_cache_control(response, **kwargs): """ This function patches the Cache-Control header by adding all keyword arguments to it. The transformation is as follows: * All keyword parameter names are turned to lowercase, and underscores are converted to hyphens. * If the value of a parameter is True (exactly True, not just a true value), only the parameter name is added to the header. * All other parameters are added with their value, after applying str() to it. """ def dictitem(s): t = s.split('=', 1) if len(t) > 1: return (t[0].lower(), t[1]) else: return (t[0].lower(), True) def dictvalue(t): if t[1] is True: return t[0] else: return t[0] + '=' + smart_str(t[1]) if response.has_header('Cache-Control'): cc = cc_delim_re.split(response['Cache-Control']) cc = dict([dictitem(el) for el in cc]) else: cc = {} # If there's already a max-age header but we're being asked to set a new # max-age, use the minimum of the two ages. In practice this happens when # a decorator and a piece of middleware both operate on a given view. if 'max-age' in cc and 'max_age' in kwargs: kwargs['max_age'] = min(cc['max-age'], kwargs['max_age']) # Allow overriding private caching and vice versa if 'private' in cc and 'public' in kwargs: del cc['private'] elif 'public' in cc and 'private' in kwargs: del cc['public'] for (k, v) in kwargs.items(): cc[k.replace('_', '-')] = v cc = ', '.join([dictvalue(el) for el in cc.items()]) response['Cache-Control'] = cc def get_max_age(response): """ Returns the max-age from the response Cache-Control header as an integer (or ``None`` if it wasn't found or wasn't an integer. """ if not response.has_header('Cache-Control'): return cc = dict([_to_tuple(el) for el in cc_delim_re.split(response['Cache-Control'])]) if 'max-age' in cc: try: return int(cc['max-age']) except (ValueError, TypeError): pass def patch_response_headers(response, cache_timeout=None): """ Adds some useful headers to the given HttpResponse object: ETag, Last-Modified, Expires and Cache-Control Each header is only added if it isn't already set. cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used by default. 
""" if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS if cache_timeout < 0: cache_timeout = 0 # Can't have max-age negative if settings.USE_ETAGS and not response.has_header('ETag'): response['ETag'] = '"%s"' % md5_constructor(response.content).hexdigest() if not response.has_header('Last-Modified'): response['Last-Modified'] = http_date() if not response.has_header('Expires'): response['Expires'] = http_date(time.time() + cache_timeout) patch_cache_control(response, max_age=cache_timeout) def add_never_cache_headers(response): """ Adds headers to a response to indicate that a page should never be cached. """ patch_response_headers(response, cache_timeout=-1) def patch_vary_headers(response, newheaders): """ Adds (or updates) the "Vary" header in the given HttpResponse object. newheaders is a list of header names that should be in "Vary". Existing headers in "Vary" aren't removed. """ # Note that we need to keep the original order intact, because cache # implementations may rely on the order of the Vary contents in, say, # computing an MD5 hash. if response.has_header('Vary'): vary_headers = cc_delim_re.split(response['Vary']) else: vary_headers = [] # Use .lower() here so we treat headers as case-insensitive. existing_headers = set([header.lower() for header in vary_headers]) additional_headers = [newheader for newheader in newheaders if newheader.lower() not in existing_headers] response['Vary'] = ', '.join(vary_headers + additional_headers) def has_vary_header(response, header_query): """ Checks to see if the response has a given header name in its Vary header. """ if not response.has_header('Vary'): return False vary_headers = cc_delim_re.split(response['Vary']) existing_headers = set([header.lower() for header in vary_headers]) return header_query.lower() in existing_headers def _i18n_cache_key_suffix(request, cache_key): """If enabled, returns the cache key ending with a locale.""" if settings.USE_I18N: # first check if LocaleMiddleware or another middleware added # LANGUAGE_CODE to request, then fall back to the active language # which in turn can also fall back to settings.LANGUAGE_CODE cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language()) return cache_key def _generate_cache_key(request, method, headerlist, key_prefix): """Returns a cache key from the headers given in the header list.""" ctx = md5_constructor() for header in headerlist: value = request.META.get(header, None) if value is not None: ctx.update(value) path = md5_constructor(iri_to_uri(request.get_full_path())) cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % ( key_prefix, request.method, path.hexdigest(), ctx.hexdigest()) return _i18n_cache_key_suffix(request, cache_key) def _generate_cache_header_key(key_prefix, request): """Returns a cache key for the header cache.""" path = md5_constructor(iri_to_uri(request.get_full_path())) cache_key = 'views.decorators.cache.cache_header.%s.%s' % ( key_prefix, path.hexdigest()) return _i18n_cache_key_suffix(request, cache_key) def get_cache_key(request, key_prefix=None, method='GET', cache=None): """ Returns a cache key based on the request path and query. It can be used in the request phase because it pulls the list of headers to take into account from the global path registry and uses those to build a cache key to check against. If there is no headerlist stored, the page needs to be rebuilt, so this function returns None. 
""" if key_prefix is None: key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX cache_key = _generate_cache_header_key(key_prefix, request) if cache is None: cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS) headerlist = cache.get(cache_key, None) if headerlist is not None: return _generate_cache_key(request, method, headerlist, key_prefix) else: return None def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None): """ Learns what headers to take into account for some request path from the response object. It stores those headers in a global path registry so that later access to that path will know what headers to take into account without building the response object itself. The headers are named in the Vary header of the response, but we want to prevent response generation. The list of headers to use for cache key generation is stored in the same cache as the pages themselves. If the cache ages some data out of the cache, this just means that we have to build the response once to get at the Vary header and so at the list of headers to use for the cache key. """ if key_prefix is None: key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS cache_key = _generate_cache_header_key(key_prefix, request) if cache is None: cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS) if response.has_header('Vary'): headerlist = ['HTTP_'+header.upper().replace('-', '_') for header in cc_delim_re.split(response['Vary'])] cache.set(cache_key, headerlist, cache_timeout) return _generate_cache_key(request, request.method, headerlist, key_prefix) else: # if there is no Vary header, we still need a cache key # for the request.get_full_path() cache.set(cache_key, [], cache_timeout) return _generate_cache_key(request, request.method, [], key_prefix) def _to_tuple(s): t = s.split('=',1) if len(t) == 2: return t[0].lower(), t[1] return t[0].lower(), True
bsd-2-clause
tboyce021/home-assistant
homeassistant/components/timer/reproduce_state.py
16
2247
"""Reproduce an Timer state.""" import asyncio import logging from typing import Any, Dict, Iterable, Optional from homeassistant.const import ATTR_ENTITY_ID from homeassistant.core import Context, State from homeassistant.helpers.typing import HomeAssistantType from . import ( ATTR_DURATION, DOMAIN, SERVICE_CANCEL, SERVICE_PAUSE, SERVICE_START, STATUS_ACTIVE, STATUS_IDLE, STATUS_PAUSED, ) _LOGGER = logging.getLogger(__name__) VALID_STATES = {STATUS_IDLE, STATUS_ACTIVE, STATUS_PAUSED} async def _async_reproduce_state( hass: HomeAssistantType, state: State, *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None, ) -> None: """Reproduce a single state.""" cur_state = hass.states.get(state.entity_id) if cur_state is None: _LOGGER.warning("Unable to find entity %s", state.entity_id) return if state.state not in VALID_STATES: _LOGGER.warning( "Invalid state specified for %s: %s", state.entity_id, state.state ) return # Return if we are already at the right state. if cur_state.state == state.state and cur_state.attributes.get( ATTR_DURATION ) == state.attributes.get(ATTR_DURATION): return service_data = {ATTR_ENTITY_ID: state.entity_id} if state.state == STATUS_ACTIVE: service = SERVICE_START if ATTR_DURATION in state.attributes: service_data[ATTR_DURATION] = state.attributes[ATTR_DURATION] elif state.state == STATUS_PAUSED: service = SERVICE_PAUSE elif state.state == STATUS_IDLE: service = SERVICE_CANCEL await hass.services.async_call( DOMAIN, service, service_data, context=context, blocking=True ) async def async_reproduce_states( hass: HomeAssistantType, states: Iterable[State], *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None, ) -> None: """Reproduce Timer states.""" await asyncio.gather( *( _async_reproduce_state( hass, state, context=context, reproduce_options=reproduce_options ) for state in states ) )
apache-2.0
webbhorn/netgroups
tools/perf/util/setup.py
242
1531
#!/usr/bin/python2 from distutils.core import setup, Extension from os import getenv from distutils.command.build_ext import build_ext as _build_ext from distutils.command.install_lib import install_lib as _install_lib class build_ext(_build_ext): def finalize_options(self): _build_ext.finalize_options(self) self.build_lib = build_lib self.build_temp = build_tmp class install_lib(_install_lib): def finalize_options(self): _install_lib.finalize_options(self) self.build_dir = build_lib cflags = getenv('CFLAGS', '').split() # switch off several checks (need to be at the end of cflags list) cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ] build_lib = getenv('PYTHON_EXTBUILD_LIB') build_tmp = getenv('PYTHON_EXTBUILD_TMP') libtraceevent = getenv('LIBTRACEEVENT') liblk = getenv('LIBLK') ext_sources = [f.strip() for f in file('util/python-ext-sources') if len(f.strip()) > 0 and f[0] != '#'] perf = Extension('perf', sources = ext_sources, include_dirs = ['util/include'], extra_compile_args = cflags, extra_objects = [libtraceevent, liblk], ) setup(name='perf', version='0.1', description='Interface with the Linux profiling infrastructure', author='Arnaldo Carvalho de Melo', author_email='acme@redhat.com', license='GPLv2', url='http://perf.wiki.kernel.org', ext_modules=[perf], cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
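The script expects perf's Makefile to export its build paths; a sketch of a standalone invocation with stand-in values:

import os
import subprocess

env = dict(os.environ,
           PYTHON_EXTBUILD_LIB='/tmp/perf-ext/lib/',
           PYTHON_EXTBUILD_TMP='/tmp/perf-ext/tmp/',
           LIBTRACEEVENT='../lib/traceevent/libtraceevent.a',
           LIBLK='../lib/lk/liblk.a')
subprocess.check_call(['python2', 'util/setup.py', 'build_ext'], env=env)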
gpl-2.0
837468220/python-for-android
python3-alpha/python3-src/Lib/test/test_sys.py
47
32005
import unittest, test.support import sys, io, os import struct import subprocess import textwrap import warnings import operator import codecs # count the number of test runs, used to create unique # strings to intern in test_intern() numruns = 0 try: import threading except ImportError: threading = None class SysModuleTest(unittest.TestCase): def setUp(self): self.orig_stdout = sys.stdout self.orig_stderr = sys.stderr self.orig_displayhook = sys.displayhook def tearDown(self): sys.stdout = self.orig_stdout sys.stderr = self.orig_stderr sys.displayhook = self.orig_displayhook test.support.reap_children() def test_original_displayhook(self): import builtins out = io.StringIO() sys.stdout = out dh = sys.__displayhook__ self.assertRaises(TypeError, dh) if hasattr(builtins, "_"): del builtins._ dh(None) self.assertEqual(out.getvalue(), "") self.assertTrue(not hasattr(builtins, "_")) dh(42) self.assertEqual(out.getvalue(), "42\n") self.assertEqual(builtins._, 42) del sys.stdout self.assertRaises(RuntimeError, dh, 42) def test_lost_displayhook(self): del sys.displayhook code = compile("42", "<string>", "single") self.assertRaises(RuntimeError, eval, code) def test_custom_displayhook(self): def baddisplayhook(obj): raise ValueError sys.displayhook = baddisplayhook code = compile("42", "<string>", "single") self.assertRaises(ValueError, eval, code) def test_original_excepthook(self): err = io.StringIO() sys.stderr = err eh = sys.__excepthook__ self.assertRaises(TypeError, eh) try: raise ValueError(42) except ValueError as exc: eh(*sys.exc_info()) self.assertTrue(err.getvalue().endswith("ValueError: 42\n")) def test_excepthook(self): with test.support.captured_output("stderr") as stderr: sys.excepthook(1, '1', 1) self.assertTrue("TypeError: print_exception(): Exception expected for " \ "value, str found" in stderr.getvalue()) # FIXME: testing the code for a lost or replaced excepthook in # Python/pythonrun.c::PyErr_PrintEx() is tricky. 
def test_exit(self): self.assertRaises(TypeError, sys.exit, 42, 42) # call without argument try: sys.exit(0) except SystemExit as exc: self.assertEqual(exc.code, 0) except: self.fail("wrong exception") else: self.fail("no exception") # call with tuple argument with one entry # entry will be unpacked try: sys.exit(42) except SystemExit as exc: self.assertEqual(exc.code, 42) except: self.fail("wrong exception") else: self.fail("no exception") # call with integer argument try: sys.exit((42,)) except SystemExit as exc: self.assertEqual(exc.code, 42) except: self.fail("wrong exception") else: self.fail("no exception") # call with string argument try: sys.exit("exit") except SystemExit as exc: self.assertEqual(exc.code, "exit") except: self.fail("wrong exception") else: self.fail("no exception") # call with tuple argument with two entries try: sys.exit((17, 23)) except SystemExit as exc: self.assertEqual(exc.code, (17, 23)) except: self.fail("wrong exception") else: self.fail("no exception") # test that the exit machinery handles SystemExits properly rc = subprocess.call([sys.executable, "-c", "raise SystemExit(47)"]) self.assertEqual(rc, 47) def check_exit_message(code, expected, env=None): process = subprocess.Popen([sys.executable, "-c", code], stderr=subprocess.PIPE, env=env) stdout, stderr = process.communicate() self.assertEqual(process.returncode, 1) self.assertTrue(stderr.startswith(expected), "%s doesn't start with %s" % (ascii(stderr), ascii(expected))) # test that stderr buffer if flushed before the exit message is written # into stderr check_exit_message( r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")', b"unflushed,message") # test that the exit message is written with backslashreplace error # handler to stderr check_exit_message( r'import sys; sys.exit("surrogates:\uDCFF")', b"surrogates:\\udcff") # test that the unicode message is encoded to the stderr encoding # instead of the default encoding (utf8) env = os.environ.copy() env['PYTHONIOENCODING'] = 'latin-1' check_exit_message( r'import sys; sys.exit("h\xe9")', b"h\xe9", env=env) def test_getdefaultencoding(self): self.assertRaises(TypeError, sys.getdefaultencoding, 42) # can't check more than the type, as the user might have changed it self.assertIsInstance(sys.getdefaultencoding(), str) # testing sys.settrace() is done in test_sys_settrace.py # testing sys.setprofile() is done in test_sys_setprofile.py def test_setcheckinterval(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") self.assertRaises(TypeError, sys.setcheckinterval) orig = sys.getcheckinterval() for n in 0, 100, 120, orig: # orig last to restore starting state sys.setcheckinterval(n) self.assertEqual(sys.getcheckinterval(), n) @unittest.skipUnless(threading, 'Threading required for this test.') def test_switchinterval(self): self.assertRaises(TypeError, sys.setswitchinterval) self.assertRaises(TypeError, sys.setswitchinterval, "a") self.assertRaises(ValueError, sys.setswitchinterval, -1.0) self.assertRaises(ValueError, sys.setswitchinterval, 0.0) orig = sys.getswitchinterval() # sanity check self.assertTrue(orig < 0.5, orig) try: for n in 0.00001, 0.05, 3.0, orig: sys.setswitchinterval(n) self.assertAlmostEqual(sys.getswitchinterval(), n) finally: sys.setswitchinterval(orig) def test_recursionlimit(self): self.assertRaises(TypeError, sys.getrecursionlimit, 42) oldlimit = sys.getrecursionlimit() self.assertRaises(TypeError, sys.setrecursionlimit) self.assertRaises(ValueError, sys.setrecursionlimit, -42) 
sys.setrecursionlimit(10000) self.assertEqual(sys.getrecursionlimit(), 10000) sys.setrecursionlimit(oldlimit) @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(), 'fatal error if run with a trace function') def test_recursionlimit_recovery(self): # NOTE: this test is slightly fragile in that it depends on the current # recursion count when executing the test being low enough so as to # trigger the recursion recovery detection in the _Py_MakeEndRecCheck # macro (see ceval.h). oldlimit = sys.getrecursionlimit() def f(): f() try: for i in (50, 1000): # Issue #5392: stack overflow after hitting recursion limit twice sys.setrecursionlimit(i) self.assertRaises(RuntimeError, f) self.assertRaises(RuntimeError, f) finally: sys.setrecursionlimit(oldlimit) def test_recursionlimit_fatalerror(self): # A fatal error occurs if a second recursion limit is hit when recovering # from a first one. if os.name == "nt": raise unittest.SkipTest( "under Windows, test would generate a spurious crash dialog") code = textwrap.dedent(""" import sys def f(): try: f() except RuntimeError: f() sys.setrecursionlimit(%d) f()""") for i in (50, 1000): sub = subprocess.Popen([sys.executable, '-c', code % i], stderr=subprocess.PIPE) err = sub.communicate()[1] self.assertTrue(sub.returncode, sub.returncode) self.assertTrue( b"Fatal Python error: Cannot recover from stack overflow" in err, err) def test_getwindowsversion(self): # Raise SkipTest if sys doesn't have getwindowsversion attribute test.support.get_attribute(sys, "getwindowsversion") v = sys.getwindowsversion() self.assertEqual(len(v), 5) self.assertIsInstance(v[0], int) self.assertIsInstance(v[1], int) self.assertIsInstance(v[2], int) self.assertIsInstance(v[3], int) self.assertIsInstance(v[4], str) self.assertRaises(IndexError, operator.getitem, v, 5) self.assertIsInstance(v.major, int) self.assertIsInstance(v.minor, int) self.assertIsInstance(v.build, int) self.assertIsInstance(v.platform, int) self.assertIsInstance(v.service_pack, str) self.assertIsInstance(v.service_pack_minor, int) self.assertIsInstance(v.service_pack_major, int) self.assertIsInstance(v.suite_mask, int) self.assertIsInstance(v.product_type, int) self.assertEqual(v[0], v.major) self.assertEqual(v[1], v.minor) self.assertEqual(v[2], v.build) self.assertEqual(v[3], v.platform) self.assertEqual(v[4], v.service_pack) # This is how platform.py calls it. Make sure tuple # still has 5 elements maj, min, buildno, plat, csd = sys.getwindowsversion() def test_call_tracing(self): self.assertRaises(TypeError, sys.call_tracing, type, 2) def test_dlopenflags(self): if hasattr(sys, "setdlopenflags"): self.assertTrue(hasattr(sys, "getdlopenflags")) self.assertRaises(TypeError, sys.getdlopenflags, 42) oldflags = sys.getdlopenflags() self.assertRaises(TypeError, sys.setdlopenflags) sys.setdlopenflags(oldflags+1) self.assertEqual(sys.getdlopenflags(), oldflags+1) sys.setdlopenflags(oldflags) def test_refcount(self): # n here must be a global in order for this test to pass while # tracing with a python function. Tracing calls PyFrame_FastToLocals # which will add a copy of any locals to the frame object, causing # the reference count to increase by 2 instead of 1. 
global n self.assertRaises(TypeError, sys.getrefcount) c = sys.getrefcount(None) n = None self.assertEqual(sys.getrefcount(None), c+1) del n self.assertEqual(sys.getrefcount(None), c) if hasattr(sys, "gettotalrefcount"): self.assertIsInstance(sys.gettotalrefcount(), int) def test_getframe(self): self.assertRaises(TypeError, sys._getframe, 42, 42) self.assertRaises(ValueError, sys._getframe, 2000000000) self.assertTrue( SysModuleTest.test_getframe.__code__ \ is sys._getframe().f_code ) # sys._current_frames() is a CPython-only gimmick. def test_current_frames(self): have_threads = True try: import _thread except ImportError: have_threads = False if have_threads: self.current_frames_with_threads() else: self.current_frames_without_threads() # Test sys._current_frames() in a WITH_THREADS build. @test.support.reap_threads def current_frames_with_threads(self): import threading, _thread import traceback # Spawn a thread that blocks at a known place. Then the main # thread does sys._current_frames(), and verifies that the frames # returned make sense. entered_g = threading.Event() leave_g = threading.Event() thread_info = [] # the thread's id def f123(): g456() def g456(): thread_info.append(_thread.get_ident()) entered_g.set() leave_g.wait() t = threading.Thread(target=f123) t.start() entered_g.wait() # At this point, t has finished its entered_g.set(), although it's # impossible to guess whether it's still on that line or has moved on # to its leave_g.wait(). self.assertEqual(len(thread_info), 1) thread_id = thread_info[0] d = sys._current_frames() main_id = _thread.get_ident() self.assertIn(main_id, d) self.assertIn(thread_id, d) # Verify that the captured main-thread frame is _this_ frame. frame = d.pop(main_id) self.assertTrue(frame is sys._getframe()) # Verify that the captured thread frame is blocked in g456, called # from f123. This is a litte tricky, since various bits of # threading.py are also in the thread's call stack. frame = d.pop(thread_id) stack = traceback.extract_stack(frame) for i, (filename, lineno, funcname, sourceline) in enumerate(stack): if funcname == "f123": break else: self.fail("didn't find f123() on thread's call stack") self.assertEqual(sourceline, "g456()") # And the next record must be for g456(). filename, lineno, funcname, sourceline = stack[i+1] self.assertEqual(funcname, "g456") self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"]) # Reap the spawned thread. leave_g.set() t.join() # Test sys._current_frames() when thread support doesn't exist. def current_frames_without_threads(self): # Not much happens here: there is only one thread, with artificial # "thread id" 0. 
d = sys._current_frames() self.assertEqual(len(d), 1) self.assertIn(0, d) self.assertTrue(d[0] is sys._getframe()) def test_attributes(self): self.assertIsInstance(sys.api_version, int) self.assertIsInstance(sys.argv, list) self.assertIn(sys.byteorder, ("little", "big")) self.assertIsInstance(sys.builtin_module_names, tuple) self.assertIsInstance(sys.copyright, str) self.assertIsInstance(sys.exec_prefix, str) self.assertIsInstance(sys.executable, str) self.assertEqual(len(sys.float_info), 11) self.assertEqual(sys.float_info.radix, 2) self.assertEqual(len(sys.int_info), 2) self.assertTrue(sys.int_info.bits_per_digit % 5 == 0) self.assertTrue(sys.int_info.sizeof_digit >= 1) self.assertEqual(type(sys.int_info.bits_per_digit), int) self.assertEqual(type(sys.int_info.sizeof_digit), int) self.assertIsInstance(sys.hexversion, int) self.assertEqual(len(sys.hash_info), 5) self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width) # sys.hash_info.modulus should be a prime; we do a quick # probable primality test (doesn't exclude the possibility of # a Carmichael number) for x in range(1, 100): self.assertEqual( pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus), 1, "sys.hash_info.modulus {} is a non-prime".format( sys.hash_info.modulus) ) self.assertIsInstance(sys.hash_info.inf, int) self.assertIsInstance(sys.hash_info.nan, int) self.assertIsInstance(sys.hash_info.imag, int) self.assertIsInstance(sys.maxsize, int) self.assertIsInstance(sys.maxunicode, int) self.assertIsInstance(sys.platform, str) self.assertIsInstance(sys.prefix, str) self.assertIsInstance(sys.version, str) vi = sys.version_info self.assertIsInstance(vi[:], tuple) self.assertEqual(len(vi), 5) self.assertIsInstance(vi[0], int) self.assertIsInstance(vi[1], int) self.assertIsInstance(vi[2], int) self.assertIn(vi[3], ("alpha", "beta", "candidate", "final")) self.assertIsInstance(vi[4], int) self.assertIsInstance(vi.major, int) self.assertIsInstance(vi.minor, int) self.assertIsInstance(vi.micro, int) self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final")) self.assertIsInstance(vi.serial, int) self.assertEqual(vi[0], vi.major) self.assertEqual(vi[1], vi.minor) self.assertEqual(vi[2], vi.micro) self.assertEqual(vi[3], vi.releaselevel) self.assertEqual(vi[4], vi.serial) self.assertTrue(vi > (1,0,0)) self.assertIsInstance(sys.float_repr_style, str) self.assertIn(sys.float_repr_style, ('short', 'legacy')) if not sys.platform.startswith('win'): self.assertIsInstance(sys.abiflags, str) def test_43581(self): # Can't use sys.stdout, as this is a StringIO object when # the test runs under regrtest. self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding) def test_intern(self): global numruns numruns += 1 self.assertRaises(TypeError, sys.intern) s = "never interned before" + str(numruns) self.assertTrue(sys.intern(s) is s) s2 = s.swapcase().swapcase() self.assertTrue(sys.intern(s2) is s) # Subclasses of string can't be interned, because they # provide too much opportunity for insane things to happen. # We don't want them in the interned dict and if they aren't # actually interned, we don't want to create the appearance # that they are by allowing intern() to succeed. 
        class S(str):
            def __hash__(self):
                return 123

        self.assertRaises(TypeError, sys.intern, S("abc"))

    def test_sys_flags(self):
        self.assertTrue(sys.flags)
        attrs = ("debug", "division_warning", "inspect", "interactive",
                 "optimize", "dont_write_bytecode", "no_user_site", "no_site",
                 "ignore_environment", "verbose", "bytes_warning", "quiet")
        for attr in attrs:
            self.assertTrue(hasattr(sys.flags, attr), attr)
            self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
        self.assertTrue(repr(sys.flags))
        self.assertEqual(len(sys.flags), len(attrs))

    def test_clear_type_cache(self):
        sys._clear_type_cache()

    def test_ioencoding(self):
        env = dict(os.environ)

        # Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
        # not representable in ASCII.

        env["PYTHONIOENCODING"] = "cp424"
        p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                             stdout = subprocess.PIPE, env=env)
        out = p.communicate()[0].strip()
        self.assertEqual(out, "\xa2\n".encode("cp424"))

        env["PYTHONIOENCODING"] = "ascii:replace"
        p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                             stdout = subprocess.PIPE, env=env)
        out = p.communicate()[0].strip()
        self.assertEqual(out, b'?')

    def test_executable(self):
        # Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a nonexistent program name and Python is unable to
        # retrieve the real program name.
        # For a normal installation, it should work without 'cwd'
        # argument. For test runs in the build directory, see #7774.
        python_dir = os.path.dirname(os.path.realpath(sys.executable))
        p = subprocess.Popen(
            ["nonexistent", "-c",
             'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
            executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
        stdout = p.communicate()[0]
        executable = stdout.strip().decode("ASCII")
        p.wait()
        self.assertIn(executable,
                      ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])

    def check_fsencoding(self, fs_encoding, expected=None):
        self.assertIsNotNone(fs_encoding)
        codecs.lookup(fs_encoding)
        if expected:
            self.assertEqual(fs_encoding, expected)

    def test_getfilesystemencoding(self):
        fs_encoding = sys.getfilesystemencoding()
        if sys.platform == 'darwin':
            expected = 'utf-8'
        elif sys.platform == 'win32':
            expected = 'mbcs'
        else:
            expected = None
        self.check_fsencoding(fs_encoding, expected)


class SizeofTest(unittest.TestCase):

    TPFLAGS_HAVE_GC = 1<<14
    TPFLAGS_HEAPTYPE = 1<<9

    def setUp(self):
        self.c = len(struct.pack('c', b' '))
        self.H = len(struct.pack('H', 0))
        self.i = len(struct.pack('i', 0))
        self.l = len(struct.pack('l', 0))
        self.P = len(struct.pack('P', 0))
        # due to missing size_t information from struct, it is assumed that
        # sizeof(Py_ssize_t) = sizeof(void*)
        self.header = 'PP'
        self.vheader = self.header + 'P'
        if hasattr(sys, "gettotalrefcount"):
            self.header += '2P'
            self.vheader += '2P'
        self.longdigit = sys.int_info.sizeof_digit
        import _testcapi
        self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
        self.file = open(test.support.TESTFN, 'wb')

    def tearDown(self):
        self.file.close()
        test.support.unlink(test.support.TESTFN)

    def check_sizeof(self, o, size):
        result = sys.getsizeof(o)
        # add GC header size
        if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
           ((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
            size += self.gc_headsize
        msg = 'wrong size for %s: got %d, expected %d' \
              % (type(o), result, size)
        self.assertEqual(result, size, msg)

    def calcsize(self, fmt):
        """Wrapper around struct.calcsize which enforces the alignment of the
        end of a structure to the alignment requirement of a pointer.

        Note: This wrapper should only be used if a pointer member is included
        and no member with a size larger than a pointer exists.

        """
        return struct.calcsize(fmt + '0P')

    def test_gc_head_size(self):
        # Check that the gc header size is added to objects tracked by the gc.
        h = self.header
        vh = self.vheader
        size = self.calcsize
        gc_header_size = self.gc_headsize
        # bool objects are not gc tracked
        self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
        # but lists are
        self.assertEqual(sys.getsizeof([]), size(vh + 'PP') + gc_header_size)

    def test_default(self):
        h = self.header
        vh = self.vheader
        size = self.calcsize
        self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
        self.assertEqual(sys.getsizeof(True, -1), size(vh) + self.longdigit)

    def test_objecttypes(self):
        # check all types defined in Objects/
        h = self.header
        vh = self.vheader
        size = self.calcsize
        check = self.check_sizeof
        # bool
        check(True, size(vh) + self.longdigit)
        # buffer
        # XXX
        # builtin_function_or_method
        check(len, size(h + '3P'))
        # bytearray
        samples = [b'', b'u'*100000]
        for sample in samples:
            x = bytearray(sample)
            check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
        # bytearray_iterator
        check(iter(bytearray()), size(h + 'PP'))
        # cell
        def get_cell():
            x = 42
            def inner():
                return x
            return inner
        check(get_cell().__closure__[0], size(h + 'P'))
        # code
        check(get_cell().__code__, size(h + '5i8Pi3P'))
        # complex
        check(complex(0,1), size(h + '2d'))
        # method_descriptor (descriptor object)
        check(str.lower, size(h + '2PP'))
        # classmethod_descriptor (descriptor object)
        # XXX
        # member_descriptor (descriptor object)
        import datetime
        check(datetime.timedelta.days, size(h + '2PP'))
        # getset_descriptor (descriptor object)
        import collections
        check(collections.defaultdict.default_factory, size(h + '2PP'))
        # wrapper_descriptor (descriptor object)
        check(int.__add__, size(h + '2P2P'))
        # method-wrapper (descriptor object)
        check({}.__iter__, size(h + '2P'))
        # dict
        check({}, size(h + '3P2P' + 8*'P2P'))
        longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
        check(longdict, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
        # dictionary-keyiterator
        check({}.keys(), size(h + 'P'))
        # dictionary-valueiterator
        check({}.values(), size(h + 'P'))
        # dictionary-itemiterator
        check({}.items(), size(h + 'P'))
        # dictproxy
        class C(object): pass
        check(C.__dict__, size(h + 'P'))
        # BaseException
        check(BaseException(), size(h + '5P'))
        # UnicodeEncodeError
        check(UnicodeEncodeError("", "", 0, 0, ""), size(h + '5P 2P2PP'))
        # UnicodeDecodeError
        # XXX
        #     check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
        # UnicodeTranslateError
        check(UnicodeTranslateError("", 0, 1, ""), size(h + '5P 2P2PP'))
        # ellipsis
        check(Ellipsis, size(h + ''))
        # EncodingMap
        import codecs, encodings.iso8859_3
        x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
        check(x, size(h + '32B2iB'))
        # enumerate
        check(enumerate([]), size(h + 'l3P'))
        # reverse
        check(reversed(''), size(h + 'PP'))
        # float
        check(float(0), size(h + 'd'))
        # sys.floatinfo
        check(sys.float_info, size(vh) + self.P * len(sys.float_info))
        # frame
        import inspect
        CO_MAXBLOCKS = 20
        x = inspect.currentframe()
        ncells = len(x.f_code.co_cellvars)
        nfrees = len(x.f_code.co_freevars)
        extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
                 ncells + nfrees - 1
        check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
        # function
        def func(): pass
        check(func, size(h + '11P'))
        class c():
            @staticmethod
            def foo():
                pass
            @classmethod
            def bar(cls):
                pass
            # staticmethod
            check(foo, size(h + 'P'))
            # classmethod
            check(bar, size(h + 'P'))
        # generator
        def get_gen(): yield 1
        check(get_gen(), size(h + 'Pi2P'))
        # iterator
        check(iter('abc'), size(h + 'lP'))
        # callable-iterator
        import re
        check(re.finditer('',''), size(h + '2P'))
        # list
        samples = [[], [1,2,3], ['1', '2', '3']]
        for sample in samples:
            check(sample, size(vh + 'PP') + len(sample)*self.P)
        # sortwrapper (list)
        # XXX
        # cmpwrapper (list)
        # XXX
        # listiterator (list)
        check(iter([]), size(h + 'lP'))
        # listreverseiterator (list)
        check(reversed([]), size(h + 'lP'))
        # long
        check(0, size(vh))
        check(1, size(vh) + self.longdigit)
        check(-1, size(vh) + self.longdigit)
        PyLong_BASE = 2**sys.int_info.bits_per_digit
        check(int(PyLong_BASE), size(vh) + 2*self.longdigit)
        check(int(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
        check(int(PyLong_BASE**2), size(vh) + 3*self.longdigit)
        # memory
        check(memoryview(b''), size(h + 'PP2P2i7P'))
        # module
        check(unittest, size(h + '3P'))
        # None
        check(None, size(h + ''))
        # NotImplementedType
        check(NotImplemented, size(h))
        # object
        check(object(), size(h + ''))
        # property (descriptor object)
        class C(object):
            def getx(self): return self.__x
            def setx(self, value): self.__x = value
            def delx(self): del self.__x
            x = property(getx, setx, delx, "")
            check(x, size(h + '4Pi'))
        # PyCapsule
        # XXX
        # rangeiterator
        check(iter(range(1)), size(h + '4l'))
        # reverse
        check(reversed(''), size(h + 'PP'))
        # range
        check(range(1), size(h + '4P'))
        check(range(66000), size(h + '4P'))
        # set
        # frozenset
        PySet_MINSIZE = 8
        samples = [[], range(10), range(50)]
        s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
        for sample in samples:
            minused = len(sample)
            if minused == 0: tmp = 1
            # the computation of minused is actually a bit more complicated
            # but this suffices for the sizeof test
            minused = minused*2
            newsize = PySet_MINSIZE
            while newsize <= minused:
                newsize = newsize << 1
            if newsize <= 8:
                check(set(sample), s)
                check(frozenset(sample), s)
            else:
                check(set(sample), s + newsize*struct.calcsize('lP'))
                check(frozenset(sample), s + newsize*struct.calcsize('lP'))
        # setiterator
        check(iter(set()), size(h + 'P3P'))
        # slice
        check(slice(0), size(h + '3P'))
        # super
        check(super(int), size(h + '3P'))
        # tuple
        check((), size(vh))
        check((1,2,3), size(vh) + 3*self.P)
        # type
        # (PyTypeObject + PyNumberMethods + PyMappingMethods +
        #  PySequenceMethods + PyBufferProcs)
        s = size(vh + 'P2P15Pl4PP9PP11PI') + size('16Pi17P 3P 10P 2P 2P')
        check(int, s)
        # class
        class newstyleclass(object): pass
        check(newstyleclass, s)
        # unicode
        usize = len('\0'.encode('unicode-internal'))
        samples = ['', '1'*100]
        # we need to test for both sizes, because we don't know if the string
        # has been cached
        for s in samples:
            basicsize = size(h + 'PPPiP') + usize * (len(s) + 1)
            check(s, basicsize)
        # weakref
        import weakref
        check(weakref.ref(int), size(h + '2Pl2P'))
        # weakproxy
        # XXX
        # weakcallableproxy
        check(weakref.proxy(int), size(h + '2Pl2P'))

    def test_pythontypes(self):
        # check all types defined in Python/
        h = self.header
        vh = self.vheader
        size = self.calcsize
        check = self.check_sizeof
        # _ast.AST
        import _ast
        check(_ast.AST(), size(h + ''))
        # imp.NullImporter
        import imp
        check(imp.NullImporter(self.file.name), size(h + ''))
        try:
            raise TypeError
        except TypeError:
            tb = sys.exc_info()[2]
            # traceback
            if tb != None:
                check(tb, size(h + '2P2i'))
        # symtable entry
        # XXX
        # sys.flags
        check(sys.flags, size(vh) + self.P * len(sys.flags))


def test_main():
    test.support.run_unittest(SysModuleTest, SizeofTest)

if __name__ == "__main__":
    test_main()
apache-2.0
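A minimal standalone sketch of the PYTHONIOENCODING technique exercised by test_ioencoding in the record above (assuming only a CPython 3 interpreter reachable as sys.executable and the stdlib cp424 codec, both of which the test itself relies on):

import os
import subprocess
import sys

# Force the child interpreter's stdout encoding and compare raw bytes:
# U+00A2 (the cent sign) encodes to 0x4A in CP424, so the effect is
# observable without touching the parent process's locale.
env = dict(os.environ, PYTHONIOENCODING="cp424")
proc = subprocess.Popen([sys.executable, "-c", "print(chr(0xa2))"],
                        stdout=subprocess.PIPE, env=env)
out = proc.communicate()[0].strip()
assert out == "\xa2\n".encode("cp424")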
znick/anytask
anytask/users/models.py
1
9320
# -*- coding: utf-8 -*-

import logging
import os

from courses.models import Course
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from groups.models import Group
from mail.models import Message
from users.model_user_status import UserStatus
from years.common import get_current_year

from anytask.storage import OverwriteStorage

logger = logging.getLogger('django.request')


def get_upload_path(instance, filename):
    return os.path.join('images', 'user_%d' % instance.user.id, filename)


class UserProfile(models.Model):
    user = models.OneToOneField(User, db_index=True, null=False, blank=False, unique=True, related_name='profile')
    middle_name = models.CharField(max_length=128, db_index=True, null=True, blank=True)
    user_status = models.ManyToManyField(UserStatus, db_index=True, blank=True, related_name='users_by_status')

    avatar = models.ImageField('profile picture', upload_to=get_upload_path, blank=True, null=True,
                               storage=OverwriteStorage())
    birth_date = models.DateField(blank=True, null=True)

    info = models.TextField(default="", blank=True, null=True)

    phone = models.CharField(max_length=128, null=True, blank=True)
    city_of_residence = models.CharField(max_length=191, null=True, blank=True)

    university = models.CharField(max_length=191, null=True, blank=True)
    university_in_process = models.BooleanField(null=False, blank=False, default=False)
    university_class = models.CharField(max_length=191, null=True, blank=True)
    university_department = models.CharField(max_length=191, null=True, blank=True)
    university_year_end = models.CharField(max_length=191, null=True, blank=True)

    additional_info = models.TextField(null=True, blank=True)

    unit = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    position = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    academic_degree = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    academic_title = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)

    show_email = models.BooleanField(db_index=False, null=False, blank=False, default=True)
    send_my_own_events = models.BooleanField(db_index=False, null=False, blank=False, default=False)

    unread_messages = models.ManyToManyField(Message, blank=True, related_name='unread_messages')
    deleted_messages = models.ManyToManyField(Message, blank=True, related_name='deleted_messages')
    send_notify_messages = models.ManyToManyField(Message, blank=True, related_name='send_notify_messages')

    added_time = models.DateTimeField(auto_now_add=True)  # remove default=timezone.now
    update_time = models.DateTimeField(auto_now=True)  # remove default=timezone.now

    updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)

    login_via_yandex = models.BooleanField(db_index=False, null=False, blank=False, default=False)

    ya_uid = models.IntegerField(null=True, blank=True)
    ya_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)

    ya_contest_uid = models.CharField(max_length=191, null=True, blank=True)
    ya_contest_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    ya_contest_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)

    ya_passport_uid = models.CharField(max_length=191, null=True, blank=True)
    ya_passport_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    ya_passport_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    ya_passport_email = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)

    telegram_uid = models.IntegerField(default=None, null=True, blank=True)
    notify_in_telegram = models.BooleanField(default=False, null=False, blank=False)

    language = models.CharField(default="ru", max_length=128, unique=False, null=True, blank=True)
    time_zone = models.TextField(null=False, blank=False, default='Europe/Moscow')
    location = models.TextField(null=True, blank=True, default="")

    def is_current_year_student(self):
        return Group.objects.filter(year=get_current_year()).filter(students=self.user).count() > 0

    def __unicode__(self):
        return unicode(self.user)

    def is_active(self):
        for status in self.user_status.all():
            if status.tag == 'not_active' or status.tag == 'academic':
                return False
        return True

    def set_status(self, new_status):
        if not isinstance(new_status, UserStatus):
            new_status = UserStatus.objects.get(id=new_status)

        if new_status.type:
            self.user_status.remove(*self.user_status.filter(type=new_status.type))

        self.user_status.add(new_status)

    def get_unread_count(self):
        return self.unread_messages.exclude(id__in=self.deleted_messages.all()).count()

    def can_sync_contest(self):
        for course in Course.objects.filter(is_active=True):
            if course.get_user_group(self.user) and course.send_to_contest_from_users:
                return True
        return False


class UserProfileLog(models.Model):
    user = models.ForeignKey(User, db_index=True, null=False, blank=False, related_name='profiles_logs_by_user')
    middle_name = models.CharField(max_length=128, db_index=True, null=True, blank=True)
    user_status = models.ManyToManyField(UserStatus, db_index=True, blank=True)

    avatar = models.ImageField('profile picture', upload_to=get_upload_path, blank=True, null=True,
                               storage=OverwriteStorage())
    birth_date = models.DateField(blank=True, null=True)

    info = models.TextField(default="", blank=True, null=True)

    phone = models.CharField(max_length=128, null=True, blank=True)
    city_of_residence = models.CharField(max_length=191, null=True, blank=True)

    university = models.CharField(max_length=191, null=True, blank=True)
    university_in_process = models.BooleanField(null=False, blank=False, default=False)
    university_class = models.CharField(max_length=50, null=True, blank=True)
    university_department = models.CharField(max_length=191, null=True, blank=True)
    university_year_end = models.CharField(max_length=20, null=True, blank=True)

    additional_info = models.TextField(null=True, blank=True)

    unit = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    position = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    academic_degree = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    academic_title = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)

    show_email = models.BooleanField(db_index=False, null=False, blank=False, default=True)
    send_my_own_events = models.BooleanField(db_index=False, null=False, blank=False, default=False)

    unread_messages = models.ManyToManyField(Message, blank=True, related_name='log_unread_messages')
    deleted_messages = models.ManyToManyField(Message, blank=True, related_name='log_deleted_messages')
    send_notify_messages = models.ManyToManyField(Message, blank=True, related_name='log_send_notify_messages')

    added_time = models.DateTimeField(auto_now_add=True)  # remove default=timezone.now
    update_time = models.DateTimeField(auto_now=True)  # remove default=timezone.now

    login_via_yandex = models.BooleanField(db_index=False, null=False, blank=False, default=True)

    ya_uid = models.IntegerField(null=True, blank=True)
    ya_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)

    ya_contest_uid = models.IntegerField(null=True, blank=True)
    ya_contest_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    ya_contest_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)

    ya_passport_uid = models.IntegerField(null=True, blank=True)
    ya_passport_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    ya_passport_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
    ya_passport_email = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)

    telegram_uid = models.IntegerField(default=None, null=True, blank=True)
    notify_in_telegram = models.BooleanField(default=False, null=False, blank=False)

    language = models.CharField(default="ru", max_length=128, unique=False, null=True, blank=True)

    updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)

    def is_current_year_student(self):
        return Group.objects.filter(year=get_current_year()).filter(students=self.user).count() > 0

    def __unicode__(self):
        return unicode(self.user)


def create_user_profile(sender, instance, created, **kwargs):
    if created:
        UserProfile.objects.create(user=instance)


post_save.connect(create_user_profile, sender=User)
mit
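A minimal usage sketch of the post_save wiring in the record above (assuming a configured Django project with these apps installed; the username is hypothetical data): creating a User fires create_user_profile, so the profile is reachable immediately through related_name='profile'.

from django.contrib.auth.models import User

# The post_save receiver creates the UserProfile automatically.
user = User.objects.create_user(username="alice")  # hypothetical data

profile = user.profile                  # created by the signal, not by us
assert profile.is_active()              # no 'not_active'/'academic' status yet
assert profile.get_unread_count() == 0  # fresh profiles have no unread mail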
QQuick/Transcrypt
transcrypt/modules/org/transcrypt/autotester/__init__.py
1
12645
# First run a test from the command prompt, generating an HTML file.
# The output of the test is stored in a DIV.
# Also the script is automatically included in the HTML file.
# Loading the HTML file will run the script.
# This will compare the output of the script running in the browser to the output in the DIV.
# If those two match, the test reports OK, else it reports failure.

from org.transcrypt.stubs.browser import __main__, __envir__, __pragma__
from org.transcrypt.autotester.html import HTMLGenerator, DataConverter, JSTesterUI, itemsAreEqual

# Don't import __envir__ from __base__ since it will overwrite __buildin__.__envir__ in the browser
# Import from stubs will be skipped in the browser
# ... The ice is a bit thin here

__pragma__ ('nokwargs')

import itertools


def getFileLocation(ancestor):
    """ This function needs to crawl up the stack and find out where
    the ancestor caller of this function was in the source code of either
    the python or javascript, depending on environment.
    @param ancestor the ancestor of this function that we want to capture
    file information about.
    @return string indicating the file position and line number
    """
    if __envir__.executor_name == __envir__.transpiler_name:  # js
        s = None
        __pragma__('js', '{}', '''
            var e = new Error();
            if ( ! e.stack ) {
                console.log("MAJOR ISSUE: Browser Error lacks Stack");
            } else {
                s = e.stack;
            }
            ''')
        # Now we will process the stack to find the grandparent
        # calling function
        # @note - I'm explicitly not including a 're' module
        # dependency here
        frames = None
        __pragma__('js', '{}', '''
            var linereg = new RegExp("\\n\\r|\\n", "g");
            frames = s.toString().split(linereg);
            ''')
        if ( frames is None or (len(frames) < 2)):
            __pragma__('js', '{}', 'console.log("Failed to Split Stack");')
            return("UNKNOWN:???")
        # @note - if the call stack in transcrypts javascript
        # translation changes then this index may need to change
        # @todo - need more work here to determine this because
        # this is fragile
        gpFrame = frames[(ancestor*2 + 1)]
        # This regex splits the string coming from the javascript
        # stacktrace so that we can connect the file and line number
        # runTests (http://localhost:8080/run/autotest.js:3159:8)
        # func URL filename lineno:colno
        # Group 1 = function
        # Group 2 & 3 = protocol and hostname
        # Group 4 = Path on this host (filename is at the end)
        # Group 5 = lineno
        # Group 6 = column number in file
        frameReg = r"([^(]*)\(?([^:]*:)\/{2,3}([^:/]*:?)([^:]*):(\d+):(\d+)"
        m = None
        __pragma__('js', '{}', '''
            var r = new RegExp(frameReg);
            m = r.exec(gpFrame);
            ''')
        if m:
            filepath = m[4]
            # Split the filepath and take the last element
            # to get the filename
            pathParts = filepath.split("/")
            filename = pathParts[len(pathParts)-1]
            lineno = m[5]
            return( "{}:{}".format(filename, lineno) )
        else:
            __pragma__('js', '{}', 'console.log("Failed to Match Frame", gpFrame);')
            return("UNKNOWN:???")
    #ELSE
    # Needed because Transcrypt imports are compile time
    __pragma__("skip")
    from inspect import getframeinfo, stack
    s = stack()
    caller = getframeinfo(s[ancestor][0])
    # Trim the file name path so that we don't get
    # a lot of unnecessary content
    filepath = caller.filename
    # @todo - this is a hack - we should use os.path
    pathParts = filepath.split('/')
    filename = "/".join(pathParts[-2:])
    return( "%s:%d" % (filename, caller.lineno))
    __pragma__ ('noskip')


class AutoTester:
    """ Main testing class for comparing CPython to Transcrypt.
    This class is primarily used by calling the "check" method to confirm
    that the result is the same in both environments and "done" when all
    checks for a particular module have been completed.
    """
    def __init__ (self, symbols = []):
        self.symbols = symbols
        # refDict/testDict contains the test results
        # of each testlet identified by name as the key
        self._currTestlet = "UNKNOWN"
        self.testDict = {}
        self.refDict = {}
        if __envir__.executor_name == __envir__.transpiler_name:
            self.ui = JSTesterUI()
        else:
            self.ui = None

    def sortedRepr (self, any):
        # When using sets or dicts, use elements or keys
        # of one type, in sort order
        def tryGetNumKey (key):
            if type (key) == str:  # Try to interpret key as numerical, see comment with repr function in __builtins__
                try:
                    return int (key)
                except:
                    try:
                        return float (key)
                    except:
                        return key
            else:
                return key

        if type (any) == dict:
            return '{' + ', '.join ([
                '{}: {}'.format (repr (key), repr (any [key]))
                for index, key in enumerate (sorted ([tryGetNumKey (key) for key in any.keys ()], key = lambda aKey: str (aKey)))
            ]) + '}'
        elif type (any) == set:
            if len (any):
                return '{' + ', '.join (sorted ([str (item) for item in list (any)])) + '}'
            else:
                return repr (any)
        elif type (any) == range:
            return repr (list (any))
        else:
            return repr (any)

    __pragma__('kwargs')
    def check (self, *args, ancestor = 2):
        """ Given a set of values from either the python or transcrypt
        environments, we log the position of the check call in the test
        and representative values of the passed arguments for later
        comparison.
        """
        position=getFileLocation(ancestor)
        # N.B. stubs.browser provides a special sorting repr
        item = ' '.join ([self.sortedRepr (arg) for arg in args])
        if __envir__.executor_name == __envir__.transpiler_name:
            self.testDict[self._currTestlet].append((position,item))
        else:
            self.refDict[self._currTestlet].append((position,item))
    __pragma__('nokwargs')

    def expectException(self, func):
        """ This method attempts to call the passed method and
        checks to see whether an exception was generated.
        @return string indicating "no exception" or "exception"
        """
        try:
            func()
            return("no exception")
        except Exception as exc:
            return("exception")

    def throwToError(self, func):
        """ This function invokes the passed function and then
        converts an exception to an error response so that the
        unit test can continue even in the case where an exception
        may or may not occur.
        """
        try:
            return(func())
        except Exception as exc:
            return (None, "!!!{}".format(str(exc)))

    def checkEval(self, func):
        """ Check the result of the passed function which is invoked
        without arguments. If this function throws an exception, that
        exception is caught and converted to an error which can be
        compared against the result. This allows the user to control for
        exceptions that may or may not be generated in the unit tests.
        """
        ret = self.throwToError(func)
        self.check(ret, ancestor = 3)

    def checkPad(self, val, count):
        """ This method is to help manage flow control in unit tests and
        keep all unit tests aligned.
        """
        for i in range(0, count):
            self.check(val)

    def _getTotalErrorCnt(self, testData, refData):
        """ This method determines the total number of non-matching values
        in the test and reference data for a particular module.
        """
        errCount = 0
        for i,(refPos, refItem) in enumerate(refData):
            try:
                testPos,testItem = testData[i]
                if not itemsAreEqual (testItem, refItem):
                    errCount+=1
            except:
                errCount+=1
        return(errCount)

    def compare (self):
        # Load the python reference data from the hidden HTML div
        dc = DataConverter()
        self.refDict = dc.getPythonResults()

        totalErrors = 0
        sKeys = sorted(self.refDict.keys())
        for key in sKeys:
            refData = self.refDict[key]
            try:
                testData = self.testDict[key]
                if ( testData is None ):
                    raise KeyError("No Test Data Module: {}".format(key))
            except KeyError:
                # No Test Data found for this key - we will populate with
                # errors for all ref data
                self.ui.appendSeqRowName(key, len(refData))
                for i,(refPos, refItem) in enumerate(refData):
                    self.ui.appendTableResult(key, None, None, refPos, refItem, False)
                continue

            # Now we know we have testData, so let's determine the total
            # number of errors for this test module. This will allow us to
            # both set the number of errors in the test module header row
            # and set the rows to the appropriate initial
            # collapsed/expanded state.
            errCount= self._getTotalErrorCnt(testData, refData)
            collapse = (errCount == 0)
            self.ui.appendSeqRowName(key, errCount)

            # Now we will populate the table with all the rows
            # of data for the comparison
            for i,(refPos, refItem) in enumerate(refData):
                try:
                    # This will throw if testData's length is
                    # shorter than refData's
                    testPos,testItem = testData[i]
                except:
                    testPos = None
                    testItem = None
                self.ui.appendTableResult(
                    key, testPos, testItem, refPos, refItem, collapse
                )
            totalErrors += errCount

        self.ui.setOutputStatus( totalErrors == 0 )

    def _cleanName(self, name):
        """ Clean the passed name of characters that won't be allowed in
        CSS class or HTML id strings.
        """
        # Convert testletName to replace any of the characters that
        # are not acceptable in a CSS class or HTML id - this is to
        # make our lives easier
        # @note - I'm SPECIFICALLY not using a regex here because the
        # regex engine module is still under dev and could possibly
        # have issues
        ret = name
        invalidChars = [
            '~', '!', '@', '$', '%',
            '^', '&', '*', '(', ')',
            '+', '=', ',', '.', '/',
            "'", ';', ':', '"', '?',
            '>', '<', '[', ']', '\\',
            '{', '}', '|', '`', '#',
            " ",
        ]
        for ch in invalidChars:
            ret = ret.replace(ch, "_")
        return(ret)

    def run (self, testlet, testletName):
        testletName = self._cleanName(testletName)
        self._currTestlet = testletName
        if __envir__.executor_name == __envir__.transpiler_name:
            self.testDict[self._currTestlet] = []
        else:
            self.refDict[self._currTestlet] = []
        try:
            testlet.run (self)
        except Exception as exc:
            if ( self.ui is not None ):
                self.ui.setOutputStatus(False)
                self.ui.showException(testletName, exc)
            else:
                # Error - No UI yet, reraise specific exception to enable finding out why
                raise

    def done (self):
        if __envir__.executor_name == __envir__.transpiler_name:
            self.compare ()
        else:
            fnameBase = __main__.__file__.replace ('\\', '/')
            hg = HTMLGenerator(fnameBase)
            hg.generate_html(self.refDict)
apache-2.0
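A hypothetical testlet sketch illustrating the call pattern described in the AutoTester docstring above; the module name testlet_demo and the checked values are illustrative, not part of the package:

# testlet_demo.py -- any module exposing run(autoTester) can be handed
# to AutoTester.run().
def run(autoTester):
    autoTester.check('ints', 2 + 3, 7 // 2)
    autoTester.check(sorted([3, 1, 2]))
    # expectException folds exceptions into comparable strings:
    autoTester.check(autoTester.expectException(lambda: 1 / 0))

# Driver side (run once under CPython to record reference data, then
# transpiled in the browser to compare, per the header comments):
# a = AutoTester()
# a.run(testlet_demo, 'testlet_demo')
# a.done()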
walterbender/Pippy
pippy_app.py
2
59457
#!/usr/bin/python3 # -*- coding: utf-8 -*- # # Copyright (C) 2007,2008,2009 Chris Ball, based on Collabora's # "hellomesh" demo. # # Copyright (C) 2013,14 Walter Bender # Copyright (C) 2013,14 Ignacio Rodriguez # Copyright (C) 2013 Jorge Gomez # Copyright (C) 2013,14 Sai Vineet # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """Pippy Activity: A simple Python programming activity .""" import re import os import subprocess from random import uniform import locale import json import sys from shutil import copy2 from signal import SIGTERM from gettext import gettext as _ import uuid import dbus from dbus.mainloop.glib import DBusGMainLoop from gi import require_version require_version('Gdk', '3.0') require_version('Gtk', '3.0') from gi.repository import Gdk from gi.repository import Gtk from gi.repository import GLib from gi.repository import Pango try: require_version('Vte', '2.91') except: require_version('Vte', '2.90') from gi.repository import Vte from gi.repository import GObject DBusGMainLoop(set_as_default=True) bus = dbus.SessionBus() from sugar3.datastore import datastore from sugar3.activity import activity as activity from sugar3.activity.widgets import EditToolbar from sugar3.activity.widgets import StopButton from sugar3.activity.activity import get_bundle_path from sugar3.graphics.alert import Alert from sugar3.graphics.alert import ConfirmationAlert from sugar3.graphics.alert import NotifyAlert from sugar3.graphics.icon import Icon from sugar3.graphics.objectchooser import ObjectChooser from sugar3.graphics.toggletoolbutton import ToggleToolButton from sugar3.graphics.toolbarbox import ToolbarButton from sugar3.graphics.toolbutton import ToolButton from sugar3.graphics.toolbarbox import ToolbarBox from sugar3.activity.widgets import ActivityToolbarButton from jarabe.view.customizebundle import generate_unique_id from activity import ViewSourceActivity from activity import TARGET_TYPE_TEXT from collabwrapper import CollabWrapper from filedialog import FileDialog from icondialog import IconDialog from notebook import SourceNotebook, tab_object from toolbars import DevelopViewToolbar import sound_check import logging text_buffer = None # magic prefix to use utf-8 source encoding PYTHON_PREFIX = '''#!/usr/bin/python3 # -*- coding: utf-8 -*- ''' # Force category names into Pootle DEFAULT_CATEGORIES = [_('graphics'), _('math'), _('python'), _('sound'), _('string'), _('tutorials')] _logger = logging.getLogger('pippy-activity') DISTUTILS_SETUP_SCRIPT = """#!/usr/bin/python3 # -*- coding: utf-8 -*- from distutils.core import setup setup(name='{modulename}', version='1.0', py_modules=[ {filenames} ], ) """ # This is .format()'ed with the list of the file names. 
DISTUTILS_SETUP_SCRIPT = """#!/usr/bin/python3 # -*- coding: utf-8 -*- from distutils.core import setup setup(name='{modulename}', version='1.0', py_modules=[ {filenames} ], ) """ # This is .format()'ed with the list of the file names. def _has_new_vte_api(): try: return (Vte.MAJOR_VERSION >= 0 and Vte.MINOR_VERSION >= 38) except: # Really old versions of Vte don't have VERSION return False def _find_object_id(activity_id, mimetype='text/x-python'): ''' Round-about way of accessing self._jobject.object_id ''' dsobjects, nobjects = datastore.find({'mime_type': [mimetype]}) for dsobject in dsobjects: if 'activity_id' in dsobject.metadata and \ dsobject.metadata['activity_id'] == activity_id: return dsobject.object_id return None class PippyActivity(ViewSourceActivity): '''Pippy Activity as specified in activity.info''' def __init__(self, handle): self._pippy_instance = self self.session_data = [] # Used to manage saving self._loaded_session = [] # Used to manage tabs self._py_file_loaded_from_journal = False self._py_object_id = None self._dialog = None sys.path.append(os.path.join(self.get_activity_root(), 'Library')) ViewSourceActivity.__init__(self, handle) self._collab = CollabWrapper(self) self._collab.message.connect(self.__message_cb) self.set_canvas(self.initialize_display()) self.after_init() self.connect("notify::active", self.__active_cb) self._collab.setup() def focus(): """ Enforce focus for the text view once. """ widget = self.get_toplevel().get_focus() textview = self._source_tabs.get_text_view() if widget is None and textview is not None: textview.grab_focus() return True return False GLib.timeout_add(100, focus) def initialize_display(self): '''Build activity toolbar with title input, share button and export buttons ''' toolbar_box = ToolbarBox() activity_button = ActivityToolbarButton(self) toolbar_box.toolbar.insert(activity_button, 0) self.set_toolbar_box(toolbar_box) activity_button.show() toolbar_box.show() activity_toolbar = activity_button.page separator = Gtk.SeparatorToolItem() activity_toolbar.insert(separator, -1) separator.show() button = ToolButton('pippy-import-doc') button.set_tooltip(_('Import Python file to new tab')) button.connect('clicked', self._import_py_cb) activity_toolbar.insert(button, -1) button.show() button = ToolButton('pippy-export-doc') button.set_tooltip(_('Export as Pippy document')) button.connect('clicked', self._export_document_cb) activity_toolbar.insert(button, -1) button.show() button = ToolButton('pippy-export-library') button.set_tooltip(_('Save this file to the Pippy library')) button.connect('clicked', self._save_as_library) activity_toolbar.insert(button, -1) if not self._library_writable(): button.set_sensitive(False) button.show() button = ToolButton('pippy-export-example') button.set_tooltip(_('Export as new Pippy example')) button.connect('clicked', self._export_example_cb) activity_toolbar.insert(button, -1) button.show() button = ToolButton('pippy-create-bundle') button.set_tooltip(_('Create a Sugar activity bundle')) button.connect('clicked', self._create_bundle_cb) activity_toolbar.insert(button, -1) button.show() button = ToolButton('pippy-create-distutils') # TRANS: A distutils package is used to distribute Python modules button.set_tooltip(_('Export as a distutils package')) button.connect('clicked', self._export_distutils_cb) activity_toolbar.insert(button, -1) button.show() self._edit_toolbar = EditToolbar() button = ToolbarButton() button.set_page(self._edit_toolbar) button.props.icon_name = 'toolbar-edit' 
button.props.label = _('Edit') self.get_toolbar_box().toolbar.insert(button, -1) button.show() self._edit_toolbar.show() self._edit_toolbar.undo.connect('clicked', self.__undobutton_cb) self._edit_toolbar.redo.connect('clicked', self.__redobutton_cb) self._edit_toolbar.copy.connect('clicked', self.__copybutton_cb) self._edit_toolbar.paste.connect('clicked', self.__pastebutton_cb) view_btn = ToolbarButton() view_toolbar = DevelopViewToolbar(self) view_btn.props.page = view_toolbar view_btn.props.icon_name = 'toolbar-view' view_btn.props.label = _('View') view_toolbar.connect('font-size-changed', self._font_size_changed_cb) self.get_toolbar_box().toolbar.insert(view_btn, -1) self.view_toolbar = view_toolbar view_toolbar.show() actions_toolbar = self.get_toolbar_box().toolbar self._toggle_output = ToggleToolButton('tray-show') self._toggle_output.set_tooltip(_('Show output panel')) self._toggle_output.connect('toggled', self._toggle_output_cb) actions_toolbar.insert(self._toggle_output, -1) self._toggle_output.show() self._inverted_colors = ToggleToolButton(icon_name='dark-theme') self._inverted_colors.set_tooltip(_('Inverted Colors')) self._inverted_colors.set_accelerator('<Ctrl><Shift>I') self._inverted_colors.connect( 'toggled', self.__inverted_colors_toggled_cb) actions_toolbar.insert(self._inverted_colors, -1) self._inverted_colors.show() icons_path = os.path.join(get_bundle_path(), 'icons') icon_bw = Gtk.Image() icon_bw.set_from_file(os.path.join(icons_path, 'run_bw.svg')) icon_bw.show() icon_color = Gtk.Image() icon_color.set_from_file(os.path.join(icons_path, 'run_color.svg')) icon_color.show() button = ToolButton(label=_('Run!')) button.props.accelerator = _('<alt>r') button.set_icon_widget(icon_bw) button.set_tooltip(_('Run!')) button.connect('clicked', self._flash_cb, dict({'bw': icon_bw, 'color': icon_color})) button.connect('clicked', self._go_button_cb) actions_toolbar.insert(button, -1) button.show() icon_bw = Gtk.Image() icon_bw.set_from_file(os.path.join(icons_path, 'stopit_bw.svg')) icon_bw.show() icon_color = Gtk.Image() icon_color.set_from_file(os.path.join(icons_path, 'stopit_color.svg')) icon_color.show() button = ToolButton(label=_('Stop')) button.props.accelerator = _('<alt>s') button.set_icon_widget(icon_bw) button.connect('clicked', self._flash_cb, dict({'bw': icon_bw, 'color': icon_color})) button.connect('clicked', self._stop_button_cb) button.set_tooltip(_('Stop')) actions_toolbar.insert(button, -1) button.show() icon_bw = Gtk.Image() icon_bw.set_from_file(os.path.join(icons_path, 'eraser_bw.svg')) icon_bw.show() icon_color = Gtk.Image() icon_color.set_from_file(os.path.join(icons_path, 'eraser_color.svg')) icon_color.show() button = ToolButton(label=_('Clear output panel')) button.props.accelerator = _('<alt>c') button.set_icon_widget(icon_bw) button.connect('clicked', self._clear_button_cb) button.connect('clicked', self._flash_cb, dict({'bw': icon_bw, 'color': icon_color})) button.set_tooltip(_('Clear output panel')) actions_toolbar.insert(button, -1) button.show() activity_toolbar.show() separator = Gtk.SeparatorToolItem() self.get_toolbar_box().toolbar.insert(separator, -1) separator.show() button = ToolButton('pippy-openoff') button.set_tooltip(_('Open an example')) button.connect('clicked', self._load_example_cb) self.get_toolbar_box().toolbar.insert(button, -1) button.show() separator = Gtk.SeparatorToolItem() separator.props.draw = False separator.set_expand(True) self.get_toolbar_box().toolbar.insert(separator, -1) separator.show() stop = 
StopButton(self) self.get_toolbar_box().toolbar.insert(stop, -1) stop.show() vpane = Gtk.Paned.new(orientation=Gtk.Orientation.VERTICAL) vpane.set_position(400) # setting initial position self.paths = [] try: if sound_check.finddir(): TAMTAM_AVAILABLE = True else: TAMTAM_AVAILABLE = False except sound_check.SoundLibraryNotFoundError: TAMTAM_AVAILABLE = False data_path = os.path.join(get_bundle_path(), 'data') # get default language from locale locale_lang = locale.getdefaultlocale()[0] if locale_lang is None: lang = 'en' else: lang = locale_lang.split('_')[0] _logger.debug(locale.getdefaultlocale()) _logger.debug(lang) # construct the path for both lang_path = os.path.join(data_path, lang) en_lang_path = os.path.join(data_path, 'en') # get all folders in lang examples all_folders = [] if os.path.exists(lang_path): for d in sorted(os.listdir(lang_path)): all_folders.append(d) # get all folders in English examples for d in sorted(os.listdir(en_lang_path)): # check if folder isn't already in list if d not in all_folders: all_folders.append(d) for folder in all_folders: # Skip sound folders if TAMTAM is not installed if folder == 'sound' and not TAMTAM_AVAILABLE: continue direntry = {} # check if dir exists in pref language, if exists, add it if os.path.exists(os.path.join(lang_path, folder)): direntry = { 'name': _(folder.capitalize()), 'path': os.path.join(lang_path, folder) + '/'} # if not try to see if it's in default English path elif os.path.exists(os.path.join(en_lang_path, folder)): direntry = { 'name': _(folder.capitalize()), 'path': os.path.join(en_lang_path, folder) + '/'} self.paths.append([direntry['name'], direntry['path']]) # Adding local examples data_path = os.path.join(get_bundle_path(), 'data') self.paths.append([_('My examples'), data_path]) self._source_tabs = SourceNotebook(self, self._collab) self._source_tabs.connect('tab-added', self._add_source_cb) self._source_tabs.connect('tab-renamed', self._rename_source_cb) self._source_tabs.connect('tab-closed', self._close_source_cb) if self._loaded_session: for name, content, path in self._loaded_session: self._source_tabs.add_tab(name, content, path) else: self.session_data.append(None) self._source_tabs.add_tab() # New instance, ergo empty tab vpane.add1(self._source_tabs) self._source_tabs.show() self._outbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL) self._vte = Vte.Terminal() self._vte.set_encoding('utf-8') self._vte.set_size(30, 5) self._vte.set_scrollback_lines(-1) self._vte_set_colors('#000000', '#E7E7E7') self._child_exited_handler = None self._vte.connect('child_exited', self._child_exited_cb) self._vte.connect('drag_data_received', self._vte_drop_cb) self._outbox.pack_start(self._vte, True, True, 0) outsb = Gtk.Scrollbar(orientation=Gtk.Orientation.VERTICAL) outsb.set_adjustment(self._vte.get_vadjustment()) outsb.show() self._outbox.pack_start(outsb, False, False, 0) self._load_config() vpane.add2(self._outbox) self._outbox.show() vpane.show() return vpane def _vte_set_colors(self, bg, fg): # XXX support both Vte APIs if _has_new_vte_api(): foreground = Gdk.RGBA() foreground.parse(bg) background = Gdk.RGBA() background.parse(fg) else: foreground = Gdk.color_parse(bg) background = Gdk.color_parse(fg) self._vte.set_colors(foreground, background, []) def after_init(self): self._outbox.hide() def _font_size_changed_cb(self, widget, size): self._source_tabs.set_font_size(size) self._vte.set_font( Pango.FontDescription('Monospace {}'.format(size))) def _store_config(self): font_size = 
self._source_tabs.get_font_size() _config_file_path = os.path.join( activity.get_activity_root(), 'data', 'config.json') with open(_config_file_path, "w") as f: f.write(json.dumps(font_size)) def _load_config(self): _config_file_path = os.path.join( activity.get_activity_root(), 'data', 'config.json') if not os.path.isfile(_config_file_path): return with open(_config_file_path, "r") as f: font_size = json.loads(f.read()) self.view_toolbar.set_font_size(font_size) self._vte.set_font( Pango.FontDescription('Monospace {}'.format(font_size))) def __active_cb(self, widget, event): _logger.debug('__active_cb %r', self.props.active) if self.props.active: self.resume() else: self.pause() def do_visibility_notify_event(self, event): _logger.debug('do_visibility_notify_event %r', event.get_state()) if event.get_state() == Gdk.VisibilityState.FULLY_OBSCURED: self.pause() else: self.resume() def pause(self): # FIXME: We had resume, but no pause? pass def resume(self): if self._dialog is not None: self._dialog.set_keep_above(True) def _toggle_output_cb(self, button): shown = button.get_active() if shown: self._outbox.show_all() self._toggle_output.set_tooltip(_('Hide output panel')) self._toggle_output.set_icon_name('tray-hide') else: self._outbox.hide() self._toggle_output.set_tooltip(_('Show output panel')) self._toggle_output.set_icon_name('tray-show') def __inverted_colors_toggled_cb(self, button): if button.props.active: self._vte_set_colors('#E7E7E7', '#000000') self._source_tabs.set_dark() button.set_icon_name('light-theme') button.set_tooltip(_('Normal Colors')) else: self._vte_set_colors('#000000', '#E7E7E7') self._source_tabs.set_light() button.set_icon_name('dark-theme') button.set_tooltip(_('Inverted Colors')) def _load_example_cb(self, widget): widget.set_icon_name('pippy-openon') self._dialog = FileDialog(self.paths, self, widget) self._dialog.show() self._dialog.run() path = self._dialog.get_path() if path: self._select_func_cb(path) def _add_source_cb(self, button, force=False, editor_id=None): if self._collab._leader or force: if editor_id is None: editor_id = str(uuid.uuid1()) self._source_tabs.add_tab(editor_id=editor_id) self.session_data.append(None) self._source_tabs.get_nth_page(-1).show_all() self._source_tabs.get_text_view().grab_focus() if self._collab._leader: self._collab.post(dict( action='add-source', editor_id=editor_id)) else: # The leader must do it first so that they can set # up the text buffer self._collab.post(dict(action='add-source-request')) # Check if dark mode enabled, apply it if self._inverted_colors.props.active: self._source_tabs.set_dark() def _rename_source_cb(self, notebook, page, name): _logger.debug('_rename_source_cb %r %r' % (page, name)) self._collab.post(dict(action='rename-source', page=page, name=name)) def _close_source_cb(self, notebook, page): _logger.debug('_close_source_cb %r' % (page)) self._collab.post(dict(action='close-source', page=page)) def __message_cb(self, collab, buddy, msg): action = msg.get('action') if action == 'add-source-request' and self._collab._leader: self._add_source_cb(None, force=True) elif action == 'add-source': self._add_source_cb( None, force=True, editor_id=msg.get('editor_id')) elif action == 'rename-source': page = msg.get('page') name = msg.get('name') _logger.debug('__message_cb rename-source %r %r' % (page, name)) self._source_tabs.rename_tab(page, name) elif action == 'close-source': page = msg.get('page') _logger.debug('__message_cb close-source %r' % (page)) self._source_tabs.close_tab(page) def 
_vte_drop_cb(self, widget, context, x, y, selection, targetType, time): if targetType == TARGET_TYPE_TEXT: self._vte.feed_child(selection.data) def get_data(self): return self._source_tabs.get_all_data() def set_data(self, data): # Remove initial new/blank thing self.session_data = [] self._loaded_session = [] try: self._source_tabs.remove_page(0) tab_object.pop(0) self._source_tabs.last_tab = 0 except IndexError: pass list_ = list(zip(*data)) for name, code, path, modified, editor_id in list_: self._source_tabs.add_tab( label=name, editor_id=editor_id) self.session_data.append(None) # maybe? def _selection_cb(self, value): self.save() _logger.debug('clicked! %s' % value['path']) _file = open(value['path'], 'r') lines = _file.readlines() self._add_source_cb(None) text_buffer = self._source_tabs.get_text_buffer() text_buffer.set_text(''.join(lines)) text_buffer.set_modified(False) self._pippy_instance.metadata['title'] = value['name'] self._stop_button_cb(None) self._reset_vte() self._source_tabs.set_current_label(value['name']) self._source_tabs.set_current_path(value['path']) self._source_tabs.get_text_view().grab_focus() def _select_func_cb(self, path): values = {} values['name'] = os.path.basename(path) values['path'] = path self._selection_cb(values) def _timer_cb(self, button, icons): button.set_icon_widget(icons['bw']) button.show_all() return False def _flash_cb(self, button, icons): button.set_icon_widget(icons['color']) button.show_all() GObject.timeout_add(400, self._timer_cb, button, icons) def _clear_button_cb(self, button): self.save() self._stop_button_cb(None) self._reset_vte() self._source_tabs.get_text_view().grab_focus() def _write_all_buffers(self, tmp_dir): data = self._source_tabs.get_all_data() zipdata = list(zip(data[0], data[1])) for name, content in zipdata: name = self._source_tabs.purify_name(name) with open(os.path.join(tmp_dir, name), 'w') as f: # Write utf-8 coding prefix if there's not one already if re.match(r'coding[:=]\s*([-\w.]+)', '\n'.join(content.splitlines()[:2])) is None: f.write(PYTHON_PREFIX) f.write(content) def _reset_vte(self): self._vte.grab_focus() self._vte.feed(b'\x1B[H\x1B[J\x1B[0;39m') def __undobutton_cb(self, butston): text_buffer = self._source_tabs.get_text_buffer() if text_buffer.can_undo(): text_buffer.undo() def __redobutton_cb(self, button): text_buffer = self._source_tabs.get_text_buffer() if text_buffer.can_redo(): text_buffer.redo() def __copybutton_cb(self, button): text_buffer = self._source_tabs.get_text_buffer() if self._vte.get_has_selection(): self._vte.copy_clipboard() elif text_buffer.get_has_selection(): clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD) text_buffer.copy_clipboard(clipboard) def __pastebutton_cb(self, button): text_buffer = self._source_tabs.get_text_buffer() clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD) text_buffer.paste_clipboard(clipboard, None, True) def _go_button_cb(self, button): self._stop_button_cb(button) # Try stopping old code first. self._reset_vte() # FIXME: We're losing an odd race here # Gtk.main_iteration(block=False) if self._toggle_output.get_active() is False: self._outbox.show_all() self._toggle_output.set_active(True) pippy_tmp_dir = '%s/tmp/' % self.get_activity_root() self._write_all_buffers(pippy_tmp_dir) current_file = os.path.join( pippy_tmp_dir, self._source_tabs.get_current_file_name()) # Write activity.py here too, to support pippy-based activities. 
copy2('%s/activity.py' % get_bundle_path(), '%s/tmp/activity.py' % self.get_activity_root()) # XXX Support both Vte APIs if _has_new_vte_api(): vte_run = self._vte.spawn_sync else: vte_run = self._vte.fork_command_full self._pid = vte_run( Vte.PtyFlags.DEFAULT, get_bundle_path(), ['/bin/sh', '-c', 'python3 %s; sleep 1' % current_file, 'PYTHONPATH=%s/library:%s' % (get_bundle_path(), os.getenv('PYTHONPATH', ''))], ['PYTHONPATH=%s/library:%s' % (get_bundle_path(), os.getenv('PYTHONPATH', ''))], GLib.SpawnFlags.DO_NOT_REAP_CHILD, None, None,) def _stop_button_cb(self, button): try: if self._pid is not None: os.kill(self._pid[1], SIGTERM) except: pass # Process must already be dead. def _library_writable(self): return os.access(os.path.join(get_bundle_path(), 'library'), os.W_OK) def _save_as_library(self, button): library_dir = os.path.join(get_bundle_path(), 'library') file_name = self._source_tabs.get_current_file_name() text_buffer = self._source_tabs.get_text_buffer() content = text_buffer.get_text( *text_buffer.get_bounds(), include_hidden_chars=True) if not os.path.isdir(library_dir): os.mkdir(library_dir) with open(os.path.join(library_dir, file_name), 'w') as f: f.write(content) success = True if success: alert = NotifyAlert(5) alert.props.title = _('Python File added to Library') IMPORT_MESSAGE = _('The file you selected has been added' ' to the library. Use "import {importname}"' ' to import the library for using.') alert.props.msg = IMPORT_MESSAGE.format(importname=file_name[:-3]) alert.connect('response', self._remove_alert_cb) self.add_alert(alert) def _export_document_cb(self, __): self.copy() alert = NotifyAlert() alert.props.title = _('Saved') alert.props.msg = _('The document has been saved to journal.') alert.connect('response', lambda x, i: self.remove_alert(x)) self.add_alert(alert) def _remove_alert_cb(self, alert, response_id): self.remove_alert(alert) def _import_py_cb(self, button): chooser = ObjectChooser() result = chooser.run() if result is Gtk.ResponseType.ACCEPT: dsitem = chooser.get_selected_object() if dsitem.metadata['mime_type'] != 'text/x-python': alert = NotifyAlert(5) alert.props.title = _('Error importing Python file') alert.props.msg = _('The file you selected is not a ' 'Python file.') alert.connect('response', self._remove_alert_cb) self.add_alert(alert) elif dsitem.object_id in self.session_data: alert = NotifyAlert(5) alert.props.title = _('Error importing Python file') alert.props.msg = _('The file you selected is already ' 'open') alert.connect('response', self._remove_alert_cb) self.add_alert(alert) else: name = dsitem.metadata['title'] file_path = dsitem.get_file_path() content = open(file_path, 'r').read() self._source_tabs.add_tab(name, content, None) self._source_tabs.set_current_label(name) self.session_data.append(dsitem.object_id) _logger.debug('after import py: %r' % self.session_data) chooser.destroy() def _create_bundle_cb(self, button): from shutil import rmtree from tempfile import mkdtemp # Get the name of this pippy program. 
title = self._pippy_instance.metadata['title'].replace('.py', '') title = title.replace('-', '') if title == 'Pippy Activity': alert = Alert() alert.props.title = _('Save as Activity Error') alert.props.msg = _('Please give your activity a meaningful name ' 'before attempting to save it as an activity.') ok_icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert.connect('response', self._dismiss_alert_cb) self.add_alert(alert) return alert_icon = Alert() ok_icon = Icon(icon_name='dialog-ok') alert_icon.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert_icon.props.title = _('Activity icon') alert_icon.props.msg = _('Please select an activity icon.') self._stop_button_cb(None) # try stopping old code first. self._reset_vte() self._outbox.show_all() self._vte.feed(_("Creating activity bundle...").encode()) self._vte.feed(b'\r\n') TMPDIR = 'instance' app_temp = mkdtemp('.activity', 'Pippy', os.path.join(self.get_activity_root(), TMPDIR)) sourcefile = os.path.join(app_temp, 'xyzzy.py') # invoke ourself to build the activity bundle. _logger.debug('writing out source file: %s' % sourcefile) def internal_callback(window=None, event=None): icon = '%s/activity/activity-default.svg' % (get_bundle_path()) if window: icon = window.get_icon() self._stop_button_cb(None) # Try stopping old code first. self._reset_vte() self._vte.feed(_('Creating activity bundle...').encode()) self._vte.feed(b'\r\n') TMPDIR = 'instance' app_temp = mkdtemp('.activity', 'Pippy', os.path.join(self.get_activity_root(), TMPDIR)) sourcefile = os.path.join(app_temp, 'xyzzy.py') # Invoke ourself to build the activity bundle. _logger.debug('writing out source file: %s' % sourcefile) # Write out application code self._write_text_buffer(sourcefile) try: # FIXME: vte invocation was raising errors. # Switched to subprocss output = subprocess.check_output( ['/usr/bin/python3', '%s/pippy_app.py' % get_bundle_path(), '-p', '%s/library' % get_bundle_path(), '-d', app_temp, title, sourcefile, icon]) self._vte.feed(output) self._vte.feed(b'\r\n') self._bundle_cb(title, app_temp) except subprocess.CalledProcessError: rmtree(app_temp, ignore_errors=True) # clean up! 
self._vte.feed(_('Save as Activity Error').encode()) self._vte.feed(b'\r\n') raise def _alert_response(alert, response_id): self.remove_alert(alert) def _dialog(): dialog = IconDialog() dialog.connect('destroy', internal_callback) GObject.idle_add(_dialog) alert_icon.connect('response', _alert_response) self.add_alert(alert_icon) def _write_text_buffer(self, filename): text_buffer = self._source_tabs.get_text_buffer() start, end = text_buffer.get_bounds() text = text_buffer.get_text(start, end, True) with open(filename, 'w') as f: # Write utf-8 coding prefix if there's not one already if re.match(r'coding[:=]\s*([-\w.]+)', '\n'.join(text.splitlines()[:2])) is None: f.write(PYTHON_PREFIX) for line in text: f.write(line) def _export_distutils_cb(self, button): app_temp = os.path.join(self.get_activity_root(), 'instance') data = self._source_tabs.get_all_data() for filename, content in zip(data[0], data[1]): fileobj = open(os.path.join(app_temp, filename), 'w') fileobj.write(content) fileobj.close() filenames = ','.join([("'" + name[:-3] + "'") for name in data[0]]) title = self._pippy_instance.metadata['title'] if title is _('Pippy Activity'): alert = Alert() alert.props.title = _('Save as distutils package error') alert.props.msg = _('Please give your activity a meaningful ' 'name before attempting to save it ' 'as an distutils package.') ok_icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert.connect('response', self._dismiss_alert_cb) self.add_alert(alert) return found = next(( name for name in data[0] if name != self._source_tabs.purify_name(name)), None) if found is not None: example = self._source_tabs.purify_name(found) alert = Alert() alert.props.title = _('Save as distutils package error') alert.props.msg = _('Please give your source files a proper ' 'name, for example "%s", before attempting to ' 'save it as an distutils package.') % example ok_icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert.connect('response', self._dismiss_alert_cb) self.add_alert(alert) return setup_script = DISTUTILS_SETUP_SCRIPT.format(modulename=title, filenames=filenames) setupfile = open(os.path.join(app_temp, 'setup.py'), 'w') setupfile.write(setup_script) setupfile.close() os.chdir(app_temp) subprocess.check_output( ['/usr/bin/python3', os.path.join(app_temp, 'setup.py'), 'sdist', '-v']) # Hand off to journal os.chmod(app_temp, 0o777) jobject = datastore.create() metadata = { 'title': '%s distutils bundle' % title, 'title_set_by_user': '1', 'mime_type': 'application/x-gzip', } for k, v in list(metadata.items()): # The dict.update method is missing =( jobject.metadata[k] = v tarname = 'dist/{modulename}-1.0.tar.gz'.format(modulename=title) jobject.file_path = os.path.join(app_temp, tarname) datastore.write(jobject) def _export_example_cb(self, button): # Get the name of this pippy program. title = self._pippy_instance.metadata['title'] if title == _('Pippy Activity'): alert = Alert() alert.props.title = _('Save as Example Error') alert.props.msg = \ _('Please give your activity a meaningful ' 'name before attempting to save it as an example.') ok_icon = Icon(icon_name='dialog-ok') alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon) alert.connect('response', self._dismiss_alert_cb) self.add_alert(alert) return self._stop_button_cb(None) # Try stopping old code first. 
self._reset_vte() self._vte.feed(_('Creating example...').encode()) self._vte.feed(b'\r\n') local_data = os.path.join(os.environ['SUGAR_ACTIVITY_ROOT'], 'data') local_file = os.path.join(local_data, title) if os.path.exists(local_file): alert = ConfirmationAlert() alert.props.title = _('Save as Example Warning') alert.props.msg = _('This example already exists. ' 'Do you want to overwrite it?') alert.connect('response', self._confirmation_alert_cb, local_file) self.add_alert(alert) else: self.write_file(local_file) self._reset_vte() self._vte.feed(_('Saved as example.').encode()) self._vte.feed(b'\r\n') self._add_to_example_list(local_file) def _child_exited_cb(self, *args): '''Called whenever a child exits. If there's a handler, run it.''' h, self._child_exited_handler = self._child_exited_handler, None if h is not None: h() def _bundle_cb(self, title, app_temp): '''Called when we're done building a bundle for a source file.''' from sugar3 import profile from shutil import rmtree try: # Find the .xo file: were we successful? bundle_file = [f for f in os.listdir(app_temp) if f.endswith('.xo')] if len(bundle_file) != 1: _logger.debug("Couldn't find bundle: %s" % str(bundle_file)) self._vte.feed(b'\r\n') self._vte.feed(_('Error saving activity to journal.').encode()) self._vte.feed(b'\r\n') return # Something went wrong. # Hand off to journal os.chmod(app_temp, 0o755) jobject = datastore.create() metadata = { 'title': '%s Bundle' % title, 'title_set_by_user': '1', 'buddies': '', 'preview': '', 'icon-color': profile.get_color().to_string(), 'mime_type': 'application/vnd.olpc-sugar', } for k, v in list(metadata.items()): # The dict.update method is missing =( jobject.metadata[k] = v jobject.file_path = os.path.join(app_temp, bundle_file[0]) datastore.write(jobject) self._vte.feed(b'\r\n') self._vte.feed(_('Activity saved to journal.').encode()) self._vte.feed(b'\r\n') self.journal_show_object(jobject.object_id) jobject.destroy() finally: rmtree(app_temp, ignore_errors=True) # clean up! 
    def _dismiss_alert_cb(self, alert, response_id):
        self.remove_alert(alert)

    def _confirmation_alert_cb(self, alert, response_id, local_file):
        # Callback for conf alert
        self.remove_alert(alert)
        if response_id == Gtk.ResponseType.OK:
            self.write_file(local_file)
            self._reset_vte()
            self._vte.feed(_('Saved as example.').encode())
            self._vte.feed(b'\r\n')
        else:
            self._reset_vte()

    def _add_to_example_list(self, local_file):
        entry = {'name': _(os.path.basename(local_file)),
                 'path': local_file}
        _iter = self.model.insert_before(self.example_iter, None)
        self.model.set_value(_iter, 0, entry)
        self.model.set_value(_iter, 1, entry['name'])

    def is_example(self, path):
        if path is None:
            return False
        for name in self.paths:
            if path.startswith(name[1]):
                return True
        return False

    def _get_pippy_object_id(self):
        ''' We need the object_id of this pippy instance
            to save in the .py file metadata'''
        if self._pippy_instance == self:
            return _find_object_id(self.metadata['activity_id'],
                                   mimetype='application/json')
        else:
            return self._pippy_instance.get_object_id()

    def write_file(self, file_path):
        pippy_id = self._get_pippy_object_id()
        data = self._source_tabs.get_all_data()
        zipped_data = list(zip(*data))
        session_list = []
        app_temp = os.path.join(self.get_activity_root(), 'instance')
        tmpfile = os.path.join(app_temp, 'pippy-tempfile-storing.py')
        if not self.session_data:
            self.session_data.append(None)
        for zipdata, content in zip(zipped_data, self.session_data):
            _logger.debug('Session data %r', content)
            name, python_code, path, modified, editor_id = zipdata
            if content is not None and content == self._py_object_id:
                _logger.debug('saving to self')
                self.metadata['title'] = name
                self.metadata['mime_type'] = 'text/x-python'
                if pippy_id is not None:
                    self.metadata['pippy_instance'] = pippy_id
                __file = open(file_path, 'w')
                __file.write(python_code)
                __file.close()
                session_list.append([name, content])
            elif content is not None and content[0] != '/':
                _logger.debug('Saving an existing dsobject')
                dsobject = datastore.get(content)
                dsobject.metadata['title'] = name
                dsobject.metadata['mime_type'] = 'text/x-python'
                if pippy_id is not None:
                    dsobject.metadata['pippy_instance'] = pippy_id
                __file = open(tmpfile, 'w')
                __file.write(python_code)
                __file.close()
                dsobject.set_file_path(tmpfile)
                datastore.write(dsobject)
                session_list.append([name, dsobject.object_id])
            elif modified:
                _logger.debug('Creating new dsobj for modified code')
                if len(python_code) > 0:
                    dsobject = datastore.create()
                    dsobject.metadata['title'] = name
                    dsobject.metadata['mime_type'] = 'text/x-python'
                    if pippy_id is not None:
                        dsobject.metadata['pippy_instance'] = pippy_id
                    __file = open(tmpfile, 'w')
                    __file.write(python_code)
                    __file.close()
                    dsobject.set_file_path(tmpfile)
                    datastore.write(dsobject)
                    session_list.append([name, dsobject.object_id])
                    # If there are multiple Nones, we need to find
                    # the correct one.
                    if content is None and \
                       self.session_data.count(None) > 1:
                        i = zipped_data.index(zipdata)
                    else:
                        i = self.session_data.index(content)
                    self.session_data[i] = dsobject.object_id
            elif content is not None or path is not None:
                _logger.debug('Saving reference to sample file')
                if path is None:
                    # Should not happen, but just in case...
                    _logger.error('path is None.')
                    session_list.append([name, content])
                else:
                    session_list.append([name, path])
            else:
                # Should not happen, but just in case...
                _logger.debug('Nothing to save in tab? %s %s %s %s' %
                              (str(name), str(python_code), str(path),
                               str(content)))

        self._pippy_instance.metadata['mime_type'] = 'application/json'
        pippy_data = json.dumps(session_list)

        # Override file path if we created a new Pippy instance
        if self._py_file_loaded_from_journal:
            file_path = os.path.join(app_temp, 'pippy-temp-instance-data')
        _file = open(file_path, 'w')
        _file.write(pippy_data)
        _file.close()
        if self._py_file_loaded_from_journal:
            _logger.debug('setting pippy instance file_path to %s' %
                          file_path)
            self._pippy_instance.set_file_path(file_path)
            datastore.write(self._pippy_instance)

        self._store_config()

    def read_file(self, file_path):
        # Either we are opening Python code or a list of objects
        # stored (json-encoded) in a Pippy instance, or a shared
        # session.

        # Remove initial new/blank thing
        self.session_data = []
        self._loaded_session = []
        try:
            self._source_tabs.remove_page(0)
            tab_object.pop(0)
            self._source_tabs.last_tab = 0
        except IndexError:
            pass

        if self.metadata['mime_type'] == 'text/x-python':
            _logger.debug('Loading Python code')
            # Opening some Python code directly
            try:
                text = open(file_path).read()
            except:
                alert = NotifyAlert(10)
                alert.props.title = _('Error')
                alert.props.msg = _('Error reading data.')

                def _remove_alert(alert, response_id):
                    self.remove_alert(alert)

                alert.connect("response", _remove_alert)
                self.add_alert(alert)
                return

            self._py_file_loaded_from_journal = True

            # Discard the '#!/usr/bin/python3' and 'coding: utf-8' lines,
            # if present
            python_code = re.sub(r'^' + re.escape(PYTHON_PREFIX), '', text)

            name = self.metadata['title']
            self._loaded_session.append([name, python_code, None])

            # Since we loaded Python code, we need to create (or
            # restore) a Pippy instance
            if 'pippy_instance' in self.metadata:
                _logger.debug('found a pippy instance: %s' %
                              self.metadata['pippy_instance'])
                try:
                    self._pippy_instance = datastore.get(
                        self.metadata['pippy_instance'])
                except:
                    _logger.debug('Cannot find old Pippy instance: %s' %
                                  self.metadata['pippy_instance'])
                    self._pippy_instance = None
            if self._pippy_instance in [self, None]:
                self._pippy_instance = datastore.create()
                self._pippy_instance.metadata['title'] = \
                    self.metadata['title']
                self._pippy_instance.metadata['mime_type'] = \
                    'application/json'
                self._pippy_instance.metadata['activity'] = \
                    'org.laptop.Pippy'
                datastore.write(self._pippy_instance)
                self.metadata['pippy_instance'] = \
                    self._pippy_instance.get_object_id()
                _logger.debug('get_object_id %s' %
                              self.metadata['pippy_instance'])

            # We need the Pippy file path so we can read the session data
            file_path = self._pippy_instance.get_file_path()

            # Finally, add this Python object to the session data
            self._py_object_id = _find_object_id(
                self.metadata['activity_id'])
            self.session_data.append(self._py_object_id)
            _logger.debug('session_data: %s' % self.session_data)

        if self.metadata['mime_type'] == 'application/json' or \
           self._pippy_instance != self:
            # Reading file list from Pippy instance
            _logger.debug('Loading Pippy instance')
            if len(file_path) == 0:
                return
            data = json.loads(open(file_path).read())
            for name, content in data:
                # content is either a datastore id or the path to some
                # sample code
                if content is not None and content[0] == '/':  # a path
                    try:
                        python_code = open(content).read()
                    except:
                        _logger.error('Could not open %s; skipping' %
                                      content)
                    path = content
                elif content != self._py_object_id:
                    try:
                        dsobject = datastore.get(content)
                        if 'mime_type' not in dsobject.metadata:
                            _logger.error(
                                'Warning: %s missing mime_type' % content)
                        elif dsobject.metadata['mime_type'] != \
                                'text/x-python':
                            _logger.error(
                                'Warning: %s has unexpected mime_type %s' %
                                (content,
                                 dsobject.metadata['mime_type']))
                    except:
                        # Could be that the item has subsequently been
                        # deleted from the datastore, so we skip it.
                        _logger.error('Could not open %s; skipping' %
                                      content)
                        continue
                    try:
                        python_code = open(dsobject.get_file_path()).read()
                    except:
                        # Malformed bundle?
                        _logger.error('Could not open %s; skipping' %
                                      dsobject.get_file_path())
                        continue
                    path = None

                # Queue up the creation of the tabs...
                # And add this content to the session data
                if content not in self.session_data:
                    self.session_data.append(content)
                self._loaded_session.append([name, python_code, path])

        # Create tabs from the datastore, else add a blank tab
        if self._loaded_session:
            for name, content, path in self._loaded_session:
                self._source_tabs.add_tab(name, content, path)
        else:
            self._source_tabs.add_tab()


# TEMPLATES AND INLINE FILES
ACTIVITY_INFO_TEMPLATE = '''
[Activity]
name = %(title)s
bundle_id = %(bundle_id)s
exec = sugar-activity3 %(class)s
icon = activity-icon
activity_version = %(version)d
mime_types = %(mime_types)s
show_launcher = yes
%(extra_info)s
'''

PIPPY_ICON = """<?xml version="1.0" ?><!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd' [
<!ENTITY stroke_color "#010101">
<!ENTITY fill_color "#FFFFFF">
]>
<svg enable-background="new 0 0 55 55" height="55px" version="1.1" viewBox="0 0 55 55" width="55px" x="0px" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" y="0px"><g display="block" id="activity-pippy">
<path d="M28.497,48.507
c5.988,0,14.88-2.838,14.88-11.185
c0-9.285-7.743-10.143-10.954-11.083
c-3.549-0.799-5.913-1.914-6.055-3.455
c-0.243-2.642,1.158-3.671,3.946-3.671
c0,0,6.632,3.664,12.266,0.74
c1.588-0.823,4.432-4.668,4.432-7.32
c0-2.653-9.181-5.719-11.967-5.719
c-2.788,0-5.159,3.847-5.159,3.847
c-5.574,0-11.149,5.306-11.149,10.612
c0,5.305,5.333,9.455,11.707,10.612
c2.963,0.469,5.441,2.22,4.878,5.438
c-0.457,2.613-2.995,5.306-8.361,5.306
c-4.252,0-13.3-0.219-14.745-4.079
c-0.929-2.486,0.168-5.205,1.562-5.205l-0.027-0.16
c-1.42-0.158-5.548,0.16-5.548,5.465
C8.202,45.452,17.347,48.507,28.497,48.507z" fill="&fill_color;" stroke="&stroke_color;" stroke-linecap="round" stroke-linejoin="round" stroke-width="3.5"/>
<path d="M42.579,19.854c-2.623-0.287-6.611-2-7.467-5.022" fill="none" stroke="&stroke_color;" stroke-linecap="round" stroke-width="3"/>
<circle cx="35.805" cy="10.96" fill="&stroke_color;" r="1.676"/>
</g></svg><!-- " -->
"""

# ACTIVITY META-INFORMATION
# this is used by Pippy to generate a bundle for itself.


def pippy_activity_version():
    '''Returns the version number of the generated activity bundle.'''
    return 39


def pippy_activity_extra_files():
    '''Returns a map of 'extra' files which should be included in the
    generated activity bundle.'''
    # Cheat here and generate the map from the fs contents.
    extra = {}
    bp = get_bundle_path()
    for d in ['po', 'data', 'post']:  # everybody gets library
        for root, dirs, files in os.walk(os.path.join(bp, d)):
            for name in files:
                fn = os.path.join(root, name).replace(bp + '/', '')
                extra[fn] = open(os.path.join(root, name), 'r').read()
    return extra


def pippy_activity_news():
    '''Return the NEWS file for this activity.'''
    # Cheat again.
    return open(os.path.join(get_bundle_path(), 'NEWS')).read()


def pippy_activity_icon():
    '''Return an SVG document specifying the icon for this activity.'''
    return PIPPY_ICON


def pippy_activity_class():
    '''Return the class which should be started to run this activity.'''
    return 'pippy_app.PippyActivity'


def pippy_activity_bundle_id():
    '''Return the bundle_id for the generated activity.'''
    return 'org.laptop.Pippy'


def pippy_activity_mime_types():
    '''Return the mime types handled by the generated activity, as a list.'''
    return ['text/x-python']


def pippy_activity_extra_info():
    return '''
license = GPLv2+
update_url = http://activities.sugarlabs.org '''


# ACTIVITY BUNDLER

def main():
    '''Create a bundle from a pippy-style source file'''
    from optparse import OptionParser
    from pyclbr import readmodule_ex
    from tempfile import mkdtemp
    from shutil import copytree, copy2, rmtree
    from sugar3.activity import bundlebuilder

    parser = OptionParser(usage='%prog [options] [title] [sourcefile] [icon]')
    parser.add_option('-d', '--dir', dest='dir', default='.', metavar='DIR',
                      help='Put generated bundle in the specified directory.')
    parser.add_option('-p', '--pythonpath', dest='path', action='append',
                      default=[], metavar='DIR',
                      help='Append directory to python search path.')

    (options, args) = parser.parse_args()
    if len(args) < 3:
        parser.error('The title, sourcefile and icon arguments are required.')

    title = args[0]
    sourcefile = args[1]
    icon_path = args[2]
    pytitle = re.sub(r'[^A-Za-z0-9_]', '', title)
    if re.match(r'[0-9]', pytitle) is not None:
        pytitle = '_' + pytitle  # first character cannot be numeric

    # First take a gander at the source file and see if it's got extra info
    # for us.
    sourcedir, basename = os.path.split(sourcefile)
    if not sourcedir:
        sourcedir = '.'
    module, ext = os.path.splitext(basename)
    f = open(icon_path, 'r')
    icon = f.read()
    f.close()
    # Things we look for:
    bundle_info = {
        'version': 1,
        'extra_files': {},
        'news': 'No news.',
        'icon': icon,
        'class': 'activity.VteActivity',
        'bundle_id': ('org.sugarlabs.pippy.%s%d' %
                      (generate_unique_id(),
                       int(round(uniform(1000, 9999), 0)))),
        'mime_types': '',
        'extra_info': '',
    }
    # Are any of these things in the module?
    try_import = False

    info = readmodule_ex(module, [sourcedir] + options.path)
    for func in list(bundle_info.keys()):
        p_a_func = 'pippy_activity_%s' % func
        if p_a_func in info:
            try_import = True
    if try_import:
        # Yes, let's try to execute them to get better info about our bundle
        oldpath = list(sys.path)
        sys.path[0:0] = [sourcedir] + options.path
        modobj = __import__(module)
        for func in list(bundle_info.keys()):
            p_a_func = 'pippy_activity_%s' % func
            if p_a_func in modobj.__dict__:
                bundle_info[func] = modobj.__dict__[p_a_func]()
        sys.path = oldpath

    # Okay!  We've done the hard part.  Now let's build a bundle.
    # Create a new temp dir in which to create the bundle.
    app_temp = mkdtemp('.activity', 'Pippy')  # Hope TMPDIR is set correctly!
    bundle = get_bundle_path()
    try:
        copytree('%s/library' % bundle, '%s/library' % app_temp)
        copy2('%s/activity.py' % bundle, '%s/activity.py' % app_temp)
        # create activity.info file.
        bundle_info['title'] = title
        bundle_info['pytitle'] = pytitle
        # put 'extra' files in place.
        extra_files = {
            'activity/activity.info': ACTIVITY_INFO_TEMPLATE % bundle_info,
            'activity/activity-icon.svg': bundle_info['icon'],
            'NEWS': bundle_info['news'],
        }
        extra_files.update(bundle_info['extra_files'])
        for path, contents in list(extra_files.items()):
            # safety first!
            assert '..' not in path
            dirname, filename = os.path.split(path)
            dirname = os.path.join(app_temp, dirname)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            with open(os.path.join(dirname, filename), 'w') as f:
                f.write(contents)
        # Put script into $app_temp/pippy_app.py
        copy2(sourcefile, '%s/pippy_app.py' % app_temp)
        # Invoke bundle builder
        olddir = os.getcwd()
        oldargv = sys.argv
        os.chdir(app_temp)
        sys.argv = ['setup.py', 'dist_xo']
        print('\r\nStarting bundlebuilder\r\n')
        bundlebuilder.start()
        sys.argv = oldargv
        os.chdir(olddir)
        # Move to destination directory.
        src = '%s/dist/%s-%d.xo' % (app_temp, pytitle,
                                    bundle_info['version'])
        dst = '%s/%s-%d.xo' % (options.dir, pytitle,
                               bundle_info['version'])
        if not os.path.exists(src):
            print('Cannot find %s\r\n' % (src))
        else:
            copy2(src, dst)
    finally:
        rmtree(app_temp, ignore_errors=True)
        print('Finally\r\n')


if __name__ == '__main__':
    from gettext import gettext as _

    if False:  # Change this to True to test within Pippy
        sys.argv = sys.argv + ['-d', '/tmp', 'Pippy',
                               '/home/olpc/pippy_app.py']
    print(_('Working...'))
    sys.stdout.flush()
    main()
    print(_('done!'))
    sys.exit(0)
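# --- Illustrative sketch, not part of the original file ---
# main() above introspects the source module for optional module-level
# pippy_activity_* hooks (one per key in bundle_info) and calls any it
# finds to override the bundle defaults.  A minimal pippy-style program
# exercising this protocol might look like the following; the file name
# 'hello_app.py' and the bundle id are hypothetical.
#
#     # hello_app.py
#     def pippy_activity_version():
#         return 2
#
#     def pippy_activity_bundle_id():
#         return 'org.example.HelloActivity'
#
#     print('hello, world')
#
# It could then be bundled with (title, source, and icon arguments as
# required by main(); paths hypothetical):
#     python3 pippy_app.py -d /tmp Hello hello_app.py activity-icon.svg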
gpl-3.0
Xeralux/tensorflow
tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py
119
3797
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse feature column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import dtypes
from tensorflow.python.framework.ops import internal_convert_to_tensor
from tensorflow.python.framework.ops import name_scope


class SparseFeatureColumn(object):
  """Represents a sparse feature column.

  Contains three tensors representing a sparse feature column, they are
  example indices (`int64`), feature indices (`int64`), and feature
  values (`float`).
  Feature weights are optional, and are treated as `1.0f` if missing.

  For example, consider a batch of 4 examples, which contains the following
  features in a particular `SparseFeatureColumn`:

  * Example 0: feature 5, value 1
  * Example 1: feature 6, value 1 and feature 10, value 0.5
  * Example 2: no features
  * Example 3: two copies of feature 2, value 1

  This SparseFeatureColumn will be represented as follows:

  ```
  <0, 5, 1>
  <1, 6, 1>
  <1, 10, 0.5>
  <3, 2, 1>
  <3, 2, 1>
  ```

  For a batch of 2 examples below:

  * Example 0: feature 5
  * Example 1: feature 6

  is represented by `SparseFeatureColumn` as:

  ```
  <0, 5, 1>
  <1, 6, 1>
  ```

  @@__init__
  @@example_indices
  @@feature_indices
  @@feature_values
  """

  def __init__(self, example_indices, feature_indices, feature_values):
    """Creates a `SparseFeatureColumn` representation.

    Args:
      example_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts
        python lists, or numpy arrays.
      feature_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts
        python lists, or numpy arrays.
      feature_values: An optional 1-D tensor float tensor of shape `[N]`.
        Also, accepts python lists, or numpy arrays.

    Returns:
      A `SparseFeatureColumn`
    """
    with name_scope(None, 'SparseFeatureColumn',
                    [example_indices, feature_indices]):
      self._example_indices = internal_convert_to_tensor(
          example_indices, name='example_indices', dtype=dtypes.int64)
      self._feature_indices = internal_convert_to_tensor(
          feature_indices, name='feature_indices', dtype=dtypes.int64)
    self._feature_values = None
    if feature_values is not None:
      with name_scope(None, 'SparseFeatureColumn', [feature_values]):
        self._feature_values = internal_convert_to_tensor(
            feature_values, name='feature_values', dtype=dtypes.float32)

  @property
  def example_indices(self):
    """The example indices represented as a dense tensor.

    Returns:
      A 1-D Tensor of int64 with shape `[N]`.
    """
    return self._example_indices

  @property
  def feature_indices(self):
    """The feature indices represented as a dense tensor.

    Returns:
      A 1-D Tensor of int64 with shape `[N]`.
    """
    return self._feature_indices

  @property
  def feature_values(self):
    """The feature values represented as a dense tensor.

    Returns:
      May return None, or a 1-D Tensor of float32 with shape `[N]`.
    """
    return self._feature_values
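# --- Illustrative usage sketch, not part of the original file ---
# Constructing the 4-example batch documented in the class docstring:
# example 2 contributes no rows, and example 3's duplicated feature 2
# appears twice, one row per <example, feature, value> triple.
#
#     sfc = SparseFeatureColumn(
#         example_indices=[0, 1, 1, 3, 3],
#         feature_indices=[5, 6, 10, 2, 2],
#         feature_values=[1.0, 1.0, 0.5, 1.0, 1.0])
#     sfc.example_indices  # 1-D int64 Tensor of shape [5]
#     sfc.feature_values   # 1-D float32 Tensor of shape [5]
#
# Passing feature_values=None leaves feature_values as None, and
# consumers treat every value as 1.0, per the docstring above.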
apache-2.0