content: string, lengths 0 to 1.55M
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_stmt>re<class_stmt>Autoconf(AutotoolsPackage GNUMirrorPackage)<block_start>"""Autoconf -- system configuration part of autotools"""<line_sep>homepage='https://www.gnu.org/software/autoconf/'<line_sep>gnu_mirror_path='autoconf/autoconf-2.69.tar.gz'<line_sep>version('2.71' sha256='431075ad0bf529ef13cb41e9042c542381103e80015686222b8a9d4abef42a1c')<line_sep>version('2.70' sha256='f05f410fda74323ada4bdc4610db37f8dbd556602ba65bc843edb4d4d4a1b2b7')<line_sep>version('2.69' sha256='954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969' preferred=<true>)<line_sep>version('2.62' sha256='83aa747e6443def0ebd1882509c53f5a2133f502ddefa21b3de141c433914bdd')<line_sep>version('2.59' sha256='9cd05c73c5fcb1f5ccae53dd6cac36bb8cb9c7b3e97ffae5a7c05c72594c88d8')<line_sep># https://savannah.gnu.org/support/?110396 patch('https://git.savannah.gnu.org/cgit/autoconf.git/patch/?id=05972f49ee632cd98057a3caf82ebfb9574846da' sha256='eaa3f69d927a853313a0b06e2117c51adab6377a2278549b05abc5df93643e16' when='@2.70')<line_sep># Apply long-time released and already in-use upstream patches to fix test cases: # tests/foreign.at (Libtool): Be tolerant of 'quote' replacing the older `quote' patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-fix-libtool-test.patch' sha256='7793209b33013dc0f81208718c68440c5aae80e7a1c4b8d336e382525af791a7' when='@2.69')<line_sep># Fix bin/autoscan.in for current perl releases (reported already in January 2013) patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-perl-5.26.patch' sha256='35c449281546376449766f92d49fc121ca50e330e60fefcfc9be2af3253082c2' when='@2.62:2.69 ^perl@5.17:')<line_sep># Fix bin/autoheader.in for current perl relases not having "." in @INC: patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-perl-5.26-2.patch' sha256='a49dd5bac3b62daa0ff688ab4d508d71dbd2f4f8d7e2a02321926346161bf3ee' when='@2.62:2.69 ^perl@5.17:')<line_sep># Note: m4 is not a pure build-time dependency of autoconf. m4 is # needed when autoconf runs, not only when autoconf is built. depends_on('m4@1.4.6:' type=('build' 'run'))<line_sep>depends_on('perl' type=('build' 'run'))<line_sep>build_directory='spack-build'<line_sep>tags=['build-tools']<line_sep>executables=['^autoconf$' '^autoheader$' '^autom4te$' '^autoreconf$' '^autoscan$' '^autoupdate$' '^ifnames$']<line_sep>@classmethod<def_stmt>determine_version cls exe<block_start>output=Executable(exe)('--version' output=str error=str)<line_sep>match=re.search(r'\(GNU Autoconf\)\s+(\S+)' output)<line_sep><return>match.group(1)<if>match<else><none><block_end><def_stmt>patch self# The full perl shebang might be too long; we have to fix this here # because autom4te is called during the build <block_start>patched_file='bin/autom4te.in'<line_sep># We save and restore the modification timestamp of the file to prevent # regeneration of the respective man page: <with_stmt>keep_modification_time(patched_file)<block_start>filter_file('^#! @PERL@ -w' '#! 
/usr/bin/env perl' patched_file)<block_end><if_stmt>self.version<eq>Version('2.62')# skip help2man for patched autoheader.in and autoscan.in <block_start>touch('man/autoheader.1')<line_sep>touch('man/autoscan.1')<block_end><block_end># make installcheck would execute the testsuite a 2nd time, skip it <def_stmt>installcheck self<block_start><pass><block_end>@run_after('install')<def_stmt>filter_sbang self# We have to do this after install because otherwise the install # target will try to rebuild the binaries (filter_file updates the # timestamps) # Revert sbang, so Spack's sbang hook can fix it up <block_start>filter_file('^#! /usr/bin/env perl' '#! {0} -w'.format(self.spec['perl'].command.path) self.prefix.bin.autom4te backup=<false>)<block_end><def_stmt>_make_executable self name<block_start><return>Executable(join_path(self.prefix.bin name))<block_end><def_stmt>setup_dependent_package self module dependent_spec# Autoconf is very likely to be a build dependency, # so we add the tools it provides to the dependent module <block_start>executables=['autoconf' 'autoheader' 'autom4te' 'autoreconf' 'autoscan' 'autoupdate' 'ifnames']<for_stmt>name executables<block_start>setattr(module name self._make_executable(name))<block_end><block_end><block_end>
<class_stmt>myIntSynthProvider(object)<block_start><def_stmt>__init__ self valobj dict<block_start>self.valobj=valobj<line_sep>self.val=self.valobj.GetChildMemberWithName("theValue")<block_end><def_stmt>num_children self<block_start><return>0<block_end><def_stmt>get_child_at_index self index<block_start><return><none><block_end><def_stmt>get_child_index self name<block_start><return><none><block_end><def_stmt>update self<block_start><return><false><block_end><def_stmt>has_children self<block_start><return><false><block_end><def_stmt>get_value self<block_start><return>self.val<block_end><block_end><class_stmt>myArraySynthProvider(object)<block_start><def_stmt>__init__ self valobj dict<block_start>self.valobj=valobj<line_sep>self.array=self.valobj.GetChildMemberWithName("array")<block_end><def_stmt>num_children self max_count<block_start><if_stmt>16<l>max_count<block_start><return>16<block_end><return>max_count<block_end><def_stmt>get_child_at_index self index<block_start><return><none><block_end># Keep it simple when this is not tested here. <def_stmt>get_child_index self name<block_start><return><none><block_end># Keep it simple when this is not tested here. <def_stmt>has_children self<block_start><return><true><block_end><block_end>
<import_stmt>math<line_sep># Modify the parameters here UNROLL_FACTOR=32<line_sep>DATA_T='unsigned short'<line_sep># Generate the code data_type=DATA_T<line_sep>level=int(math.log2(UNROLL_FACTOR))<for_stmt>layer range(level-1 -1 -1)<block_start>pair=int(math.pow(2 layer))<for_stmt>i range(pair)# data_t tmp_[layer]_[pair] = tmp_[layer+1]_[pair*2]_[pair*2+1] <block_start><if_stmt>layer<eq>level-1<block_start>print(f'{data_type} mul_{layer}_{i}_0 = local_A[0][{i<times>2}] * local_B[0][{i<times>2}];')<line_sep>print(f'{data_type} add_{layer}_{i} = mul_{layer}_{i}_0 + local_A[0][{i<times>2+1}] * local_B[0][{i<times>2+1}];')<block_end><else_stmt><block_start>print(f'{data_type} add_{layer}_{i} = add_{layer+1}_{i<times>2} + add_{layer+1}_{i<times>2+1};')<block_end><block_end><block_end>print('local_C[c7][c6] += add_0_0;')<line_sep>
<import_from_stmt>. depth<import_from_stmt>. outliers<import_from_stmt>. stats<import_from_stmt>. visualization<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("PAT")<line_sep># initialize MessageLogger and output report process.load("FWCore.MessageLogger.MessageLogger_cfi")<line_sep>process.MessageLogger.cerr.threshold='INFO'<line_sep>process.MessageLogger.cerr.INFO=cms.untracked.PSet(default=cms.untracked.PSet(limit=cms.untracked.int32(0)) PATSummaryTables=cms.untracked.PSet(limit=cms.untracked.int32(-1)))<line_sep>process.options=cms.untracked.PSet(wantSummary=cms.untracked.bool(<true>))<line_sep># source process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring(#'file:/afs/cern.ch/cms/PRS/top/cmssw-data/relval200-for-pat-testing/TauolaTTbar-Summer08_IDEAL_V9_v1-AODSIM.80.root' '/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/1E84F77B-341C-DE11-8A99-0019DB29C5FC.root' '/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/34267FD6-1C1C-DE11-A836-001617C3B78C.root' '/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/68BF59CF-1C1C-DE11-AFA9-000423D98BC4.root'))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1000))<line_sep>process.load("Configuration.StandardSequences.Geometry_cff")<line_sep>process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<line_sep>#process.GlobalTag.globaltag = cms.string('IDEAL_V9::All') process.GlobalTag.globaltag=cms.string('STARTUP_V9::All')<line_sep>process.load("Configuration.StandardSequences.MagneticField_cff")<line_sep># PAT Layer 0+1 process.load("PhysicsTools.PatAlgos.patSequences_cff")<line_sep>process.load("MuonAnalysis.MuonAssociators.muonL1Match_cfi")<line_sep>process.muonL1Match.preselection=cms.string("")<line_sep>process.allLayer1Muons.trigPrimMatch=cms.VInputTag(cms.InputTag("muonL1Match") cms.InputTag("muonL1Match" "propagatedReco") )<line_sep>## Put your EDAnalyzer here ## process.plots = cms.EDFilter("DataPlotter", ## muons = cms.InputTag("cleanLayer1Muons"), ## muonCut = cms.string("") ## ) process.p=cms.Path(process.muonL1Match<times>process.patDefaultSequence# * process.plots )<line_sep>process.TFileService=cms.Service("TFileService" fileName=cms.string("plots.root"))<line_sep>
<import_from_stmt>datetime datetime<line_sep># string to datetime object datetime_str='09/19/18 13:55:26'<line_sep>datetime_object=datetime.strptime(datetime_str '%m/%d/%y %H:%M:%S')<line_sep>print(type(datetime_object))<line_sep>print(datetime_object)# printed in default format # string to date object date_str='09-19-2018'<line_sep>date_object=datetime.strptime(date_str '%m-%d-%Y').date()<line_sep>print(type(date_object))<line_sep>print(date_object)# printed in default formatting # string to time object time_str='13::55::26'<line_sep>time_object=datetime.strptime(time_str '%H::%M::%S').time()<line_sep>print(type(time_object))<line_sep>print(time_object)<line_sep># time module <import_stmt>time<line_sep>time_obj=time.strptime(time_str '%H::%M::%S')<line_sep>print(type(time_obj))<line_sep>print(time_obj)<line_sep># default formatting - "%a %b %d %H:%M:%S %Y" print(time.strptime('Wed Sep 19 14:55:02 2018'))<line_sep># exception handling example datetime_str='09/19/18 13:55:26'<try_stmt><block_start>datetime_object=datetime.strptime(datetime_str '%m/%d/%y')<block_end><except_stmt>ValueError<as>ve<block_start>print('ValueError Raised:' ve)<block_end>time_str='99::55::26'<try_stmt><block_start>time_object=time.strptime(time_str '%H::%M::%S')<block_end><except_stmt>ValueError<as>e<block_start>print('ValueError:' e)<block_end># str to datetime with locale <import_stmt>locale<line_sep>locale.setlocale(locale.LC_ALL 'de_DE')<line_sep>date_str_de_DE='10-Dezember-2018 Montag'# de_DE locale datetime_object=datetime.strptime(date_str_de_DE '%d-%B-%Y %A')<line_sep>print(datetime_object)<line_sep>
""" 235. Lowest Common Ancestor of a Binary Search Tree """<line_sep># Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None <class_stmt>Solution(object)<block_start><def_stmt>lowestCommonAncestor self root p q<block_start>""" :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode """<line_sep>minn=min(p.val q.val)<line_sep>maxx=max(p.val q.val)<while_stmt>root.val<l>minn<or>root.val<g>maxx<block_start><if_stmt>root.val<l>minn<block_start>root=root.right<block_end><else_stmt><block_start>root=root.left<block_end><block_end><return>root<block_end><block_end><class_stmt>Solution(object)<block_start><def_stmt>lowestCommonAncestor self root p q<block_start>""" :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode """<if_stmt>(p.val-root.val)<times>(q.val-root.val)<le>0<block_start><return>root<block_end><elif_stmt>p.val<l>root.val<block_start><return>self.lowestCommonAncestor(root.left p q)<block_end><else_stmt><block_start><return>self.lowestCommonAncestor(root.right p q)<block_end><block_end><block_end>
<import_from_stmt>functools wraps<import_from_stmt>collections Iterable<import_from_stmt>django.conf settings<import_from_stmt>django.shortcuts render<import_from_stmt>django.core.exceptions PermissionDenied<import_from_stmt>django.utils.decorators available_attrs<import_from_stmt>django.utils.encoding force_str<import_from_stmt>django.utils.six.moves.urllib.parse urlparse<import_from_stmt>django.utils.six string_types<import_from_stmt>django.contrib.auth REDIRECT_FIELD_NAME<import_from_stmt>django.shortcuts resolve_url<import_from_stmt>waliki.utils is_authenticated<import_from_stmt>.models ACLRule<import_from_stmt>.settings WALIKI_ANONYMOUS_USER_PERMISSIONS WALIKI_LOGGED_USER_PERMISSIONS WALIKI_RENDER_403 <def_stmt>check_perms perms user slug raise_exception=<false><block_start>"""a helper user to check if a user has the permissions for a given slug"""<if_stmt>isinstance(perms string_types)<block_start>perms={perms}<block_end><else_stmt><block_start>perms=set(perms)<block_end>allowed_users=ACLRule.get_users_for(perms slug)<if_stmt>allowed_users<block_start><return>user<in>allowed_users<block_end><if_stmt>perms.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS))<block_start><return><true><block_end><if_stmt>is_authenticated(user)<and>perms.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS))<block_start><return><true><block_end># First check if the user has the permission (even anon users) <if_stmt>user.has_perms(['waliki.%s'%p<for>p perms])<block_start><return><true><block_end># In case the 403 handler should be called raise the exception <if_stmt>raise_exception<block_start><raise>PermissionDenied<block_end># As the last resort, show the login form <return><false><block_end><def_stmt>permission_required perms login_url=<none> raise_exception=<false> redirect_field_name=REDIRECT_FIELD_NAME<block_start>""" this is analog to django's builtin ``permission_required`` decorator, but improved to check per slug ACLRules and default permissions for anonymous and logged in users if there is a rule affecting a slug, the user needs to be part of the rule's allowed users. If there isn't a matching rule, defaults permissions apply. """<def_stmt>decorator view_func<block_start>@wraps(view_func assigned=available_attrs(view_func))<def_stmt>_wrapped_view request *args **kwargs<block_start><if_stmt>check_perms(perms request.user kwargs['slug'] raise_exception=raise_exception)<block_start><return>view_func(request *args **kwargs)<block_end><if_stmt>is_authenticated(request.user)<block_start><if_stmt>WALIKI_RENDER_403<block_start><return>render(request 'waliki/403.html' kwargs status=403)<block_end><else_stmt><block_start><raise>PermissionDenied<block_end><block_end>path=request.build_absolute_uri()<line_sep># urlparse chokes on lazy objects in Python 3, force to str resolved_login_url=force_str(resolve_url(login_url<or>settings.LOGIN_URL))<line_sep># If the login url is the same scheme and net location then just # use the path as the "next" url. login_scheme,login_netloc=urlparse(resolved_login_url)[:2]<line_sep>current_scheme,current_netloc=urlparse(path)[:2]<if_stmt>((<not>login_scheme<or>login_scheme<eq>current_scheme)<and>(<not>login_netloc<or>login_netloc<eq>current_netloc))<block_start>path=request.get_full_path()<block_end><import_from_stmt>django.contrib.auth.views redirect_to_login<line_sep><return>redirect_to_login(path resolved_login_url redirect_field_name)<block_end><return>_wrapped_view<block_end><return>decorator<block_end>
""" Generate coulomb matrices for molecules. See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003. """<import_stmt>numpy<as>np<import_from_stmt>typing Any List Optional<import_from_stmt>deepchem.utils.typing RDKitMol<import_from_stmt>deepchem.utils.data_utils pad_array<import_from_stmt>deepchem.feat.base_classes MolecularFeaturizer<class_stmt>CoulombMatrix(MolecularFeaturizer)<block_start>"""Calculate Coulomb matrices for molecules. Coulomb matrices provide a representation of the electronic structure of a molecule. For a molecule with `N` atoms, the Coulomb matrix is a `N X N` matrix where each element gives the strength of the electrostatic interaction between two atoms. The method is described in more detail in [1]_. Examples -------- >>> import deepchem as dc >>> featurizers = dc.feat.CoulombMatrix(max_atoms=23) >>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv >>> tasks = ["atomization_energy"] >>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers) >>> dataset = loader.create_dataset(input_file) References ---------- .. [1] Montavon, Grégoire, et al. "Learning invariant representations of molecules for atomization energy prediction." Advances in neural information processing systems. 2012. Note ---- This class requires RDKit to be installed. """<def_stmt>__init__ self max_atoms:int remove_hydrogens:bool=<false> randomize:bool=<false> upper_tri:bool=<false> n_samples:int=1 seed:Optional[int]=<none><block_start>"""Initialize this featurizer. Parameters ---------- max_atoms: int The maximum number of atoms expected for molecules this featurizer will process. remove_hydrogens: bool, optional (default False) If True, remove hydrogens before processing them. randomize: bool, optional (default False) If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices. upper_tri: bool, optional (default False) Generate only upper triangle part of Coulomb matrices. n_samples: int, optional (default 1) If `randomize` is set to True, the number of random samples to draw. seed: int, optional (default None) Random seed to use. """<line_sep>self.max_atoms=int(max_atoms)<line_sep>self.remove_hydrogens=remove_hydrogens<line_sep>self.randomize=randomize<line_sep>self.upper_tri=upper_tri<line_sep>self.n_samples=n_samples<if_stmt>seed<is><not><none><block_start>seed=int(seed)<block_end>self.seed=seed<block_end><def_stmt>_featurize self datapoint:RDKitMol **kwargs<arrow>np.ndarray<block_start>""" Calculate Coulomb matrices for molecules. If extra randomized matrices are generated, they are treated as if they are features for additional conformers. Since Coulomb matrices are symmetric, only the (flattened) upper triangular portion is returned. Parameters ---------- datapoint: rdkit.Chem.rdchem.Mol RDKit Mol object Returns ------- np.ndarray The coulomb matrices of the given molecule. The default shape is `(num_confs, max_atoms, max_atoms)`. If num_confs == 1, the shape is `(max_atoms, max_atoms)`. 
"""<if_stmt>'mol'<in>kwargs<block_start>datapoint=kwargs.get("mol")<line_sep><raise>DeprecationWarning('Mol is being phased out as a parameter, please pass "datapoint" instead.')<block_end>features=self.coulomb_matrix(datapoint)<if_stmt>self.upper_tri<block_start>features=[f[np.triu_indices_from(f)]<for>f features]<block_end>features=np.asarray(features)<if_stmt>features.shape[0]<eq>1# `(1, max_atoms, max_atoms)` -> `(max_atoms, max_atoms)` <block_start>features=np.squeeze(features axis=0)<block_end><return>features<block_end><def_stmt>coulomb_matrix self mol:RDKitMol<arrow>np.ndarray<block_start>""" Generate Coulomb matrices for each conformer of the given molecule. Parameters ---------- mol: rdkit.Chem.rdchem.Mol RDKit Mol object Returns ------- np.ndarray The coulomb matrices of the given molecule """<try_stmt><block_start><import_from_stmt>rdkit Chem<import_from_stmt>rdkit.Chem AllChem<block_end><except_stmt>ModuleNotFoundError<block_start><raise>ImportError("This class requires RDKit to be installed.")<block_end># Check whether num_confs >=1 or not num_confs=len(mol.GetConformers())<if_stmt>num_confs<eq>0<block_start>mol=Chem.AddHs(mol)<line_sep>AllChem.EmbedMolecule(mol AllChem.ETKDG())<block_end><if_stmt>self.remove_hydrogens<block_start>mol=Chem.RemoveHs(mol)<block_end>n_atoms=mol.GetNumAtoms()<line_sep>z=[atom.GetAtomicNum()<for>atom mol.GetAtoms()]<line_sep>rval=[]<for_stmt>conf mol.GetConformers()<block_start>d=self.get_interatomic_distances(conf)<line_sep>m=np.outer(z z)/d<line_sep>m[range(n_atoms) range(n_atoms)]=0.5<times>np.array(z)<power>2.4<if_stmt>self.randomize<block_start><for_stmt>random_m self.randomize_coulomb_matrix(m)<block_start>random_m=pad_array(random_m self.max_atoms)<line_sep>rval.append(random_m)<block_end><block_end><else_stmt><block_start>m=pad_array(m self.max_atoms)<line_sep>rval.append(m)<block_end><block_end><return>np.asarray(rval)<block_end><def_stmt>randomize_coulomb_matrix self m:np.ndarray<arrow>List[np.ndarray]<block_start>"""Randomize a Coulomb matrix as decribed in [1]_: 1. Compute row norms for M in a vector row_norms. 2. Sample a zero-mean unit-variance noise vector e with dimension equal to row_norms. 3. Permute the rows and columns of M with the permutation that sorts row_norms + e. Parameters ---------- m: np.ndarray Coulomb matrix. Returns ------- List[np.ndarray] List of the random coulomb matrix References ---------- .. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003 """<line_sep>rval=[]<line_sep>row_norms=np.asarray([np.linalg.norm(row)<for>row m] dtype=float)<line_sep>rng=np.random.RandomState(self.seed)<for_stmt>i range(self.n_samples)<block_start>e=rng.normal(size=row_norms.size)<line_sep>p=np.argsort(row_norms+e)<line_sep>new=m[p][: p]# permute rows first, then columns rval.append(new)<block_end><return>rval<block_end>@staticmethod<def_stmt>get_interatomic_distances conf:Any<arrow>np.ndarray<block_start>""" Get interatomic distances for atoms in a molecular conformer. Parameters ---------- conf: rdkit.Chem.rdchem.Conformer Molecule conformer. 
Returns ------- np.ndarray The distances matrix for all atoms in a molecule """<line_sep>n_atoms=conf.GetNumAtoms()<line_sep>coords=[# Convert AtomPositions from Angstrom to bohr (atomic units) conf.GetAtomPosition(i).__idiv__(0.52917721092)<for>i range(n_atoms)]<line_sep>d=np.zeros((n_atoms n_atoms) dtype=float)<for_stmt>i range(n_atoms)<block_start><for_stmt>j range(i)<block_start>d[i j]=coords[i].Distance(coords[j])<line_sep>d[j i]=d[i j]<block_end><block_end><return>d<block_end><block_end><class_stmt>CoulombMatrixEig(CoulombMatrix)<block_start>"""Calculate the eigenvalues of Coulomb matrices for molecules. This featurizer computes the eigenvalues of the Coulomb matrices for provided molecules. Coulomb matrices are described in [1]_. Examples -------- >>> import deepchem as dc >>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23) >>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv >>> tasks = ["atomization_energy"] >>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers) >>> dataset = loader.create_dataset(input_file) References ---------- .. [1] Montavon, Grégoire, et al. "Learning invariant representations of molecules for atomization energy prediction." Advances in neural information processing systems. 2012. """<def_stmt>__init__ self max_atoms:int remove_hydrogens:bool=<false> randomize:bool=<false> n_samples:int=1 seed:Optional[int]=<none><block_start>"""Initialize this featurizer. Parameters ---------- max_atoms: int The maximum number of atoms expected for molecules this featurizer will process. remove_hydrogens: bool, optional (default False) If True, remove hydrogens before processing them. randomize: bool, optional (default False) If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices. n_samples: int, optional (default 1) If `randomize` is set to True, the number of random samples to draw. seed: int, optional (default None) Random seed to use. """<line_sep>self.max_atoms=int(max_atoms)<line_sep>self.remove_hydrogens=remove_hydrogens<line_sep>self.randomize=randomize<line_sep>self.n_samples=n_samples<if_stmt>seed<is><not><none><block_start>seed=int(seed)<block_end>self.seed=seed<block_end><def_stmt>_featurize self datapoint:RDKitMol **kwargs<arrow>np.ndarray<block_start>""" Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues are returned sorted by absolute value in descending order and padded by max_atoms. Parameters ---------- datapoint: rdkit.Chem.rdchem.Mol RDKit Mol object Returns ------- np.ndarray The eigenvalues of Coulomb matrix for molecules. The default shape is `(num_confs, max_atoms)`. If num_confs == 1, the shape is `(max_atoms,)`. """<if_stmt>'mol'<in>kwargs<block_start>datapoint=kwargs.get("mol")<line_sep><raise>DeprecationWarning('Mol is being phased out as a parameter, please pass "datapoint" instead.')<block_end>cmat=self.coulomb_matrix(datapoint)<line_sep>features_list=[]<for_stmt>f cmat<block_start>w,v=np.linalg.eig(f)<line_sep>w_abs=np.abs(w)<line_sep>sortidx=np.argsort(w_abs)<line_sep>sortidx=sortidx[::-1]<line_sep>w=w[sortidx]<line_sep>f=pad_array(w self.max_atoms)<line_sep>features_list.append(f)<block_end>features=np.asarray(features_list)<if_stmt>features.shape[0]<eq>1# `(1, max_atoms)` -> `(max_atoms,)` <block_start>features=np.squeeze(features axis=0)<block_end><return>features<block_end><block_end>
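The randomize_coulomb_matrix docstring above spells out the Montavon et al. trick in three steps: compute per-row norms, add zero-mean unit-variance noise, and reorder rows and columns by the noisy norms. A minimal standalone sketch of just that step, using NumPy on a made-up 3x3 matrix (illustrative values only, no RDKit needed):

import numpy as np

# Toy symmetric "Coulomb matrix" for a 3-atom system (made-up numbers).
m = np.array([[36.9, 20.1,  5.5],
              [20.1, 73.5, 33.7],
              [ 5.5, 33.7, 36.9]])

rng = np.random.RandomState(0)
row_norms = np.linalg.norm(m, axis=1)   # step 1: per-row norms
e = rng.normal(size=row_norms.size)     # step 2: zero-mean, unit-variance noise
p = np.argsort(row_norms + e)           # step 3: permutation that sorts the noisy norms
randomized = m[p][:, p]                 # same permutation applied to rows and columns

Applying one permutation to both rows and columns keeps the matrix symmetric, which is why the featurizer writes m[p][:, p] rather than permuting each axis independently.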
# pylint: disable=no-self-use,invalid-name <import_from_future_stmt> division<import_from_future_stmt> absolute_import<import_stmt>pytest<import_from_stmt>allennlp.data.dataset_readers SnliReader<import_from_stmt>allennlp.common.util ensure_list<import_from_stmt>allennlp.common.testing AllenNlpTestCase<class_stmt>TestSnliReader(object)<block_start>@pytest.mark.parametrize(u"lazy" (<true> <false>))<def_stmt>test_read_from_file self lazy<block_start>reader=SnliReader(lazy=lazy)<line_sep>instances=reader.read(AllenNlpTestCase.FIXTURES_ROOT/u'data'/u'snli.jsonl')<line_sep>instances=ensure_list(instances)<line_sep>instance1={u"premise":[u"A" u"person" u"on" u"a" u"horse" u"jumps" u"over" u"a" u"broken" u"down" u"airplane" u"."] u"hypothesis":[u"A" u"person" u"is" u"training" u"his" u"horse" u"for" u"a" u"competition" u"."] u"label":u"neutral"}<line_sep>instance2={u"premise":[u"A" u"person" u"on" u"a" u"horse" u"jumps" u"over" u"a" u"broken" u"down" u"airplane" u"."] u"hypothesis":[u"A" u"person" u"is" u"at" u"a" u"diner" u"," u"ordering" u"an" u"omelette" u"."] u"label":u"contradiction"}<line_sep>instance3={u"premise":[u"A" u"person" u"on" u"a" u"horse" u"jumps" u"over" u"a" u"broken" u"down" u"airplane" u"."] u"hypothesis":[u"A" u"person" u"is" u"outdoors" u"," u"on" u"a" u"horse" u"."] u"label":u"entailment"}<assert_stmt>len(instances)<eq>3<line_sep>fields=instances[0].fields<assert_stmt>[t.text<for>t fields[u"premise"].tokens]<eq>instance1[u"premise"]<assert_stmt>[t.text<for>t fields[u"hypothesis"].tokens]<eq>instance1[u"hypothesis"]<assert_stmt>fields[u"label"].label<eq>instance1[u"label"]<line_sep>fields=instances[1].fields<assert_stmt>[t.text<for>t fields[u"premise"].tokens]<eq>instance2[u"premise"]<assert_stmt>[t.text<for>t fields[u"hypothesis"].tokens]<eq>instance2[u"hypothesis"]<assert_stmt>fields[u"label"].label<eq>instance2[u"label"]<line_sep>fields=instances[2].fields<assert_stmt>[t.text<for>t fields[u"premise"].tokens]<eq>instance3[u"premise"]<assert_stmt>[t.text<for>t fields[u"hypothesis"].tokens]<eq>instance3[u"hypothesis"]<assert_stmt>fields[u"label"].label<eq>instance3[u"label"]<block_end><block_end>
<import_from_stmt>flask Flask<import_from_stmt>flask_restful_swagger.swagger SwaggerRegistry<try_stmt><block_start><import_from_stmt>unittest.mock patch<block_end><except_stmt>ImportError<block_start><import_from_stmt>mock patch<block_end>@patch("flask_restful_swagger.swagger._get_current_registry")@patch("flask_restful_swagger.swagger.render_homepage")<def_stmt>test_get_swagger_registry homepage registry<block_start>mock_registry={"apiVersion":"mock_version" "swaggerVersion":"mock_swagger_version" "basePath":"mock_path" "spec_endpoint_path":"mock_spec_endpoint_path" "description":"mock_description" }<line_sep>registry.return_value=mock_registry<line_sep>app=Flask(__name__)<line_sep>resource=SwaggerRegistry()<line_sep>bases=[base.__name__<for>base SwaggerRegistry.__mro__]<assert_stmt>sorted(bases)<eq>["MethodView" "Resource" "SwaggerRegistry" "View" "object" ]<with_stmt>app.test_request_context(path="/some_path.html")<block_start>_=resource.get()<assert_stmt>homepage.called<line_sep>homepage.assert_called_once_with("mock_pathmock_spec_endpoint_path/_/resource_list.json")<block_end><with_stmt>app.test_request_context(path="/some_path")<block_start>homepage.reset_mock()<line_sep>response=resource.get()<assert_stmt><not>homepage.called<assert_stmt>response<eq>mock_registry<block_end><block_end>
<import_from_stmt>netCDF4 Dataset<import_stmt>matplotlib<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>matplotlib.patches Polygon<import_from_stmt>matplotlib.collections PatchCollection<import_stmt>matplotlib.cm<as>cm<import_stmt>numpy<as>np<line_sep>#------------------------------------------------------------- <def_stmt>plot_subfigure axis array nCells nEdgesOnCell verticesOnCell xCell yCell zCell xVertex yVertex zVertex cmin cmax cmap<block_start>xMin=1.0e30<line_sep>xMax=-1.0e30<line_sep>yMin=1.0e30<line_sep>yMax=-1.0e30<line_sep>cmap=plt.get_cmap(cmap)<line_sep>patches=[]<line_sep>colors=[]<for_stmt>iCell range(0 nCells)<block_start><if_stmt>(yCell[iCell]<g>0.0)<block_start>vertices=[]<for_stmt>iVertexOnCell range(0 nEdgesOnCell[iCell])<block_start>iVertex=verticesOnCell[iCell iVertexOnCell]<line_sep>vertices.append((xVertex[iVertex] zVertex[iVertex]))<block_end>colors.append(array[iCell])<line_sep>patches.append(Polygon(vertices))<line_sep>xMin=min(xMin xVertex[iVertex])<line_sep>xMax=max(xMax xVertex[iVertex])<line_sep>yMin=min(yMin zVertex[iVertex])<line_sep>yMax=max(yMax zVertex[iVertex])<block_end><block_end>pc=PatchCollection(patches cmap=cmap)<line_sep>pc.set_array(np.array(colors))<line_sep>pc.set_clim(cmin cmax)<line_sep>axis.add_collection(pc)<line_sep>axis.set_xlim(xMin xMax)<line_sep>axis.set_ylim(yMin yMax)<line_sep>axis.set_aspect("equal")<line_sep>axis.ticklabel_format(style='plain')<line_sep>axis.tick_params(axis='x' which='both' bottom=<false> top=<false> labelbottom=<false>)<line_sep>axis.tick_params(axis='y' which='both' left=<false> right=<false> labelleft=<false>)<block_end>#------------------------------------------------------------- <def_stmt>plot_testcase <block_start>nGrids=[2562 10242 40962 163842]<line_sep>testTypes=["cosine_bell" "slotted_cylinder"]<line_sep>methods=["IR" "IR" "upwind"]<line_sep>iTimes=[0 -1 -1]<for_stmt>nGrid nGrids<block_start>print("nGrid: " nGrid)<line_sep>fig,axes=plt.subplots(3 4)<line_sep>iTestType=-1<for_stmt>testType testTypes<block_start>iTestType<augadd>1<line_sep>print(" Test type: " testType)<line_sep>iMethod=-1<for_stmt>method,iTime zip(methods iTimes)<block_start>iMethod<augadd>1<line_sep>print(" Method: " method ", iTime: " iTime)<line_sep>filenamein="./output_%s_%s_%i/output.2000.nc"%(method testType nGrid)<line_sep>filein=Dataset(filenamein "r")<line_sep>nCells=len(filein.dimensions["nCells"])<line_sep>nEdgesOnCell=filein.variables["nEdgesOnCell"][:]<line_sep>verticesOnCell=filein.variables["verticesOnCell"][:]<line_sep>xCell=filein.variables["xCell"][:]<line_sep>yCell=filein.variables["yCell"][:]<line_sep>zCell=filein.variables["zCell"][:]<line_sep>xVertex=filein.variables["xVertex"][:]<line_sep>yVertex=filein.variables["yVertex"][:]<line_sep>zVertex=filein.variables["zVertex"][:]<line_sep>verticesOnCell[:]=verticesOnCell[:]-1<line_sep>iceAreaCategory=filein.variables["iceAreaCategory"][:]<line_sep>filein.close()<line_sep>iceAreaCell=np.sum(iceAreaCategory axis=(2 3))<line_sep>plot_subfigure(axes[iMethod iTestType<times>2] iceAreaCell[iTime] nCells nEdgesOnCell verticesOnCell xCell yCell zCell xVertex yVertex zVertex 0.0 1.0 "viridis")<line_sep>iceAreaCellDiff=iceAreaCell[iTime]-iceAreaCell[0]<if_stmt>(iMethod<ne>0)<block_start>plot_subfigure(axes[iMethod iTestType<times>2+1] iceAreaCellDiff nCells nEdgesOnCell verticesOnCell xCell yCell zCell xVertex yVertex zVertex -1.0 1.0 "bwr")<block_end><else_stmt><block_start>axes[iMethod 
iTestType<times>2+1].axis('off')<block_end><block_end><block_end>plt.savefig("advection_%6.6i.png"%(nGrid) dpi=300)<line_sep>plt.cla()<line_sep>plt.close(fig)<block_end><block_end>#------------------------------------------------------------------------------- <if_stmt>__name__<eq>"__main__"<block_start>plot_testcase()<block_end>
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>abc<import_from_stmt>oslo_serialization jsonutils<as>json<import_from_stmt>freezer.storage physical<class_stmt>FsLikeStorage(physical.PhysicalStorage metaclass=abc.ABCMeta)<block_start>_type='fslike'<def_stmt>__init__ self storage_path max_segment_size skip_prepare=<false><block_start>super(FsLikeStorage self).__init__(storage_path=storage_path max_segment_size=max_segment_size skip_prepare=skip_prepare)<block_end><def_stmt>prepare self<block_start>self.create_dirs(self.storage_path)<block_end><def_stmt>info self<block_start><pass><block_end><def_stmt>write_backup self rich_queue backup<block_start>""" Stores backup in storage :type rich_queue: freezer.utils.streaming.RichQueue :type backup: freezer.storage.base.Backup """<line_sep>backup=backup.copy(storage=self)<line_sep>path=backup.data_path<line_sep>self.create_dirs(path.rsplit('/' 1)[0])<with_stmt>self.open(path mode='wb')<as>b_file<block_start><for_stmt>message rich_queue.get_messages()<block_start>b_file.write(message)<block_end><block_end><block_end><def_stmt>backup_blocks self backup<block_start>""" :param backup: :type backup: freezer.storage.base.Backup :return: """<with_stmt>self.open(backup.data_path 'rb')<as>backup_file<block_start><while_stmt><true><block_start>chunk=backup_file.read(self.max_segment_size)<if_stmt>len(chunk)<block_start><yield>chunk<block_end><else_stmt><block_start><break><block_end><block_end><block_end><block_end>@abc.abstractmethod<def_stmt>open self filename mode<block_start>""" :type filename: str :param filename: :type mode: str :param mode: :return: """<line_sep><pass><block_end><def_stmt>add_stream self stream package_name headers=<none><block_start>""" :param stream: data :param package_name: path :param headers: backup metadata information :return: """<line_sep>split=package_name.rsplit('/' 1)<line_sep># create backup_basedir backup_basedir="{0}/{1}".format(self.storage_path package_name)<line_sep>self.create_dirs(backup_basedir)<line_sep># define backup_data_name backup_basepath="{0}/{1}".format(backup_basedir split[0])<line_sep>backup_metadata="%s/metadata"%backup_basedir<line_sep># write backup to backup_basepath <with_stmt>self.open(backup_basepath 'wb')<as>backup_file<block_start><for_stmt>el stream<block_start>backup_file.write(el)<block_end><block_end># write data matadata to backup_metadata <with_stmt>self.open(backup_metadata 'wb')<as>backup_meta<block_start>backup_meta.write(json.dumps(headers))<block_end><block_end><block_end>
#### # This script demonstrates how to use the Tableau Server Client # to create new projects, both at the root level and how to nest them using # parent_id. # # # To run the script, you must have installed Python 3.6 or later. #### <import_stmt>argparse<import_stmt>logging<import_stmt>sys<import_stmt>tableauserverclient<as>TSC<def_stmt>create_project server project_item<block_start><try_stmt><block_start>project_item=server.projects.create(project_item)<line_sep>print('Created a new project called: %s'%project_item.name)<line_sep><return>project_item<block_end><except_stmt>TSC.ServerResponseError<block_start>print('We have already created this project: %s'%project_item.name)<line_sep>sys.exit(1)<block_end><block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser(description='Create new projects.')<line_sep># Common options; please keep those in sync across all samples parser.add_argument('--server' '-s' required=<true> help='server address')<line_sep>parser.add_argument('--site' '-S' help='site name')<line_sep>parser.add_argument('--token-name' '-p' required=<true> help='name of the personal access token used to sign into the server')<line_sep>parser.add_argument('--token-value' '-v' required=<true> help='value of the personal access token used to sign into the server')<line_sep>parser.add_argument('--logging-level' '-l' choices=['debug' 'info' 'error'] default='error' help='desired logging level (set to error by default)')<line_sep># Options specific to this sample # This sample has no additional options, yet. If you add some, please add them here args=parser.parse_args()<line_sep># Set logging level based on user input, or error by default logging_level=getattr(logging args.logging_level.upper())<line_sep>logging.basicConfig(level=logging_level)<line_sep>tableau_auth=TSC.PersonalAccessTokenAuth(args.token_name args.token_value site_id=args.site)<line_sep>server=TSC.Server(args.server use_server_version=<true>)<with_stmt>server.auth.sign_in(tableau_auth)# Use highest Server REST API version available <block_start>server.use_server_version()<line_sep># Without parent_id specified, projects are created at the top level. top_level_project=TSC.ProjectItem(name='Top Level Project')<line_sep>top_level_project=create_project(server top_level_project)<line_sep># Specifying parent_id creates a nested projects. child_project=TSC.ProjectItem(name='Child Project' parent_id=top_level_project.id)<line_sep>child_project=create_project(server child_project)<line_sep># Projects can be nested at any level. grand_child_project=TSC.ProjectItem(name='Grand Child Project' parent_id=child_project.id)<line_sep>grand_child_project=create_project(server grand_child_project)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_from_stmt>threading Thread<import_stmt>time<import_stmt>unittest<import_stmt>rasterio<as>rio<import_from_stmt>rasterio.env get_gdal_config<class_stmt>TestThreading(unittest.TestCase)<block_start><def_stmt>test_multiopen self<block_start>""" Open a file from different threads. Regression test for issue #986 """<def_stmt>func delay<block_start><try_stmt><block_start><with_stmt>rio.open('tests/data/RGB.byte.tif')<block_start>time.sleep(delay)<block_end><block_end><except_stmt>Exception<as>err<block_start><global>exceptions<line_sep>exceptions.append(err)<block_end><block_end><global>exceptions<line_sep>exceptions=[]<line_sep>t1=Thread(target=func args=(0.1 ))<line_sep>t2=Thread(target=func args=(0 ))<with_stmt>rio.Env()<block_start>t1.start()<line_sep>t2.start()# potential error if Env manages globals unsafely t1.join()<line_sep>t2.join()<block_end><assert_stmt><not>exceptions<block_end><def_stmt>test_reliability self<block_start>"""Allow for nondeterminism of race condition"""<for_stmt>i range(3)<block_start>self.test_multiopen()<block_end><block_end><block_end><def_stmt>test_child_thread_inherits_env <block_start>"""A new thread inherit's the main thread's env"""<def_stmt>func <block_start><with_stmt>rio.Env(lol='wut')<block_start><assert_stmt>get_gdal_config('lol')<eq>'wut'<line_sep># The next config option will have been set in the main thread. <assert_stmt>get_gdal_config('FROM_MAIN')<is><true><block_end><block_end>t1=Thread(target=func)<with_stmt>rio.Env(FROM_MAIN=<true>)<block_start>t1.start()<assert_stmt>get_gdal_config('FROM_MAIN')<is><true><assert_stmt>get_gdal_config('lol')<is><none><line_sep>t1.join()<block_end><block_end><def_stmt>test_child_thread_isolation <block_start>"""Child threads have isolated environments"""<def_stmt>func key value other_key<block_start>env={key:value}<with_stmt>rio.Env(**env)<block_start><assert_stmt>get_gdal_config(key)<eq>value<line_sep># The other key is one set in another child thread. <assert_stmt>get_gdal_config(other_key)<is><none><block_end><block_end>t1=Thread(target=func args=('is_t1' <true> 'is_t2'))<line_sep>t2=Thread(target=func args=('is_t2' <true> 'is_t1'))<line_sep>t1.start()<line_sep>t2.start()<line_sep>t1.join()<line_sep>t2.join()<block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>os<import_stmt>angr<line_sep>test_location=os.path.join(os.path.dirname(os.path.realpath(__file__)) '..' '..' 'binaries' 'tests')<def_stmt>test_vtable_extraction_x86_64 <block_start>p=angr.Project(os.path.join(test_location "x86_64" "cpp_classes") auto_load_libs=<false>)<line_sep>vtables_sizes={0x403cb0:24 0x403cd8:16 0x403cf8:16 0x403d18:16}<line_sep>vtable_analysis=p.analyses.VtableFinder()<line_sep>vtables=vtable_analysis.vtables_list<assert_stmt>len(vtables)<eq>4<for_stmt>vtable vtables<block_start><assert_stmt>vtable.vaddr<in>[0x403cb0 0x403cd8 0x403cf8 0x403d18]<assert_stmt>vtables_sizes[vtable.vaddr]<eq>vtable.size<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>test_vtable_extraction_x86_64()<block_end>
"""JSON (de)serialization framework. The framework presented here is somewhat based on `Go's "json" package`_ (especially the ``omitempty`` functionality). .. _`Go's "json" package`: http://golang.org/pkg/encoding/json/ """<import_stmt>abc<import_stmt>binascii<import_stmt>logging<import_stmt>OpenSSL<import_stmt>six<import_from_stmt>josepy b64 errors interfaces util<line_sep>logger=logging.getLogger(__name__)<class_stmt>Field(object)<block_start>"""JSON object field. :class:`Field` is meant to be used together with :class:`JSONObjectWithFields`. ``encoder`` (``decoder``) is a callable that accepts a single parameter, i.e. a value to be encoded (decoded), and returns the serialized (deserialized) value. In case of errors it should raise :class:`~josepy.errors.SerializationError` (:class:`~josepy.errors.DeserializationError`). Note, that ``decoder`` should perform partial serialization only. :ivar str json_name: Name of the field when encoded to JSON. :ivar default: Default value (used when not present in JSON object). :ivar bool omitempty: If ``True`` and the field value is empty, then it will not be included in the serialized JSON object, and ``default`` will be used for deserialization. Otherwise, if ``False``, field is considered as required, value will always be included in the serialized JSON objected, and it must also be present when deserializing. """<line_sep>__slots__=('json_name' 'default' 'omitempty' 'fdec' 'fenc')<def_stmt>__init__ self json_name default=<none> omitempty=<false> decoder=<none> encoder=<none># pylint: disable=too-many-arguments <block_start>self.json_name=json_name<line_sep>self.default=default<line_sep>self.omitempty=omitempty<line_sep>self.fdec=self.default_decoder<if>decoder<is><none><else>decoder<line_sep>self.fenc=self.default_encoder<if>encoder<is><none><else>encoder<block_end>@classmethod<def_stmt>_empty cls value<block_start>"""Is the provided value considered "empty" for this field? This is useful for subclasses that might want to override the definition of being empty, e.g. for some more exotic data types. """<line_sep><return><not>isinstance(value bool)<and><not>value<block_end><def_stmt>omit self value<block_start>"""Omit the value in output?"""<line_sep><return>self._empty(value)<and>self.omitempty<block_end><def_stmt>_update_params self **kwargs<block_start>current=dict(json_name=self.json_name default=self.default omitempty=self.omitempty decoder=self.fdec encoder=self.fenc)<line_sep>current.update(kwargs)<line_sep><return>type(self)(**current)<block_end># pylint: disable=star-args <def_stmt>decoder self fdec<block_start>"""Descriptor to change the decoder on JSON object field."""<line_sep><return>self._update_params(decoder=fdec)<block_end><def_stmt>encoder self fenc<block_start>"""Descriptor to change the encoder on JSON object field."""<line_sep><return>self._update_params(encoder=fenc)<block_end><def_stmt>decode self value<block_start>"""Decode a value, optionally with context JSON object."""<line_sep><return>self.fdec(value)<block_end><def_stmt>encode self value<block_start>"""Encode a value, optionally with context JSON object."""<line_sep><return>self.fenc(value)<block_end>@classmethod<def_stmt>default_decoder cls value<block_start>"""Default decoder. Recursively deserialize into immutable types ( :class:`josepy.util.frozendict` instead of :func:`dict`, :func:`tuple` instead of :func:`list`). 
"""<line_sep># bases cases for different types returned by json.loads <if_stmt>isinstance(value list)<block_start><return>tuple(cls.default_decoder(subvalue)<for>subvalue value)<block_end><elif_stmt>isinstance(value dict)<block_start><return>util.frozendict(dict((cls.default_decoder(key) cls.default_decoder(value))<for>key,value six.iteritems(value)))<block_end><else_stmt># integer or string <block_start><return>value<block_end><block_end>@classmethod<def_stmt>default_encoder cls value<block_start>"""Default (passthrough) encoder."""<line_sep># field.to_partial_json() is no good as encoder has to do partial # serialization only <return>value<block_end><block_end><class_stmt>JSONObjectWithFieldsMeta(abc.ABCMeta)<block_start>"""Metaclass for :class:`JSONObjectWithFields` and its subclasses. It makes sure that, for any class ``cls`` with ``__metaclass__`` set to ``JSONObjectWithFieldsMeta``: 1. All fields (attributes of type :class:`Field`) in the class definition are moved to the ``cls._fields`` dictionary, where keys are field attribute names and values are fields themselves. 2. ``cls.__slots__`` is extended by all field attribute names (i.e. not :attr:`Field.json_name`). Original ``cls.__slots__`` are stored in ``cls._orig_slots``. In a consequence, for a field attribute name ``some_field``, ``cls.some_field`` will be a slot descriptor and not an instance of :class:`Field`. For example:: some_field = Field('someField', default=()) class Foo(object): __metaclass__ = JSONObjectWithFieldsMeta __slots__ = ('baz',) some_field = some_field assert Foo.__slots__ == ('some_field', 'baz') assert Foo._orig_slots == () assert Foo.some_field is not Field assert Foo._fields.keys() == ['some_field'] assert Foo._fields['some_field'] is some_field As an implementation note, this metaclass inherits from :class:`abc.ABCMeta` (and not the usual :class:`type`) to mitigate the metaclass conflict (:class:`ImmutableMap` and :class:`JSONDeSerializable`, parents of :class:`JSONObjectWithFields`, use :class:`abc.ABCMeta` as its metaclass). """<def_stmt>__new__ mcs name bases dikt<block_start>fields={}<for_stmt>base bases<block_start>fields.update(getattr(base '_fields' {}))<block_end># Do not reorder, this class might override fields from base classes! <for_stmt>key,value tuple(six.iteritems(dikt))# not six.iterkeys() (in-place edit!) <block_start><if_stmt>isinstance(value Field)<block_start>fields[key]=dikt.pop(key)<block_end><block_end>dikt['_orig_slots']=dikt.get('__slots__' ())<line_sep>dikt['__slots__']=tuple(list(dikt['_orig_slots'])+list(six.iterkeys(fields)))<line_sep>dikt['_fields']=fields<line_sep><return>abc.ABCMeta.__new__(mcs name bases dikt)<block_end><block_end>@six.add_metaclass(JSONObjectWithFieldsMeta)<class_stmt>JSONObjectWithFields(util.ImmutableMap interfaces.JSONDeSerializable)# pylint: disable=too-few-public-methods <block_start>"""JSON object with fields. 
Example:: class Foo(JSONObjectWithFields): bar = Field('Bar') empty = Field('Empty', omitempty=True) @bar.encoder def bar(value): return value + 'bar' @bar.decoder def bar(value): if not value.endswith('bar'): raise errors.DeserializationError('No bar suffix!') return value[:-3] assert Foo(bar='baz').to_partial_json() == {'Bar': 'bazbar'} assert Foo.from_json({'Bar': 'bazbar'}) == Foo(bar='baz') assert (Foo.from_json({'Bar': 'bazbar', 'Empty': '!'}) == Foo(bar='baz', empty='!')) assert Foo(bar='baz').bar == 'baz' """<line_sep>@classmethod<def_stmt>_defaults cls<block_start>"""Get default fields values."""<line_sep><return>dict([(slot field.default)<for>slot,field six.iteritems(cls._fields)])<block_end><def_stmt>__init__ self **kwargs# pylint: disable=star-args <block_start>super(JSONObjectWithFields self).__init__(**(dict(self._defaults() **kwargs)))<block_end><def_stmt>encode self name<block_start>"""Encode a single field. :param str name: Name of the field to be encoded. :raises errors.SerializationError: if field cannot be serialized :raises errors.Error: if field could not be found """<try_stmt><block_start>field=self._fields[name]<block_end><except_stmt>KeyError<block_start><raise>errors.Error("Field not found: {0}".format(name))<block_end><return>field.encode(getattr(self name))<block_end><def_stmt>fields_to_partial_json self<block_start>"""Serialize fields to JSON."""<line_sep>jobj={}<line_sep>omitted=set()<for_stmt>slot,field six.iteritems(self._fields)<block_start>value=getattr(self slot)<if_stmt>field.omit(value)<block_start>omitted.add((slot value))<block_end><else_stmt><block_start><try_stmt><block_start>jobj[field.json_name]=field.encode(value)<block_end><except_stmt>errors.SerializationError<as>error<block_start><raise>errors.SerializationError('Could not encode {0} ({1}): {2}'.format(slot value error))<block_end><block_end><block_end><return>jobj<block_end><def_stmt>to_partial_json self<block_start><return>self.fields_to_partial_json()<block_end>@classmethod<def_stmt>_check_required cls jobj<block_start>missing=set()<for_stmt>_,field six.iteritems(cls._fields)<block_start><if_stmt><not>field.omitempty<and>field.json_name<not><in>jobj<block_start>missing.add(field.json_name)<block_end><block_end><if_stmt>missing<block_start><raise>errors.DeserializationError('The following fields are required: {0}'.format(','.join(missing)))<block_end><block_end>@classmethod<def_stmt>fields_from_json cls jobj<block_start>"""Deserialize fields from JSON."""<line_sep>cls._check_required(jobj)<line_sep>fields={}<for_stmt>slot,field six.iteritems(cls._fields)<block_start><if_stmt>field.json_name<not><in>jobj<and>field.omitempty<block_start>fields[slot]=field.default<block_end><else_stmt><block_start>value=jobj[field.json_name]<try_stmt><block_start>fields[slot]=field.decode(value)<block_end><except_stmt>errors.DeserializationError<as>error<block_start><raise>errors.DeserializationError('Could not decode {0!r} ({1!r}): {2}'.format(slot value error))<block_end><block_end><block_end><return>fields<block_end>@classmethod<def_stmt>from_json cls jobj<block_start><return>cls(**cls.fields_from_json(jobj))<block_end><block_end><def_stmt>encode_b64jose data<block_start>"""Encode JOSE Base-64 field. :param bytes data: :rtype: `unicode` """<line_sep># b64encode produces ASCII characters only <return>b64.b64encode(data).decode('ascii')<block_end><def_stmt>decode_b64jose data size=<none> minimum=<false><block_start>"""Decode JOSE Base-64 field. 
:param unicode data: :param int size: Required length (after decoding). :param bool minimum: If ``True``, then `size` will be treated as minimum required length, as opposed to exact equality. :rtype: bytes """<line_sep>error_cls=TypeError<if>six.PY2<else>binascii.Error<try_stmt><block_start>decoded=b64.b64decode(data.encode())<block_end><except_stmt>error_cls<as>error<block_start><raise>errors.DeserializationError(error)<block_end><if_stmt>size<is><not><none><and>((<not>minimum<and>len(decoded)<ne>size)<or>(minimum<and>len(decoded)<l>size))<block_start><raise>errors.DeserializationError("Expected at least or exactly {0} bytes".format(size))<block_end><return>decoded<block_end><def_stmt>encode_hex16 value<block_start>"""Hexlify. :param bytes value: :rtype: unicode """<line_sep><return>binascii.hexlify(value).decode()<block_end><def_stmt>decode_hex16 value size=<none> minimum=<false><block_start>"""Decode hexlified field. :param unicode value: :param int size: Required length (after decoding). :param bool minimum: If ``True``, then `size` will be treated as minimum required length, as opposed to exact equality. :rtype: bytes """<line_sep>value=value.encode()<if_stmt>size<is><not><none><and>((<not>minimum<and>len(value)<ne>size<times>2)<or>(minimum<and>len(value)<l>size<times>2))<block_start><raise>errors.DeserializationError()<block_end>error_cls=TypeError<if>six.PY2<else>binascii.Error<try_stmt><block_start><return>binascii.unhexlify(value)<block_end><except_stmt>error_cls<as>error<block_start><raise>errors.DeserializationError(error)<block_end><block_end><def_stmt>encode_cert cert<block_start>"""Encode certificate as JOSE Base-64 DER. :type cert: `OpenSSL.crypto.X509` wrapped in `.ComparableX509` :rtype: unicode """<line_sep><return>encode_b64jose(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1 cert.wrapped))<block_end><def_stmt>decode_cert b64der<block_start>"""Decode JOSE Base-64 DER-encoded certificate. :param unicode b64der: :rtype: `OpenSSL.crypto.X509` wrapped in `.ComparableX509` """<try_stmt><block_start><return>util.ComparableX509(OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1 decode_b64jose(b64der)))<block_end><except_stmt>OpenSSL.crypto.Error<as>error<block_start><raise>errors.DeserializationError(error)<block_end><block_end><def_stmt>encode_csr csr<block_start>"""Encode CSR as JOSE Base-64 DER. :type csr: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509` :rtype: unicode """<line_sep><return>encode_b64jose(OpenSSL.crypto.dump_certificate_request(OpenSSL.crypto.FILETYPE_ASN1 csr.wrapped))<block_end><def_stmt>decode_csr b64der<block_start>"""Decode JOSE Base-64 DER-encoded CSR. :param unicode b64der: :rtype: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509` """<try_stmt><block_start><return>util.ComparableX509(OpenSSL.crypto.load_certificate_request(OpenSSL.crypto.FILETYPE_ASN1 decode_b64jose(b64der)))<block_end><except_stmt>OpenSSL.crypto.Error<as>error<block_start><raise>errors.DeserializationError(error)<block_end><block_end><class_stmt>TypedJSONObjectWithFields(JSONObjectWithFields)<block_start>"""JSON object with type."""<line_sep>typ=NotImplemented<line_sep>"""Type of the object. Subclasses must override."""<line_sep>type_field_name="type"<line_sep>"""Field name used to distinguish different object types. Subclasses will probably have to override this. 
"""<line_sep>TYPES=NotImplemented<line_sep>"""Types registered for JSON deserialization"""<line_sep>@classmethod<def_stmt>register cls type_cls typ=<none><block_start>"""Register class for JSON deserialization."""<line_sep>typ=type_cls.typ<if>typ<is><none><else>typ<line_sep>cls.TYPES[typ]=type_cls<line_sep><return>type_cls<block_end>@classmethod<def_stmt>get_type_cls cls jobj<block_start>"""Get the registered class for ``jobj``."""<if_stmt>cls<in>six.itervalues(cls.TYPES)<block_start><if_stmt>cls.type_field_name<not><in>jobj<block_start><raise>errors.DeserializationError("Missing type field ({0})".format(cls.type_field_name))<block_end># cls is already registered type_cls, force to use it # so that, e.g Revocation.from_json(jobj) fails if # jobj["type"] != "revocation". <return>cls<block_end><if_stmt><not>isinstance(jobj dict)<block_start><raise>errors.DeserializationError("{0} is not a dictionary object".format(jobj))<block_end><try_stmt><block_start>typ=jobj[cls.type_field_name]<block_end><except_stmt>KeyError<block_start><raise>errors.DeserializationError("missing type field")<block_end><try_stmt><block_start><return>cls.TYPES[typ]<block_end><except_stmt>KeyError<block_start><raise>errors.UnrecognizedTypeError(typ jobj)<block_end><block_end><def_stmt>to_partial_json self<block_start>"""Get JSON serializable object. :returns: Serializable JSON object representing ACME typed object. :meth:`validate` will almost certainly not work, due to reasons explained in :class:`josepy.interfaces.IJSONSerializable`. :rtype: dict """<line_sep>jobj=self.fields_to_partial_json()<line_sep>jobj[self.type_field_name]=self.typ<line_sep><return>jobj<block_end>@classmethod<def_stmt>from_json cls jobj<block_start>"""Deserialize ACME object from valid JSON object. :raises josepy.errors.UnrecognizedTypeError: if type of the ACME object has not been registered. """<line_sep># make sure subclasses don't cause infinite recursive from_json calls type_cls=cls.get_type_cls(jobj)<line_sep><return>type_cls(**type_cls.fields_from_json(jobj))<block_end><block_end>
"""add run_type Revision ID: 5dd2ba8222b1 Revises: 079a74c15e8b Create Date: 2021-07-22 23:53:04.043651 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy.dialects postgresql<line_sep># revision identifiers, used by Alembic. revision='5dd2ba8222b1'<line_sep>down_revision='079a74c15e8b'<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade <block_start>op.add_column('experiment_runs' sa.Column('run_type' sa.Text() nullable=<true>) schema='triage_metadata')<line_sep>op.execute("UPDATE triage_metadata.experiment_runs SET run_type='experiment' WHERE run_type IS NULL")<line_sep>op.alter_column('experiment_runs' 'experiment_hash' nullable=<true> new_column_name='run_hash' schema='triage_metadata')<line_sep>op.drop_constraint('experiment_runs_experiment_hash_fkey' 'experiment_runs' type_='foreignkey' schema='triage_metadata')<line_sep>op.execute("ALTER TABLE triage_metadata.experiment_runs RENAME TO triage_runs")<line_sep>op.create_table('retrain' sa.Column('retrain_hash' sa.Text() nullable=<false>) sa.Column('config' postgresql.JSONB(astext_type=sa.Text()) nullable=<true>) sa.Column('prediction_date' sa.DateTime() nullable=<true>) sa.PrimaryKeyConstraint('retrain_hash') schema='triage_metadata' )<line_sep>op.alter_column('models' 'built_in_experiment_run' nullable=<false> new_column_name='built_in_triage_run' schema='triage_metadata')<line_sep>op.execute("CREATE TABLE triage_metadata.deprecated_models_built_by_experiment AS SELECT model_id, model_hash, built_by_experiment FROM triage_metadata.models")<line_sep>op.drop_column('models' 'built_by_experiment' schema='triage_metadata')<line_sep>op.create_table('retrain_models' sa.Column('retrain_hash' sa.String() nullable=<false>) sa.Column('model_hash' sa.String() nullable=<false>) sa.ForeignKeyConstraint(['retrain_hash'] ['triage_metadata.retrain.retrain_hash'] ) sa.PrimaryKeyConstraint('retrain_hash' 'model_hash') schema='triage_metadata')<block_end><def_stmt>downgrade <block_start>op.execute("ALTER TABLE triage_metadata.triage_runs RENAME TO experiment_runs")<line_sep>op.drop_column('experiment_runs' 'run_type' schema='triage_metadata')<line_sep>op.alter_column('experiment_runs' 'run_hash' nullable=<true> new_column_name='experiment_hash' schema='triage_metadata')<line_sep>op.create_foreign_key('experiment_runs_experiment_hash_fkey' 'experiment_runs' 'experiments' ['experiment_hash'] ['experiment_hash'] source_schema='triage_metadata' referent_schema='triage_metadata')<line_sep>op.drop_table('retrain_models' schema='triage_metadata')<line_sep>op.drop_table('retrain' schema='triage_metadata')<line_sep>op.add_column('models' sa.Column('built_by_experiment' sa.Text() nullable=<true>) schema='triage_metadata')<line_sep>op.alter_column('models' 'built_in_triage_run' nullable=<false> new_column_name='built_in_experiment_run' schema='triage_metadata')<block_end>
<import_from_stmt>.config add_panopticfcn_config<import_from_stmt>.panoptic_seg PanopticFCN<import_from_stmt>.build_solver build_lr_scheduler<line_sep>
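# Hedged usage sketch for the names exported above. It assumes detectron2 is
# installed, that this package is importable as "panopticfcn", and the YAML config
# path is hypothetical; only get_cfg()/merge_from_file() from detectron2 are used.
from detectron2.config import get_cfg
from panopticfcn import add_panopticfcn_config

cfg = get_cfg()
add_panopticfcn_config(cfg)                           # register PanopticFCN-specific keys
cfg.merge_from_file("configs/panoptic_fcn_r50.yaml")  # hypothetical config file
# cfg can then be handed to a detectron2 trainer; PanopticFCN and build_lr_scheduler
# above are expected to be picked up through the config (assumption).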
## # Copyright (c) 2010-2017 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## """ Tests for L{txdav.common.datastore.upgrade.migrate}. """<import_from_stmt>twext.enterprise.adbapi2 Pickle<import_from_stmt>twext.enterprise.dal.syntax Delete<import_from_stmt>twext.python.filepath CachingFilePath<import_from_stmt>txweb2.http_headers MimeType<import_from_stmt>twisted.internet.defer inlineCallbacks Deferred returnValue<import_from_stmt>twisted.internet.protocol Protocol<import_from_stmt>twisted.protocols.amp AMP Command String<import_from_stmt>twisted.python.modules getModule<import_from_stmt>twisted.python.reflect qual namedAny<import_from_stmt>twisted.trial.unittest TestCase<import_from_stmt>twistedcaldav customxml caldavxml<import_from_stmt>twistedcaldav.config config<import_from_stmt>twistedcaldav.ical Component<import_from_stmt>txdav.base.propertystore.base PropertyName<import_from_stmt>txdav.caldav.datastore.test.common CommonTests<import_from_stmt>txdav.carddav.datastore.test.common CommonTests<as>ABCommonTests<import_from_stmt>txdav.common.datastore.file CommonDataStore<import_from_stmt>txdav.common.datastore.sql_tables schema<import_from_stmt>txdav.common.datastore.test.util SQLStoreBuilder<import_from_stmt>txdav.common.datastore.test.util populateCalendarsFrom StubNotifierFactory resetCalendarMD5s populateAddressBooksFrom resetAddressBookMD5s deriveValue withSpecialValue CommonCommonTests <import_from_stmt>txdav.common.datastore.upgrade.migrate UpgradeToDatabaseStep StoreSpawnerService swapAMP<import_from_stmt>txdav.xml element<import_stmt>copy<class_stmt>CreateStore(Command)<block_start>""" Create a store in a subprocess. """<line_sep>arguments=[('delegateTo' String())]<block_end><class_stmt>PickleConfig(Command)<block_start>""" Unpickle some configuration in a subprocess. """<line_sep>arguments=[('delegateTo' String()) ('config' Pickle())]<block_end><class_stmt>StoreCreator(AMP)<block_start>""" Helper protocol. """<line_sep>@CreateStore.responder<def_stmt>createStore self delegateTo<block_start>""" Create a store and pass it to the named delegate class. """<line_sep>swapAMP(self namedAny(delegateTo)(SQLStoreBuilder.childStore()))<line_sep><return>{}<block_end>@PickleConfig.responder<def_stmt>pickleConfig self config delegateTo# from twistedcaldav.config import config as globalConfig # globalConfig._data = config._data <block_start>swapAMP(self namedAny(delegateTo)(config))<line_sep><return>{}<block_end><block_end><class_stmt>StubSpawner(StoreSpawnerService)<block_start>""" Stub spawner service which populates the store forcibly. """<def_stmt>__init__ self config=<none><block_start>super(StubSpawner self).__init__()<line_sep>self.config=config<block_end>@inlineCallbacks<def_stmt>spawnWithStore self here there<block_start>""" 'here' and 'there' are the helper protocols 'there' will expect to be created with an instance of a store. 
"""<line_sep>master=<yield>self.spawn(AMP() StoreCreator)<line_sep><yield>master.callRemote(CreateStore delegateTo=qual(there))<line_sep>returnValue(swapAMP(master here))<block_end>@inlineCallbacks<def_stmt>spawnWithConfig self config here there<block_start>""" Similar to spawnWithStore except the child process gets a configuration object instead. """<line_sep>master=<yield>self.spawn(AMP() StoreCreator)<line_sep>subcfg=copy.deepcopy(self.config)<del_stmt>subcfg._postUpdateHooks[:]<line_sep><yield>master.callRemote(PickleConfig config=subcfg delegateTo=qual(there))<line_sep>returnValue(swapAMP(master here))<block_end><block_end><class_stmt>HomeMigrationTests(CommonCommonTests TestCase)<block_start>""" Tests for L{UpgradeToDatabaseStep}. """<line_sep>av1=Component.fromString("""BEGIN:VCALENDAR VERSION:2.0 CALSCALE:GREGORIAN PRODID:-//calendarserver.org//Zonal//EN BEGIN:VAVAILABILITY ORGANIZER:mailto:<EMAIL> UID:<EMAIL> DTSTAMP:20061005T133225Z DTEND:20140101T000000Z BEGIN:AVAILABLE UID:<EMAIL> DTSTAMP:20061005T133225Z SUMMARY:Monday to Friday from 9:00 to 17:00 DTSTART:20130101T090000Z DTEND:20130101T170000Z RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR END:AVAILABLE END:VAVAILABILITY END:VCALENDAR """)<line_sep>@inlineCallbacks<def_stmt>setUp self<block_start>""" Set up two stores to migrate between. """<line_sep><yield>super(HomeMigrationTests self).setUp()<line_sep><yield>self.buildStoreAndDirectory(extraUids=(u"home1" u"home2" u"home3" u"home_defaults" u"home_no_splits" u"home_splits" u"home_splits_shared" ))<line_sep>self.sqlStore=self.store<line_sep># Add some files to the file store. self.filesPath=CachingFilePath(self.mktemp())<line_sep>self.filesPath.createDirectory()<line_sep>fileStore=self.fileStore=CommonDataStore(self.filesPath {"push":StubNotifierFactory()} self.directory <true> <true>)<line_sep>self.upgrader=UpgradeToDatabaseStep(self.fileStore self.sqlStore)<line_sep>requirements=CommonTests.requirements<line_sep>extras=deriveValue(self "extraRequirements" <lambda>t:{})<line_sep>requirements=self.mergeRequirements(requirements extras)<line_sep><yield>populateCalendarsFrom(requirements fileStore)<line_sep>md5s=CommonTests.md5s<line_sep><yield>resetCalendarMD5s(md5s fileStore)<line_sep>self.filesPath.child("calendars").child("__uids__").child("ho").child("me").child("home1").child(".some-extra-data").setContent("some extra data")<line_sep>requirements=ABCommonTests.requirements<line_sep><yield>populateAddressBooksFrom(requirements fileStore)<line_sep>md5s=ABCommonTests.md5s<line_sep><yield>resetAddressBookMD5s(md5s fileStore)<line_sep>self.filesPath.child("addressbooks").child("__uids__").child("ho").child("me").child("home1").child(".some-extra-data").setContent("some extra data")<line_sep># Add some properties we want to check get migrated over txn=self.fileStore.newTransaction()<line_sep>home=<yield>txn.calendarHomeWithUID("home_defaults")<line_sep>cal=<yield>home.calendarWithName("calendar_1")<line_sep>props=cal.properties()<line_sep>props[PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet)]=caldavxml.SupportedCalendarComponentSet(caldavxml.CalendarComponent(name="VEVENT") caldavxml.CalendarComponent(name="VTODO") )<line_sep>props[PropertyName.fromElement(element.ResourceType)]=element.ResourceType(element.Collection() caldavxml.Calendar() 
)<line_sep>props[PropertyName.fromElement(customxml.GETCTag)]=customxml.GETCTag.fromString("foobar")<line_sep>inbox=<yield>home.calendarWithName("inbox")<line_sep>props=inbox.properties()<line_sep>props[PropertyName.fromElement(customxml.CalendarAvailability)]=customxml.CalendarAvailability.fromString(str(self.av1))<line_sep>props[PropertyName.fromElement(caldavxml.ScheduleDefaultCalendarURL)]=caldavxml.ScheduleDefaultCalendarURL(element.HRef.fromString("/calendars/__uids__/home_defaults/calendar_1") )<line_sep><yield>txn.commit()<block_end><def_stmt>mergeRequirements self a b<block_start>""" Merge two requirements dictionaries together, modifying C{a} and returning it. @param a: Some requirements, in the format of L{CommonTests.requirements}. @type a: C{dict} @param b: Some additional requirements, to be merged into C{a}. @type b: C{dict} @return: C{a} @rtype: C{dict} """<for_stmt>homeUID b<block_start>homereq=a.setdefault(homeUID {})<line_sep>homeExtras=b[homeUID]<for_stmt>calendarUID homeExtras<block_start>calreq=homereq.setdefault(calendarUID {})<line_sep>calendarExtras=homeExtras[calendarUID]<line_sep>calreq.update(calendarExtras)<block_end><block_end><return>a<block_end>@withSpecialValue("extraRequirements" {"home1":{"calendar_1":{"bogus.ics":(getModule("twistedcaldav").filePath.sibling("zoneinfo").child("EST.ics").getContent() CommonTests.metadata1)}}})@inlineCallbacks<def_stmt>test_unknownTypeNotMigrated self<block_start>""" The only types of calendar objects that should get migrated are VEVENTs and VTODOs. Other component types, such as free-standing VTIMEZONEs, don't have a UID and can't be stored properly in the database, so they should not be migrated. """<line_sep><yield>self.upgrader.stepWithResult(<none>)<line_sep>txn=self.sqlStore.newTransaction()<line_sep>self.addCleanup(txn.commit)<line_sep>self.assertIdentical(<none> (<yield>(<yield>(<yield>(<yield>txn.calendarHomeWithUID("home1")).calendarWithName("calendar_1"))).calendarObjectWithName("bogus.ics")))<block_end>@inlineCallbacks<def_stmt>test_upgradeCalendarHomes self<block_start>""" L{UpgradeToDatabaseService.startService} will do the upgrade, then start its dependent service by adding it to its service hierarchy. 
"""<line_sep># Create a fake directory in the same place as a home, but with a non-existent uid fake_dir=self.filesPath.child("calendars").child("__uids__").child("ho").child("me").child("foobar")<line_sep>fake_dir.makedirs()<line_sep># Create a fake file in the same place as a home,with a name that matches the hash uid prefix fake_file=self.filesPath.child("calendars").child("__uids__").child("ho").child("me").child("home_file")<line_sep>fake_file.setContent("")<line_sep><yield>self.upgrader.stepWithResult(<none>)<line_sep>txn=self.sqlStore.newTransaction()<line_sep>self.addCleanup(txn.commit)<for_stmt>uid CommonTests.requirements<block_start><if_stmt>CommonTests.requirements[uid]<is><not><none><block_start>self.assertNotIdentical(<none> (<yield>txn.calendarHomeWithUID(uid)))<block_end><block_end># Successfully migrated calendar homes are deleted self.assertFalse(self.filesPath.child("calendars").child("__uids__").child("ho").child("me").child("home1").exists())<line_sep># Want metadata preserved home=(<yield>txn.calendarHomeWithUID("home1"))<line_sep>calendar=(<yield>home.calendarWithName("calendar_1"))<for_stmt>name,metadata,md5 (("1.ics" CommonTests.metadata1 CommonTests.md5Values[0]) ("2.ics" CommonTests.metadata2 CommonTests.md5Values[1]) ("3.ics" CommonTests.metadata3 CommonTests.md5Values[2]) )<block_start>object=(<yield>calendar.calendarObjectWithName(name))<line_sep>self.assertEquals(object.getMetadata() metadata)<line_sep>self.assertEquals(object.md5() md5)<block_end><block_end>@withSpecialValue("extraRequirements" {"nonexistent":{"calendar_1":{}}})@inlineCallbacks<def_stmt>test_upgradeCalendarHomesMissingDirectoryRecord self<block_start>""" Test an upgrade where a directory record is missing for a home; the original home directory will remain on disk. """<line_sep><yield>self.upgrader.stepWithResult(<none>)<line_sep>txn=self.sqlStore.newTransaction()<line_sep>self.addCleanup(txn.commit)<for_stmt>uid CommonTests.requirements<block_start><if_stmt>CommonTests.requirements[uid]<is><not><none><block_start>self.assertNotIdentical(<none> (<yield>txn.calendarHomeWithUID(uid)))<block_end><block_end>self.assertIdentical(<none> (<yield>txn.calendarHomeWithUID(u"nonexistent")))<line_sep># Skipped calendar homes are not deleted self.assertTrue(self.filesPath.child("calendars").child("__uids__").child("no").child("ne").child("nonexistent").exists())<block_end>@inlineCallbacks<def_stmt>test_upgradeExistingHome self<block_start>""" L{UpgradeToDatabaseService.startService} will skip migrating existing homes. """<line_sep>startTxn=self.sqlStore.newTransaction("populate empty sample")<line_sep><yield>startTxn.calendarHomeWithUID("home1" create=<true>)<line_sep><yield>startTxn.commit()<line_sep><yield>self.upgrader.stepWithResult(<none>)<line_sep>vrfyTxn=self.sqlStore.newTransaction("verify sample still empty")<line_sep>self.addCleanup(vrfyTxn.commit)<line_sep>home=<yield>vrfyTxn.calendarHomeWithUID("home1")<line_sep># The default calendar is still there. self.assertNotIdentical(<none> (<yield>home.calendarWithName("calendar")))<line_sep># The migrated calendar isn't. self.assertIdentical(<none> (<yield>home.calendarWithName("calendar_1")))<block_end>@inlineCallbacks<def_stmt>test_upgradeAttachments self<block_start>""" L{UpgradeToDatabaseService.startService} upgrades calendar attachments as well. 
"""<line_sep># Need to tweak config and settings to setup dropbox to work self.patch(config "EnableDropBox" <true>)<line_sep>self.patch(config "EnableManagedAttachments" <false>)<line_sep>self.sqlStore.enableManagedAttachments=<false><line_sep>txn=self.sqlStore.newTransaction()<line_sep>cs=schema.CALENDARSERVER<line_sep><yield>Delete(From=cs Where=cs.NAME<eq>"MANAGED-ATTACHMENTS").on(txn)<line_sep><yield>txn.commit()<line_sep>txn=self.fileStore.newTransaction()<line_sep>committed=[]<def_stmt>maybeCommit <block_start><if_stmt><not>committed<block_start>committed.append(<true>)<line_sep><return>txn.commit()<block_end><block_end>self.addCleanup(maybeCommit)<line_sep>@inlineCallbacks<def_stmt>getSampleObj <block_start>home=(<yield>txn.calendarHomeWithUID("home1"))<line_sep>calendar=(<yield>home.calendarWithName("calendar_1"))<line_sep>object=(<yield>calendar.calendarObjectWithName("1.ics"))<line_sep>returnValue(object)<block_end>inObject=<yield>getSampleObj()<line_sep>someAttachmentName="some-attachment"<line_sep>someAttachmentType=MimeType.fromString("application/x-custom-type")<line_sep>attachment=<yield>inObject.createAttachmentWithName(someAttachmentName )<line_sep>transport=attachment.store(someAttachmentType)<line_sep>someAttachmentData="Here is some data for your attachment, enjoy."<line_sep>transport.write(someAttachmentData)<line_sep><yield>transport.loseConnection()<line_sep><yield>maybeCommit()<line_sep><yield>self.upgrader.stepWithResult(<none>)<line_sep>committed=[]<line_sep>txn=self.sqlStore.newTransaction()<line_sep>outObject=<yield>getSampleObj()<line_sep>outAttachment=<yield>outObject.attachmentWithName(someAttachmentName)<line_sep>allDone=Deferred()<class_stmt>SimpleProto(Protocol)<block_start>data=''<def_stmt>dataReceived self data<block_start>self.data<augadd>data<block_end><def_stmt>connectionLost self reason<block_start>allDone.callback(self.data)<block_end><block_end>self.assertEquals(outAttachment.contentType() someAttachmentType)<line_sep>outAttachment.retrieve(SimpleProto())<line_sep>allData=<yield>allDone<line_sep>self.assertEquals(allData someAttachmentData)<block_end>@inlineCallbacks<def_stmt>test_upgradeAddressBookHomes self<block_start>""" L{UpgradeToDatabaseService.startService} will do the upgrade, then start its dependent service by adding it to its service hierarchy. """<line_sep><yield>self.upgrader.stepWithResult(<none>)<line_sep>txn=self.sqlStore.newTransaction()<line_sep>self.addCleanup(txn.commit)<for_stmt>uid ABCommonTests.requirements<block_start><if_stmt>ABCommonTests.requirements[uid]<is><not><none><block_start>self.assertNotIdentical(<none> (<yield>txn.addressbookHomeWithUID(uid)))<block_end><block_end># Successfully migrated addressbook homes are deleted self.assertFalse(self.filesPath.child("addressbooks").child("__uids__").child("ho").child("me").child("home1").exists())<line_sep># Want metadata preserved home=(<yield>txn.addressbookHomeWithUID("home1"))<line_sep>adbk=(<yield>home.addressbookWithName("addressbook"))<for_stmt>name,md5 (("1.vcf" ABCommonTests.md5Values[0]) ("2.vcf" ABCommonTests.md5Values[1]) ("3.vcf" ABCommonTests.md5Values[2]) )<block_start>object=(<yield>adbk.addressbookObjectWithName(name))<line_sep>self.assertEquals(object.md5() md5)<block_end><block_end>@inlineCallbacks<def_stmt>test_upgradeProperties self<block_start>""" L{UpgradeToDatabaseService.startService} will do the upgrade, then start its dependent service by adding it to its service hierarchy. 
"""<line_sep><yield>self.upgrader.stepWithResult(<none>)<line_sep>txn=self.sqlStore.newTransaction()<line_sep>self.addCleanup(txn.commit)<line_sep># Want metadata preserved home=(<yield>txn.calendarHomeWithUID("home_defaults"))<line_sep>cal=(<yield>home.calendarWithName("calendar_1"))<line_sep>inbox=(<yield>home.calendarWithName("inbox"))<line_sep># Supported components self.assertEqual(cal.getSupportedComponents() "VEVENT")<line_sep>self.assertTrue(cal.properties().get(PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet))<is><none>)<line_sep># Resource type removed self.assertTrue(cal.properties().get(PropertyName.fromElement(element.ResourceType))<is><none>)<line_sep># Ctag removed self.assertTrue(cal.properties().get(PropertyName.fromElement(customxml.GETCTag))<is><none>)<line_sep># Availability self.assertEquals(str(home.getAvailability()) str(self.av1))<line_sep>self.assertTrue(inbox.properties().get(PropertyName.fromElement(customxml.CalendarAvailability))<is><none>)<line_sep># Default calendar self.assertTrue(home.isDefaultCalendar(cal))<line_sep>self.assertTrue(inbox.properties().get(PropertyName.fromElement(caldavxml.ScheduleDefaultCalendarURL))<is><none>)<block_end><def_stmt>test_fileStoreFromPath self<block_start>""" Verify that fileStoreFromPath() will return a CommonDataStore if the given path contains either "calendars" or "addressbooks" sub-directories. Otherwise it returns None """<line_sep># No child directories docRootPath=CachingFilePath(self.mktemp())<line_sep>docRootPath.createDirectory()<line_sep>step=UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)<line_sep>self.assertEquals(step <none>)<line_sep># "calendars" child directory exists childPath=docRootPath.child("calendars")<line_sep>childPath.createDirectory()<line_sep>step=UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)<line_sep>self.assertTrue(isinstance(step CommonDataStore))<line_sep>childPath.remove()<line_sep># "addressbooks" child directory exists childPath=docRootPath.child("addressbooks")<line_sep>childPath.createDirectory()<line_sep>step=UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)<line_sep>self.assertTrue(isinstance(step CommonDataStore))<line_sep>childPath.remove()<block_end><block_end>
# Copyright 2015, Ansible, Inc. # <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> absolute_import unicode_literals<import_from_stmt>getpass getpass<import_from_stmt>distutils.version LooseVersion<import_stmt>click<import_from_stmt>tower_cli models get_resource resources exceptions<as>exc<import_from_stmt>tower_cli.api client<import_from_stmt>tower_cli.cli types<import_from_stmt>tower_cli.utils debug parser<line_sep>PROMPT_LIST=['diff_mode' 'limit' 'tags' 'skip_tags' 'job_type' 'verbosity' 'inventory' 'credential']<class_stmt>Resource(models.ExeResource)<block_start>"""A resource for jobs. This resource has ordinary list and get methods, but it does not have create or modify. Instead of being created, a job is launched. """<line_sep>cli_help='Launch or monitor jobs.'<line_sep>endpoint='/jobs/'<line_sep>job_template=models.Field(key='-J' type=types.Related('job_template') required=<false> display=<true>)<line_sep>job_explanation=models.Field(required=<false> display=<false> read_only=<true>)<line_sep>created=models.Field(required=<false> display=<true>)<line_sep>status=models.Field(required=<false> display=<true>)<line_sep>elapsed=models.Field(required=<false> display=<true> type=float)<line_sep>@resources.command(use_fields_as_options=('job_template' ))@click.option('--monitor' is_flag=<true> default=<false> help='If sent, immediately calls `job monitor` on the newly '<concat>'launched job rather than exiting with a success.')@click.option('--wait' is_flag=<true> default=<false> help='Monitor the status of the job, but do not print '<concat>'while job is in progress.')@click.option('--timeout' required=<false> type=int help='If provided with --monitor, this command (not the job)'<concat>' will time out after the given number of seconds. '<concat>'Does nothing if --monitor is not sent.')@click.option('--no-input' is_flag=<true> default=<false> help='Suppress any requests for input.')@click.option('-e' '--extra-vars' required=<false> multiple=<true> help='yaml format text that contains extra variables '<concat>'to pass on. 
Use @ to get these from a file.')@click.option('--diff-mode' type=bool required=<false> help='Specify diff mode for job template to run.')@click.option('--limit' required=<false> help='Specify host limit for job template to run.')@click.option('--tags' required=<false> help='Specify tagged actions in the playbook to run.')@click.option('--skip-tags' required=<false> help='Specify tagged actions in the playbook to omit.')@click.option('--job-type' required=<false> type=click.Choice(['run' 'check']) help='Specify job type for job template to run.')@click.option('--verbosity' type=int required=<false> help='Specify verbosity of the playbook run.')@click.option('--inventory' required=<false> type=types.Related('inventory') help='Specify inventory for job template to run.')@click.option('--credential' required=<false> multiple=<true> type=types.Related('credential') help='Specify any type of credential(s) for job template to run.')<def_stmt>launch self job_template=<none> monitor=<false> wait=<false> timeout=<none> no_input=<true> extra_vars=<none> **kwargs<block_start>"""Launch a new job based on a job template. Creates a new job in Ansible Tower, immediately starts it, and returns back an ID in order for its status to be monitored. =====API DOCS===== Launch a new job based on a job template. :param job_template: Primary key or name of the job template to launch new job. :type job_template: str :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched job rather than exiting with a success. :type monitor: bool :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress. :type wait: bool :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number of seconds. :type timeout: int :param no_input: Flag that if set, suppress any requests for input. :type no_input: bool :param extra_vars: yaml formatted texts that contains extra variables to pass on. :type extra_vars: array of strings :param diff_mode: Specify diff mode for job template to run. :type diff_mode: bool :param limit: Specify host limit for job template to run. :type limit: str :param tags: Specify tagged actions in the playbook to run. :type tags: str :param skip_tags: Specify tagged actions in the playbook to omit. :type skip_tags: str :param job_type: Specify job type for job template to run. :type job_type: str :param verbosity: Specify verbosity of the playbook run. :type verbosity: int :param inventory: Specify machine credential for job template to run. :type inventory: str :param credential: Specify machine credential for job template to run. :type credential: str :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait`` call if ``wait`` flag is on; Result of subsequent ``status`` call if none of the two flags are on. :rtype: dict =====API DOCS===== """<line_sep># Get the job template from Ansible Tower. # This is used as the baseline for starting the job. 
jt_resource=get_resource('job_template')<line_sep>jt=jt_resource.get(job_template)<line_sep># Update the job data for special treatment of certain fields # Special case for job tags, historically just called --tags tags=kwargs.get('tags' <none>)<line_sep>data={}<if_stmt>tags<block_start>data['job_tags']=tags<block_end># Special case for cross-version compatibility with credentials cred_arg=kwargs.pop('credential' ())<if_stmt>isinstance(cred_arg (list tuple))<block_start>credentials=cred_arg<block_end><else_stmt><block_start>credentials=[cred_arg]<block_end><if_stmt>credentials<block_start><if_stmt>'credentials'<in>jt['related']# Has Tower 3.3 / multi-cred support # combine user-provided credentials with JT credentials <block_start>jt_creds=set(c['id']<for>c jt['summary_fields']['credentials'])<line_sep>kwargs['credentials']=list(set(credentials)|jt_creds)<block_end><else_stmt><block_start><if_stmt>len(credentials)<g>1<block_start><raise>exc.UsageError('Providing multiple credentials on launch can only be '<concat>'done with Tower version 3.3 and higher or recent AWX.')<block_end>kwargs['credential']=credentials[0]<block_end><block_end># Initialize an extra_vars list that starts with the job template # preferences first, if they exist extra_vars_list=[]<if_stmt>'extra_vars'<in>data<and>len(data['extra_vars'])<g>0# But only do this for versions before 2.3 <block_start>debug.log('Getting version of Tower.' header='details')<line_sep>r=client.get('/config/')<if_stmt>LooseVersion(r.json()['version'])<l>LooseVersion('2.4')<block_start>extra_vars_list=[data['extra_vars']]<block_end><block_end># Add the runtime extra_vars to this list <if_stmt>extra_vars<block_start>extra_vars_list<augadd>list(extra_vars)<block_end># accept tuples # If the job template requires prompting for extra variables, # do so (unless --no-input is set). <if_stmt>jt.get('ask_variables_on_launch' <false>)<and><not>no_input<and><not>extra_vars# If JT extra_vars are JSON, echo them to user as YAML <block_start>initial=parser.process_extra_vars([jt['extra_vars']] force_json=<false>)<line_sep>initial='\n'.join(('# Specify extra variables (if any) here as YAML.' '# Lines beginning with "#" denote comments.' initial ))<line_sep>extra_vars=click.edit(initial)<or>''<if_stmt>extra_vars<ne>initial<block_start>extra_vars_list=[extra_vars]<block_end><block_end># Data is starting out with JT variables, and we only want to # include extra_vars that come from the algorithm here. data.pop('extra_vars' <none>)<line_sep># Replace/populate data fields if prompted. modified=set()<for_stmt>resource PROMPT_LIST<block_start><if_stmt>jt.pop('ask_'+resource+'_on_launch' <false>)<and><not>no_input<block_start>resource_object=kwargs.get(resource <none>)<if_stmt>type(resource_object)<eq>types.Related<block_start>resource_class=get_resource(resource)<line_sep>resource_object=resource_class.get(resource).pop('id' <none>)<block_end><if_stmt>resource_object<is><none><block_start>debug.log('{0} is asked at launch but not provided'.format(resource) header='warning')<block_end><elif_stmt>resource<ne>'tags'<block_start>data[resource]=resource_object<line_sep>modified.add(resource)<block_end><block_end><block_end># Dump extra_vars into JSON string for launching job <if_stmt>len(extra_vars_list)<g>0<block_start>data['extra_vars']=parser.process_extra_vars(extra_vars_list force_json=<true>)<block_end># Create the new job in Ansible Tower. 
start_data={}<line_sep>endpoint='/job_templates/%d/launch/'%jt['id']<if_stmt>'extra_vars'<in>data<and>len(data['extra_vars'])<g>0<block_start>start_data['extra_vars']=data['extra_vars']<block_end><if_stmt>tags<block_start>start_data['job_tags']=data['job_tags']<block_end><for_stmt>resource PROMPT_LIST<block_start><if_stmt>resource<in>modified<block_start>start_data[resource]=data[resource]<block_end><block_end># There's a non-trivial chance that we are going to need some # additional information to start the job; in particular, many jobs # rely on passwords entered at run-time. # # If there are any such passwords on this job, ask for them now. debug.log('Asking for information necessary to start the job.' header='details')<line_sep>job_start_info=client.get(endpoint).json()<for_stmt>password job_start_info.get('passwords_needed_to_start' [])<block_start>start_data[password]=getpass('Password for %s: '%password)<block_end># Actually start the job. debug.log('Launching the job.' header='details')<line_sep>self._pop_none(kwargs)<line_sep>kwargs.update(start_data)<line_sep>job_started=client.post(endpoint data=kwargs)<line_sep># Get the job ID from the result. job_id=job_started.json()['id']<line_sep># If returning json indicates any ignored fields, display it in # verbose mode. <if_stmt>job_started.text<eq>''<block_start>ignored_fields={}<block_end><else_stmt><block_start>ignored_fields=job_started.json().get('ignored_fields' {})<block_end>has_ignored_fields=<false><for_stmt>key,value ignored_fields.items()<block_start><if_stmt>value<and>value<ne>'{}'<block_start><if_stmt><not>has_ignored_fields<block_start>debug.log('List of ignored fields on the server side:' header='detail')<line_sep>has_ignored_fields=<true><block_end>debug.log('{0}: {1}'.format(key value))<block_end><block_end># Get some information about the running job to print result=self.status(pk=job_id detail=<true>)<line_sep>result['changed']=<true><line_sep># If we were told to monitor the job once it started, then call # monitor from here. <if_stmt>monitor<block_start><return>self.monitor(job_id timeout=timeout)<block_end><elif_stmt>wait<block_start><return>self.wait(job_id timeout=timeout)<block_end><return>result<block_end><block_end>
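# Hedged sketch of calling the launch() method above through tower-cli's Python
# API. The template name and extra_vars content are placeholders; only parameters
# documented in the API DOCS section above are used.
from tower_cli import get_resource

job = get_resource('job')
result = job.launch(
    job_template='Demo Job Template',   # primary key or name of the template
    monitor=True,                       # immediately monitor the launched job
    extra_vars=['ansible_nodes: 3'],    # list of YAML strings
)
print(result)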
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>ares.attack.base BatchAttack<import_from_stmt>ares.attack.utils get_xs_ph get_ys_ph maybe_to_array get_unit<class_stmt>BIM(BatchAttack)<block_start>''' Basic Iterative Method (BIM). A white-box iterative constraint-based method. Require a differentiable loss function and a ``ares.model.Classifier`` model. - Supported distance metric: ``l_2``, ``l_inf``. - Supported goal: ``t``, ``tm``, ``ut``. - References: https://arxiv.org/abs/1607.02533. '''<def_stmt>__init__ self model batch_size loss goal distance_metric session iteration_callback=<none><block_start>''' Initialize BIM. :param model: The model to attack. A ``ares.model.Classifier`` instance. :param batch_size: Batch size for the ``batch_attack()`` method. :param loss: The loss function to optimize. A ``ares.loss.Loss`` instance. :param goal: Adversarial goals. All supported values are ``'t'``, ``'tm'``, and ``'ut'``. :param distance_metric: Adversarial distance metric. All supported values are ``'l_2'`` and ``'l_inf'``. :param session: The ``tf.Session`` to run the attack in. The ``model`` should be loaded into this session. :param iteration_callback: A function accept a ``xs`` ``tf.Tensor`` (the original examples) and a ``xs_adv`` ``tf.Tensor`` (the adversarial examples for ``xs``). During ``batch_attack()``, this callback function would be runned after each iteration, and its return value would be yielded back to the caller. By default, ``iteration_callback`` is ``None``. '''<line_sep>self.model,self.batch_size,self._session=model batch_size session<line_sep>self.loss,self.goal,self.distance_metric=loss goal distance_metric<line_sep># placeholder for batch_attack's input self.xs_ph=get_xs_ph(model batch_size)<line_sep>self.ys_ph=get_ys_ph(model batch_size)<line_sep># flatten shape of xs_ph xs_flatten_shape=(batch_size np.prod(self.model.x_shape))<line_sep># store xs and ys in variables to reduce memory copy between tensorflow and python # variable for the original example with shape of (batch_size, D) self.xs_var=tf.Variable(tf.zeros(shape=xs_flatten_shape dtype=self.model.x_dtype))<line_sep># variable for labels self.ys_var=tf.Variable(tf.zeros(shape=(batch_size ) dtype=self.model.y_dtype))<line_sep># variable for the (hopefully) adversarial example with shape of (batch_size, D) self.xs_adv_var=tf.Variable(tf.zeros(shape=xs_flatten_shape dtype=self.model.x_dtype))<line_sep># magnitude self.eps_ph=tf.placeholder(self.model.x_dtype (self.batch_size ))<line_sep>self.eps_var=tf.Variable(tf.zeros((self.batch_size ) dtype=self.model.x_dtype))<line_sep># step size self.alpha_ph=tf.placeholder(self.model.x_dtype (self.batch_size ))<line_sep>self.alpha_var=tf.Variable(tf.zeros((self.batch_size ) dtype=self.model.x_dtype))<line_sep># expand dim for easier broadcast operations eps=tf.expand_dims(self.eps_var 1)<line_sep>alpha=tf.expand_dims(self.alpha_var 1)<line_sep># calculate loss' gradient with relate to the adversarial example # grad.shape == (batch_size, D) self.xs_adv_model=tf.reshape(self.xs_adv_var (batch_size *self.model.x_shape))<line_sep>self.loss=loss(self.xs_adv_model self.ys_var)<line_sep>grad=tf.gradients(self.loss self.xs_adv_var)[0]<if_stmt>goal<eq>'t'<or>goal<eq>'tm'<block_start>grad=-grad<block_end><elif_stmt>goal<ne>'ut'<block_start><raise>NotImplementedError<block_end># update the adversarial example <if_stmt>distance_metric<eq>'l_2'<block_start>grad_unit=get_unit(grad)<line_sep>xs_adv_delta=self.xs_adv_var-self.xs_var+alpha<times>grad_unit<line_sep># 
clip by max l_2 magnitude of adversarial noise xs_adv_next=self.xs_var+tf.clip_by_norm(xs_adv_delta eps axes=[1])<block_end><elif_stmt>distance_metric<eq>'l_inf'<block_start>xs_lo,xs_hi=self.xs_var-eps self.xs_var+eps<line_sep>grad_sign=tf.sign(grad)<line_sep># clip by max l_inf magnitude of adversarial noise xs_adv_next=tf.clip_by_value(self.xs_adv_var+alpha<times>grad_sign xs_lo xs_hi)<block_end><else_stmt><block_start><raise>NotImplementedError<block_end># clip by (x_min, x_max) xs_adv_next=tf.clip_by_value(xs_adv_next self.model.x_min self.model.x_max)<line_sep>self.update_xs_adv_step=self.xs_adv_var.assign(xs_adv_next)<line_sep>self.config_eps_step=self.eps_var.assign(self.eps_ph)<line_sep>self.config_alpha_step=self.alpha_var.assign(self.alpha_ph)<line_sep>self.setup_xs=[self.xs_var.assign(tf.reshape(self.xs_ph xs_flatten_shape)) self.xs_adv_var.assign(tf.reshape(self.xs_ph xs_flatten_shape))]<line_sep>self.setup_ys=self.ys_var.assign(self.ys_ph)<line_sep>self.iteration=<none><line_sep>self.iteration_callback=<none><if_stmt>iteration_callback<is><not><none><block_start>xs_model=tf.reshape(self.xs_var (self.batch_size *self.model.x_shape))<line_sep>self.iteration_callback=iteration_callback(xs_model self.xs_adv_model)<block_end><block_end><def_stmt>config self **kwargs<block_start>''' (Re)config the attack. :param magnitude: Max distortion, could be either a float number or a numpy float number array with shape of (batch_size,). :param alpha: Step size for each iteration, could be either a float number or a numpy float number array with shape of (batch_size,). :param iteration: Iteration count. An integer. '''<if_stmt>'magnitude'<in>kwargs<block_start>eps=maybe_to_array(kwargs['magnitude'] self.batch_size)<line_sep>self._session.run(self.config_eps_step feed_dict={self.eps_ph:eps})<block_end><if_stmt>'alpha'<in>kwargs<block_start>alpha=maybe_to_array(kwargs['alpha'] self.batch_size)<line_sep>self._session.run(self.config_alpha_step feed_dict={self.alpha_ph:alpha})<block_end><if_stmt>'iteration'<in>kwargs<block_start>self.iteration=kwargs['iteration']<block_end><block_end><def_stmt>_batch_attack_generator self xs ys ys_target<block_start>''' Attack a batch of examples. It is a generator which yields back ``iteration_callback()``'s return value after each iteration if the ``iteration_callback`` is not ``None``, and returns the adversarial examples. '''<line_sep>labels=ys<if>self.goal<eq>'ut'<else>ys_target<line_sep>self._session.run(self.setup_xs feed_dict={self.xs_ph:xs})<line_sep>self._session.run(self.setup_ys feed_dict={self.ys_ph:labels})<for_stmt>_ range(self.iteration)<block_start>self._session.run(self.update_xs_adv_step)<if_stmt>self.iteration_callback<is><not><none><block_start><yield>self._session.run(self.iteration_callback)<block_end><block_end><return>self._session.run(self.xs_adv_model)<block_end><def_stmt>batch_attack self xs ys=<none> ys_target=<none><block_start>''' Attack a batch of examples. :return: When the ``iteration_callback`` is ``None``, return the generated adversarial examples. When the ``iteration_callback`` is not ``None``, return a generator, which yields back the callback's return value after each iteration and returns the generated adversarial examples. 
'''<line_sep>g=self._batch_attack_generator(xs ys ys_target)<if_stmt>self.iteration_callback<is><none><block_start><try_stmt><block_start>next(g)<block_end><except_stmt>StopIteration<as>exp<block_start><return>exp.value<block_end><block_end><else_stmt><block_start><return>g<block_end><block_end><block_end>
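# Hedged usage sketch for the BIM attack defined above. `model`, `loss`, `session`,
# `xs` and `ys` are assumed to already exist (an ares Classifier loaded into the
# tf.Session, an ares Loss, and a batch of inputs/labels); only methods defined in
# this file are called.
attack = BIM(model, batch_size=64, loss=loss, goal='ut',
             distance_metric='l_inf', session=session)
attack.config(magnitude=8.0 / 255.0,  # max L_inf distortion per example
              alpha=1.0 / 255.0,      # step size per iteration
              iteration=10)           # number of BIM steps
xs_adv = attack.batch_attack(xs, ys=ys)  # untargeted goal only needs true labels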
# Licensed under a 3-clause BSD style license - see LICENSE.rst <import_from_stmt>numpy.testing assert_allclose<import_from_stmt>astropy.time Time<import_from_stmt>gammapy.data FixedPointingInfo PointingInfo<import_from_stmt>gammapy.utils.testing assert_time_allclose requires_data<line_sep>@requires_data()<class_stmt>TestFixedPointingInfo<block_start>@classmethod<def_stmt>setup_class cls<block_start>filename="$GAMMAPY_DATA/tests/pointing_table.fits.gz"<line_sep>cls.fpi=FixedPointingInfo.read(filename)<block_end><def_stmt>test_location self<block_start>lon,lat,height=self.fpi.location.geodetic<line_sep>assert_allclose(lon.deg 16.5002222222222)<line_sep>assert_allclose(lat.deg -23.2717777777778)<line_sep>assert_allclose(height.value 1834.999999999783)<block_end><def_stmt>test_time_ref self<block_start>expected=Time(51910.00074287037 format="mjd" scale="tt")<line_sep>assert_time_allclose(self.fpi.time_ref expected)<block_end><def_stmt>test_time_start self<block_start>time=self.fpi.time_start<line_sep>expected=Time(53025.826414166666 format="mjd" scale="tt")<line_sep>assert_time_allclose(time expected)<block_end><def_stmt>test_time_stop self<block_start>time=self.fpi.time_stop<line_sep>expected=Time(53025.844770648146 format="mjd" scale="tt")<line_sep>assert_time_allclose(time expected)<block_end><def_stmt>test_duration self<block_start>duration=self.fpi.duration<line_sep>assert_allclose(duration.sec 1586.0000000044238)<block_end><def_stmt>test_radec self<block_start>pos=self.fpi.radec<line_sep>assert_allclose(pos.ra.deg 83.633333333333)<line_sep>assert_allclose(pos.dec.deg 24.51444444)<assert_stmt>pos.name<eq>"icrs"<block_end><def_stmt>test_altaz self<block_start>pos=self.fpi.altaz<line_sep>assert_allclose(pos.az.deg 7.48272)<line_sep>assert_allclose(pos.alt.deg 41.84191)<assert_stmt>pos.name<eq>"altaz"<block_end><block_end>@requires_data()<class_stmt>TestPointingInfo<block_start>@classmethod<def_stmt>setup_class cls<block_start>filename="$GAMMAPY_DATA/tests/pointing_table.fits.gz"<line_sep>cls.pointing_info=PointingInfo.read(filename)<block_end><def_stmt>test_str self<block_start>ss=str(self.pointing_info)<assert_stmt>"Pointing info"<in>ss<block_end><def_stmt>test_location self<block_start>lon,lat,height=self.pointing_info.location.geodetic<line_sep>assert_allclose(lon.deg 16.5002222222222)<line_sep>assert_allclose(lat.deg -23.2717777777778)<line_sep>assert_allclose(height.value 1834.999999999783)<block_end><def_stmt>test_time_ref self<block_start>expected=Time(51910.00074287037 format="mjd" scale="tt")<line_sep>assert_time_allclose(self.pointing_info.time_ref expected)<block_end><def_stmt>test_table self<block_start><assert_stmt>len(self.pointing_info.table)<eq>100<block_end><def_stmt>test_time self<block_start>time=self.pointing_info.time<assert_stmt>len(time)<eq>100<line_sep>expected=Time(53025.826414166666 format="mjd" scale="tt")<line_sep>assert_time_allclose(time[0] expected)<block_end><def_stmt>test_duration self<block_start>duration=self.pointing_info.duration<line_sep>assert_allclose(duration.sec 1586.0000000044238)<block_end><def_stmt>test_radec self<block_start>pos=self.pointing_info.radec[0]<line_sep>assert_allclose(pos.ra.deg 83.633333333333)<line_sep>assert_allclose(pos.dec.deg 24.51444444)<assert_stmt>pos.name<eq>"icrs"<block_end><def_stmt>test_altaz self<block_start>pos=self.pointing_info.altaz[0]<line_sep>assert_allclose(pos.az.deg 11.45751357)<line_sep>assert_allclose(pos.alt.deg 41.34088901)<assert_stmt>pos.name<eq>"altaz"<block_end><def_stmt>test_altaz_from_table 
self<block_start>pos=self.pointing_info.altaz_from_table[0]<line_sep>assert_allclose(pos.az.deg 11.20432353385406)<line_sep>assert_allclose(pos.alt.deg 41.37921408774436)<assert_stmt>pos.name<eq>"altaz"<block_end><def_stmt>test_altaz_interpolate self<block_start>time=self.pointing_info.time[0]<line_sep>pos=self.pointing_info.altaz_interpolate(time)<line_sep>assert_allclose(pos.az.deg 11.45751357)<line_sep>assert_allclose(pos.alt.deg 41.34088901)<assert_stmt>pos.name<eq>"altaz"<block_end><block_end>
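# Hedged sketch of the PointingInfo API exercised by the tests above (requires the
# $GAMMAPY_DATA test dataset referenced in setup_class).
from gammapy.data import PointingInfo

pointing = PointingInfo.read("$GAMMAPY_DATA/tests/pointing_table.fits.gz")
print(pointing)                                      # summary string, as in test_str
altaz = pointing.altaz_interpolate(pointing.time[0])
print(altaz.az.deg, altaz.alt.deg)                   # ~11.46, ~41.34 per the tests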
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # <import_stmt>time<import_from_stmt>typing Dict Optional<import_stmt>sacremoses# type: ignore <import_from_stmt>cc_net jsonql text_normalizer<class_stmt>RobustTokenizer(jsonql.Transformer)<block_start>"""Moses tokenizer with the expected preprocessing."""<line_sep>LANG_WITHOUT_ACCENT={"en" "my"}<def_stmt>__init__ self lang:str<block_start>super().__init__()<line_sep>self.lang=lang<line_sep>self.moses=sacremoses.MosesTokenizer(lang)<line_sep>self.rm_accent=lang<in>self.LANG_WITHOUT_ACCENT<line_sep>self.ready=<true><block_end><def_stmt>do self text:str<block_start>text=text_normalizer.normalize(text accent=self.rm_accent case=<false> numbers=<false> punct=<true>)<line_sep>text=text_normalizer.normalize_spacing_for_tok(text language=self.lang)<line_sep><return>self.moses.tokenize(text return_str=<true> escape=<false>)<block_end><block_end><class_stmt>DocTokenizer(jsonql.Transformer)<block_start>"""Tokenize the text found in `field` and store the result in `output_field`."""<def_stmt>__init__ self field:str output_field:str="tokenized" language_field:str="language" <block_start>super().__init__()<line_sep>self.field=field<line_sep>self.output_field=output_field<line_sep>self.language_field=language_field<line_sep>self.n_docs=0<line_sep>self.tokenizers:Dict[str RobustTokenizer]={}<block_end><def_stmt>get_tokenizer self lang:str<arrow>Optional[RobustTokenizer]<block_start>cache=self.tokenizers<if_stmt>lang<in>cache<block_start><return>cache[lang]<block_end><if_stmt>lang<in>("th" "zh" "ja")# TODO find a tokenizer for those languages <block_start><return><none><block_end>cache[lang]=RobustTokenizer(lang)<line_sep><return>cache[lang]<block_end><def_stmt>do self document<block_start>lang=document[self.language_field]<line_sep>tok=self.get_tokenizer(lang)<if_stmt><not>tok<block_start><return>document<block_end>self.n_docs<augadd>1<line_sep>lines=document[self.field].split("\n")<line_sep>tokenized="\n".join(tok(l)<for>l lines)<line_sep>document[self.output_field]=tokenized<line_sep><return>document<block_end><def_stmt>summary self<block_start>delay=(time.time()-self.start_time)/3600<line_sep>speed=self.n_docs/delay<line_sep><return>[f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."]<block_end><block_end>
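# Hedged sketch of DocTokenizer on one in-memory document (assumes cc_net and
# sacremoses are installed; the field names and document contents are made up).
tokenizer = DocTokenizer(field="raw_content", output_field="tokenized",
                         language_field="language")
doc = {"language": "en", "raw_content": "Hello, world!\nA second line."}
doc = tokenizer.do(doc)
print(doc["tokenized"])   # Moses-tokenized text, one output line per input line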
<import_stmt>cv2<line_sep>cv2.setNumThreads(0)<line_sep>cv2.ocl.setUseOpenCL(<false>)<import_stmt>numpy<as>np<import_stmt>math<import_from_stmt>functools wraps<def_stmt>clip img dtype maxval<block_start><return>np.clip(img 0 maxval).astype(dtype)<block_end><def_stmt>clipped func<block_start>""" wrapper to clip results of transform to image dtype value range """<line_sep>@wraps(func)<def_stmt>wrapped_function img *args **kwargs<block_start>dtype,maxval=img.dtype np.max(img)<line_sep><return>clip(func(img *args **kwargs) dtype maxval)<block_end><return>wrapped_function<block_end><def_stmt>fix_shift_values img *args<block_start>""" shift values are normally specified in uint, but if your data is float - you need to remap values """<if_stmt>img.dtype<eq>np.float32<block_start><return>list(map(<lambda>x:x/255 args))<block_end><return>args<block_end><def_stmt>vflip img<block_start><return>cv2.flip(img 0)<block_end><def_stmt>hflip img<block_start><return>cv2.flip(img 1)<block_end><def_stmt>flip img code<block_start><return>cv2.flip(img code)<block_end><def_stmt>transpose img<block_start><return>img.transpose(1 0 2)<if>len(img.shape)<g>2<else>img.transpose(1 0)<block_end><def_stmt>rot90 img times<block_start>img=np.rot90(img times)<line_sep><return>np.ascontiguousarray(img)<block_end><def_stmt>rotate img angle<block_start>""" rotate image on specified angle :param angle: angle in degrees """<line_sep>height,width=img.shape[0:2]<line_sep>mat=cv2.getRotationMatrix2D((width/2 height/2) angle 1.0)<line_sep>img=cv2.warpAffine(img mat (width height) flags=cv2.INTER_LINEAR borderMode=cv2.BORDER_REFLECT_101)<line_sep><return>img<block_end><def_stmt>shift_scale_rotate img angle scale dx dy<block_start>""" :param angle: in degrees :param scale: relative scale """<line_sep>height,width=img.shape[:2]<line_sep>cc=math.cos(angle/180<times>math.pi)<times>scale<line_sep>ss=math.sin(angle/180<times>math.pi)<times>scale<line_sep>rotate_matrix=np.array([[cc -ss] [ss cc]])<line_sep>box0=np.array([[0 0] [width 0] [width height] [0 height] ])<line_sep>box1=box0-np.array([width/2 height/2])<line_sep>box1=np.dot(box1 rotate_matrix.T)+np.array([width/2+dx<times>width height/2+dy<times>height])<line_sep>box0=box0.astype(np.float32)<line_sep>box1=box1.astype(np.float32)<line_sep>mat=cv2.getPerspectiveTransform(box0 box1)<line_sep>img=cv2.warpPerspective(img mat (width height) flags=cv2.INTER_LINEAR borderMode=cv2.BORDER_REFLECT_101)<line_sep><return>img<block_end><def_stmt>center_crop img height width<block_start>h,w,c=img.shape<line_sep>dy=(h-height)<floordiv>2<line_sep>dx=(w-width)<floordiv>2<line_sep>y1=dy<line_sep>y2=y1+height<line_sep>x1=dx<line_sep>x2=x1+width<line_sep>img=img[y1:y2 x1:x2 :]<line_sep><return>img<block_end><def_stmt>shift_hsv img hue_shift sat_shift val_shift<block_start>dtype=img.dtype<line_sep>maxval=np.max(img)<line_sep>img=cv2.cvtColor(img cv2.COLOR_RGB2HSV).astype(np.int32)<line_sep>h,s,v=cv2.split(img)<line_sep>h=cv2.add(h hue_shift)<line_sep>h=np.where(h<l>0 maxval-h h)<line_sep>h=np.where(h<g>maxval h-maxval h)<line_sep>h=h.astype(dtype)<line_sep>s=clip(cv2.add(s sat_shift) dtype maxval)<line_sep>v=clip(cv2.add(v val_shift) dtype maxval)<line_sep>img=cv2.merge((h s v)).astype(dtype)<line_sep>img=cv2.cvtColor(img cv2.COLOR_HSV2RGB)<line_sep><return>img<block_end><def_stmt>shift_channels img r_shift g_shift b_shift<block_start>img[<ellipsis> 0]=clip(img[<ellipsis> 0]+r_shift np.uint8 255)<line_sep>img[<ellipsis> 1]=clip(img[<ellipsis> 1]+g_shift np.uint8 255)<line_sep>img[<ellipsis> 
2]=clip(img[<ellipsis> 2]+b_shift np.uint8 255)<line_sep><return>img<block_end><def_stmt>clahe img clipLimit=2.0 tileGridSize=(8 8)<block_start>img_yuv=cv2.cvtColor(img cv2.COLOR_RGB2LAB)<line_sep>clahe=cv2.createCLAHE(clipLimit=clipLimit tileGridSize=tileGridSize)<line_sep>img_yuv[: : 0]=clahe.apply(img_yuv[: : 0])<line_sep>img_output=cv2.cvtColor(img_yuv cv2.COLOR_LAB2RGB)<line_sep><return>img_output<block_end><def_stmt>blur img ksize<block_start><return>cv2.blur(img (ksize ksize))<block_end><def_stmt>invert img<block_start><return>255-img<block_end><def_stmt>channel_shuffle img<block_start>ch_arr=[0 1 2]<line_sep>np.random.shuffle(ch_arr)<line_sep>img=img[<ellipsis> ch_arr]<line_sep><return>img<block_end><def_stmt>img_to_tensor im verbose=<false><block_start>'''AVE edit'''<line_sep>im_out=np.moveaxis(im/(255.<if>im.dtype<eq>np.uint8<else>1) -1 0).astype(np.float32)<if_stmt>verbose<block_start>print("augmentations.functiona.py.img_to_tensor(): im_out.shape:" im_out.shape)<line_sep>print("im_out.unique:" np.unique(im_out))<block_end><return>im_out<block_end><def_stmt>mask_to_tensor mask num_classes verbose=<false><block_start>'''AVE edit'''<if_stmt>num_classes<g>1<block_start>mask=img_to_tensor(mask)<block_end><else_stmt><block_start>mask=np.expand_dims(mask/(255.<if>mask.dtype<eq>np.uint8<else>1) 0).astype(np.float32)<block_end><if_stmt>verbose<block_start>print("augmentations.functiona.py.img_to_tensor(): mask.shape:" mask.shape)<line_sep>print("mask.unique:" np.unique(mask))<block_end><return>mask<block_end>
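# Hedged sketch chaining a few of the transforms above on a random uint8 image;
# all parameter values are arbitrary.
import numpy as np

img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
img = hflip(img)
img = shift_scale_rotate(img, angle=15, scale=1.1, dx=0.05, dy=0.0)
img = center_crop(img, 224, 224)
tensor = img_to_tensor(img)         # CHW float32 scaled to [0, 1]
print(tensor.shape, tensor.dtype)   # (3, 224, 224) float32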
<import_stmt>django.conf<class_stmt>Settings<block_start>""" This is a simple class to take the place of the global settings object. An instance will contain all of our settings as attributes, with default values if they are not specified by the configuration. """<line_sep>defaults={'OTP_LOGIN_URL':django.conf.settings.LOGIN_URL 'OTP_ADMIN_HIDE_SENSITIVE_DATA':<false> }<def_stmt>__getattr__ self name<block_start><if_stmt>name<in>self.defaults<block_start><return>getattr(django.conf.settings name self.defaults[name])<block_end><else_stmt><block_start><return>getattr(django.conf.settings name)<block_end><block_end><block_end>settings=Settings()<line_sep>
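# Hedged sketch of how the proxy above resolves attributes (requires a configured
# Django settings module): names listed in `defaults` fall back to their default
# when absent from django.conf.settings, anything else is looked up directly.
print(settings.OTP_ADMIN_HIDE_SENSITIVE_DATA)  # False unless overridden in settings.py
print(settings.OTP_LOGIN_URL)                  # defaults to django.conf.settings.LOGIN_URL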
<import_from_stmt>.test_tensorboard_rest_api TestTensorboardRestAPI<import_from_stmt>.test_tensorboard_server TestTensorboardServer<import_from_stmt>.test_tensorboard_endpoints TestTensorboardEndpoint<line_sep>
<import_from_stmt>loguru logger<line_sep># See "test_catch_exceptions.py" for extended testing <def_stmt>test_backtrace writer<block_start>logger.add(writer format="{message}" backtrace=<true>)<try_stmt><block_start>1/0<block_end><except_stmt>Exception<block_start>logger.exception("")<block_end>result_with=writer.read().strip()<line_sep>logger.remove()<line_sep>writer.clear()<line_sep>logger.add(writer format="{message}" backtrace=<false>)<try_stmt><block_start>1/0<block_end><except_stmt>Exception<block_start>logger.exception("")<block_end>result_without=writer.read().strip()<assert_stmt>len(result_with.splitlines())<g>len(result_without.splitlines())<block_end>
<import_stmt>io<import_stmt>pytest<import_stmt>pytorch_pfn_extras<as>ppe<import_from_stmt>pytorch_pfn_extras.training.extensions _ipython_module_available<import_from_stmt>pytorch_pfn_extras.training.extensions.log_report _pandas_available<line_sep>@pytest.mark.skipif(<not>_ipython_module_available<or><not>_pandas_available reason="print report notebook import failed, "<concat>"maybe ipython is not installed")<def_stmt>test_run_print_report_notebook <block_start>max_epochs=5<line_sep>iters_per_epoch=5<line_sep>manager=ppe.training.ExtensionsManager({} {} max_epochs iters_per_epoch=iters_per_epoch)<line_sep>out=io.StringIO()<line_sep>log_report=ppe.training.extensions.LogReport()<line_sep>manager.extend(log_report)<line_sep>extension=ppe.training.extensions.PrintReportNotebook(out=out)<line_sep>manager.extend(extension)<for_stmt>_ range(max_epochs)<block_start><for_stmt>_ range(iters_per_epoch)<block_start><with_stmt>manager.run_iteration()# Only test it runs without fail # The value is not tested now... <block_start><pass><block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>pytest.main([__file__ '-v' '-s'])<block_end>
<import_stmt>string<import_from_stmt>...errors SimValueError<import_from_stmt>. MemoryMixin<class_stmt>HexDumperMixin(MemoryMixin)<block_start><def_stmt>hex_dump self start size word_size=4 words_per_row=4 endianness="Iend_BE" symbolic_char='?' unprintable_char='.' solve=<false> extra_constraints=<none> inspect=<false> disable_actions=<true><block_start>""" Returns a hex dump as a string. The solver, if enabled, is called once for every byte potentially making this function very slow. It is meant to be used mainly as a "visualization" for debugging. Warning: May read and display more bytes than `size` due to rounding. Particularly, if size is less than, or not a multiple of word_size*words_per_line. :param start: starting address from which to print :param size: number of bytes to display :param word_size: number of bytes to group together as one space-delimited unit :param words_per_row: number of words to display per row of output :param endianness: endianness to use when displaying each word (ASCII representation is unchanged) :param symbolic_char: the character to display when a byte is symbolic and has multiple solutions :param unprintable_char: the character to display when a byte is not printable :param solve: whether or not to attempt to solve (warning: can be very slow) :param extra_constraints: extra constraints to pass to the solver is solve is True :param inspect: whether or not to trigger SimInspect breakpoints for the memory load :param disable_actions: whether or not to disable SimActions for the memory load :return: hex dump as a string """<if_stmt>endianness<eq>"Iend_BE"<block_start>end=1<block_end><else_stmt><block_start>end=-1<block_end><if_stmt>extra_constraints<is><none><block_start>extra_constraints=[]<block_end># round up size so that chop() works line_size=word_size<times>words_per_row<line_sep>size=size<if>size%line_size<eq>0<else>size+line_size-size%line_size<line_sep>raw_mem=super().load(start size=size inspect=inspect disable_actions=disable_actions)<line_sep>i=start<line_sep>dump_str=""<for_stmt>line raw_mem.chop(line_size<times>self.state.arch.byte_width)<block_start>dump="%x:"%i<line_sep>group_str=""<for_stmt>word line.chop(word_size<times>self.state.arch.byte_width)<block_start>word_bytes=""<line_sep>word_str=""<for_stmt>byte_ word.chop(self.state.arch.byte_width)[::end]<block_start>byte_value=<none><if_stmt><not>self.state.solver.symbolic(byte_)<or>solve<block_start><try_stmt><block_start>byte_value=self.state.solver.eval_one(byte_ extra_constraints=extra_constraints)<block_end><except_stmt>SimValueError<block_start><pass><block_end><block_end><if_stmt>byte_value<is><not><none><block_start>word_bytes<augadd>"%02x"%byte_value<if_stmt>chr(byte_value)<in>string.printable[:-5]<block_start>word_str<augadd>chr(byte_value)<block_end><else_stmt><block_start>word_str<augadd>unprintable_char<block_end><block_end><else_stmt><block_start>word_bytes<augadd>symbolic_char<times>2<line_sep>word_str<augadd>symbolic_char<block_end><block_end>dump<augadd>' '+word_bytes<line_sep>group_str<augadd>word_str[::end]# always print ASCII representation in little-endian <block_end>dump<augadd>' '+group_str<line_sep>i<augadd>line_size<line_sep>dump_str<augadd>dump+'\n'<block_end><return>dump_str<block_end><block_end>
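# Hedged sketch: with angr, the mixin above is reachable through a state's memory,
# so dumping the first 64 bytes at the stack pointer might look like this (assumes
# `proj` is a loaded angr.Project and that hex_dump is part of the default memory
# plugin).
state = proj.factory.entry_state()
print(state.memory.hex_dump(state.regs.sp, 64, words_per_row=8, solve=True))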
# Copyright 2011 The scales Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Formatting methods for stats."""<import_from_stmt>greplin scales<import_stmt>cgi<import_stmt>six<import_stmt>json<import_stmt>operator<import_stmt>re<line_sep>OPERATORS={'>=':operator.ge '>':operator.gt '<':operator.lt '<=':operator.le '=':operator.eq '==':operator.eq '!=':operator.ne}<line_sep>OPERATOR=re.compile('(%s)'%'|'.join(list(OPERATORS.keys())))<def_stmt>runQuery statDict query<block_start>"""Filters for the given query."""<line_sep>parts=[x.strip()<for>x OPERATOR.split(query)]<assert_stmt>len(parts)<in>(1 3)<line_sep>queryKey=parts[0]<line_sep>result={}<for_stmt>key,value six.iteritems(statDict)<block_start><if_stmt>key<eq>queryKey<block_start><if_stmt>len(parts)<eq>3<block_start>op=OPERATORS[parts[1]]<try_stmt><block_start>queryValue=type(value)(parts[2])<if>value<else>parts[2]<block_end><except_stmt>(TypeError ValueError)<block_start><continue><block_end><if_stmt><not>op(value queryValue)<block_start><continue><block_end><block_end>result[key]=value<block_end><elif_stmt>isinstance(value scales.StatContainer)<or>isinstance(value dict)<block_start>child=runQuery(value query)<if_stmt>child<block_start>result[key]=child<block_end><block_end><block_end><return>result<block_end><def_stmt>htmlHeader output path serverName query=<none><block_start>"""Writes an HTML header."""<if_stmt>path<and>path<ne>'/'<block_start>output.write('<title>%s - Status: %s</title>'%(serverName path))<block_end><else_stmt><block_start>output.write('<title>%s - Status</title>'%serverName)<block_end>output.write(''' <style> body,td { font-family: monospace } .level div { padding-bottom: 4px; } .level .level { margin-left: 2em; padding: 1px 0; } span { color: #090; vertical-align: top } .key { color: black; font-weight: bold } .int, .float { color: #00c } </style> ''')<line_sep>output.write('<h1 style="margin: 0">Stats</h1>')<line_sep>output.write('<h3 style="margin: 3px 0 18px">%s</h3>'%serverName)<line_sep>output.write('<p><form action="#" method="GET">Filter: <input type="text" name="query" size="20" value="%s"></form></p>'%(query<or>''))<block_end><def_stmt>htmlFormat output pathParts=() statDict=<none> query=<none><block_start>"""Formats as HTML, writing to the given object."""<line_sep>statDict=statDict<or>scales.getStats()<if_stmt>query<block_start>statDict=runQuery(statDict query)<block_end>_htmlRenderDict(pathParts statDict output)<block_end><def_stmt>_htmlRenderDict pathParts statDict output<block_start>"""Render a dictionary as a table - recursing as necessary."""<line_sep>keys=list(statDict.keys())<line_sep>keys.sort()<line_sep>links=[]<line_sep>output.write('<div class="level">')<for_stmt>key keys<block_start>keyStr=cgi.escape(_utf8str(key))<line_sep>value=statDict[key]<if_stmt>hasattr(value '__call__')<block_start>value=value()<block_end><if_stmt>hasattr(value 'keys')<block_start>valuePath=pathParts+(keyStr )<if_stmt>isinstance(value 
scales.StatContainer)<and>value.isCollapsed()<block_start>link='/status/'+'/'.join(valuePath)<line_sep>links.append('<div class="key"><a href="%s">%s</a></div>'%(link keyStr))<block_end><else_stmt><block_start>output.write('<div class="key">%s</div>'%keyStr)<line_sep>_htmlRenderDict(valuePath value output)<block_end><block_end><else_stmt><block_start>output.write('<div><span class="key">%s</span> <span class="%s">%s</span></div>'%(keyStr type(value).__name__ cgi.escape(_utf8str(value)).replace('\n' '<br/>')))<block_end><block_end><if_stmt>links<block_start><for_stmt>link links<block_start>output.write(link)<block_end><block_end>output.write('</div>')<block_end><def_stmt>_utf8str x<block_start>"""Like str(x), but returns UTF8."""<if_stmt>six.PY3<block_start><return>str(x)<block_end><if_stmt>isinstance(x six.binary_type)<block_start><return>x<block_end><elif_stmt>isinstance(x six.text_type)<block_start><return>x.encode('utf-8')<block_end><else_stmt><block_start><return>six.binary_type(x)<block_end><block_end><def_stmt>jsonFormat output statDict=<none> query=<none> pretty=<false><block_start>"""Formats as JSON, writing to the given object."""<line_sep>statDict=statDict<or>scales.getStats()<if_stmt>query<block_start>statDict=runQuery(statDict query)<block_end>indent=2<if>pretty<else><none><line_sep># At first, assume that strings are in UTF-8. If this fails -- if, for example, we have # crazy binary data -- then in order to get *something* out, we assume ISO-8859-1, # which maps each byte to a unicode code point. <try_stmt><block_start>serialized=json.dumps(statDict cls=scales.StatContainerEncoder indent=indent)<block_end><except_stmt>UnicodeDecodeError<block_start>serialized=json.dumps(statDict cls=scales.StatContainerEncoder indent=indent encoding='iso-8859-1')<block_end>output.write(serialized)<line_sep>output.write('\n')<block_end>
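# Usage sketch for runQuery()/jsonFormat() above; the stat dictionary is made up.
import io

stats = {'requests': {'200': 1024, '500': 3}, 'latency': 12.5}
print(runQuery(stats, '500 > 1'))      # -> {'requests': {'500': 3}}
buf = io.StringIO()
jsonFormat(buf, statDict=stats, pretty=True)
print(buf.getvalue())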
#MenuTitle: Steal Kerning Groups from Font """Copy kerning groups from one font to another."""<import_from_future_stmt> print_function<import_stmt>vanilla<class_stmt>GroupsCopy(object)<block_start>"""GUI for copying kerning groups from one font to another"""<def_stmt>__init__ self<block_start>self.w=vanilla.FloatingWindow((400 70) "Steal kerning groups")<line_sep>self.w.text_anchor=vanilla.TextBox((15 12+2 130 14) "Copy groups from:" sizeStyle='small')<line_sep>self.w.from_font=vanilla.PopUpButton((150 12 150 17) self.GetFonts(isSourceFont=<true>) sizeStyle='small' callback=self.buttonCheck)<line_sep>self.w.text_value=vanilla.TextBox((15 12+2+25 130 14) "To selected glyphs in:" sizeStyle='small')<line_sep>self.w.to_font=vanilla.PopUpButton((150 12+25 150 17) self.GetFonts(isSourceFont=<false>) sizeStyle='small' callback=self.buttonCheck)<line_sep>self.w.copybutton=vanilla.Button((-80 12+25 -15 17) "Copy" sizeStyle='small' callback=self.copyGroups)<line_sep>self.w.setDefaultButton(self.w.copybutton)<line_sep>self.w.open()<line_sep>self.buttonCheck(<none>)<block_end><def_stmt>GetFonts self isSourceFont<block_start>myFontList=["%s - %s"%(x.font.familyName x.selectedFontMaster().name)<for>x Glyphs.orderedDocuments()]<if_stmt>isSourceFont<block_start>myFontList.reverse()<block_end><return>myFontList<block_end><def_stmt>buttonCheck self sender<block_start>fromFont=self.w.from_font.getItems()[self.w.from_font.get()]<line_sep>toFont=self.w.to_font.getItems()[self.w.to_font.get()]<if_stmt>fromFont<eq>toFont<block_start>self.w.copybutton.enable(onOff=<false>)<block_end><else_stmt><block_start>self.w.copybutton.enable(onOff=<true>)<block_end><block_end><def_stmt>copyGroups self sender<block_start>fromFont=self.w.from_font.getItems()[self.w.from_font.get()]<line_sep>toFont=self.w.to_font.getItems()[self.w.to_font.get()]<line_sep>Doc_source=[x<for>x Glyphs.orderedDocuments()<if>("%s - %s"%(x.font.familyName x.selectedFontMaster().name))<eq>fromFont][0]<line_sep>Master_source=Doc_source.selectedFontMaster().id<line_sep>Font_source=Doc_source.font<line_sep>Font_target=[x.font<for>x Glyphs.orderedDocuments()<if>("%s - %s"%(x.font.familyName x.selectedFontMaster().name))<eq>toFont][0]<line_sep>Glyphs_selected=[x.parent<for>x Font_target.parent.selectedLayers()]<line_sep>print("Syncing kerning groups for" len(Glyphs_selected) "glyphs from" Font_source.familyName "to" Font_target.familyName ":")<try_stmt><block_start><for_stmt>thisGlyph Glyphs_selected<block_start>glyphName=thisGlyph.name<try_stmt><block_start>sourceGlyph=Font_source.glyphs[glyphName]<line_sep>oldL=thisGlyph.leftKerningGroup<line_sep>oldR=thisGlyph.rightKerningGroup<line_sep>newL=sourceGlyph.leftKerningGroup<line_sep>newR=sourceGlyph.rightKerningGroup<if_stmt>oldL<ne>newL<or>oldR<ne>newR<block_start>thisGlyph.leftKerningGroup=newL<line_sep>thisGlyph.rightKerningGroup=newR<line_sep>print(" " glyphName ":" newL "<--->" newR)<block_end># start: temporary fix for 3.0.3 unwrapped vertical kerning <def_stmt>kerningGetter kerning<block_start><if_stmt>kerning<is><not><none><and><not>isinstance(kerning str)<block_start>kerning=kerning()<block_end><return>kerning<block_end># end: temporary fix for 3.0.3 unwrapped vertical kerning 
oldT=kerningGetter(thisGlyph.topKerningGroup)<line_sep>oldB=kerningGetter(thisGlyph.bottomKerningGroup)<line_sep>newT=kerningGetter(sourceGlyph.topKerningGroup)<line_sep>newB=kerningGetter(sourceGlyph.bottomKerningGroup)<if_stmt>oldT<ne>newT<or>oldB<ne>newB<block_start>thisGlyph.leftKerningGroup=newL<line_sep>thisGlyph.setTopKerningGroup_(newT)<line_sep>thisGlyph.setBottomKerningGroup_(newB)<line_sep>print(" " glyphName ":" newT "\n ^\n |\n V\n" newB)<block_end><pass><block_end><except_stmt>Exception<as>e<block_start>print(" " glyphName ": Error")<line_sep># print e <block_end><block_end><block_end><except_stmt>Exception<as>e<block_start><import_stmt>traceback<line_sep>print(traceback.format_exc())<block_end><finally_stmt><block_start>print("Done.")<block_end>self.w.close()<block_end><block_end>GroupsCopy()<line_sep>
<import_stmt>idaapi<import_from_stmt>idaapi *<line_sep>inifinite_loops=[b"\x00\xbf\xfd\xe7" # loop: nop; b loop b"\xfe\xe7" # loop: b loop ]<line_sep>whitelist=["Reset_Handler" "main"]<def_stmt>detect_noret_funcs <block_start>exit_locs_name_pairs=[]<for_stmt>func_addr Functions()<block_start><if_stmt>get_func_flags(func_addr)&idaapi.FUNC_NORET<block_start>name=get_func_name(func_addr)<if_stmt>name<not><in>whitelist<block_start>print("noret function: '{}' at 0x{:x}".format(name func_addr))<line_sep>exit_locs_name_pairs.append((func_addr name))<block_end><block_end><block_end><return>exit_locs_name_pairs<block_end><def_stmt>detect_exit_ats add_noret_functions=<false># 0. find BKPTs <block_start>exit_locs=[]<line_sep># 1. find noret functions if requested <if_stmt>add_noret_functions<block_start>exit_locs<augadd>detect_noret_funcs()<block_end>cnt=0<line_sep># 2. find infinite loops and BKPT instructions <for_stmt>segea Segments()<block_start><for_stmt>funcea Functions(segea get_segm_end(segea))<block_start>functionName=get_func_name(funcea)<for_stmt>(startea endea) Chunks(funcea)<block_start><for_stmt>head Heads(startea endea)# print(functionName, ":", "0x%08x"%(head), ":", GetDisasm(head)) <block_start><for_stmt>loop_code inifinite_loops<block_start><if_stmt>get_bytes(head len(loop_code))<eq>loop_code<block_start>print("Found endless loop: 0x{:x} (function {})".format(head functionName))<line_sep>exit_locs.append((head "endless_loop_{:02d}_{}".format(cnt functionName)))<line_sep>cnt<augadd>1<block_end><block_end><if_stmt>print_insn_mnem(head)<eq>'BKPT'<block_start>print("Found bkpt: 0x{:x} (function {})".format(head functionName))<line_sep>exit_locs.append((head "bkpt_{:02d}_{}".format(cnt functionName)))<line_sep>cnt<augadd>1<block_end><block_end><block_end><block_end><block_end><return>exit_locs<block_end><def_stmt>print_exit_ats add_noret_functions=<false><block_start>exit_locs=detect_exit_ats(add_noret_functions=add_noret_functions)<line_sep>print("exit_at:")<for_stmt>addr,name exit_locs<block_start>print(" {}: 0x{:08x}".format(name addr))<block_end><block_end><def_stmt>dump_exit_ats filename="exit_ats.yml"<block_start>exit_locs=detect_exit_ats()<with_stmt>open(filename "w")<as>f<block_start>f.write("exit_at:\n")<for_stmt>addr,name exit_locs<block_start>f.write(" {}: 0x{:08x}\n".format(name addr))<block_end><block_end><block_end>dump_exit_ats()<line_sep>
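# Usage sketch: run inside IDA's script console after loading the firmware database.
# The YAML filename is illustrative; note the module already calls dump_exit_ats() on load.
print_exit_ats(add_noret_functions=True)
dump_exit_ats(filename="firmware_exit_ats.yml")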
# encoding: utf-8 <import_stmt>datetime<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<def_stmt>get_next_period_day current period n=1 extra_offset=0<block_start>""" Get the n'th day in next period from current day. Parameters ---------- current : int Current date in format "%Y%m%d". period : str Interval between current and next. {'day', 'week', 'month'} n : int n times period. extra_offset : int n'th business day after next period. Returns ------- nxt : int """<line_sep>current_dt=convert_int_to_datetime(current)<if_stmt>period<eq>'day'<block_start>offset=pd.tseries.offsets.BDay()# move to next business day # offset = offsets.Day <block_end><elif_stmt>period<eq>'week'<block_start>offset=pd.tseries.offsets.Week(weekday=0)# move to next Monday <block_end><elif_stmt>period<eq>'month'<block_start>offset=pd.tseries.offsets.BMonthBegin()# move to first business day of next month # offset = offsets.MonthBegin <block_end><else_stmt><block_start><raise>NotImplementedError("Frequency as {} not support".format(period))<block_end>offset=offset<times>n<line_sep>next_dt=current_dt+offset<if_stmt>extra_offset<block_start>next_dt=next_dt+extra_offset<times>pd.tseries.offsets.BDay()<block_end>nxt=convert_datetime_to_int(next_dt)<line_sep><return>nxt<block_end><def_stmt>convert_int_to_datetime dt<block_start>"""Convert int date (%Y%m%d) to datetime.datetime object."""<if_stmt>isinstance(dt pd.Series)<block_start>dt=dt.astype(str)<block_end><elif_stmt>isinstance(dt int)<block_start>dt=str(dt)<block_end><return>pd.to_datetime(dt format="%Y%m%d")<block_end><def_stmt>convert_datetime_to_int dt<block_start>f=<lambda>x:x.year<times>10000+x.month<times>100+x.day<if_stmt>isinstance(dt (datetime.datetime datetime.date))<block_start>dt=pd.Timestamp(dt)<line_sep>res=f(dt)<block_end><elif_stmt>isinstance(dt np.datetime64)<block_start>dt=pd.Timestamp(dt)<line_sep>res=f(dt)<block_end><else_stmt><block_start>dt=pd.Series(dt)<line_sep>res=dt.apply(f)<block_end><return>res<block_end><def_stmt>shift date n_weeks=0<block_start>"""Shift date backward or forward for n weeks. Parameters ---------- date : int or datetime The date to be shifted. n_weeks : int, optional Positive for increasing date, negative for decreasing date. Default 0 (no shift). Returns ------- res : int or datetime """<line_sep>delta=pd.Timedelta(weeks=n_weeks)<line_sep>is_int=isinstance(date (int np.integer))<if_stmt>is_int<block_start>dt=convert_int_to_datetime(date)<block_end><else_stmt><block_start>dt=date<block_end>res=dt+delta<if_stmt>is_int<block_start>res=convert_datetime_to_int(res)<block_end><return>res<block_end><def_stmt>combine_date_time date time<block_start><return>np.int64(date)<times>1000000+np.int64(time)<block_end><def_stmt>split_date_time dt<block_start>date=dt<floordiv>1000000<line_sep>time=dt%1000000<line_sep><return>date time<block_end><def_stmt>date_to_month ser# ser = pd.Series(ser) <block_start>res=ser%10000<floordiv>100<line_sep>MONTH_MAP={1:'Jan' 2:'Feb' 3:'Mar' 4:'Apr' 5:'May' 6:'Jun' 7:'Jul' 8:'Aug' 9:'Sep' 10:'Oct' 11:'Nov' 12:'Dec'}<line_sep># res = res.replace(MONTH_MAP) <return>res<block_end><def_stmt>date_to_year ser<block_start><return>ser<floordiv>10000<block_end>
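# Quick checks for the date helpers above (pandas required; values follow the %Y%m%d
# convention). 2017-04-28 is a Friday, so the next business day is 2017-05-01.
assert get_next_period_day(20170428, 'day') == 20170501
assert shift(20170428, n_weeks=2) == 20170512
assert combine_date_time(20170428, 93000) == 20170428093000
assert split_date_time(20170428093000) == (20170428, 93000)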
<class_stmt>Acl(object)<block_start><def_stmt>__init__ self read_acl<block_start>self.read_acl=read_acl<block_end>@staticmethod<def_stmt>from_acl_response acl_response<block_start>'''Takes JSON response from API and converts to ACL object'''<if_stmt>'read'<in>acl_response<block_start>read_acl=AclType.from_acl_response(acl_response['read'])<line_sep><return>Acl(read_acl)<block_end><else_stmt><block_start><raise>ValueError('Response does not contain read ACL')<block_end><block_end><def_stmt>to_api_param self<block_start>read_acl_string=self.read_acl.acl_string<if_stmt>read_acl_string<is><none><block_start><return>{'read':[]}<block_end><return>{'read':[read_acl_string]}<block_end><block_end><class_stmt>AclInner(object)<block_start><def_stmt>__init__ self pseudonym acl_string<block_start>self.pseudonym=pseudonym<line_sep>self.acl_string=acl_string<block_end><def_stmt>__repr__ self<block_start><return>'AclType(pseudonym=%s,acl_string=%s)'%(self.pseudonym self.acl_string)<block_end><block_end><class_stmt>AclType(object)<block_start>public=AclInner('public' 'user://*')<line_sep>my_algos=AclInner('my_algos' 'algo://.my/*')<line_sep>private=AclInner('private' <none>)# Really is an empty list default=my_algos<line_sep>types=(public my_algos private)<line_sep>@staticmethod<def_stmt>from_acl_response acl_list<block_start><if_stmt>len(acl_list)<eq>0<block_start><return>AclType.private<block_end><else_stmt><block_start>acl_string=acl_list[0]<for_stmt>t AclType.types<block_start><if_stmt>t.acl_string<eq>acl_string<block_start><return>t<block_end><block_end><else_stmt><block_start><raise>ValueError('Invalid acl string %s'%(acl_list[0]))<block_end><block_end><block_end><block_end><class_stmt>ReadAcl(object)<block_start>public=Acl(AclType.public)<line_sep>private=Acl(AclType.private)<line_sep>my_algos=Acl(AclType.my_algos)<block_end>
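# Round-trip sketch for the ACL helpers above.
acl = Acl.from_acl_response({'read': ['user://*']})
assert acl.read_acl is AclType.public
assert acl.to_api_param() == {'read': ['user://*']}
assert ReadAcl.private.to_api_param() == {'read': []}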
<import_stmt>os<import_from_stmt>pathlib Path<import_from_stmt>ament_index_python.packages get_package_share_directory<import_from_stmt>launch LaunchDescription<import_from_stmt>launch.actions IncludeLaunchDescription SetEnvironmentVariable Shutdown<import_from_stmt>launch.launch_description_sources PythonLaunchDescriptionSource<import_from_stmt>launch_ros.actions Node<def_stmt>generate_launch_description <block_start>bringup_dir=Path(get_package_share_directory('rj_robocup'))<line_sep>launch_dir=bringup_dir/'launch'<line_sep>stdout_linebuf_envvar=SetEnvironmentVariable('RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED' '1')<line_sep>grsim=Node(package='rj_robocup' executable='grSim' arguments=[])<line_sep>radio=Node(package='rj_robocup' executable='sim_radio_node' output='screen' on_exit=Shutdown())<line_sep>control=Node(package='rj_robocup' executable='control_node' output='screen' on_exit=Shutdown())<line_sep>config_server=Node(package='rj_robocup' executable='config_server' output='screen' on_exit=Shutdown())<line_sep>vision_receiver_launch_path=str(launch_dir/"vision_receiver.launch.py")<line_sep>vision_receiver=IncludeLaunchDescription(PythonLaunchDescriptionSource(vision_receiver_launch_path))<line_sep>ref_receiver=Node(package='rj_robocup' executable='internal_referee_node' output='screen' on_exit=Shutdown())<line_sep>vision_filter_launch_path=str(launch_dir/"vision_filter.launch.py")<line_sep>vision_filter=IncludeLaunchDescription(PythonLaunchDescriptionSource(vision_filter_launch_path))<line_sep><return>LaunchDescription([grsim stdout_linebuf_envvar config_server radio control vision_receiver vision_filter ref_receiver])<block_end>
<import_stmt>pygame<import_from_stmt>pygame.locals *<class_stmt>ControlScheme<block_start><def_stmt>__init__ self<block_start>self.up=K_UP<line_sep>self.down=K_DOWN<block_end><block_end><class_stmt>Bat<block_start><def_stmt>__init__ self start_pos control_scheme court_size<block_start>self.control_scheme=control_scheme<line_sep>self.move_up=<false><line_sep>self.move_down=<false><line_sep>self.move_speed=450.0<line_sep>self.court_size=court_size<line_sep>self.length=30.0<line_sep>self.width=5.0<line_sep>self.position=[float(start_pos[0]) float(start_pos[1])]<line_sep>self.rect=pygame.Rect((start_pos[0] start_pos[1]) (self.width self.length))<line_sep>self.colour=pygame.Color("#FFFFFF")<block_end><def_stmt>process_event self event<block_start><if_stmt>event.type<eq>KEYDOWN<block_start><if_stmt>event.key<eq>self.control_scheme.up<block_start>self.move_up=<true><block_end><if_stmt>event.key<eq>self.control_scheme.down<block_start>self.move_down=<true><block_end><block_end><if_stmt>event.type<eq>KEYUP<block_start><if_stmt>event.key<eq>self.control_scheme.up<block_start>self.move_up=<false><block_end><if_stmt>event.key<eq>self.control_scheme.down<block_start>self.move_down=<false><block_end><block_end><block_end><def_stmt>update self dt<block_start><if_stmt>self.move_up<block_start>self.position[1]<augsub>dt<times>self.move_speed<if_stmt>self.position[1]<l>10.0<block_start>self.position[1]=10.0<block_end>self.rect.y=self.position[1]<block_end><if_stmt>self.move_down<block_start>self.position[1]<augadd>dt<times>self.move_speed<if_stmt>self.position[1]<g>self.court_size[1]-self.length-10<block_start>self.position[1]=self.court_size[1]-self.length-10<block_end>self.rect.y=self.position[1]<block_end><block_end><def_stmt>render self screen<block_start>pygame.draw.rect(screen self.colour self.rect)<block_end><block_end>
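# Minimal game-loop sketch driving the Bat above; the window size, start position and
# frame rate are arbitrary choices for illustration.
import pygame
from pygame.locals import QUIT

pygame.init()
screen = pygame.display.set_mode((800, 600))
clock = pygame.time.Clock()
bat = Bat((30, 285), ControlScheme(), (800, 600))
running = True
while running:
    dt = clock.tick(60) / 1000.0          # seconds since the last frame
    for event in pygame.event.get():
        if event.type == QUIT:
            running = False
        bat.process_event(event)
    bat.update(dt)
    screen.fill((0, 0, 0))
    bat.render(screen)
    pygame.display.flip()
pygame.quit()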
""" An image store representing Rackspace specific images """<import_from_future_stmt> absolute_import division unicode_literals<import_stmt>attr<import_from_stmt>six iteritems<import_from_stmt>mimic.model.rackspace_images RackspaceWindowsImage RackspaceCentOSPVImage RackspaceCentOSPVHMImage RackspaceCoreOSImage RackspaceDebianImage RackspaceFedoraImage RackspaceFreeBSDImage RackspaceGentooImage RackspaceOpenSUSEImage RackspaceRedHatPVImage RackspaceRedHatPVHMImage RackspaceUbuntuPVImage RackspaceUbuntuPVHMImage RackspaceVyattaImage RackspaceScientificImage RackspaceOnMetalCentOSImage RackspaceOnMetalCoreOSImage RackspaceOnMetalDebianImage RackspaceOnMetalFedoraImage RackspaceOnMetalUbuntuImage <import_from_stmt>mimic.model.rackspace_images create_rackspace_images<line_sep>@attr.s<class_stmt>RackspaceImageStore(object)<block_start>""" A store for images to share between nova_api and glance_api :var image_list: list of Rackspace images """<line_sep>image_list=attr.ib(default=attr.Factory(list))<def_stmt>create_image_store self tenant_id<block_start>""" Generates the data for each image in each image class """<line_sep>image_classes=[RackspaceWindowsImage RackspaceCentOSPVImage RackspaceCentOSPVHMImage RackspaceCoreOSImage RackspaceDebianImage RackspaceFedoraImage RackspaceFreeBSDImage RackspaceGentooImage RackspaceOpenSUSEImage RackspaceRedHatPVImage RackspaceRedHatPVHMImage RackspaceUbuntuPVImage RackspaceUbuntuPVHMImage RackspaceVyattaImage RackspaceScientificImage RackspaceOnMetalCentOSImage RackspaceOnMetalCoreOSImage RackspaceOnMetalDebianImage RackspaceOnMetalFedoraImage RackspaceOnMetalUbuntuImage]<if_stmt>len(self.image_list)<l>1<block_start><for_stmt>image_class image_classes<block_start><for_stmt>image,image_spec iteritems(image_class.images)<block_start>image_name=image<line_sep>image_id=image_spec['id']<line_sep>minRam=image_spec['minRam']<line_sep>minDisk=image_spec['minDisk']<line_sep>image_size=image_spec['OS-EXT-IMG-SIZE:size']<line_sep>image=image_class(image_id=image_id tenant_id=tenant_id image_size=image_size name=image_name minRam=minRam minDisk=minDisk)<if_stmt>'com.rackspace__1__ui_default_show'<in>image_spec<block_start>image.set_is_default()<block_end>self.image_list.append(image)<block_end><block_end>self.image_list.extend(create_rackspace_images(tenant_id))<block_end><return>self.image_list<block_end><def_stmt>get_image_by_id self image_id<block_start>""" Get an image by its id """<for_stmt>image self.image_list<block_start><if_stmt>image_id<eq>image.image_id<block_start><return>image<block_end><block_end><block_end><def_stmt>add_image_to_store self image<block_start>""" Add a new image to the list of images """<line_sep>self.image_list.append(image)<block_end><block_end>
<import_stmt>os<import_from_stmt>functools lru_cache<import_stmt>numpy<as>np<import_from_stmt>qtpy.QtCore Qt<import_from_stmt>qtpy QtCore QtGui QtWidgets<import_from_stmt>matplotlib.colors ColorConverter<import_from_stmt>glue.utils.qt get_qapp<import_from_stmt>glue.config viewer_tool<import_from_stmt>glue.core BaseData Data<import_from_stmt>glue.utils.qt load_ui<import_from_stmt>glue.viewers.common.qt.data_viewer DataViewer<import_from_stmt>glue.viewers.common.qt.toolbar BasicToolbar<import_from_stmt>glue.viewers.common.tool CheckableTool<import_from_stmt>glue.viewers.common.layer_artist LayerArtist<import_from_stmt>glue.core.subset ElementSubsetState<import_from_stmt>glue.utils.colors alpha_blend_colors<import_from_stmt>glue.utils.qt mpl_to_qt_color messagebox_on_error<import_from_stmt>glue.core.exceptions IncompatibleAttribute<import_from_stmt>glue.viewers.table.compat update_table_viewer_state<try_stmt><block_start><import_stmt>dask.array<as>da<line_sep>DASK_INSTALLED=<true><block_end><except_stmt>ImportError<block_start>DASK_INSTALLED=<false><block_end>__all__=['TableViewer' 'TableLayerArtist']<line_sep>COLOR_CONVERTER=ColorConverter()<class_stmt>DataTableModel(QtCore.QAbstractTableModel)<block_start><def_stmt>__init__ self table_viewer<block_start>super(DataTableModel self).__init__()<if_stmt>table_viewer.data.ndim<ne>1<block_start><raise>ValueError("Can only use Table widget for 1D data")<block_end>self._table_viewer=table_viewer<line_sep>self._data=table_viewer.data<line_sep>self.show_coords=<false><line_sep>self.order=np.arange(self._data.shape[0])<line_sep>self._update_visible()<block_end><def_stmt>data_changed self<block_start>top_left=self.index(0 0)<line_sep>bottom_right=self.index(self.columnCount() self.rowCount())<line_sep>self._update_visible()<line_sep>self.data_by_row_and_column.cache_clear()<line_sep>self.dataChanged.emit(top_left bottom_right)<line_sep>self.layoutChanged.emit()<block_end>@property<def_stmt>columns self<block_start><if_stmt>self.show_coords<block_start><return>self._data.components<block_end><else_stmt><block_start><return>self._data.main_components+self._data.derived_components<block_end><block_end><def_stmt>columnCount self index=<none><block_start><return>len(self.columns)<block_end><def_stmt>rowCount self index=<none># Qt bug: Crashes on tables bigger than this <block_start><return>min(self.order_visible.size 71582788)<block_end><def_stmt>headerData self section orientation role<block_start><if_stmt>role<ne>Qt.DisplayRole<block_start><return><none><block_end><if_stmt>orientation<eq>Qt.Horizontal<block_start>column_name=self.columns[section].label<line_sep>units=self._data.get_component(self.columns[section]).units<if_stmt>units<ne>''<block_start>column_name<augadd>"\n{0}".format(units)<block_end><return>column_name<block_end><elif_stmt>orientation<eq>Qt.Vertical<block_start><return>str(self.order_visible[section])<block_end><block_end><def_stmt>data self index role<block_start><if_stmt><not>index.isValid()<block_start><return><none><block_end><return>self.data_by_row_and_column(index.row() index.column() role)<block_end># The data() method gets called many times, often with the same parameters, # for example if bringing the window to the foreground/background, shifting # up/down/left/right by one cell, etc. This can be very slow when e.g. dask # columns are present so we cache the most recent 65536 calls which should # have a reasonably sensible memory footprint. 
@lru_cache(maxsize=65536)<def_stmt>data_by_row_and_column self row column role<block_start><if_stmt>role<eq>Qt.DisplayRole<block_start>c=self.columns[column]<line_sep>idx=self.order_visible[row]<line_sep>comp=self._data[c]<line_sep>value=comp[idx]<if_stmt>isinstance(value bytes)<block_start><return>value.decode('ascii')<block_end><else_stmt><block_start><if_stmt>DASK_INSTALLED<and>isinstance(value da.Array)<block_start><return>str(value.compute())<block_end><else_stmt><block_start><return>str(comp[idx])<block_end><block_end><block_end><elif_stmt>role<eq>Qt.BackgroundRole<block_start>idx=self.order_visible[row]<line_sep># Find all subsets that this index is part of colors=[]<for_stmt>layer_artist self._table_viewer.layers[::-1]<block_start><if_stmt>isinstance(layer_artist.layer BaseData)<block_start><continue><block_end><if_stmt>layer_artist.visible<block_start>subset=layer_artist.layer<try_stmt><block_start><if_stmt>subset.to_mask(view=slice(idx idx+1))[0]<block_start>colors.append(subset.style.color)<block_end><block_end><except_stmt>IncompatibleAttribute<as>exc# Only disable the layer if enabled, as otherwise we # will recursively call clear and _refresh, causing # an infinite loop and performance issues. <block_start><if_stmt>layer_artist.enabled<block_start>layer_artist.disable_invalid_attributes(*exc.args)<block_end><block_end><else_stmt><block_start>layer_artist.enabled=<true><block_end><block_end><block_end># Blend the colors using alpha blending <if_stmt>len(colors)<g>0<block_start>color=alpha_blend_colors(colors additional_alpha=0.5)<line_sep>color=mpl_to_qt_color(color)<line_sep><return>QtGui.QBrush(color)<block_end><block_end><block_end><def_stmt>sort self column ascending<block_start>c=self.columns[column]<line_sep>comp=self._data.get_component(c)<line_sep>self.order=np.argsort(comp.data)<if_stmt>ascending<eq>Qt.DescendingOrder<block_start>self.order=self.order[::-1]<block_end>self._update_visible()<line_sep>self.data_by_row_and_column.cache_clear()<line_sep>self.layoutChanged.emit()<block_end><def_stmt>_update_visible self<block_start>""" Given which layers are visible or not, convert order to order_visible. 
"""<line_sep>self.data_by_row_and_column.cache_clear()<line_sep># First, if the data layer is visible, show all rows <for_stmt>layer_artist self._table_viewer.layers<block_start><if_stmt>layer_artist.visible<and>isinstance(layer_artist.layer BaseData)<block_start>self.order_visible=self.order<line_sep><return><block_end><block_end># If not then we need to show only the rows with visible subsets visible=np.zeros(self.order.shape dtype=bool)<for_stmt>layer_artist self._table_viewer.layers<block_start><if_stmt>layer_artist.visible<block_start>mask=layer_artist.layer.to_mask()<if_stmt>DASK_INSTALLED<and>isinstance(mask da.Array)<block_start>mask=mask.compute()<block_end>visible<augor>mask<block_end><block_end>self.order_visible=self.order[visible]<block_end><block_end><class_stmt>TableLayerArtist(LayerArtist)<block_start><def_stmt>__init__ self table_viewer viewer_state layer_state=<none> layer=<none><block_start>self._table_viewer=table_viewer<line_sep>super(TableLayerArtist self).__init__(viewer_state layer_state=layer_state layer=layer)<line_sep>self.redraw()<block_end><def_stmt>_refresh self<block_start>self._table_viewer.model.data_changed()<block_end><def_stmt>redraw self<block_start>self._refresh()<block_end><def_stmt>update self<block_start>self._refresh()<block_end><def_stmt>clear self<block_start>self._refresh()<block_end><block_end>@viewer_tool<class_stmt>RowSelectTool(CheckableTool)<block_start>tool_id='table:rowselect'<line_sep>icon='glue_row_select'<line_sep>action_text='Select row(s)'<line_sep>tool_tip=('Select rows by clicking on rows and pressing enter '<concat>'once the selection is ready to be applied')<line_sep>status_tip=('CLICK to select, press ENTER to finalize selection, '<concat>'ALT+CLICK or ALT+UP/DOWN to apply selection immediately')<def_stmt>__init__ self viewer<block_start>super(RowSelectTool self).__init__(viewer)<line_sep>self.deactivate()<block_end><def_stmt>activate self<block_start>self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)<block_end><def_stmt>deactivate self# Don't do anything if the viewer has already been closed <block_start><if_stmt>self.viewer<is><none><block_start><return><block_end>self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)<line_sep>self.viewer.ui.table.clearSelection()<block_end><block_end><class_stmt>TableViewWithSelectionSignal(QtWidgets.QTableView)<block_start>selection_changed=QtCore.Signal()<def_stmt>selectionChanged self *args **kwargs<block_start>self.selection_changed.emit()<line_sep>super(TableViewWithSelectionSignal self).selectionChanged(*args **kwargs)<block_end><block_end><class_stmt>TableViewer(DataViewer)<block_start>LABEL="Table Viewer"<line_sep>_toolbar_cls=BasicToolbar<line_sep>_data_artist_cls=TableLayerArtist<line_sep>_subset_artist_cls=TableLayerArtist<line_sep>inherit_tools=<false><line_sep>tools=['table:rowselect']<def_stmt>__init__ self session state=<none> parent=<none> widget=<none><block_start>super(TableViewer self).__init__(session state=state parent=parent)<line_sep>self.ui=load_ui('data_viewer.ui' 
directory=os.path.dirname(__file__))<line_sep>self.setCentralWidget(self.ui)<line_sep>hdr=self.ui.table.horizontalHeader()<line_sep>hdr.setStretchLastSection(<true>)<line_sep>hdr.setSectionResizeMode(hdr.Interactive)<line_sep>hdr=self.ui.table.verticalHeader()<line_sep>hdr.setSectionResizeMode(hdr.Interactive)<line_sep>self.data=<none><line_sep>self.model=<none><line_sep>self.ui.table.selection_changed.connect(self.selection_changed)<line_sep>self.state.add_callback('layers' self._on_layers_changed)<line_sep>self._on_layers_changed()<block_end><def_stmt>selection_changed self<block_start>app=get_qapp()<if_stmt>app.queryKeyboardModifiers()<eq>Qt.AltModifier<block_start>self.finalize_selection(clear=<false>)<block_end><block_end><def_stmt>keyPressEvent self event<block_start><if_stmt>self.toolbar.active_tool<is>self.toolbar.tools['table:rowselect']<block_start><if_stmt>event.key()<in>[Qt.Key_Enter Qt.Key_Return]<block_start>self.finalize_selection()<block_end><block_end>super(TableViewer self).keyPressEvent(event)<block_end><def_stmt>finalize_selection self clear=<true><block_start>model=self.ui.table.selectionModel()<line_sep>selected_rows=[self.model.order_visible[x.row()]<for>x model.selectedRows()]<line_sep>subset_state=ElementSubsetState(indices=selected_rows data=self.data)<line_sep>mode=self.session.edit_subset_mode<line_sep>mode.update(self._data subset_state focus_data=self.data)<if_stmt>clear# We block the signals here to make sure that we don't update # the subset again once the selection is cleared. <block_start>self.ui.table.blockSignals(<true>)<line_sep>self.ui.table.clearSelection()<line_sep>self.ui.table.blockSignals(<false>)<block_end><block_end><def_stmt>_on_layers_changed self *args<block_start><for_stmt>layer_state self.state.layers<block_start><if_stmt>isinstance(layer_state.layer BaseData)<block_start><break><block_end><block_end><else_stmt><block_start><return><block_end>self.data=layer_state.layer<line_sep>self.setUpdatesEnabled(<false>)<line_sep>self.model=DataTableModel(self)<line_sep>self.ui.table.setModel(self.model)<line_sep>self.setUpdatesEnabled(<true>)<block_end>@messagebox_on_error("Failed to add data")<def_stmt>add_data self data<block_start><with_stmt>self._layer_artist_container.ignore_empty()<block_start>self.state.layers[:]=[]<line_sep><return>super(TableViewer self).add_data(data)<block_end><block_end>@messagebox_on_error("Failed to add subset")<def_stmt>add_subset self subset<block_start><if_stmt>self.data<is><none><block_start>self.add_data(subset.data)<line_sep>self.state.layers[0].visible=<false><block_end><elif_stmt>subset.data<ne>self.data<block_start><raise>ValueError("subset parent data does not match existing table data")<block_end><return>super(TableViewer self).add_subset(subset)<block_end>@property<def_stmt>window_title self<block_start><if_stmt>len(self.state.layers)<g>0<block_start><return>'Table: '+self.state.layers[0].layer.label<block_end><else_stmt><block_start><return>'Table'<block_end><block_end><def_stmt>closeEvent self event<block_start>""" On close, Qt seems to scan through the entire model if the data set is big. 
To sidestep that, we swap out with a tiny data set before closing """<line_sep>super(TableViewer self).closeEvent(event)<if_stmt>self.model<is><not><none><block_start>self.model._data=Data(x=[0])<block_end>event.accept()<block_end><def_stmt>get_layer_artist self cls layer=<none> layer_state=<none><block_start><return>cls(self self.state layer=layer layer_state=layer_state)<block_end>@staticmethod<def_stmt>update_viewer_state rec context<block_start><return>update_table_viewer_state(rec context)<block_end><block_end>
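# Sketch: opening this viewer from a glue Qt application. The import paths below are an
# assumption tied to the same glue generation the viewer code targets.
from glue.core import Data, DataCollection
from glue.app.qt import GlueApplication

data = Data(x=[1, 2, 3], y=[4.0, 5.0, 6.0], label='table data')
app = GlueApplication(DataCollection([data]))
viewer = app.new_data_viewer(TableViewer)
viewer.add_data(data)
app.start()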
# Copyright 2015 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> absolute_import<import_from_stmt>contextlib contextmanager<import_stmt>functools<import_stmt>logging<import_from_stmt>uuid uuid4<import_stmt>google.cloud.exceptions<import_from_stmt>.globals queue_context<import_from_stmt>.storage Storage<import_from_stmt>.task Task TaskResult<import_from_stmt>.utils dumps measure_time unpickle UnpickleError<line_sep>logger=logging.getLogger(__name__)<line_sep>PUBSUB_OBJECT_PREFIX='psq'<class_stmt>Queue(object)<block_start><def_stmt>__init__ self publisher_client subscriber_client project name='default' storage=<none> extra_context=<none> asynchronous=<true><block_start>self._async=asynchronous<line_sep>self.name=name<line_sep>self.project=project<if_stmt>self._async<block_start>self.publisher_client=publisher_client<line_sep>self.subscriber_client=subscriber_client<line_sep>self.topic_path=self._get_or_create_topic()<block_end>self.storage=storage<or>Storage()<line_sep>self.subscription=<none><line_sep>self.extra_context=extra_context<if>extra_context<else>dummy_context<block_end><def_stmt>_get_topic_path self<block_start>topic_name='{}-{}'.format(PUBSUB_OBJECT_PREFIX self.name)<line_sep><return>self.publisher_client.topic_path(self.project topic_name)<block_end><def_stmt>_get_or_create_topic self<block_start>topic_path=self._get_topic_path()<try_stmt><block_start>self.publisher_client.get_topic(topic_path)<block_end><except_stmt>google.cloud.exceptions.NotFound<block_start>logger.info("Creating topic {}".format(topic_path))<try_stmt><block_start>self.publisher_client.create_topic(topic_path)<block_end><except_stmt>google.cloud.exceptions.Conflict# Another process created the topic before us, ignore. <block_start><pass><block_end><block_end><return>topic_path<block_end><def_stmt>_get_or_create_subscription self<block_start>"""Workers all share the same subscription so that tasks are distributed across all workers."""<line_sep>topic_path=self._get_topic_path()<line_sep>subscription_name='{}-{}-shared'.format(PUBSUB_OBJECT_PREFIX self.name)<line_sep>subscription_path=self.subscriber_client.subscription_path(self.project subscription_name)<try_stmt><block_start>self.subscriber_client.get_subscription(subscription_path)<block_end><except_stmt>google.cloud.exceptions.NotFound<block_start>logger.info("Creating shared subscription {}".format(subscription_name))<try_stmt><block_start>self.subscriber_client.create_subscription(subscription_path topic=topic_path)<block_end><except_stmt>google.cloud.exceptions.Conflict# Another worker created the subscription before us, ignore. <block_start><pass><block_end><block_end><return>subscription_path<block_end><def_stmt>enqueue self f *args **kwargs<block_start>"""Enqueues a function for the task queue to execute."""<line_sep>task=Task(uuid4().hex f args kwargs)<line_sep>self.storage.put_task(task)<line_sep><return>self.enqueue_task(task)<block_end><def_stmt>enqueue_task self task<block_start>"""Enqueues a task directly. 
This is used when a task is retried or if a task was manually created. Note that this does not store the task. """<line_sep>data=dumps(task)<if_stmt>self._async<block_start>self.publisher_client.publish(self.topic_path data=data)<line_sep>logger.info('Task {} queued.'.format(task.id))<block_end><else_stmt><block_start>unpickled_task=unpickle(data)<line_sep>logger.info('Executing task {} synchronously.'.format(unpickled_task.id))<with_stmt>measure_time()<as>summary self.queue_context()<block_start>unpickled_task.execute(queue=self)<line_sep>summary(unpickled_task.summary())<block_end><block_end><return>TaskResult(task.id self)<block_end>@staticmethod<def_stmt>_pubsub_message_callback task_callback message<block_start>message.ack()<try_stmt><block_start>task=unpickle(message.data)<line_sep>task_callback(task)<block_end><except_stmt>UnpickleError<block_start>logger.exception('Failed to unpickle task {}.'.format(message))<block_end><block_end><def_stmt>listen self callback<block_start><if_stmt><not>self.subscription<block_start>self.subscription=self._get_or_create_subscription()<block_end>message_callback=functools.partial(self._pubsub_message_callback callback)<line_sep><return>self.subscriber_client.subscribe(self.subscription callback=message_callback)<block_end><def_stmt>cleanup self<block_start>"""Does nothing for this queue, but other queues types may use this to perform clean-up after listening for tasks."""<line_sep><pass><block_end><def_stmt>queue_context self<block_start>""" Returns a context manager that sets this queue as the current_queue global. Similar to flask's app.app_context. This is used by the workers to make the global available inside of task functions. """<line_sep><return>queue_context(self)<block_end><block_end>@contextmanager<def_stmt>dummy_context <block_start><yield><block_end>
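# Usage sketch for the queue above; the GCP project id and the task function are
# placeholders, and working Pub/Sub credentials are assumed.
from google.cloud import pubsub_v1

def adder(a, b):
    return a + b

publisher = pubsub_v1.PublisherClient()
subscriber = pubsub_v1.SubscriberClient()
q = Queue(publisher, subscriber, 'my-gcp-project')
result = q.enqueue(adder, 1, 2)   # returns a TaskResult wrapping the task id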
<import_stmt>unittest<import_stmt>os<import_stmt>tempfile<import_stmt>shutil<import_stmt>contextlib<import_from_stmt>pytest warns<import_from_stmt>scrapy.exceptions ScrapyDeprecationWarning<import_from_stmt>scrapy.utils.project data_path get_project_settings<line_sep>@contextlib.contextmanager<def_stmt>inside_a_project <block_start>prev_dir=os.getcwd()<line_sep>project_dir=tempfile.mkdtemp()<try_stmt><block_start>os.chdir(project_dir)<with_stmt>open('scrapy.cfg' 'w')<as>f# create an empty scrapy.cfg <block_start>f.close()<block_end><yield>project_dir<block_end><finally_stmt><block_start>os.chdir(prev_dir)<line_sep>shutil.rmtree(project_dir)<block_end><block_end><class_stmt>ProjectUtilsTest(unittest.TestCase)<block_start><def_stmt>test_data_path_outside_project self<block_start>self.assertEqual(os.path.join('.scrapy' 'somepath') data_path('somepath'))<line_sep>abspath=os.path.join(os.path.sep 'absolute' 'path')<line_sep>self.assertEqual(abspath data_path(abspath))<block_end><def_stmt>test_data_path_inside_project self<block_start><with_stmt>inside_a_project()<as>proj_path<block_start>expected=os.path.join(proj_path '.scrapy' 'somepath')<line_sep>self.assertEqual(os.path.realpath(expected) os.path.realpath(data_path('somepath')))<line_sep>abspath=os.path.join(os.path.sep 'absolute' 'path')<line_sep>self.assertEqual(abspath data_path(abspath))<block_end><block_end><block_end>@contextlib.contextmanager<def_stmt>set_env **update<block_start>modified=set(update.keys())&set(os.environ.keys())<line_sep>update_after={k:os.environ[k]<for>k modified}<line_sep>remove_after=frozenset(k<for>k update<if>k<not><in>os.environ)<try_stmt><block_start>os.environ.update(update)<line_sep><yield><block_end><finally_stmt><block_start>os.environ.update(update_after)<for_stmt>k remove_after<block_start>os.environ.pop(k)<block_end><block_end><block_end><class_stmt>GetProjectSettingsTestCase(unittest.TestCase)<block_start><def_stmt>test_valid_envvar self<block_start>value='tests.test_cmdline.settings'<line_sep>envvars={'SCRAPY_SETTINGS_MODULE':value }<with_stmt>set_env(**envvars) warns(<none>)<as>warnings<block_start>settings=get_project_settings()<block_end><assert_stmt><not>warnings<assert_stmt>settings.get('SETTINGS_MODULE')<eq>value<block_end><def_stmt>test_invalid_envvar self<block_start>envvars={'SCRAPY_FOO':'bar' }<with_stmt>set_env(**envvars) warns(<none>)<as>warnings<block_start>get_project_settings()<block_end><assert_stmt>len(warnings)<eq>1<assert_stmt>warnings[0].category<eq>ScrapyDeprecationWarning<assert_stmt>str(warnings[0].message).endswith(': FOO')<block_end><def_stmt>test_valid_and_invalid_envvars self<block_start>value='tests.test_cmdline.settings'<line_sep>envvars={'SCRAPY_FOO':'bar' 'SCRAPY_SETTINGS_MODULE':value }<with_stmt>set_env(**envvars) warns(<none>)<as>warnings<block_start>settings=get_project_settings()<block_end><assert_stmt>len(warnings)<eq>1<assert_stmt>warnings[0].category<eq>ScrapyDeprecationWarning<assert_stmt>str(warnings[0].message).endswith(': FOO')<assert_stmt>settings.get('SETTINGS_MODULE')<eq>value<block_end><block_end>
<import_from_stmt>trainer.normal NormalTrainer<import_from_stmt>config cfg<def_stmt>get_trainer <block_start>pair={'normal':NormalTrainer}<assert_stmt>(cfg.train.trainer<in>pair)<line_sep><return>pair[cfg.train.trainer]()<block_end>
# Copyright 2017 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Google Cloud Platform library - datalab cell magic."""<import_from_future_stmt> absolute_import<import_from_future_stmt> unicode_literals<try_stmt><block_start><import_stmt>IPython<import_stmt>IPython.core.display<import_stmt>IPython.core.magic<block_end><except_stmt>ImportError<block_start><raise>Exception('This module can only be loaded in ipython.')<block_end><import_stmt>google.datalab.utils.commands<line_sep>@IPython.core.magic.register_line_cell_magic<def_stmt>datalab line cell=<none><block_start>"""Implements the datalab cell magic for ipython notebooks. Args: line: the contents of the datalab line. Returns: The results of executing the cell. """<line_sep>parser=google.datalab.utils.commands.CommandParser(prog='%datalab' description=""" Execute operations that apply to multiple Datalab APIs. Use "%datalab <command> -h" for help on a specific command. """)<line_sep>config_parser=parser.subcommand('config' help='List or set API-specific configurations.')<line_sep>config_sub_commands=config_parser.add_subparsers(dest='command')<line_sep># %%datalab config list config_list_parser=config_sub_commands.add_parser('list' help='List configurations')<line_sep>config_list_parser.set_defaults(func=_config_list_fn)<line_sep># %%datalab config set -n <NAME> -v <VALUE> config_set_parser=config_sub_commands.add_parser('set' help='Set configurations')<line_sep>config_set_parser.add_argument('-n' '--name' help='The name of the configuration value' required=<true>)<line_sep>config_set_parser.add_argument('-v' '--value' help='The value to set' required=<true>)<line_sep>config_set_parser.set_defaults(func=_config_set_fn)<line_sep>project_parser=parser.subcommand('project' help='Get or set the default project ID')<line_sep>project_sub_commands=project_parser.add_subparsers(dest='command')<line_sep># %%datalab project get project_get_parser=project_sub_commands.add_parser('get' help='Get the default project ID')<line_sep>project_get_parser.set_defaults(func=_project_get_fn)<line_sep># %%datalab project set -p <PROJECT_ID> project_set_parser=project_sub_commands.add_parser('set' help='Set the default project ID')<line_sep>project_set_parser.add_argument('-p' '--project' help='The default project ID' required=<true>)<line_sep>project_set_parser.set_defaults(func=_project_set_fn)<line_sep><return>google.datalab.utils.commands.handle_magic_line(line cell parser)<block_end><def_stmt>_config_list_fn args cell<block_start>ctx=google.datalab.Context.default()<line_sep><return>google.datalab.utils.commands.render_dictionary([ctx.config])<block_end><def_stmt>_config_set_fn args cell<block_start>name=args['name']<line_sep>value=args['value']<line_sep>ctx=google.datalab.Context.default()<line_sep>ctx.config[name]=value<line_sep><return>google.datalab.utils.commands.render_dictionary([ctx.config])<block_end><def_stmt>_project_get_fn args 
cell<block_start>ctx=google.datalab.Context.default()<line_sep><return>google.datalab.utils.commands.render_text(ctx.project_id)<block_end><def_stmt>_project_set_fn args cell<block_start>project=args['project']<line_sep>ctx=google.datalab.Context.default()<line_sep>ctx.set_project_id(project)<line_sep><return><block_end>
<import_stmt>_plotly_utils.basevalidators<class_stmt>ConnectorValidator(_plotly_utils.basevalidators.CompoundValidator)<block_start><def_stmt>__init__ self plotly_name="connector" parent_name="waterfall" **kwargs<block_start>super(ConnectorValidator self).__init__(plotly_name=plotly_name parent_name=parent_name data_class_str=kwargs.pop("data_class_str" "Connector") data_docs=kwargs.pop("data_docs" """ line :class:`plotly.graph_objects.waterfall.connecto r.Line` instance or dict with compatible properties mode Sets the shape of connector lines. visible Determines if connector lines are drawn. """ ) **kwargs)<block_end><block_end>
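# The validator above backs the `connector` property of waterfall traces; a minimal
# figure exercising it (trace values are arbitrary):
import plotly.graph_objects as go

fig = go.Figure(go.Waterfall(
    x=["start", "step", "total"],
    y=[10, -4, None],
    measure=["absolute", "relative", "total"],
    connector={"mode": "between", "line": {"width": 1}},
))
fig.show()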
<import_stmt>json<import_stmt>regex<import_stmt>nltk.data<import_from_stmt>nltk.tokenize word_tokenize<import_stmt>sys<line_sep>sent_detector=nltk.data.load('tokenizers/punkt/english.pickle')<def_stmt>tokenize string<block_start><return>word_tokenize(string)<block_end><def_stmt>split_paragraphs text<block_start>""" remove urls, lowercase all words and separate paragraphs """<line_sep>splits=regex.split(r'\n+' text)<line_sep>paras=[]<for_stmt>split splits[1:]# skip the titles <block_start>split=split.strip()<if_stmt>len(split)<eq>0<block_start><continue><block_end><if_stmt>'Section::'<in>split<block_start><continue><block_end>paras.append(split)<block_end>paras=" ".join(paras)<line_sep><return>sent_detector.tokenize(paras)<block_end><def_stmt>split_sent sent<block_start>strings=regex.split('<a |</a>' sent)<line_sep>new_strings=[]<line_sep>count=0<for_stmt>s strings<block_start>s=s.strip()<if_stmt>s<block_start><if_stmt>'href='<in>s<block_start>s=s.lstrip('href="')<line_sep>href,text=s.split('">')<line_sep>new_strings.append((text href))<line_sep>count<augadd>1<block_end><else_stmt><block_start>ss=tokenize(s)<line_sep>new_strings.extend([(_ <none>)<for>_ ss])<block_end><block_end><block_end><return>new_strings count/len(new_strings) count<block_end>fw=open('out-more.json' 'w')<with_stmt>open('en.json' 'r')<as>f<block_start><for_stmt>i,line enumerate(f)<block_start>data=json.loads(line)<line_sep>entry={"id":data['id'] "url":data['url'] 'title':data['title']}<line_sep>outputs=[]<if_stmt>len(data['text'])<g>50<block_start><try_stmt><block_start>sents=split_paragraphs(data['text'])<for_stmt>sent sents<block_start><if_stmt>len(sent)<l>400<block_start>output,ratio,count=split_sent(sent)<if_stmt>count<g>1<and>ratio<ge>0.10<and>len(output)<ge>8<and>output[0][0][0].isupper()<block_start>text=[_[0]<for>_ output]<line_sep>hyperlink=[_[1]<for>_ output]<line_sep>outputs.append((text hyperlink))<block_end><block_end><block_end><block_end><except_stmt>Exception<block_start><pass><block_end><block_end><if_stmt>len(outputs)<g>0<block_start>entry['text']=outputs<line_sep>fw.write(json.dumps(entry)+'\n')<block_end>sys.stdout.write('finished {}/{} \r'.format(i 5989879))<block_end><block_end>fw.close()<line_sep>
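# Example of the anchor-splitting helper above on a toy sentence (the NLTK 'punkt'
# tokenizer data must be downloaded).
sample = 'He was born in <a href="Paris">Paris</a> in 1900 .'
tokens, ratio, count = split_sent(sample)
# tokens is a list of (word, href) pairs: ('Paris', 'Paris') for the anchor and
# (word, None) for plain words; count == 1 hyperlink was found.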
<import_stmt>re<import_from_stmt>.._compat integer_types long<import_from_stmt>.base SQLAdapter<import_from_stmt>. adapters<line_sep>@adapters.register_for("sapdb")<class_stmt>SAPDB(SQLAdapter)<block_start>dbengine="sapdb"<line_sep>drivers=("sapdb" )<line_sep>REGEX_URI=("^(?P<user>[^:@]+)(:(?P<password>[^@]*))?"<concat>r"@(?P<host>[^:/]+|\[[^\]]+\])/(?P<db>[^?]+)$")<def_stmt>_initialize_ self<block_start>super(SAPDB self)._initialize_()<line_sep>ruri=self.uri.split("://" 1)[1]<line_sep>m=re.match(self.REGEX_URI ruri)<if_stmt><not>m<block_start><raise>SyntaxError("Invalid URI string in DAL")<block_end>user=self.credential_decoder(m.group("user"))<line_sep>password=self.credential_decoder(m.group("password"))<if_stmt>password<is><none><block_start>password=""<block_end>host=m.group("host")<line_sep>db=m.group("db")<line_sep>self.driver_args.update(user=user password=password database=db host=host)<block_end><def_stmt>connector self<block_start>self.driver.connect(**self.driver_args)<block_end><def_stmt>lastrowid self table<block_start>self.execute("select %s.NEXTVAL from dual"%table._sequence_name)<line_sep><return>long(self.cursor.fetchone()[0])<block_end><def_stmt>create_sequence_and_triggers self query table **args<block_start>self.execute("CREATE SEQUENCE %s;"%table._sequence_name)<line_sep>self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');"%(table._rname table._id._rname table._sequence_name))<line_sep>self.execute(query)<block_end><block_end>
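# Connection-string sketch for this adapter; host, credentials and database name are
# placeholders, and the sapdb driver module must be installed.
from pydal import DAL

db = DAL('sapdb://scott:tiger@dbhost/mydb')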
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MolTree"""<import_stmt>rdkit<import_stmt>rdkit.Chem<as>Chem<import_from_stmt>src.chemutils get_clique_mol tree_decomp get_mol get_smiles set_atommap enum_assemble decode_stereo<import_from_stmt>src.vocab Vocab<class_stmt>MolTreeNode(object)<block_start>"""MolTreeNode"""<def_stmt>__init__ self smiles clique=[]<block_start>self.smiles=smiles<line_sep>self.mol=get_mol(self.smiles)<line_sep>self.clique=[x<for>x clique]<line_sep>self.neighbors=[]<block_end><def_stmt>add_neighbor self nei_node<block_start>"""add a neighbor node """<line_sep>self.neighbors.append(nei_node)<block_end><def_stmt>recover self original_mol<block_start>"""tbd"""<line_sep>clique=[]<line_sep>clique.extend(self.clique)<if_stmt><not>self.is_leaf<block_start><for_stmt>cidx self.clique<block_start>original_mol.GetAtomWithIdx(cidx).SetAtomMapNum(self.nid)<block_end><block_end><for_stmt>nei_node self.neighbors<block_start>clique.extend(nei_node.clique)<if_stmt>nei_node.is_leaf<block_start><continue><block_end><for_stmt>cidx nei_node.clique<block_start><if_stmt>cidx<not><in>self.clique<or>len(nei_node.clique)<eq>1<block_start>atom=original_mol.GetAtomWithIdx(cidx)<line_sep>atom.SetAtomMapNum(nei_node.nid)<block_end><block_end><block_end>clique=list(set(clique))<line_sep>label_mol=get_clique_mol(original_mol clique)<line_sep>self.label=Chem.MolToSmiles(Chem.MolFromSmiles(get_smiles(label_mol)))<for_stmt>cidx clique<block_start>original_mol.GetAtomWithIdx(cidx).SetAtomMapNum(0)<block_end><return>self.label<block_end><def_stmt>assemble self<block_start>"""get candidate subgraph info"""<line_sep>neighbors=[nei<for>nei self.neighbors<if>nei.mol.GetNumAtoms()<g>1]<line_sep>neighbors=sorted(neighbors key=<lambda>x:x.mol.GetNumAtoms() reverse=<true>)<line_sep>singletons=[nei<for>nei self.neighbors<if>nei.mol.GetNumAtoms()<eq>1]<line_sep>neighbors=singletons+neighbors<line_sep>cands,aroma=enum_assemble(self neighbors [] [])<line_sep>new_cands=[cand<for>i,cand enumerate(cands)<if>aroma[i]<ge>0]<if_stmt>len(new_cands)<g>0<block_start>cands=new_cands<block_end><if_stmt>len(cands)<g>0<block_start>self.cands,_=zip(*cands)<line_sep>self.cands=list(self.cands)<block_end><else_stmt><block_start>self.cands=[]<block_end><block_end><block_end><class_stmt>MolTree(object)<block_start>"""MolTree"""<def_stmt>__init__ self smiles<block_start>self.smiles=smiles<line_sep>self.mol=get_mol(smiles)<line_sep>cliques,edges=tree_decomp(self.mol)<line_sep>self.nodes=[]<line_sep>root=0<for_stmt>i,c enumerate(cliques)<block_start>cmol=get_clique_mol(self.mol c)<line_sep>node=MolTreeNode(get_smiles(cmol) c)<line_sep>self.nodes.append(node)<if_stmt>min(c)<eq>0<block_start>root=i<block_end><block_end><for_stmt>x,y edges<block_start>self.nodes[x].add_neighbor(self.nodes[y])<line_sep>self.nodes[y].add_neighbor(self.nodes[x])<block_end><if_stmt>root<g>0<block_start>self.nodes[0],self.nodes[root]=self.nodes[root] self.nodes[0]<block_end><for_stmt>i,node 
enumerate(self.nodes)<block_start>node.nid=i+1<if_stmt>len(node.neighbors)<g>1<block_start>set_atommap(node.mol node.nid)<block_end>node.is_leaf=(len(node.neighbors)<eq>1)<block_end><block_end><def_stmt>size self<block_start>"""return nodes nums"""<line_sep><return>len(self.nodes)<block_end><def_stmt>recover self<block_start>"""recover nodes"""<for_stmt>node self.nodes<block_start>node.recover(self.mol)<block_end><block_end><def_stmt>assemble self<block_start>"""assemble nodes"""<for_stmt>node self.nodes<block_start>node.assemble()<block_end><block_end><block_end><def_stmt>dfs node fa_idx<block_start>"""dfs"""<line_sep>max_depth=0<for_stmt>child node.neighbors<block_start><if_stmt>child.idx<eq>fa_idx<block_start><continue><block_end>max_depth=max(max_depth dfs(child node.idx))<block_end><return>max_depth+1<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--train_path' required=<true>)<line_sep>parser.add_argument('--vocab_path' required=<true>)<line_sep>args=parser.parse_args()<line_sep>lg=rdkit.RDLogger.logger()<line_sep>lg.setLevel(rdkit.RDLogger.CRITICAL)<with_stmt>open(args.train_path 'r')<as>f<block_start>data=f.read().splitlines()<block_end>cset=set()<for_stmt>item data<block_start>smiles=item.split()[0]<line_sep>mol=MolTree(smiles)<for_stmt>c mol.nodes<block_start>cset.add(c.smiles)<block_end><block_end><with_stmt>open(args.vocab_path 'w')<as>f<block_start><for_stmt>c cset<block_start>f.write(c+'\n')<block_end><block_end><block_end>
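# Quick check of the junction-tree decomposition above (RDKit required; the SMILES is aspirin).
tree = MolTree('CC(=O)Oc1ccccc1C(=O)O')
print(tree.size())
for node in tree.nodes:
    print(node.nid, node.smiles, [n.nid for n in node.neighbors])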
# -*- coding: utf-8 -*- """ Created on 2017-4-25 @author: cheng.li """<import_stmt>datetime<as>dt<import_stmt>numpy<as>np<import_from_stmt>sklearn.linear_model LinearRegression<import_from_stmt>alphamind.data.neutralize neutralize<def_stmt>benchmark_neutralize n_samples:int n_features:int n_loops:int<arrow><none><block_start>print("-"<times>60)<line_sep>print("Starting least square fitting benchmarking")<line_sep>print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples n_features n_loops))<line_sep>y=np.random.randn(n_samples 5)<line_sep>x=np.random.randn(n_samples n_features)<line_sep>start=dt.datetime.now()<for_stmt>_ range(n_loops)<block_start>calc_res=neutralize(x y)<block_end>impl_model_time=dt.datetime.now()-start<line_sep>print('{0:20s}: {1}'.format('Implemented model' impl_model_time))<line_sep>start=dt.datetime.now()<for_stmt>_ range(n_loops)<block_start>benchmark_model=LinearRegression(fit_intercept=<false>)<line_sep>benchmark_model.fit(x y)<line_sep>exp_res=y-x@benchmark_model.coef_.T<block_end>benchmark_model_time=dt.datetime.now()-start<line_sep>print('{0:20s}: {1}'.format('Benchmark model' benchmark_model_time))<line_sep>np.testing.assert_array_almost_equal(calc_res exp_res)<block_end><def_stmt>benchmark_neutralize_with_groups n_samples:int n_features:int n_loops:int n_groups:int<arrow><none><block_start>print("-"<times>60)<line_sep>print("Starting least square fitting with group benchmarking")<line_sep>print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples n_features n_loops n_groups))<line_sep>y=np.random.randn(n_samples 5)<line_sep>x=np.random.randn(n_samples n_features)<line_sep>groups=np.random.randint(n_groups size=n_samples)<line_sep>start=dt.datetime.now()<for_stmt>_ range(n_loops)<block_start>_=neutralize(x y groups)<block_end>impl_model_time=dt.datetime.now()-start<line_sep>print('{0:20s}: {1}'.format('Implemented model' impl_model_time))<line_sep>start=dt.datetime.now()<line_sep>model=LinearRegression(fit_intercept=<false>)<for_stmt>_ range(n_loops)<block_start><for_stmt>i range(n_groups)<block_start>curr_x=x[groups<eq>i]<line_sep>curr_y=y[groups<eq>i]<line_sep>model.fit(curr_x curr_y)<line_sep>_=curr_y-curr_x@model.coef_.T<block_end><block_end>benchmark_model_time=dt.datetime.now()-start<line_sep>print('{0:20s}: {1}'.format('Benchmark model' benchmark_model_time))<block_end><if_stmt>__name__<eq>'__main__'<block_start>benchmark_neutralize(3000 10 1000)<line_sep>benchmark_neutralize_with_groups(3000 10 1000 30)<block_end>
# coding: utf-8 # DJANGO IMPORTS <import_from_stmt>django.conf settings<line_sep># Admin Site Title ADMIN_HEADLINE=getattr(settings "GRAPPELLI_ADMIN_HEADLINE" 'Grappelli')<line_sep>ADMIN_TITLE=getattr(settings "GRAPPELLI_ADMIN_TITLE" 'Grappelli')<line_sep># Link to your Main Admin Site (no slashes at start and end) ADMIN_URL=getattr(settings "GRAPPELLI_ADMIN_URL" '/admin/')<line_sep>
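# settings.py sketch: the defaults above are overridden from the Django settings module.
GRAPPELLI_ADMIN_HEADLINE = "Example Co"
GRAPPELLI_ADMIN_TITLE = "Example Co Admin"
GRAPPELLI_ADMIN_URL = "/admin/"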
<import_from_stmt>..utils run<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<def_stmt>process_one_package path package python_version="3"<block_start>"""Get details about one precise python package in the given image. :param path: path were the docker image filesystem is expanded. :type path: string :param package: name of the python package to get info from. :type package: string :param python_version: version of python to use. can be "2" or "3". default to "3". :type python_version: string :return: list containing package name, version and size :rtype: list[string, string, int] """<line_sep>command=f"sudo chroot {path} pip{python_version} show {package}"<line_sep>info=get_ipython().getoutput(command)<for_stmt>line info<block_start><if_stmt>"Name"<in>line<block_start>name=line.split(" ").pop()<block_end><if_stmt>"Version"<in>line<block_start>version=line.split(" ").pop()<block_end><if_stmt>"Location"<in>line<block_start>location=line.split(" ").pop()<block_end><block_end>result=get_ipython().getoutput(f"du --max-depth=0 {path}{location}/{name}").pop()<line_sep># If the folder does not exist, try lowercase <if_stmt>"cannot access"<in>result<block_start>result=get_ipython().getoutput(f"du --max-depth=0 {path}{location}/{name.lower()}").pop()<block_end># If the lowercase folder do not exist either <if_stmt>"cannot access"<not><in>result<block_start>size=int(result.split('\t').pop(0))<block_end># List the files by hand <else_stmt><block_start>command=f"sudo chroot {path} pip{python_version} show {package} -f"<line_sep>info=get_ipython().getoutput(command)<line_sep>flag=<false><line_sep>size=0<for_stmt>line info<block_start><if_stmt>flag<block_start>command=f"du {path}{location}/{line.strip()}"<line_sep>size<augadd>int(get_ipython().getoutput(command).pop().split('\t').pop(0))<block_end><if_stmt>'Files'<in>line<block_start>flag=<true><block_end><block_end><block_end><return>[name version size]<block_end><def_stmt>get_python_packages_info path python_version="3"<block_start>"""Get details about all python packages in an image filesystem. :param path: path were the docker image filesystem is expanded. :type path: string :param python_version: version of python to use. can be "2" or "3". default to "3". :type python_version: string :return: list containing lists of each package's name, version and size :rtype: list[list[string, string, int]] """<line_sep>command=f"sudo chroot {path} pip{python_version} list --format freeze --no-cache-dir 2>/dev/null"<line_sep>packages=[package.split('==')<for>package get_ipython().getoutput(command)]<line_sep>package_list=[]<for_stmt>package packages<block_start><try_stmt><block_start>package_list.append(process_one_package(path package[0]))<block_end><except_stmt>Exception<as>e<block_start>logger.error("Error processing python packages" package[0] e)<line_sep><pass><block_end><block_end><return>package_list<block_end>
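# Usage sketch (IPython/Jupyter only, since the helpers rely on get_ipython()); the path
# is a placeholder for an unpacked container-image filesystem.
packages = get_python_packages_info('/tmp/image_rootfs', python_version="3")
for name, version, size in packages:
    print(name, version, size)   # size as reported by `du` (KiB blocks)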
''' Test redirection behavior to invalid addresses '''<line_sep># Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>enum Enum<import_stmt>re<import_stmt>os<import_stmt>socket<import_stmt>sys<line_sep>Test.Summary=''' Test redirection behavior to invalid addresses '''<line_sep>Test.ContinueOnFail=<false><line_sep>Test.Setup.Copy(os.path.join(Test.Variables.AtsTestToolsDir 'tcp_client.py'))<line_sep>dns=Test.MakeDNServer('dns')<line_sep># This record is used in each test case to get the initial redirect response from the origin that we will handle. dnsRecords={'iwillredirect.test':['127.0.0.1']}<line_sep>host=socket.gethostname()<line_sep>ipv4addrs=set()<try_stmt><block_start>ipv4addrs=set([ip<for>(family _ _ _ (ip *_)) socket.getaddrinfo(host port=<none>)<if>socket.AF_INET<eq>family])<block_end><except_stmt>socket.gaierror<block_start><pass><block_end>ipv6addrs=set()<try_stmt><block_start>ipv6addrs=set(["[{0}]".format(ip.split('%')[0])<for>(family _ _ _ (ip *_)) socket.getaddrinfo(host port=<none>)<if>socket.AF_INET6<eq>family<and>'fe80'<ne>ip[0:4]])<line_sep># Skip link-local addresses. <block_end><except_stmt>socket.gaierror<block_start><pass><block_end>origin=Test.MakeOriginServer('origin' ip='0.0.0.0')<line_sep>ArbitraryTimestamp='12345678'<line_sep># This is for cases when the content is actually fetched from the invalid address. request_header={'headers':('GET / HTTP/1.1\r\n'<concat>'Host: *\r\n\r\n') 'timestamp':ArbitraryTimestamp 'body':''}<line_sep>response_header={'headers':('HTTP/1.1 204 No Content\r\n'<concat>'Connection: close\r\n\r\n') 'timestamp':ArbitraryTimestamp 'body':''}<line_sep>origin.addResponse('sessionfile.log' request_header response_header)<line_sep># Map scenarios to trafficserver processes. trafficservers={}<line_sep>data_dirname='generated_test_data'<line_sep>data_path=os.path.join(Test.TestDirectory data_dirname)<line_sep>os.makedirs(data_path exist_ok=<true>)<def_stmt>normalizeForAutest value<block_start>''' autest uses "test run" names to build file and directory names, so we must transform them in case there are incompatible or annoying characters. This means we can also use them in URLs. '''<if_stmt><not>value<block_start><return><none><block_end><return>re.sub(r'[^a-z0-9-]' '_' value flags=re.I)<block_end><def_stmt>makeTestCase redirectTarget expectedAction scenario<block_start>''' Helper method that creates a "meta-test" from which autest generates a test case. :param redirectTarget: The target address of a redirect from origin to be handled. :param scenario: Defines the ACL to configure and the addresses to test. 
'''<line_sep>config=','.join(':'.join(t)<for>t sorted((addr.name.lower() action.name.lower())<for>(addr action) scenario.items()))<line_sep>normRedirectTarget=normalizeForAutest(redirectTarget)<line_sep>normConfig=normalizeForAutest(config)<line_sep>tr=Test.AddTestRun('With_Config_{0}_Redirect_to_{1}'.format(normConfig normRedirectTarget))<if_stmt>trafficservers<block_start>tr.StillRunningAfter=origin<line_sep>tr.StillRunningAfter=dns<block_end><else_stmt><block_start>tr.Processes.Default.StartBefore(origin)<line_sep>tr.Processes.Default.StartBefore(dns)<block_end><if_stmt>config<not><in>trafficservers<block_start>trafficservers[config]=Test.MakeATSProcess('ts_{0}'.format(normConfig) enable_cache=<false>)<line_sep>trafficservers[config].Disk.records_config.update({'proxy.config.diags.debug.enabled':1 'proxy.config.diags.debug.tags':'http|dns|redirect' 'proxy.config.http.number_of_redirections':1 'proxy.config.dns.nameservers':'127.0.0.1:{0}'.format(dns.Variables.Port) 'proxy.config.dns.resolv_conf':'NULL' 'proxy.config.url_remap.remap_required':0 'proxy.config.http.redirect.actions':config 'proxy.config.http.connect_attempts_timeout':5 'proxy.config.http.connect_attempts_max_retries':0 })<line_sep>tr.Processes.Default.StartBefore(trafficservers[config])<block_end><else_stmt><block_start>tr.StillRunningAfter=trafficservers[config]<block_end>testDomain='testdomain{0}.test'.format(normRedirectTarget)<line_sep># The micro DNS server can't tell us whether it has a record of the domain already, so we use a dictionary to avoid duplicates. # We remove any surrounding brackets that are common to IPv6 addresses. <if_stmt>redirectTarget<block_start>dnsRecords[testDomain]=[redirectTarget.strip('[]')]<block_end># A GET request parameterized on the config and on the target. request_header={'headers':('GET /redirect?config={0}&target={1} HTTP/1.1\r\n'<concat>'Host: *\r\n\r\n').format(normConfig normRedirectTarget) 'timestamp':ArbitraryTimestamp 'body':''}<line_sep># Returns a redirect to the test domain for the given target & the port number for the TS of the given config. response_header={'headers':('HTTP/1.1 307 Temporary Redirect\r\n'<concat>'Location: http://{0}:{1}/\r\n'<concat>'Connection: close\r\n\r\n').format(testDomain origin.Variables.Port) 'timestamp':ArbitraryTimestamp 'body':''}<line_sep>origin.addResponse('sessionfile.log' request_header response_header)<line_sep># Generate the request data file. command_path=os.path.join(data_path tr.Name)<with_stmt>open(command_path 'w')<as>f<block_start>f.write(('GET /redirect?config={0}&target={1} HTTP/1.1\r\n'<concat>'Host: iwillredirect.test:{2}\r\n\r\n').format(normConfig normRedirectTarget origin.Variables.Port))<block_end># Set the command with the appropriate URL. port=trafficservers[config].Variables.port<line_sep>dir_path=os.path.join(data_dirname tr.Name)<line_sep>tr.Processes.Default.Command=(f"bash -o pipefail -c '{sys.executable} tcp_client.py 127.0.0.1 {port} "<concat>f"{dir_path} | head -n 1'")<line_sep>tr.Processes.Default.ReturnCode=0<line_sep># Generate and set the 'gold file' to check stdout goldFilePath=os.path.join(data_path '{0}.gold'.format(tr.Name))<with_stmt>open(goldFilePath 'w')<as>f<block_start>f.write(expectedAction.value['expectedStatusLine'])<block_end>tr.Processes.Default.Streams.stdout=goldFilePath<block_end><class_stmt>AddressE(Enum)<block_start>''' Classes of addresses are mapped to example addresses. 
'''<line_sep>Private=('10.0.0.1' '[fc00::1]')<line_sep>Loopback=(['127.1.2.3'])# [::1] is omitted here because it is likely overwritten by Self, and there are no others in IPv6. Multicast=('172.16.58.3' '[ff42::]')<line_sep>Linklocal=('169.254.0.1' '[fe80::]')<line_sep>Routable=('172.16.17.32' '[2001:4998:58:1836::10]')# Do not Follow redirects to these in an automated test. Self=ipv4addrs|ipv6addrs# Addresses of this host. Default=<none><block_end># All addresses apply, nothing in particular to test. <class_stmt>ActionE(Enum)# Title case because 'return' is a Python keyword. <block_start>Return={'config':'return' 'expectedStatusLine':'HTTP/1.1 307 Temporary Redirect\r\n'}<line_sep>Reject={'config':'reject' 'expectedStatusLine':'HTTP/1.1 403 Forbidden\r\n'}<line_sep>Follow={'config':'follow' 'expectedStatusLine':'HTTP/1.1 204 No Content\r\n'}<line_sep># Added to test failure modes. Break={'expectedStatusLine':'HTTP/1.1 500 Cannot find server.\r\n'}<block_end>scenarios=[{# Follow to loopback, but alternately reject/return others. AddressE.Private:ActionE.Reject AddressE.Loopback:ActionE.Follow AddressE.Multicast:ActionE.Reject AddressE.Linklocal:ActionE.Return AddressE.Routable:ActionE.Reject AddressE.Self:ActionE.Return AddressE.Default:ActionE.Reject } {# Follow to loopback, but alternately reject/return others, flipped from the previous scenario. AddressE.Private:ActionE.Return AddressE.Loopback:ActionE.Follow AddressE.Multicast:ActionE.Return AddressE.Linklocal:ActionE.Reject AddressE.Routable:ActionE.Return AddressE.Self:ActionE.Reject AddressE.Default:ActionE.Return } {# Return loopback, but reject everything else. AddressE.Loopback:ActionE.Return AddressE.Default:ActionE.Reject } {# Reject loopback, but return everything else. AddressE.Loopback:ActionE.Reject AddressE.Default:ActionE.Return } {# Return everything. AddressE.Default:ActionE.Return } ]<for_stmt>scenario scenarios<block_start><for_stmt>addressClass AddressE<block_start><if_stmt><not>addressClass.value# Default has no particular addresses to test. <block_start><continue><block_end><for_stmt>address addressClass.value<block_start>expectedAction=scenario[addressClass]<if>addressClass<in>scenario<else>scenario[AddressE.Default]<line_sep>makeTestCase(redirectTarget=address expectedAction=expectedAction scenario=scenario)<block_end><block_end># Test redirects to names that cannot be resolved. makeTestCase(redirectTarget=<none> expectedAction=ActionE.Break scenario=scenario)<block_end>dns.addRecords(records=dnsRecords)<line_sep># Make sure this runs only after local files have been created. Test.Setup.Copy(data_path)<line_sep>
<import_stmt>unittest<import_stmt>mock<import_from_stmt>...management.stats Stats<class_stmt>TestStats(unittest.TestCase)<block_start><def_stmt>test_init_with_optionals self<block_start>t=Stats(domain='domain' token='<PASSWORD>' telemetry=<false> timeout=(10 2))<line_sep>self.assertEqual(t.client.options.timeout (10 2))<line_sep>telemetry_header=t.client.base_headers.get('Auth0-Client' <none>)<line_sep>self.assertEqual(telemetry_header <none>)<block_end>@mock.patch('auth0.v3.management.stats.RestClient')<def_stmt>test_active_users self mock_rc<block_start>mock_instance=mock_rc.return_value<line_sep>s=Stats(domain='domain' token='<PASSWORD>')<line_sep>s.active_users()<line_sep>mock_instance.get.assert_called_with('https://domain/api/v2/stats/active-users' )<block_end>@mock.patch('auth0.v3.management.stats.RestClient')<def_stmt>test_daily_stats self mock_rc<block_start>mock_instance=mock_rc.return_value<line_sep>s=Stats(domain='domain' token='<PASSWORD>')<line_sep>s.daily_stats()<line_sep>mock_instance.get.assert_called_with('https://domain/api/v2/stats/daily' params={'from':<none> 'to':<none>} )<line_sep>s.daily_stats(from_date='12341212' to_date='56785656')<line_sep>mock_instance.get.assert_called_with('https://domain/api/v2/stats/daily' params={'from':'12341212' 'to':'56785656'} )<block_end><block_end>
# Author: <NAME> <<EMAIL>> # A core-attachment based method to detect protein complexes in PPI networks # <NAME>, Kwoh, Ng (2009) # http://www.biomedcentral.com/1471-2105/10/169 <import_from_stmt>collections defaultdict<import_from_stmt>itertools combinations<import_stmt>functools<line_sep># return average degree and density for a graph <def_stmt>__graph_stats graph<block_start>avg_deg=sum(len(n)<for>n graph.values())/float(len(graph))<line_sep>density=avg_deg/(len(graph)-1)<line_sep><return>avg_deg density<block_end># return core nodes, given a graph and its average degree __get_core_nodes=<lambda>g avg:set(v<for>v,n g.items()<if>len(n)<ge>avg)<line_sep># return NA score __NA_score=<lambda>a b:float(len(a&b)<power>2)/(len(a)<times>len(b))<def_stmt>__core_removal graph density_threshold<block_start><if_stmt>len(graph)<eq>1# need at least two nodes in the graph... <block_start><return>[graph]<block_end>avg_deg,density=__graph_stats(graph)<if_stmt>density<ge>density_threshold<block_start><return>[graph]<block_end><else_stmt># find and remove core nodes; create connected subcomponents <block_start>core_nodes=__get_core_nodes(graph avg_deg)<line_sep>result=[]<line_sep>subgraphs=[]<for_stmt>v,n graph.items()<block_start><if_stmt>v<in>core_nodes<block_start><continue><block_end>n=n-core_nodes# note that we're reassigning n <for_stmt>s subgraphs<block_start><if_stmt><not>n.isdisjoint(s)<block_start>s<augor>n<line_sep><break><block_end><block_end><else_stmt><block_start>subgraphs.append(n|{v})<block_end><block_end># connected subcomponent joining i=0<while_stmt>i<l>len(subgraphs)-1<block_start>j=i+1<while_stmt>j<l>len(subgraphs)<block_start><if_stmt><not>subgraphs[i].isdisjoint(subgraphs[j])<block_start>subgraphs[i]<augor>subgraphs[j]<line_sep>subgraphs.pop(j)<block_end><else_stmt><block_start>j<augadd>1<block_end><block_end>i<augadd>1<block_end># recursive core removal <for_stmt>s subgraphs<block_start>tresults=__core_removal(dict((v graph[v]&s)<for>v s) density_threshold)<for_stmt>tc tresults<block_start>nodes=set()<for_stmt>v,n tc.items()<block_start>nodes.add(v)<line_sep>n<augor>graph[v]&core_nodes<block_end><for_stmt>c core_nodes<block_start>tc[c]=graph[c]&(nodes|core_nodes)<block_end><block_end>result<augadd>tresults<block_end><return>result<block_end><block_end><def_stmt>co_ach g density_threshold=0.7 affinity_threshold=0.225 closeness_threshold=0.5# read protein-protein pairs <block_start>data=defaultdict(set)<for_stmt>a,b g.edges()<block_start>data[a].add(b)<line_sep>data[b].add(a)<block_end># step 1: find preliminary cores SC=[]# currently-detected preliminary cores count=0<for_stmt>vertex,neighbors data.items()# build neighborhood graph <block_start>vertices={vertex}|neighbors<line_sep>size1_neighbors=set()<line_sep>graph={}<for_stmt>v vertices<block_start>n=data[v]&vertices<if_stmt>len(n)<g>1# ignore size-1 vertices <block_start>graph[v]=n<block_end><else_stmt><block_start>size1_neighbors.add(v)<block_end><block_end><if_stmt>len(graph)<l>2# not enough connections in this graph <block_start><continue><block_end>graph[vertex]<augsub>size1_neighbors<line_sep># get core graph avg_deg,density=__graph_stats(graph)<line_sep>core_nodes=__get_core_nodes(graph avg_deg)<line_sep>vertices=set(graph.keys())<for_stmt>v vertices-core_nodes<block_start><del_stmt>graph[v]<block_end><for_stmt>n graph.values()<block_start>n<augand>core_nodes<block_end><if_stmt>len(graph)<l>2# not enough connections in this graph <block_start><continue><block_end>graph_nodes=set(graph)<line_sep># inner loop <for_stmt>sg 
__core_removal(graph density_threshold)<block_start><while_stmt><true><block_start>_,density=__graph_stats(sg)<line_sep># if density threshold met, stop; else, remove min degree node <if_stmt>density<ge>density_threshold<block_start><break><block_end>w=min(sg.items() key=<lambda>k:len(k[1]))[0]<del_stmt>sg[w]<for_stmt>n sg.values()<block_start>n.discard(w)<block_end><block_end>sg_nodes=set(sg)<while_stmt>graph_nodes-sg_nodes<block_start>w=max(graph_nodes-sg_nodes key=<lambda>v:len(graph[v]&sg_nodes))<line_sep>new_sg=sg.copy()<for_stmt>v,n new_sg.items()<block_start><if_stmt>w<in>graph[v]<block_start>n.add(w)<block_end><block_end>new_sg[w]=graph[w]&sg_nodes<line_sep>_,density=__graph_stats(new_sg)<if_stmt>density<l>density_threshold<block_start><break><block_end>sg=new_sg<line_sep>sg_nodes.add(w)<block_end># redundancy filtering max_sim=-1<for_stmt>i range(len(SC))<block_start>sim=__NA_score(set(SC[i]) sg_nodes)<if_stmt>sim<g>max_sim<block_start>max_sim=sim<line_sep>index=i<block_end><block_end><if_stmt>max_sim<l>affinity_threshold<block_start>SC.append(sg)<block_end><else_stmt><block_start>_,density_i=__graph_stats(SC[index])<if_stmt>density<times>len(sg)<g>density_i<times>len(SC[index])<block_start>SC[index]=sg<block_end><block_end><block_end><block_end># step 2: adding peripheral proteins clusters=set()<for_stmt>core SC<block_start>nodes=frozenset(core)<line_sep>neighbors=(functools.reduce(<lambda>x y:x|y (data[v]<for>v nodes))-nodes)<line_sep>neighbors<augsub>set(v<for>v neighbors<if>float(len(data[v]&nodes))/len(nodes)<le>closeness_threshold)<line_sep>clusters.add(nodes|neighbors)<block_end><return>[list(c)<for>c clusters]<block_end>
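# Quick usage sketch of the two-step COACH pipeline above. co_ach() only needs an object whose
# .edges() yields node pairs; networkx (an assumption, not imported by this module) is a
# convenient way to build one.
if __name__ == "__main__":
    import networkx as nx
    toy_graph = nx.karate_club_graph()
    complexes = co_ach(toy_graph)
    print(len(complexes), "predicted complexes, sizes:", sorted(len(c) for c in complexes))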
########################################################################## # # Copyright (c) 2016, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of <NAME> nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## <import_stmt>Gaffer<import_stmt>GafferScene<import_stmt>GafferSceneUI<def_stmt>filmFitMetadata # Take the metadata from StandardOptionsUI, except not the layout section <block_start>allOptions=GafferSceneUI.StandardOptionsUI.plugsMetadata["options.filmFit"]+GafferSceneUI.StandardOptionsUI.plugsMetadata["options.filmFit.value"]<line_sep>optionPairs=zip(allOptions[::2] allOptions[1::2])<line_sep><return>sum([[i j]<for>i,j optionPairs<if>i<ne>"layout:section"] [])<block_end>Gaffer.Metadata.registerNode(GafferScene.LightToCamera "description" """ Converts lights into cameras. Spotlights are converted to a perspective camera with the field of view matching the cone angle, and distant lights are converted to an orthographic camera. """ plugs={"filmFit":filmFitMetadata() "distantAperture":["description" """ The orthographic aperture used when converting distant lights ( which are theoretically infinite in extent ) """ ] "clippingPlanes":["description" """ Clipping planes for the created cameras. When creating a perspective camera, a near clip <= 0 is invalid, and will be replaced with 0.01. Also, certain lights only start casting light at some distance - if near clip is less than this, it will be increased. """ ] "filter":["description" """ Specifies which lights to convert. """ ] })<line_sep>
<import_stmt>math<import_from_stmt>collections namedtuple<import_from_stmt>. geohash<line_sep>Box=namedtuple("Box" ["s" "w" "n" "e"])<def_stmt>geohash_bbox gh<block_start>ret=geohash.bbox(gh)<line_sep><return>Box(ret["s"] ret["w"] ret["n"] ret["e"])<block_end><def_stmt>bbox lat lon radius<block_start>lat_delta=radius<times>360/40000<line_sep>lon_delta=lat_delta/math.cos(lat<times>math.pi/180.0)<line_sep><return>Box(lat-lat_delta lon-lon_delta lat+lat_delta lon+lon_delta)<block_end><def_stmt>overlap a1 a2 b1 b2<block_start><return>a1<l>b2<and>a2<g>b1<block_end><def_stmt>box_overlap box1:Box box2:Box<block_start><return>overlap(box1.s box1.n box2.s box2.n)<and>overlap(box1.w box1.e box2.w box2.e)<block_end><def_stmt>compute_geohash_tiles lat lon radius precision<block_start>bounds=bbox(lat lon radius)<line_sep>center=geohash.encode(lat lon precision)<line_sep>stack=set()<line_sep>checked=set()<line_sep>stack.add(center)<line_sep>checked.add(center)<while_stmt>stack<block_start>current=stack.pop()<for_stmt>neighbor geohash.neighbors(current)<block_start><if_stmt>neighbor<not><in>checked<and>box_overlap(geohash_bbox(neighbor) bounds)<block_start>stack.add(neighbor)<line_sep>checked.add(neighbor)<block_end><block_end><block_end><return>checked<block_end><def_stmt>geohash_overlap lat lon radius max_tiles=9<block_start>result=[]<for_stmt>precision range(1 13)<block_start>tiles=compute_geohash_tiles(lat lon radius precision)<if_stmt>len(tiles)<le>max_tiles<block_start>result=tiles<block_end><else_stmt><block_start><break><block_end><block_end><return>result<block_end>
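# Minimal smoke test; run with `python -m <package>.<module>` since the geohash import above is
# relative. The coordinates and radius are arbitrary examples (radius is in km, given the
# 40000 km circumference used in bbox()): geohash_overlap() returns the tiles of the finest
# precision that still covers the radius with at most max_tiles cells.
if __name__ == "__main__":
    tiles = geohash_overlap(48.8566, 2.3522, radius=5)
    print(len(tiles), "tiles:", sorted(tiles))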
"""Use TIMESTAMP column for latest submission Revision ID: eff<PASSWORD>0<PASSWORD> Revises: <PASSWORD> Create Date: 2017-01-08 22:20:43.814375 """<line_sep># revision identifiers, used by Alembic. revision='eff<PASSWORD>'<line_sep>down_revision='<PASSWORD>'<import_from_stmt>alembic op# lgtm[py/unused-import] <import_stmt>sqlalchemy<as>sa# lgtm[py/unused-import] <import_stmt>libweasyl<import_from_stmt>libweasyl.legacy UNIXTIME_OFFSET<def_stmt>upgrade <block_start>op.alter_column('profile' 'latest_submission_time' new_column_name='latest_submission_time_old' )<line_sep>op.add_column('profile' sa.Column('latest_submission_time' libweasyl.models.helpers.ArrowColumn() nullable=<false> server_default='epoch') )<line_sep>op.execute("UPDATE profile SET latest_submission_time = TIMESTAMP WITHOUT TIME ZONE 'epoch' + "<concat>"(latest_submission_time_old - %d) * INTERVAL '1 second'"%(UNIXTIME_OFFSET ))<line_sep>op.drop_column('profile' 'latest_submission_time_old')<block_end><def_stmt>downgrade <block_start>op.alter_column('profile' 'latest_submission_time' new_column_name='latest_submission_time_new' )<line_sep>op.add_column('profile' sa.Column('latest_submission_time' libweasyl.models.helpers.WeasylTimestampColumn() nullable=<false> server_default='0') )<line_sep>op.execute("UPDATE profile SET latest_submission_time = extract(epoch from latest_submission_time_new) + %d"%(UNIXTIME_OFFSET ))<line_sep>op.drop_column('profile' 'latest_submission_time_new')<block_end>
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. <import_stmt>test_plan<import_stmt>settings<class_stmt>Module(test_plan.Testplan)<block_start>runScript=settings.KMD_RUNSCRIPT<line_sep>deviceTargets=['sim' 'ufpga']<def_stmt>__init__ self<block_start>super(Module self).__init__(__name__)<block_end><block_end># Convenience globals kmd=Module.runScript<line_sep>devices=Module.deviceTargets<line_sep>ces=["Core Engine Scheduler"]<line_sep>nn=["Neural Network"]<line_sep>convd=["CONV HW - Direct"]<line_sep>convi=["CONV HW - Image"]<line_sep>convw=["CONV HW - Winograd"]<line_sep>convp=["CONV HW - Pipeline"]<line_sep>sdpx1=["SDP X1 HW"]<line_sep>sdpx2=["SDP X2 HW"]<line_sep>sdpy=["SDP Y HW"]<line_sep>sdpf=["SDP HW - Full"]<line_sep>cdp=["CDP HW"]<line_sep>pdp=["PDP HW"]<def_stmt>registerNvSmallTests self testplan<block_start>testplan.append([0 "Written" kmd "CONV_D_L0_0_small" <none> convd devices "Convolution test - Sanity test direct convolution" "Direct convolution, 8x8x128 input cube, 3x3x128 kernel cube and 32 kernels input and weight read from DRAM, no mean and bias data, output written to DRAM through SDP."])<line_sep>testplan.append([0 "Written" kmd "SDP_X1_L0_0_small" <none> sdpx1 devices "SDP test - Sanity test for SDP, only X1 enabled with ALU, X2 and Y disable. No DMA used" "Element wise sum operation in X1, 8x8x32 input cube and 8x8x32 bias cube. Activation function as ReLU"])<line_sep>testplan.append([0 "Written" kmd "CDP_L0_0_small" <none> cdp devices "CDP test - Sanity test for CDP" "Use only linear table with LUT configured with all 1. 8x8x32 input cube and 8x8x32 output cube."])<line_sep>testplan.append([0 "Written" kmd "PDP_L0_0_small" <none> pdp devices "PDP test - Sanity test for PDP with max pooling" "Max pooling, 8x8x32 input cube, 8x8x32 output cube, no padding, 1x1 kernel size. No need to compare data. 
It is enough if task succeeds to pass this test."])<line_sep>testplan.append([0 "Written" kmd "NN_L0_1_small" <none> nn devices "AlexNet" "AlexNet"])<block_end><def_stmt>registerFirmwareSmallTests self<block_start>testplan=[]<line_sep>registerNvSmallTests(self testplan)<for_stmt>item testplan<block_start>test=test_plan.Test()<line_sep>test.level=item[0]<line_sep>test.status=item[1]<line_sep>test.runscript=item[2]<line_sep>test.name=item[3]<line_sep>test.options=item[4]<line_sep>test.features=item[5]<line_sep>test.targets=item[6]<line_sep>test.description=item[7]<line_sep>test.dependencies=<none><line_sep>self.add_test(test)<block_end><block_end><def_stmt>registerTests self<block_start>registerFirmwareSmallTests(self)<block_end>Module.register_tests=registerTests<line_sep>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ This DAG will not work unless you create an Amazon EMR cluster running Apache Hive and copy data into it following steps 1-4 (inclusive) here: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/EMRforDynamoDB.Tutorial.html """<import_stmt>os<import_from_stmt>datetime datetime<import_from_stmt>airflow DAG<import_from_stmt>airflow.decorators task<import_from_stmt>airflow.models Connection<import_from_stmt>airflow.providers.amazon.aws.hooks.dynamodb DynamoDBHook<import_from_stmt>airflow.providers.amazon.aws.transfers.hive_to_dynamodb HiveToDynamoDBOperator<import_from_stmt>airflow.utils db<line_sep>DYNAMODB_TABLE_NAME='example_hive_to_dynamodb_table'<line_sep>HIVE_CONNECTION_ID=os.getenv('HIVE_CONNECTION_ID' 'hive_on_emr')<line_sep>HIVE_HOSTNAME=os.getenv('HIVE_HOSTNAME' 'ec2-123-45-67-890.compute-1.amazonaws.com')<line_sep># These values assume you set up the Hive data source following the link above. DYNAMODB_TABLE_HASH_KEY='feature_id'<line_sep>HIVE_SQL='SELECT feature_id, feature_name, feature_class, state_alpha FROM hive_features'<line_sep>@task<def_stmt>create_dynamodb_table <block_start>client=DynamoDBHook(client_type='dynamodb').conn<line_sep>client.create_table(TableName=DYNAMODB_TABLE_NAME KeySchema=[{'AttributeName':DYNAMODB_TABLE_HASH_KEY 'KeyType':'HASH'} ] AttributeDefinitions=[{'AttributeName':DYNAMODB_TABLE_HASH_KEY 'AttributeType':'N'} ] ProvisionedThroughput={'ReadCapacityUnits':20 'WriteCapacityUnits':20} )<line_sep># DynamoDB table creation is nearly, but not quite, instantaneous. # Wait for the table to be active to avoid race conditions writing to it. waiter=client.get_waiter('table_exists')<line_sep>waiter.wait(TableName=DYNAMODB_TABLE_NAME WaiterConfig={'Delay':1})<block_end>@task<def_stmt>get_dynamodb_item_count <block_start>""" A DynamoDB table has an ItemCount value, but it is only updated every six hours. To verify this DAG worked, we will scan the table and count the items manually. """<line_sep>table=DynamoDBHook(resource_type='dynamodb').conn.Table(DYNAMODB_TABLE_NAME)<line_sep>response=table.scan(Select='COUNT')<line_sep>item_count=response['Count']<while_stmt>'LastEvaluatedKey'<in>response<block_start>response=table.scan(Select='COUNT' ExclusiveStartKey=response['LastEvaluatedKey'])<line_sep>item_count<augadd>response['Count']<block_end>print(f'DynamoDB table contains {item_count} items.')<block_end># Included for sample purposes only; in production you wouldn't delete # the table you just backed your data up to. Using 'all_done' so even # if an intermediate step fails, the DAG will clean up after itself. 
@task(trigger_rule='all_done')<def_stmt>delete_dynamodb_table <block_start>DynamoDBHook(client_type='dynamodb').conn.delete_table(TableName=DYNAMODB_TABLE_NAME)<block_end># Included for sample purposes only; in production this should # be configured in the environment and not be part of the DAG. # Note: The 'hiveserver2_default' connection will not work if Hive # is hosted on EMR. You must set the host name of the connection # to match your EMR cluster's hostname. @task<def_stmt>configure_hive_connection <block_start>db.merge_conn(Connection(conn_id=HIVE_CONNECTION_ID conn_type='hiveserver2' host=HIVE_HOSTNAME port=10000 ))<block_end><with_stmt>DAG(dag_id='example_hive_to_dynamodb' schedule_interval=<none> start_date=datetime(2021 1 1) tags=['example'] catchup=<false> )<as>dag# Add the prerequisites docstring to the DAG in the UI. <block_start>dag.doc_md=__doc__<line_sep># [START howto_transfer_hive_to_dynamodb] backup_to_dynamodb=HiveToDynamoDBOperator(task_id='backup_to_dynamodb' hiveserver2_conn_id=HIVE_CONNECTION_ID sql=HIVE_SQL table_name=DYNAMODB_TABLE_NAME table_keys=[DYNAMODB_TABLE_HASH_KEY] )<line_sep># [END howto_transfer_hive_to_dynamodb] (configure_hive_connection()<rshift>create_dynamodb_table()<rshift>backup_to_dynamodb<rshift>get_dynamodb_item_count()<rshift>delete_dynamodb_table())<block_end>
<import_from_stmt>.main main<as>run# noqa: F401 __version__='0.0.3'<line_sep>
# Copyright (c) OpenMMLab. All rights reserved. <import_from_stmt>.darts_backbone DartsBackbone<import_from_stmt>.searchable_mobilenet SearchableMobileNet<import_from_stmt>.searchable_shufflenet_v2 SearchableShuffleNetV2<line_sep>__all__=['DartsBackbone' 'SearchableShuffleNetV2' 'SearchableMobileNet']<line_sep>
<import_stmt>sys<import_from_stmt>. app<line_sep>sys.path.append(str(app.config['LIB_PATH']))<import_from_stmt>musicautobot.music_transformer *<import_from_stmt>musicautobot.config *<import_from_stmt>flask Response send_from_directory send_file request jsonify<import_from_stmt>.save to_s3<import_stmt>torch<import_stmt>traceback<line_sep>torch.set_num_threads(4)<line_sep>data=load_data(app.config['DATA_PATH'] app.config['DATA_SAVE_NAME'] num_workers=1)<line_sep>learn=music_model_learner(data pretrained_path=app.config['MUSIC_MODEL_PATH'])<if_stmt>torch.cuda.is_available()<block_start>learn.model.cuda()<block_end># learn.to_fp16(loss_scale=512) # fp16 not supported for cpu - https://github.com/pytorch/pytorch/issues/17699 @app.route('/predict/midi' methods=['POST'])<def_stmt>predict_midi <block_start>args=request.form.to_dict()<line_sep>midi=request.files['midi'].read()<line_sep>print('THE ARGS PASSED:' args)<line_sep>bpm=float(args['bpm'])# (AS) TODO: get bpm from midi file instead temperatures=(float(args.get('noteTemp' 1.2)) float(args.get('durationTemp' 0.8)))<line_sep>n_words=int(args.get('nSteps' 200))<line_sep>seed_len=int(args.get('seedLen' 12))<line_sep># debugging 1 - send exact midi back # with open('/tmp/test.mid', 'wb') as f: # f.write(midi) # return send_from_directory('/tmp', 'test.mid', mimetype='audio/midi') # debugging 2 - test music21 conversion # stream = file2stream(midi) # 1. # debugging 3 - test npenc conversion # seed_np = midi2npenc(midi) # music21 can handle bytes directly # stream = npenc2stream(seed_np, bpm=bpm) # debugging 4 - midi in, convert, midi out # stream = file2stream(midi) # 1. # midi_in = Path(stream.write("musicxml")) # print('Midi in:', midi_in) # stream_sep = separate_melody_chord(stream) # midi_out = Path(stream_sep.write("midi")) # print('Midi out:', midi_out) # s3_id = to_s3(midi_out, args) # result = { # 'result': s3_id # } # return jsonify(result) # Main logic <try_stmt><block_start>full=predict_from_midi(learn midi=midi n_words=n_words seed_len=seed_len temperatures=temperatures)<line_sep>stream=separate_melody_chord(full.to_stream(bpm=bpm))<line_sep>midi_out=Path(stream.write("midi"))<line_sep>print('Wrote to temporary file:' midi_out)<block_end><except_stmt>Exception<as>e<block_start>traceback.print_exc()<line_sep><return>jsonify({'error':f'Failed to predict: {e}'})<block_end>s3_id=to_s3(midi_out args)<line_sep>result={'result':s3_id}<line_sep><return>jsonify(result)<line_sep># return send_from_directory(midi_out.parent, midi_out.name, mimetype='audio/midi') <block_end># @app.route('/midi/song/<path:sid>') # def get_song_midi(sid): # return send_from_directory(file_path/data_dir, htlist[sid]['midi'], mimetype='audio/midi') @app.route('/midi/convert' methods=['POST'])<def_stmt>convert_midi <block_start>args=request.form.to_dict()<if_stmt>'midi'<in>request.files<block_start>midi=request.files['midi'].read()<block_end><elif_stmt>'midi_path'<in>args<block_start>midi=args['midi_path']<block_end>stream=file2stream(midi)# 1. # stream = file2stream(midi).chordify() # 1. stream_out=Path(stream.write('musicxml'))<line_sep><return>send_from_directory(stream_out.parent stream_out.name mimetype='xml')<block_end>
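# Client-side sketch for the /predict/midi endpoint above. The host/port, seed-file path and the
# requests dependency are assumptions; the form fields mirror the ones read from request.files
# and request.form by predict_midi().
def request_prediction(midi_path, base_url="http://localhost:5000"):
    import requests  # assumed client-side dependency, not used by the Flask app itself
    with open(midi_path, "rb") as midi_file:
        response = requests.post(
            f"{base_url}/predict/midi",
            files={"midi": midi_file},
            data={"bpm": 120, "noteTemp": 1.2, "durationTemp": 0.8, "nSteps": 200, "seedLen": 12},
        )
    return response.json()  # {'result': <s3 id>} on success, {'error': ...} on failure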
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>tests.core mock<import_from_stmt>trakt Trakt<import_from_stmt>httmock HTTMock<import_stmt>pytest<def_stmt>test_likes <block_start><with_stmt>HTTMock(mock.fixtures mock.unknown)<block_start><with_stmt>Trakt.configuration.auth('mock' 'mock')<block_start>likes=Trakt['users'].likes()<assert_stmt>likes<is><not><none><line_sep>likes=list(likes)<block_end><block_end><assert_stmt>len(likes)<eq>3<assert_stmt>likes[0].keys<eq>[('trakt' 1519)]<assert_stmt>likes[1].keys<eq>[('trakt' '1238362') ('slug' 'star-wars-machete')]<assert_stmt>likes[2].keys<eq>[('trakt' '840781') ('slug' 'star-wars-timeline')]<block_end><def_stmt>test_likes_invalid_response <block_start><with_stmt>HTTMock(mock.fixtures mock.unknown)<block_start>likes=Trakt['users'].likes()<block_end><assert_stmt>likes<is><none><block_end><def_stmt>test_likes_invalid_type <block_start><with_stmt>HTTMock(mock.fixtures mock.unknown)<block_start><with_stmt>pytest.raises(ValueError)<block_start>likes=Trakt['users'].likes('invalid')<assert_stmt>likes<is><not><none><line_sep>likes=list(likes)<block_end><block_end><block_end>
# coding=utf-8 # Copyright 2021 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A model for to embed structured features."""<import_from_stmt>typing Any Tuple<import_stmt>flax.linen<as>nn<import_stmt>jax.numpy<as>jnp<class_stmt>FeaturesEncoder(nn.Module)<block_start>"""Encodes structured features."""<line_sep>input_dims:Tuple[int]<line_sep>embed_dim:int=32<line_sep>@nn.compact<def_stmt>__call__ self x<block_start>result=[]<line_sep>index=0<for_stmt>d self.input_dims<block_start>arr=x[<ellipsis> index:index+d]<line_sep>result.append(arr<if>d<eq>1<else>nn.Dense(self.embed_dim)(arr))<line_sep>index<augadd>d<block_end><return>jnp.concatenate(result axis=-1)<block_end><block_end><class_stmt>AdultModel(nn.Module)<block_start>"""A model to predict if the income is above 50k (adult dataset)."""<line_sep>encoder_cls:Any<line_sep>hidden:Tuple[int]=(64 64)<line_sep>@nn.compact<def_stmt>__call__ self x train:bool=<true><block_start>x=self.encoder_cls()(x)<for_stmt>h self.hidden<block_start>x=nn.Dense(h)(x)<line_sep>x=nn.relu(x)<block_end>x=nn.Dense(1)(x)<line_sep>x=nn.sigmoid(x)<line_sep><return>x[<ellipsis> 0]<block_end><block_end>
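# Shape-check sketch for the two modules above. The input_dims tuple below is hypothetical; any
# tuple whose entries sum to the feature-vector width works (entries of 1 pass through, larger
# entries go through a Dense embedding).
if __name__ == "__main__":
    import functools
    import jax

    input_dims = (1, 9, 16, 7, 1)  # hypothetical per-feature widths
    model = AdultModel(encoder_cls=functools.partial(FeaturesEncoder, input_dims=input_dims))
    x = jnp.ones((4, sum(input_dims)))  # batch of 4 feature vectors
    params = model.init(jax.random.PRNGKey(0), x, train=False)
    print(model.apply(params, x, train=False).shape)  # -> (4,)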
<import_from_stmt>reikna.core.signature Type Annotation Parameter Signature<import_from_stmt>reikna.core.computation Computation<import_from_stmt>reikna.core.transformation Transformation Indices<line_sep>
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_from_future_stmt> division<def_stmt>Shape visitor node<block_start>input=visitor.visit(node.input[0])<line_sep>shape=input.shape<if_stmt>(input.get_layout()<is><not><none><and>input.get_onnx_layout()<is><not><none><and>input.get_layout()<ne>input.get_onnx_layout())<block_start>shape=[shape[input.get_layout().index(l)]<for>l input.get_onnx_layout()]<block_end><return>tuple(shape)<block_end>
<import_from_future_stmt> print_function<import_stmt>json<import_stmt>os<class_stmt>Data()<block_start>shared_state={}<def_stmt>__init__ self<block_start>self.__dict__=self.shared_state<line_sep>self.set_checklist(<none>)<line_sep>self.set_issues()<block_end><def_stmt>set_checklist self file_name<block_start>is_empty=file_name<is><none><if_stmt>is_empty<block_start>file_name=os.getcwd()+os.sep+"conf"+os.sep+"checklist.json"<block_end><try_stmt><block_start><with_stmt>open(file_name)<as>data_file<block_start>data=json.load(data_file)<line_sep>self.checklist=data["checklist"]<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><def_stmt>get_checklist self<block_start><return>self.checklist<block_end><def_stmt>set_issues self<block_start>file_name=os.getcwd()+os.sep+"conf"+os.sep+"issues.json"<try_stmt><block_start><with_stmt>open(file_name)<as>data_file<block_start>self.issues=json.load(data_file)<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><def_stmt>get_issues self<block_start><return>self.issues<block_end><def_stmt>set_bugs self functionality_name test_name request response<block_start>bug={"request":request "response":response}<line_sep>self.checklist["Functionality"][functionality_name]["tests"][test_name]["bugs"].append(bug)<block_end><def_stmt>set_notes self functionality_name test_name notes<block_start>self.checklist["Functionality"][functionality_name]["tests"][test_name]["notes"]=notes<block_end><block_end>
# Convenience test module to run all of the XML-related tests in the # standard library. <import_stmt>sys<import_stmt>test.test_support<line_sep>test.test_support.verbose=0<def_stmt>runtest name<block_start>__import__(name)<line_sep>module=sys.modules[name]<if_stmt>hasattr(module "test_main")<block_start>module.test_main()<block_end><block_end>runtest("test.test_minidom")<line_sep>runtest("test.test_pyexpat")<line_sep>runtest("test.test_sax")<line_sep>runtest("test.test_xml_etree")<line_sep>runtest("test.test_xml_etree_c")<line_sep>runtest("test.test_xmllib")<line_sep>runtest("test.test_xmlrpc")<line_sep>
<import_stmt>pytest<import_stmt>os<import_stmt>memcnn.experiment.factory<import_from_stmt>memcnn.config Config<def_stmt>test_get_attr_from_module <block_start>a=memcnn.experiment.factory.get_attr_from_module('memcnn.experiment.factory.get_attr_from_module')<assert_stmt>a<is>memcnn.experiment.factory.get_attr_from_module<block_end><def_stmt>test_load_experiment_config <block_start>cfg_fname=os.path.join(Config.get_dir() 'experiments.json')<line_sep>memcnn.experiment.factory.load_experiment_config(cfg_fname ['cifar10' 'resnet110'])<block_end>@pytest.mark.skip(reason="Covered more efficiently by test_train.test_run_experiment")<def_stmt>test_experiment_config_parser tmp_path<block_start>tmp_data_dir=tmp_path/"tmpdata"<line_sep>cfg_fname=os.path.join(Config.get_dir() 'experiments.json')<line_sep>cfg=memcnn.experiment.factory.load_experiment_config(cfg_fname ['cifar10' 'resnet110'])<line_sep>memcnn.experiment.factory.experiment_config_parser(cfg str(tmp_data_dir) workers=<none>)<block_end><def_stmt>test_circular_dependency tmp_path<block_start>p=str(tmp_path/"circular.json")<line_sep>content=u'{ "circ": { "base": "circ" } }'<with_stmt>open(p 'w')<as>fh<block_start>fh.write(content)<block_end><with_stmt>open(p 'r')<as>fh<block_start><assert_stmt>fh.read()<eq>content<block_end><with_stmt>pytest.raises(RuntimeError)<block_start>memcnn.experiment.factory.load_experiment_config(p ['circ'])<block_end><block_end>
############################################################################## # Copyright 2016-2017 Rigetti Computing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## """ Sub-package for facilitating connections to the QVM / QPU. """<line_sep>__all__=["AbstractCompiler" "BenchmarkConnection" "EncryptedProgram" "EngagementManager" "get_qc" "list_quantum_computers" "local_forest_runtime" "QAM" "QAMExecutionResult" "QCSClientConfiguration" "QCSQuantumProcessor" "QPU" "QPUCompiler" "QuantumComputer" "QuantumExecutable" "QVM" "QVMCompiler" "WavefunctionSimulator" ]<import_from_stmt>qcs_api_client.client QCSClientConfiguration<import_from_stmt>pyquil.api._benchmark BenchmarkConnection<import_from_stmt>pyquil.api._compiler QVMCompiler QPUCompiler QuantumExecutable EncryptedProgram AbstractCompiler<import_from_stmt>pyquil.api._engagement_manager EngagementManager<import_from_stmt>pyquil.api._qam QAM QAMExecutionResult<import_from_stmt>pyquil.api._qpu QPU<import_from_stmt>pyquil.api._quantum_computer QuantumComputer list_quantum_computers get_qc local_forest_runtime <import_from_stmt>pyquil.api._qvm QVM<import_from_stmt>pyquil.api._wavefunction_simulator WavefunctionSimulator<import_from_stmt>pyquil.quantum_processor QCSQuantumProcessor<line_sep>
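# Minimal end-to-end sketch of the API re-exported above (an assumption-laden usage example, not
# part of this package __init__): it needs local qvm/quilc servers, e.g. started via
# local_forest_runtime(), and "2q-qvm" is the stock two-qubit simulator target.
def bell_readout(shots=10):
    from pyquil import Program, get_qc  # top-level re-exports; imported here to keep the sketch self-contained
    from pyquil.gates import CNOT, H, MEASURE

    qc = get_qc("2q-qvm")
    p = Program()
    ro = p.declare("ro", "BIT", 2)
    p += H(0)
    p += CNOT(0, 1)
    p += MEASURE(0, ro[0])
    p += MEASURE(1, ro[1])
    p.wrap_in_numshots_loop(shots)
    executable = qc.compile(p)
    return qc.run(executable).readout_data.get("ro")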
<import_stmt>datetime<line_sep>s='2018-12-31'<line_sep>d=datetime.date.fromisoformat(s)<line_sep>print(d)<line_sep># 2018-12-31 print(type(d))<line_sep># <class 'datetime.date'> # print(datetime.date.fromisoformat('2018-12')) # ValueError: Invalid isoformat string: '2018-12' print(datetime.date.fromisoformat('2018-01-01'))<line_sep># 2018-01-01 # print(datetime.date.fromisoformat('2018-1-1')) # ValueError: Invalid isoformat string: '2018-1-1' s='05:00:30.001000'<line_sep>t=datetime.time.fromisoformat(s)<line_sep>print(t)<line_sep># 05:00:30.001000 print(type(t))<line_sep># <class 'datetime.time'> print(datetime.time.fromisoformat('05'))<line_sep># 05:00:00 # print(datetime.time.fromisoformat('5:00:30')) # ValueError: Invalid isoformat string: '5:00:30' s='2018-12-31T05:00:30.001000'<line_sep>dt=datetime.datetime.fromisoformat(s)<line_sep>print(dt)<line_sep># 2018-12-31 05:00:30.001000 print(type(dt))<line_sep># <class 'datetime.datetime'> print(datetime.datetime.fromisoformat('2018-12-31x05:00:30.001000'))<line_sep># 2018-12-31 05:00:30.001000 # print(datetime.datetime.fromisoformat('2018-12-31xx05:00:30.001000')) # ValueError: Invalid isoformat string: '2018-12-31xx05:00:30.001000' print(datetime.datetime.fromisoformat('2018-12-31T05'))<line_sep># 2018-12-31 05:00:00 print(datetime.datetime.fromisoformat('2018-12-31'))<line_sep># 2018-12-31 00:00:00 # print(datetime.datetime.fromisoformat('2018-12-31T5:00')) # ValueError: Invalid isoformat string: '2018-12-31T5:00' s='2018-12-31T05:00:30.001000'<line_sep># print(datetime.date.fromisoformat(s)) # ValueError: Invalid isoformat string: '2018-12-31T05:00:30.001000' # print(datetime.time.fromisoformat(s)) # ValueError: Invalid isoformat string: '2018-12-31T05:00:30.001000' d=datetime.datetime.fromisoformat(s).date()<line_sep>print(d)<line_sep># 2018-12-31 print(type(d))<line_sep># <class 'datetime.date'> t=datetime.datetime.fromisoformat(s).time()<line_sep>print(t)<line_sep># 05:00:30.001000 print(type(t))<line_sep># <class 'datetime.time'> s='2018-12-31T05:00:30'<line_sep>s_basic=s.replace('-' '').replace(':' '')<line_sep>print(s_basic)<line_sep># 20181231T050030 s='2018-12-31T05:00:30.001000'<line_sep>s_basic=s.split('.')[0].replace('-' '').replace(':' '')<line_sep>print(s_basic)<line_sep># 20181231T050030 s_ex=datetime.datetime.strptime(s_basic '%Y%m%dT%H%M%S').isoformat()<line_sep>print(s_ex)<line_sep># 2018-12-31T05:00:30
""" Summarize a column total cases column and total deaths column Country by country data in columns, sum up and match global totals """<import_stmt>csv<import_stmt>pandas<line_sep>pandas.set_option("display.max_rows" <none> "display.max_columns" <none>)<line_sep>col_list=["Total Cases" "Country/ Other" "Total Deaths" "# 9/27/2020"]<line_sep>df=pandas.read_csv("covidmilliondead.csv" usecols=col_list thousands=',')<line_sep>totalCases,totalDeaths=0 0<for_stmt>idx,cases,deaths zip(df["# 9/27/2020"] df["Total Cases"] df["Total Deaths"])<block_start><if_stmt>idx<g>0<block_start>totalCases<augadd>cases<if_stmt>deaths<g>0<block_start>totalDeaths<augadd>deaths<block_end><block_end><block_end><for_stmt>idx,country,cases,deaths zip(df["# 9/27/2020"] df["Country/ Other"] df["Total Cases"] df["Total Deaths"])<block_start><if_stmt>idx<g>0<block_start>print("\n" country)<line_sep>print("Cases : " cases "/" totalCases " %" "{:.5%}".format(cases/totalCases))<if_stmt>deaths<g>0<block_start>print("Deaths : " int(deaths) "/" totalDeaths " %" "{:.5%}".format(deaths/totalDeaths))<block_end><block_end><block_end>print("")<line_sep>print("Total Cases")<line_sep>print(totalCases)<line_sep>print("Total Deaths")<line_sep>print(totalDeaths)<line_sep>
<import_from_stmt>insights.parsers.zipl_conf ZiplConf<import_from_stmt>insights.tests context_wrap<import_from_stmt>insights.parsers ParseException<import_stmt>pytest<line_sep>ZIPL_CONF=""" [defaultboot] defaultauto prompt=1 timeout=5 default=linux target=/boot [linux] image=/boot/vmlinuz-3.10.0-693.el7.s390x ramdisk=/boot/initramfs-3.10.0-693.el7.s390x.img parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0 LANG=en_US.UTF-8" [linux-0-rescue-a27932c8d57248e390cee3798bbd3709] image=/boot/vmlinuz-0-rescue-a27932c8d57248e390cee3798bbd3709 ramdisk=/boot/initramfs-0-rescue-a27932c8d57248e390cee3798bbd3709.img parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0" [other] image=/boot/vmlinuz ramdisk=/boot/initramfs.img parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 # Configuration for dumping to SCSI disk # Separate IPL and dump partitions [dumpscsi] target=/boot dumptofs=/dev/sda2 parameters="dump_dir=/mydumps dump_compress=none dump_mode=auto" # Menu containing two DASD boot configurations :menu1 1=linux 2=linux-0-rescue-a27932c8d57248e390cee3798bbd3709 default=1 prompt=1 timeout=30 """.strip()<line_sep>ZIPL_CONF_INVALID=""" prompt=1 timeout=5 default=linux [linux] image=/boot/vmlinuz-3.10.0-693.el7.s390x ramdisk=/boot/initramfs-3.10.0-693.el7.s390x.img parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0 LANG=en_US.UTF-8" """.strip()<def_stmt>test_zipl_conf <block_start>res=ZiplConf(context_wrap(ZIPL_CONF))<assert_stmt>res.get('linux').get('image')<eq>"/boot/vmlinuz-3.10.0-693.el7.s390x"<assert_stmt>res['linux']['image']<eq>"/boot/vmlinuz-3.10.0-693.el7.s390x"<assert_stmt>res[':menu1']['1']<eq>'linux'<assert_stmt>'defaultauto'<in>res['defaultboot']<assert_stmt>res['defaultboot']['defaultauto']<is><true><assert_stmt>res['other']['parameters']<eq>'"root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100'<assert_stmt>res.images<eq>{'linux':'/boot/vmlinuz-3.10.0-693.el7.s390x' 'linux-0-rescue-a27932c8d57248e390cee3798bbd3709':'/boot/vmlinuz-0-rescue-a27932c8d57248e390cee3798bbd3709' 'other':'/boot/vmlinuz'}<assert_stmt>res.dumptofses<eq>{'dumpscsi':'/dev/sda2'}<block_end><def_stmt>test_zipl_conf_invalid <block_start><with_stmt>pytest.raises(ParseException)<as>pe<block_start>ZiplConf(context_wrap(ZIPL_CONF_INVALID))<block_end><assert_stmt>"Invalid zipl configuration file is found."<in>str(pe)<block_end>
<import_from_future_stmt> absolute_import print_function division<import_stmt>unittest<import_from_stmt>pony.orm.core *<import_from_stmt>pony.orm.core local<import_from_stmt>pony.orm.tests.testutils *<import_from_stmt>pony.orm.tests setup_database teardown_database<class_stmt>TestGeneratorDbSession(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>db=Database()<class_stmt>Account(db.Entity)<block_start>id=PrimaryKey(int)<line_sep>amount=Required(int)<block_end>setup_database(db)<line_sep>self.db=db<line_sep>self.Account=Account<with_stmt>db_session<block_start>a1=Account(id=1 amount=1000)<line_sep>a2=Account(id=2 amount=2000)<line_sep>a3=Account(id=3 amount=3000)<block_end><block_end><def_stmt>tearDown self<block_start>teardown_database(self.db)<assert_stmt>local.db_session<is><none><line_sep>self.db=self.Account=<none><block_end>@raises_exception(TypeError 'db_session with `retry` option cannot be applied to generator function')<def_stmt>test1 self<block_start>@db_session(retry=3)<def_stmt>f <block_start><yield><block_end><block_end>@raises_exception(TypeError 'db_session with `ddl` option cannot be applied to generator function')<def_stmt>test2 self<block_start>@db_session(ddl=<true>)<def_stmt>f <block_start><yield><block_end><block_end>@raises_exception(TypeError 'db_session with `serializable` option cannot be applied to generator function')<def_stmt>test3 self<block_start>@db_session(serializable=<true>)<def_stmt>f <block_start><yield><block_end><block_end><def_stmt>test4 self<block_start>@db_session(immediate=<true>)<def_stmt>f <block_start><yield><block_end><block_end>@raises_exception(TransactionError '@db_session-wrapped generator cannot be used inside another db_session')<def_stmt>test5 self<block_start>@db_session<def_stmt>f <block_start><yield><block_end><with_stmt>db_session<block_start>next(f())<block_end><block_end><def_stmt>test6 self<block_start>@db_session<def_stmt>f <block_start>x=local.db_session<line_sep>self.assertTrue(x<is><not><none>)<line_sep><yield>self.db._get_cache()<line_sep>self.assertEqual(local.db_session x)<line_sep>a1=self.Account[1]<line_sep><yield>a1.amount<line_sep>self.assertEqual(local.db_session x)<line_sep>a2=self.Account[2]<line_sep><yield>a2.amount<block_end>gen=f()<line_sep>cache=next(gen)<line_sep>self.assertTrue(cache.is_alive)<line_sep>self.assertEqual(local.db_session <none>)<line_sep>amount=next(gen)<line_sep>self.assertEqual(amount 1000)<line_sep>self.assertEqual(local.db_session <none>)<line_sep>amount=next(gen)<line_sep>self.assertEqual(amount 2000)<line_sep>self.assertEqual(local.db_session <none>)<try_stmt><block_start>next(gen)<block_end><except_stmt>StopIteration<block_start>self.assertFalse(cache.is_alive)<block_end><else_stmt><block_start>self.fail()<block_end><block_end><def_stmt>test7 self<block_start>@db_session<def_stmt>f id1<block_start>a1=self.Account[id1]<line_sep>id2=<yield>a1.amount<line_sep>a2=self.Account[id2]<line_sep>amount=<yield>a2.amount<line_sep>a1.amount<augsub>amount<line_sep>a2.amount<augadd>amount<line_sep>commit()<block_end>gen=f(1)<line_sep>amount1=next(gen)<line_sep>self.assertEqual(amount1 1000)<line_sep>amount2=gen.send(2)<line_sep>self.assertEqual(amount2 2000)<try_stmt><block_start>gen.send(100)<block_end><except_stmt>StopIteration<block_start><pass><block_end><else_stmt><block_start>self.fail()<block_end><with_stmt>db_session<block_start>a1=self.Account[1]<line_sep>self.assertEqual(a1.amount 900)<line_sep>a2=self.Account[2]<line_sep>self.assertEqual(a2.amount 
2100)<block_end><block_end>@raises_exception(TransactionError 'You need to manually commit() changes before suspending the generator')<def_stmt>test8 self<block_start>@db_session<def_stmt>f id1<block_start>a1=self.Account[id1]<line_sep>a1.amount<augadd>100<line_sep><yield>a1.amount<block_end><for_stmt>amount f(1)<block_start><pass><block_end><block_end><def_stmt>test9 self<block_start>@db_session<def_stmt>f id1<block_start>a1=self.Account[id1]<line_sep>a1.amount<augadd>100<line_sep>commit()<line_sep><yield>a1.amount<block_end><for_stmt>amount f(1)<block_start><pass><block_end><block_end><def_stmt>test10 self<block_start>@db_session<def_stmt>f id1<block_start>a1=self.Account[id1]<line_sep><yield>a1.amount<line_sep>a1.amount<augadd>100<block_end><with_stmt>db_session<block_start>a=self.Account[1].amount<block_end><for_stmt>amount f(1)<block_start><pass><block_end><with_stmt>db_session<block_start>b=self.Account[1].amount<block_end>self.assertEqual(b a+100)<block_end><def_stmt>test12 self<block_start>@db_session<def_stmt>f id1<block_start>a1=self.Account[id1]<line_sep><yield>a1.amount<block_end>gen=f(1)<line_sep>next(gen)<line_sep>gen.close()<block_end>@raises_exception(TypeError 'error message')<def_stmt>test13 self<block_start>@db_session<def_stmt>f id1<block_start>a1=self.Account[id1]<line_sep><yield>a1.amount<block_end>gen=f(1)<line_sep>next(gen)<line_sep>gen.throw(TypeError('error message'))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
""" Define authorization predicates. These are functions which accept an `Identity` object and a context object and return a truthy value. These represent building blocks of our permission map which define when people do, or don't have permissions. For example a predicate might define "group_created_by_user" which is only true when a user is present, a group is present and the user created that group. """<import_from_stmt>itertools chain<import_from_stmt>h.models.group JoinableBy ReadableBy WriteableBy<def_stmt>requires *parent_predicates<block_start>""" Decorate a predicate to say it requires other predicates to be True first. :param parent_predicates: A list of predicates that have to be true for this predicate to be true as well. """<def_stmt>decorator function<block_start>function.requires=parent_predicates<line_sep><return>function<block_end><return>decorator<block_end># Identity things <def_stmt>authenticated identity _context<block_start><return>identity<block_end># The `@requires` here means that this predicate needs `authenticate` to be # True before it's True. It also avoids attribute errors if identity is None @requires(authenticated)<def_stmt>authenticated_user identity _context<block_start><return>identity.user<block_end>@requires(authenticated_user)<def_stmt>user_is_staff identity _context<block_start><return>identity.user.staff<block_end>@requires(authenticated_user)<def_stmt>user_is_admin identity _context<block_start><return>identity.user.admin<block_end>@requires(authenticated)<def_stmt>authenticated_client identity _context<block_start><return>identity.auth_client<block_end>@requires(authenticated_client)<def_stmt>authenticated_client_is_lms identity _context<block_start>authority=identity.auth_client.authority<line_sep><return>authority.startswith("lms.")<and>authority.endswith(".hypothes.is")<block_end># Users <def_stmt>user_found _identity context<block_start><return>hasattr(context "user")<and>context.user<block_end>@requires(authenticated_client user_found)<def_stmt>user_authority_matches_authenticated_client identity context<block_start><return>context.user.authority<eq>identity.auth_client.authority<block_end># Annotations <def_stmt>annotation_found _identity context<block_start><return>hasattr(context "annotation")<and>context.annotation<block_end>@requires(annotation_found)<def_stmt>annotation_shared _identity context<block_start><return>context.annotation.shared<block_end>@requires(annotation_found)<def_stmt>annotation_not_shared _identity context<block_start><return><not>context.annotation.shared<block_end>@requires(annotation_found)<def_stmt>annotation_live _identity context<block_start><return><not>context.annotation.deleted<block_end>@requires(authenticated_user annotation_found)<def_stmt>annotation_created_by_user identity context<block_start><return>identity.user.userid<eq>context.annotation.userid<block_end># Groups <def_stmt>group_found _identity context<block_start><return>hasattr(context "group")<and>context.group<block_end><def_stmt>group_not_found _identity context<block_start><return><not>hasattr(context "group")<or><not>context.group<block_end>@requires(group_found)<def_stmt>group_writable_by_members _identity context<block_start><return>context.group.writeable_by<eq>WriteableBy.members<block_end>@requires(group_found)<def_stmt>group_writable_by_authority _identity context<block_start><return>context.group.writeable_by<eq>WriteableBy.authority<block_end>@requires(group_found)<def_stmt>group_readable_by_world _identity 
context<block_start><return>context.group.readable_by<eq>ReadableBy.world<block_end>@requires(group_found)<def_stmt>group_readable_by_members _identity context<block_start><return>context.group.readable_by<eq>ReadableBy.members<block_end>@requires(group_found)<def_stmt>group_joinable_by_authority _identity context<block_start><return>context.group.joinable_by<eq>JoinableBy.authority<block_end>@requires(authenticated_user group_found)<def_stmt>group_created_by_user identity context<block_start><return>context.group.creator<and>context.group.creator.id<eq>identity.user.id<block_end>@requires(authenticated_user group_found)<def_stmt>group_has_user_as_member identity context# With detached groups like we have with the websocket, this doesn't work # as SQLAlchemy does not consider them equal: # return context.group in identity.user.groups <block_start><return>any(user_group.id<eq>context.group.id<for>user_group identity.user.groups)<block_end>@requires(authenticated_user group_found)<def_stmt>group_matches_user_authority identity context<block_start><return>context.group.authority<eq>identity.user.authority<block_end>@requires(authenticated_client group_found)<def_stmt>group_matches_authenticated_client_authority identity context<block_start><return>context.group.authority<eq>identity.auth_client.authority<block_end><def_stmt>resolve_predicates mapping<block_start>""" Expand predicates with requirements into concrete lists of predicates. This takes a permission map which contains predicates which reference other ones (using `@requires`), and converts each clause to include the parents in parent first order. This means any parent which is referred to by a predicate is executed before it, and no predicate appears more than once. """<line_sep><return>{key:[_expand_clause(clause)<for>clause clauses]<for>key,clauses mapping.items()}<block_end><def_stmt>_expand_clause clause<block_start>"""Generate all of the predicates + parents in a clause without dupes."""<line_sep>seen_before=set()<line_sep># The chain.from_iterable here flattens nested iterables <return>list(chain.from_iterable(_expand_predicate(predicate seen_before)<for>predicate clause))<block_end><def_stmt>_expand_predicate predicate seen_before<block_start>"""Generate all of the parents and the predicate in parents first order."""<if_stmt>hasattr(predicate "requires")<block_start><for_stmt>parent predicate.requires<block_start><yield><from>_expand_predicate(parent seen_before)<block_end><block_end><if_stmt>predicate<not><in>seen_before<block_start>seen_before.add(predicate)<line_sep><yield>predicate<block_end><block_end>
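To make the expansion above concrete, here is a minimal sketch using the predicates defined in this module; the permission key "group:edit" and the clause layout are illustrative, not taken from the real permission map:

# Hypothetical permission map; each clause lists only the leaf predicates.
permission_map = {
    "group:edit": [
        (group_created_by_user,),
        (user_is_admin, group_found),
    ],
}

resolved = resolve_predicates(permission_map)

# Each clause is expanded parents-first and de-duplicated, so the first
# clause becomes: authenticated, authenticated_user, group_found,
# group_created_by_user.
assert [p.__name__ for p in resolved["group:edit"][0]] == [
    "authenticated", "authenticated_user", "group_found", "group_created_by_user",
]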
<import_stmt>os<import_stmt>sys<import_stmt>gc<import_stmt>ctypes<import_stmt>psutil<import_stmt>pytest<import_stmt>warnings<import_stmt>threading<import_from_stmt>time sleep<import_from_stmt>multiprocessing util current_process<import_from_stmt>pickle PicklingError UnpicklingError<import_from_stmt>distutils.version LooseVersion<import_stmt>loky<import_from_stmt>loky cpu_count<import_from_stmt>loky get_reusable_executor<import_from_stmt>loky.process_executor _RemoteTraceback TerminatedWorkerError<import_from_stmt>loky.process_executor BrokenProcessPool ShutdownExecutorError<import_from_stmt>loky.reusable_executor _ReusablePoolExecutor<import_stmt>cloudpickle<import_from_stmt>._executor_mixin ReusableExecutorMixin<import_from_stmt>.utils TimingWrapper id_sleep check_python_subprocess_call<import_from_stmt>.utils filter_match<line_sep>cloudpickle_version=LooseVersion(cloudpickle.__version__)<line_sep># Compat windows <if_stmt>sys.platform<eq>"win32"<block_start><import_from_stmt>signal SIGTERM<as>SIGKILL<line_sep>libc=ctypes.cdll.msvcrt<block_end><else_stmt><block_start><import_from_stmt>signal SIGKILL<import_from_stmt>ctypes.util find_library<line_sep>libc=ctypes.CDLL(find_library("c"))<block_end><try_stmt><block_start><import_stmt>numpy<as>np<block_end><except_stmt>ImportError<block_start>np=<none><block_end># Backward compat for python2 cPickle module PICKLING_ERRORS=(PicklingError )<try_stmt><block_start><import_stmt>cPickle<line_sep>PICKLING_ERRORS<augadd>(cPickle.PicklingError )<block_end><except_stmt>ImportError<block_start><pass><block_end><def_stmt>clean_warning_registry <block_start>"""Safe way to reset warnings."""<line_sep>warnings.resetwarnings()<line_sep>reg="__warningregistry__"<for_stmt>mod_name,mod list(sys.modules.items())<block_start><if_stmt>hasattr(mod reg)<block_start>getattr(mod reg).clear()<block_end><block_end><block_end><def_stmt>wait_dead worker n_tries=1000 delay=0.001<block_start>"""Wait for process pid to die"""<for_stmt>i range(n_tries)<block_start><if_stmt>worker.exitcode<is><not><none><block_start><return><block_end>sleep(delay)<block_end><raise>RuntimeError("Process %d failed to die for at least %0.3fs"%(worker.pid delay<times>n_tries))<block_end><def_stmt>crash <block_start>"""Induces a segfault"""<import_stmt>faulthandler<line_sep>faulthandler._sigsegv()<block_end><def_stmt>exit <block_start>"""Induces a sys exit with exitcode 0"""<line_sep>sys.exit(0)<block_end><def_stmt>c_exit exitcode=0<block_start>"""Induces a libc exit with exitcode 0"""<line_sep>libc.exit(exitcode)<block_end><def_stmt>sleep_then_check_pids_exist arg<block_start>"""Sleep for some time and the check if all the passed pids exist"""<line_sep>time,pids=arg<line_sep>sleep(time)<line_sep>res=<true><for_stmt>p pids<block_start>res<augand>psutil.pid_exists(p)<block_end><return>res<block_end><def_stmt>kill_friend pid delay=0<block_start>"""Function that send SIGKILL at process pid"""<line_sep>sleep(delay)<try_stmt><block_start>os.kill(pid SIGKILL)<block_end><except_stmt>(PermissionError ProcessLookupError)<as>e<block_start><if_stmt>psutil.pid_exists(pid)<block_start>util.debug("Fail to kill an alive process?!?")<line_sep><raise>e<block_end>util.debug("process {} was already dead".format(pid))<block_end><block_end><def_stmt>raise_error etype=UnpicklingError message=<none><block_start>"""Function that raises an Exception in process"""<line_sep><raise>etype(message)<block_end><def_stmt>return_instance cls<block_start>"""Function that returns a instance of 
cls"""<line_sep><return>cls()<block_end><class_stmt>SayWhenError(ValueError)<block_start><pass><block_end><def_stmt>exception_throwing_generator total when<block_start><for_stmt>i range(total)<block_start><if_stmt>i<eq>when<block_start><raise>SayWhenError("Somebody said when")<block_end><yield>i<block_end><block_end><def_stmt>do_nothing arg<block_start>"""Function that return True, test passing argument"""<line_sep><return><true><block_end><class_stmt>CrashAtPickle(object)<block_start>"""Bad object that triggers a segfault at pickling time."""<def_stmt>__reduce__ self<block_start>crash()<block_end><block_end><class_stmt>CrashAtUnpickle(object)<block_start>"""Bad object that triggers a segfault at unpickling time."""<def_stmt>__reduce__ self<block_start><return>crash ()<block_end><block_end><class_stmt>ExitAtPickle(object)<block_start>"""Bad object that triggers a segfault at pickling time."""<def_stmt>__reduce__ self<block_start>exit()<block_end><block_end><class_stmt>ExitAtUnpickle(object)<block_start>"""Bad object that triggers a process exit at unpickling time."""<def_stmt>__reduce__ self<block_start><return>exit ()<block_end><block_end><class_stmt>CExitAtPickle(object)<block_start>"""Bad object that triggers a segfault at pickling time."""<def_stmt>__reduce__ self<block_start>c_exit()<block_end><block_end><class_stmt>CExitAtUnpickle(object)<block_start>"""Bad object that triggers a process exit at unpickling time."""<def_stmt>__reduce__ self<block_start><return>c_exit ()<block_end><block_end><class_stmt>ErrorAtPickle(object)<block_start>"""Bad object that raises an error at pickling time."""<def_stmt>__init__ self fail=<true><block_start>self.fail=fail<block_end><def_stmt>__reduce__ self<block_start><if_stmt>self.fail<block_start><raise>PicklingError("Error in pickle")<block_end><else_stmt><block_start><return>id (42 )<block_end><block_end><block_end><class_stmt>ErrorAtUnpickle(object)<block_start>"""Bad object that triggers a process exit at unpickling time."""<def_stmt>__init__ self etype=UnpicklingError message='the error message'<block_start>self.etype=etype<line_sep>self.message=message<block_end><def_stmt>__reduce__ self<block_start><return>raise_error (self.etype self.message)<block_end><block_end><class_stmt>CrashAtGCInWorker(object)<block_start>"""Bad object that triggers a segfault at call item GC time"""<def_stmt>__del__ self<block_start><if_stmt>current_process().name<ne>"MainProcess"<block_start>crash()<block_end><block_end><block_end><class_stmt>CExitAtGCInWorker(object)<block_start>"""Exit worker at call item GC time"""<def_stmt>__del__ self<block_start><if_stmt>current_process().name<ne>"MainProcess"<block_start>c_exit()<block_end><block_end><block_end><class_stmt>TestExecutorDeadLock(ReusableExecutorMixin)<block_start>crash_cases=[# Check problem occuring while pickling a task in (id (ExitAtPickle() ) PicklingError <none>) (id (ErrorAtPickle() ) PicklingError <none>) # Check problem occuring while unpickling a task on workers (id (ExitAtUnpickle() ) BrokenProcessPool r"SystemExit") (id (CExitAtUnpickle() ) TerminatedWorkerError r"EXIT\(0\)") (id (ErrorAtUnpickle() ) BrokenProcessPool r"UnpicklingError") (id (CrashAtUnpickle() ) TerminatedWorkerError r"SIGSEGV") # Check problem occuring during function execution on workers (crash () TerminatedWorkerError r"SIGSEGV") (exit () SystemExit <none>) (c_exit () TerminatedWorkerError r"EXIT\(0\)") (raise_error (RuntimeError ) RuntimeError <none>) # Check problem occuring while pickling a task result # on workers 
(return_instance (CrashAtPickle ) TerminatedWorkerError r"SIGSEGV") (return_instance (ExitAtPickle ) SystemExit <none>) (return_instance (CExitAtPickle ) TerminatedWorkerError r"EXIT\(0\)") (return_instance (ErrorAtPickle ) PicklingError <none>) # Check problem occuring while unpickling a task in # the result_handler thread (return_instance (ExitAtUnpickle ) BrokenProcessPool r"SystemExit") (return_instance (ErrorAtUnpickle ) BrokenProcessPool r"UnpicklingError") ]<line_sep>@pytest.mark.parametrize("func, args, expected_err, match" crash_cases)<def_stmt>test_crashes self func args expected_err match<block_start>"""Test various reusable_executor crash handling"""<line_sep>executor=get_reusable_executor(max_workers=2)<line_sep>res=executor.submit(func *args)<line_sep>match_err=<none><if_stmt>expected_err<is>TerminatedWorkerError<block_start>match_err=filter_match(match)<line_sep>match=<none><block_end><with_stmt>pytest.raises(expected_err match=match_err)<as>exc_info<block_start>res.result()<block_end># For remote traceback, ensure that the cause contains the original # error <if_stmt>match<is><not><none><block_start><with_stmt>pytest.raises(_RemoteTraceback match=match)<block_start><raise>exc_info.value.__cause__<block_end><block_end><block_end>@pytest.mark.parametrize("func, args, expected_err, match" crash_cases)<def_stmt>test_in_callback_submit_with_crash self func args expected_err match<block_start>"""Test the recovery from callback crash"""<line_sep>executor=get_reusable_executor(max_workers=2 timeout=12)<def_stmt>in_callback_submit future<block_start>future2=get_reusable_executor(max_workers=2 timeout=12).submit(func *args)<line_sep># Store the future of the job submitted in the callback to make it # easy to introspect. future.callback_future=future2<line_sep>future.callback_done.set()<block_end># Make sure the first submitted job last a bit to make sure that # the callback will be called in the queue manager thread and not # immediately in the main thread. delay=0.1<line_sep>f=executor.submit(id_sleep 42 delay)<line_sep>f.callback_done=threading.Event()<line_sep>f.add_done_callback(in_callback_submit)<assert_stmt>f.result()<eq>42<if_stmt><not>f.callback_done.wait(timeout=3)<block_start><raise>AssertionError('callback not done before timeout')<block_end>match_err=<none><if_stmt>expected_err<is>TerminatedWorkerError<block_start>match_err=filter_match(match)<line_sep>match=<none><block_end><with_stmt>pytest.raises(expected_err match=match_err)<as>exc_info<block_start>f.callback_future.result()<block_end># For remote traceback, ensure that the cause contains the original # error <if_stmt>match<is><not><none><block_start><with_stmt>pytest.raises(_RemoteTraceback match=match)<block_start><raise>exc_info.value.__cause__<block_end><block_end><block_end><def_stmt>test_callback_crash_on_submit self<block_start>"""Errors in the callback execution directly in queue manager thread. This case can break the process executor and we want to make sure that we can detect the issue and recover by calling get_reusable_executor. """<line_sep>executor=get_reusable_executor(max_workers=2)<line_sep># Make sure the first submitted job last a bit to make sure that # the callback will be called in the queue manager thread and not # immediately in the main thread. 
delay=0.1<line_sep>f=executor.submit(id_sleep 42 delay)<line_sep>f.add_done_callback(<lambda>_:exit())<assert_stmt>f.result()<eq>42<assert_stmt>executor.submit(id_sleep 42 0.1).result()<eq>42<line_sep>executor=get_reusable_executor(max_workers=2)<line_sep>f=executor.submit(id_sleep 42 delay)<line_sep>f.add_done_callback(<lambda>_:raise_error())<assert_stmt>f.result()<eq>42<assert_stmt>executor.submit(id_sleep 42 0.).result()<eq>42<block_end><def_stmt>test_deadlock_kill self<block_start>"""Test deadlock recovery for reusable_executor"""<line_sep>executor=get_reusable_executor(max_workers=1 timeout=<none>)<line_sep># trigger the spawning of the worker process executor.submit(sleep 0.1)<line_sep>worker=next(iter(executor._processes.values()))<with_stmt>pytest.warns(UserWarning)<as>recorded_warnings<block_start>executor=get_reusable_executor(max_workers=2 timeout=<none>)<block_end><assert_stmt>len(recorded_warnings)<eq>1<line_sep>expected_msg=("Trying to resize an executor with running jobs:"<concat>" waiting for jobs completion before resizing.")<assert_stmt>recorded_warnings[0].message.args[0]<eq>expected_msg<line_sep>os.kill(worker.pid SIGKILL)<line_sep>wait_dead(worker)<line_sep># wait for the executor to be able to detect the issue and set itself # in broken state: sleep(.5)<with_stmt>pytest.raises(TerminatedWorkerError match=filter_match(r"SIGKILL"))<block_start>executor.submit(id_sleep 42 0.1).result()<block_end># the get_reusable_executor factory should be able to create a new # working instance executor=get_reusable_executor(max_workers=2 timeout=<none>)<assert_stmt>executor.submit(id_sleep 42 0.).result()<eq>42<block_end>@pytest.mark.parametrize("n_proc" [1 2 5 13])<def_stmt>test_crash_races self n_proc<block_start>"""Test the race conditions in reusable_executor crash handling"""<if_stmt>(sys.platform<eq>'win32'<and>sys.version_info<ge>(3 8)<and>n_proc<g>5)<block_start>pytest.skip("On win32, the paging size can be too small to import numpy "<concat>"multiple times in the sub-processes (imported when loading "<concat>"this file). Skipping while no better solution is found. 
See "<concat>"https://github.com/joblib/loky/issues/279 for more details.")<block_end># Test for external crash signal comming from neighbor # with various race setup executor=get_reusable_executor(max_workers=n_proc timeout=<none>)<line_sep>executor.map(id range(n_proc))# trigger the creation of the workers pids=list(executor._processes.keys())<assert_stmt>len(pids)<eq>n_proc<assert_stmt><none><not><in>pids<line_sep>res=executor.map(sleep_then_check_pids_exist [(.0001<times>(j<floordiv>2) pids)<for>j range(2<times>n_proc)])<assert_stmt>all(list(res))<with_stmt>pytest.raises(TerminatedWorkerError match=filter_match(r"SIGKILL"))<block_start>res=executor.map(kill_friend pids[::-1])<line_sep>list(res)<block_end><block_end><def_stmt>test_imap_handle_iterable_exception self# The catch of the errors in imap generation depend on the # builded version of python <block_start>executor=get_reusable_executor(max_workers=2)<with_stmt>pytest.raises(SayWhenError)<block_start>executor.map(id_sleep exception_throwing_generator(10 3) chunksize=1)<block_end># SayWhenError seen at start of problematic chunk's results executor=get_reusable_executor(max_workers=2)<with_stmt>pytest.raises(SayWhenError)<block_start>executor.map(id_sleep exception_throwing_generator(20 7) chunksize=2)<block_end>executor=get_reusable_executor(max_workers=2)<with_stmt>pytest.raises(SayWhenError)<block_start>executor.map(id_sleep exception_throwing_generator(20 7) chunksize=4)<block_end><block_end><def_stmt>test_queue_full_deadlock self<block_start>executor=get_reusable_executor(max_workers=1)<line_sep>fs_fail=[executor.submit(do_nothing ErrorAtPickle(<true>))<for>i range(100)]<line_sep>fs=[executor.submit(do_nothing ErrorAtPickle(<false>))<for>i range(100)]<with_stmt>pytest.raises(PicklingError)<block_start>fs_fail[99].result()<block_end><assert_stmt>fs[99].result()<block_end><def_stmt>test_informative_error_when_fail_at_unpickle self<block_start>executor=get_reusable_executor(max_workers=2)<line_sep>obj=ErrorAtUnpickle(RuntimeError 'message raised in child')<line_sep>f=executor.submit(id obj)<with_stmt>pytest.raises(BrokenProcessPool)<as>exc_info<block_start>f.result()<block_end><assert_stmt>'RuntimeError'<in>str(exc_info.value.__cause__)<assert_stmt>'message raised in child'<in>str(exc_info.value.__cause__)<block_end>@pytest.mark.skipif(np<is><none> reason="requires numpy")<def_stmt>test_numpy_dot_parent_and_child_no_freeze self<block_start>"""Test that no freeze happens in child process when numpy's thread pool is started in the parent. """<line_sep>a=np.random.randn(1000 1000)<line_sep>np.dot(a a)# trigger the thread pool init in the parent process executor=get_reusable_executor(max_workers=2)<line_sep>executor.submit(np.dot a a).result()<line_sep>executor.shutdown(wait=<true>)<block_end><block_end><class_stmt>TestTerminateExecutor(ReusableExecutorMixin)<block_start><def_stmt>test_shutdown_kill self<block_start>"""Test reusable_executor termination handling"""<import_from_stmt>itertools repeat<line_sep>executor=get_reusable_executor(max_workers=5)<line_sep>res1=executor.map(id_sleep range(100) repeat(.001))<line_sep>res2=executor.map(id_sleep range(100) repeat(1))<assert_stmt>list(res1)<eq>list(range(100))<line_sep>shutdown=TimingWrapper(executor.shutdown)<line_sep>shutdown(wait=<true> kill_workers=<true>)<assert_stmt>shutdown.elapsed<l>5<line_sep># We should get an error as the executor shutdowned before we fetched # all the results from the long running operation. 
<with_stmt>pytest.raises(ShutdownExecutorError)<block_start>list(res2)<block_end><block_end><def_stmt>test_shutdown_deadlock self<block_start>"""Test recovery if killed after resize call"""<line_sep># Test the executor.shutdown call do not cause deadlock executor=get_reusable_executor(max_workers=2 timeout=<none>)<line_sep>executor.map(id range(2))# start the worker processes executor.submit(kill_friend (next(iter(executor._processes.keys())) .0))<line_sep>sleep(.01)<line_sep>executor.shutdown(wait=<true>)<block_end><def_stmt>test_kill_workers_on_new_options self# submit a long running job with no timeout <block_start>executor=get_reusable_executor(max_workers=2 timeout=<none>)<line_sep>f=executor.submit(sleep 10000)<line_sep># change the constructor parameter while requesting not to wait # for the long running task to complete (the workers will get # shutdown forcibly) executor=get_reusable_executor(max_workers=2 timeout=5 kill_workers=<true>)<with_stmt>pytest.raises(ShutdownExecutorError)<block_start>f.result()<block_end>f2=executor.submit(id_sleep 42 0)<assert_stmt>f2.result()<eq>42<block_end>@pytest.mark.parametrize("bad_object, match" [(CrashAtGCInWorker r"SIGSEGV") (CExitAtGCInWorker r"EXIT\(0\)")])<def_stmt>test_call_item_gc_crash_or_exit self bad_object match<block_start>executor=get_reusable_executor(max_workers=1)<line_sep>bad_object=bad_object()<line_sep>f=executor.submit(id bad_object)<line_sep># The worker will successfully send back its result to the master # process before crashing so this future can always be collected: <assert_stmt>f.result()<is><not><none><line_sep># The executor should automatically detect that the worker has crashed # when processing subsequently dispatched tasks: <with_stmt>pytest.raises(TerminatedWorkerError match=filter_match(match))<block_start>executor.submit(gc.collect).result()<for_stmt>r executor.map(sleep [.1]<times>100)<block_start><pass><block_end><block_end><block_end><block_end><class_stmt>TestResizeExecutor(ReusableExecutorMixin)<block_start><def_stmt>test_reusable_executor_resize self<block_start>"""Test reusable_executor resizing"""<line_sep>executor=get_reusable_executor(max_workers=2 timeout=<none>)<line_sep>executor.map(id range(2))<line_sep># Decreasing the executor should drop a single process and keep one of # the old one as it is still in a good shape. The resize should not # occur while there are on going works. pids=list(executor._processes.keys())<line_sep>res1=executor.submit(sleep_then_check_pids_exist (.3 pids))<line_sep>clean_warning_registry()<with_stmt>warnings.catch_warnings(record=<true>)<as>w# Cause all warnings to always be triggered. 
<block_start>warnings.simplefilter("always")<line_sep>executor=get_reusable_executor(max_workers=1 timeout=<none>)<assert_stmt>len(w)<eq>1<line_sep>expected_msg="Trying to resize an executor with running jobs"<assert_stmt>expected_msg<in>str(w[0].message)<assert_stmt>res1.result() ("Resize should wait for current processes "<concat>" to finish")<assert_stmt>len(executor._processes)<eq>1<assert_stmt>next(iter(executor._processes.keys()))<in>pids<block_end># Requesting the same number of process should not impact the executor # nor kill the processed old_pid=next(iter((executor._processes.keys())))<line_sep>unchanged_executor=get_reusable_executor(max_workers=1 timeout=<none>)<assert_stmt>len(unchanged_executor._processes)<eq>1<assert_stmt>unchanged_executor<is>executor<assert_stmt>next(iter(unchanged_executor._processes.keys()))<eq>old_pid<line_sep># Growing the executor again should add a single process and keep the # old one as it is still in a good shape executor=get_reusable_executor(max_workers=2 timeout=<none>)<assert_stmt>len(executor._processes)<eq>2<assert_stmt>old_pid<in>list(executor._processes.keys())<block_end>@pytest.mark.parametrize("reuse" [<true> <false>])@pytest.mark.parametrize("kill_workers" [<true> <false>])<def_stmt>test_reusable_executor_resize_many_times self kill_workers reuse# Tentative non-regression test for a deadlock when shutting down # the workers of an executor prior to resizing it. <block_start>kwargs={'timeout':<none> 'kill_workers':kill_workers 'reuse':reuse }<with_stmt>warnings.catch_warnings(record=<true>)# Cause all warnings to always be triggered. <block_start>warnings.simplefilter("always")<for_stmt>size [12 2 1 12 6 1 8 5]<block_start>executor=get_reusable_executor(max_workers=size **kwargs)<line_sep>executor.map(sleep [0.01]<times>6)<line_sep># Do not wait for the tasks to complete. 
<block_end>executor.shutdown()<block_end><block_end><def_stmt>test_kill_after_resize_call self<block_start>"""Test recovery if killed after resize call"""<line_sep># Test the executor resizing called before a kill arrive executor=get_reusable_executor(max_workers=2 timeout=<none>)<line_sep>executor.map(id range(2))# trigger the creation of worker processes pid=next(iter(executor._processes.keys()))<line_sep>executor.submit(kill_friend (pid .1))<with_stmt>pytest.warns(UserWarning)<as>recorded_warnings<block_start>warnings.simplefilter("always")<line_sep>executor=get_reusable_executor(max_workers=1 timeout=<none>)<block_end><assert_stmt>len(recorded_warnings)<eq>1<line_sep>expected_msg=("Trying to resize an executor with running jobs:"<concat>" waiting for jobs completion before resizing.")<assert_stmt>recorded_warnings[0].message.args[0]<eq>expected_msg<assert_stmt>executor.submit(id_sleep 42 0.).result()<eq>42<line_sep>executor.shutdown()<block_end><def_stmt>test_resize_after_timeout self<block_start><with_stmt>warnings.catch_warnings(record=<true>)<as>recorded_warnings<block_start>warnings.simplefilter("always")<line_sep>executor=get_reusable_executor(max_workers=2 timeout=.001)<assert_stmt>executor.submit(id_sleep 42 0.).result()<eq>42<line_sep>sleep(.1)<line_sep>executor=get_reusable_executor(max_workers=8 timeout=.001)<assert_stmt>executor.submit(id_sleep 42 0.).result()<eq>42<line_sep>sleep(.1)<line_sep>executor=get_reusable_executor(max_workers=2 timeout=.001)<assert_stmt>executor.submit(id_sleep 42 0.).result()<eq>42<block_end><if_stmt>len(recorded_warnings)<g>1<block_start>expected_msg='A worker stopped'<assert_stmt>expected_msg<in>recorded_warnings[0].message.args[0]<block_end><block_end><block_end><class_stmt>TestGetReusableExecutor(ReusableExecutorMixin)<block_start><def_stmt>test_invalid_process_number self<block_start>"""Raise error on invalid process number"""<with_stmt>pytest.raises(ValueError)<block_start>get_reusable_executor(max_workers=0)<block_end><with_stmt>pytest.raises(ValueError)<block_start>get_reusable_executor(max_workers=-1)<block_end>executor=get_reusable_executor()<with_stmt>pytest.raises(ValueError)<block_start>executor._resize(max_workers=<none>)<block_end><block_end>@pytest.mark.skipif(sys.platform<eq>"win32" reason="No fork on windows")@pytest.mark.skipif(sys.version_info<le>(3 4) reason="No context before 3.4")<def_stmt>test_invalid_context self<block_start>"""Raise error on invalid context"""<with_stmt>pytest.warns(UserWarning)<block_start><with_stmt>pytest.raises(ValueError)<block_start>get_reusable_executor(max_workers=2 context="fork")<block_end><block_end><block_end><def_stmt>test_pass_start_method_name_as_context self<block_start>executor=get_reusable_executor(max_workers=2 context='loky')<assert_stmt>executor.submit(id 42).result()<ge>0<with_stmt>pytest.raises(ValueError)<block_start>get_reusable_executor(max_workers=2 context='bad_start_method')<block_end><block_end><def_stmt>test_interactively_defined_executor_no_main self# check that the init_main_module parameter works properly # when using -c option, we don't need the safeguard if __name__ .. # and thus test LokyProcess without the extra argument. For running # a script, it is necessary to use init_main_module=False. 
<block_start>code="""if True: from loky import get_reusable_executor e = get_reusable_executor() e.submit(id, 42).result() print("ok") """<line_sep>check_python_subprocess_call(code stdout_regex=r"ok")<block_end><def_stmt>test_reused_flag self<block_start>executor,_=_ReusablePoolExecutor.get_reusable_executor(max_workers=2)<line_sep>executor,reused=_ReusablePoolExecutor.get_reusable_executor(max_workers=2)<assert_stmt>reused<line_sep>executor.shutdown(kill_workers=<true>)<line_sep>executor,reused=_ReusablePoolExecutor.get_reusable_executor(max_workers=2)<assert_stmt><not>reused<block_end>@pytest.mark.xfail(cloudpickle_version<ge>LooseVersion("0.5.4")<and>cloudpickle_version<le>LooseVersion("0.7.0") reason="Known issue in cloudpickle")# https://github.com/cloudpipe/cloudpickle/pull/240 <def_stmt>test_interactively_defined_nested_functions self# Check that it's possible to call nested interactively defined # functions and furthermore that changing the code interactively # is taken into account by the single worker process. <block_start>code="""if True: from loky import get_reusable_executor e = get_reusable_executor(max_workers=1) # Force a start of the children process: e.submit(id, 42).result() # Test that it's possible to call interactively defined, nested # functions: def inner_func(x): return -x def outer_func(x): return inner_func(x) assert e.submit(outer_func, 1).result() == outer_func(1) == -1 # Test that changes to the definition of the inner function are # taken into account in subsequent calls to the outer function. def inner_func(x): return x assert e.submit(outer_func, 1).result() == outer_func(1) == 1 print("ok") """<line_sep>check_python_subprocess_call(code stdout_regex=r"ok")<block_end><def_stmt>test_interactively_defined_recursive_functions self# Check that it's possible to call a recursive function defined # in a closure. # Also check that calling several function that stems from the same # factory with different closure states results in the expected result: # the function definitions should not collapse in the single worker # process. 
<block_start>code="""if True: from loky import get_reusable_executor e = get_reusable_executor(max_workers=1) # Force a start of the children process: e.submit(id, 42).result() def make_func(seed): def func(x): if x <= 0: return seed return func(x - 1) + 1 return func func = make_func(0) assert e.submit(func, 5).result() == func(5) == 5 func = make_func(1) assert e.submit(func, 5).result() == func(5) == 6 print("ok") """<line_sep>check_python_subprocess_call(code stdout_regex=r"ok")<block_end><def_stmt>test_compat_with_concurrent_futures_exception self# It should be possible to use a loky process pool executor as a dropin # replacement for a ProcessPoolExecutor, including when catching # exceptions: <block_start>concurrent=pytest.importorskip('concurrent')<import_from_stmt>concurrent.futures.process BrokenProcessPool<as>BPPExc<with_stmt>pytest.raises(BPPExc)<block_start>get_reusable_executor(max_workers=2).submit(crash).result()<block_end>e=get_reusable_executor(max_workers=2)<line_sep>f=e.submit(id 42)<line_sep># Ensure that loky.Future are compatible with concurrent.futures # (see #155) <assert_stmt>isinstance(f concurrent.futures.Future)<line_sep>(done running)=concurrent.futures.wait([f] timeout=15)<assert_stmt>len(running)<eq>0<block_end>thread_configurations=[('constant' 'clean_start') ('constant' 'broken_start') ('varying' 'clean_start') ('varying' 'broken_start') ]<line_sep>@pytest.mark.parametrize("workers, executor_state" thread_configurations)<def_stmt>test_reusable_executor_thread_safety self workers executor_state<block_start><if_stmt>executor_state<eq>'clean_start'# Create a new shared executor and ensures that it's workers are # ready: <block_start>get_reusable_executor(reuse=<false>).submit(id 42).result()<block_end><else_stmt># Break the shared executor before launching the threads: <block_start><with_stmt>pytest.raises(TerminatedWorkerError match=filter_match(r"SIGSEGV"))<block_start>executor=get_reusable_executor(reuse=<false>)<line_sep>executor.submit(return_instance CrashAtPickle).result()<block_end><block_end><def_stmt>helper_func output_collector max_workers=2 n_outer_steps=5 n_inner_steps=10<block_start><with_stmt>warnings.catch_warnings()# ignore resize warnings <block_start>warnings.simplefilter("always")<line_sep>executor=get_reusable_executor(max_workers=max_workers)<for_stmt>i range(n_outer_steps)<block_start>results=executor.map(<lambda>x:x<power>2 range(n_inner_steps))<line_sep>expected_result=[x<power>2<for>x range(n_inner_steps)]<assert_stmt>list(results)<eq>expected_result<block_end>output_collector.append('ok')<block_end><block_end><if_stmt>workers<eq>'constant'<block_start>max_workers=[2]<times>10<block_end><else_stmt><block_start>max_workers=[(i%4)+1<for>i range(10)]<block_end># Use the same executor with the same number of workers concurrently # in different threads: output_collector=[]<line_sep>threads=[threading.Thread(target=helper_func args=(output_collector w) name='test_thread_%02d_max_workers_%d'%(i w))<for>i,w enumerate(max_workers)]<with_stmt>warnings.catch_warnings(record=<true>)<block_start><for_stmt>t threads<block_start>t.start()<block_end><for_stmt>t threads<block_start>t.join()<block_end><block_end><assert_stmt>output_collector<eq>['ok']<times>len(threads)<block_end><def_stmt>test_reusable_executor_reuse_true self<block_start>executor=get_reusable_executor(max_workers=3 timeout=42)<line_sep>executor.submit(id 
42).result()<assert_stmt>len(executor._processes)<eq>3<assert_stmt>executor._timeout<eq>42<line_sep>executor2=get_reusable_executor(reuse=<true>)<line_sep>executor2.submit(id 42).result()<assert_stmt>len(executor2._processes)<eq>3<assert_stmt>executor2._timeout<eq>42<assert_stmt>executor2<is>executor<line_sep>executor3=get_reusable_executor()<line_sep>executor3.submit(id 42).result()<assert_stmt>len(executor3._processes)<eq>cpu_count()<assert_stmt>executor3._timeout<eq>10<assert_stmt>executor3<is><not>executor<line_sep>executor4=get_reusable_executor()<assert_stmt>executor4<is>executor3<block_end><block_end><class_stmt>TestExecutorInitializer(ReusableExecutorMixin)<block_start><def_stmt>_initializer self x<block_start>loky._initialized_state=x<block_end><def_stmt>_test_initializer self delay=0<block_start>sleep(delay)<line_sep><return>getattr(loky "_initialized_state" "uninitialized")<block_end><def_stmt>test_reusable_initializer self<block_start>executor=get_reusable_executor(max_workers=2 initializer=self._initializer initargs=('done' ))<assert_stmt>executor.submit(self._test_initializer).result()<eq>'done'<line_sep># when the initializer changes, the executor is re-spawned executor=get_reusable_executor(max_workers=2 initializer=self._initializer initargs=(42 ))<assert_stmt>executor.submit(self._test_initializer).result()<eq>42<line_sep># With reuse=True, the executor uses the same initializer executor=get_reusable_executor(max_workers=4 reuse=<true>)<for_stmt>x executor.map(self._test_initializer delay=.1)<block_start><assert_stmt>x<eq>42<block_end># With reuse='auto', the initializer is not used anymore executor=get_reusable_executor(max_workers=4)<for_stmt>x executor.map(self._test_initializer delay=.1)<block_start><assert_stmt>x<eq>'uninitialized'<block_end><block_end><block_end>
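For orientation, a minimal usage sketch of the factory these tests exercise; the worker count, timeout and the squaring task are illustrative values, not taken from the test suite:

from loky import get_reusable_executor

def square(x):
    return x * x

if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=4, timeout=10)
    assert list(executor.map(square, range(8))) == [x * x for x in range(8)]

    # Asking for an executor again with compatible parameters returns the same,
    # already-warm instance instead of spawning a fresh pool.
    assert get_reusable_executor(max_workers=4, timeout=10) is executor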
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved. # This work is licensed under the NVIDIA Source Code License - Non-commercial. Full # text can be found in LICENSE.md <import_from_stmt>setuptools setup dist<import_stmt>wheel<import_stmt>os<line_sep># required to generate a platlib folder required by audittools <import_from_stmt>setuptools.command.install install<line_sep># for generating a wheel version from git tag <import_from_stmt>setuptools_scm get_version<class_stmt>InstallPlatlib(install)<block_start><def_stmt>finalize_options self<block_start>install.finalize_options(self)<if_stmt>self.distribution.has_ext_modules()<block_start>self.install_lib=self.install_platlib<block_end><block_end><block_end># force setuptools to recognize that this is # actually a binary distribution <class_stmt>BinaryDistribution(dist.Distribution)<block_start><def_stmt>is_pure self<block_start><return><false><block_end><def_stmt>has_ext_modules foo<block_start><return><true><block_end><block_end># This gets the version from the most recent git tag, potentially concatenating # a commit hash at the end. current_version=get_version(root=".." relative_to=__file__ fallback_version='0.0.0-dev0')<line_sep>optix_version=os.environ.get("OPTIX_VERSION" <none>)<if_stmt>optix_version<block_start>current_version=current_version+"."+optix_version<block_end>print(current_version)<line_sep>setup(# This package is called nvisii name='nvisii' install_requires=['numpy>=1.19.5'] packages=['nvisii' "nvisii.importers"] # include the package "nvisii" # make sure the shared library is included package_data={'':("*.dll" "*.pyd" "*.so")} include_package_data=<true> description='' # See class BinaryDistribution that was defined earlier distclass=BinaryDistribution version=current_version author='<NAME>' author_email='' maintainer='' maintainer_email='' python_requires=">=3.6" cmdclass={'install':InstallPlatlib} )<line_sep>
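To spell out the version handling above with hypothetical values (a tag-derived version of 1.1.72 and OPTIX_VERSION=7.3 are assumptions for illustration only):

current_version = "1.1.72"   # e.g. what setuptools_scm.get_version(...) returned
optix_version = "7.3"        # e.g. the OPTIX_VERSION environment variable, if set

if optix_version:
    current_version = current_version + "." + optix_version

assert current_version == "1.1.72.7.3"   # the wheel is versioned with both parts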
# Run this script to create stopwords.py based on stopwords.txt <import_stmt>json<def_stmt>generate input_txt output_py# Read line by line <block_start>txt_file=open(input_txt)<line_sep>words=set([])<for_stmt>raw_line txt_file<block_start>line=raw_line.strip()<line_sep># Skip empty line <if_stmt>len(line)<l>1<block_start><continue><block_end># Skip comments <if_stmt>line[0]<eq>'#'<block_start><continue><block_end># Collect the stopwords words.add(line)<block_end># Dump the array to a file output=open(output_py 'w')<line_sep>output.write('# DO NOT EDIT THIS FILE!\n')<line_sep>output.write('# Edit stopwords.txt, generate this file again via ')<line_sep>output.write('generate_stopwords.py\n')<line_sep>output.write('stopwords = set(%s)'%(json.dumps(sorted(words) indent=4)))<line_sep>output.close()<line_sep>txt_file.close()<block_end><if_stmt>__name__<eq>'__main__'<block_start>generate('stopwords.txt' 'stopwords.py')<block_end>
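For reference, the script above writes a stopwords.py of roughly this shape; the three words shown are placeholders, not the real list:

# DO NOT EDIT THIS FILE!
# Edit stopwords.txt, generate this file again via generate_stopwords.py
stopwords = set([
    "a",
    "an",
    "the"
])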
""" (c) 2013 LinkedIn Corp. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License");?you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software?distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. """<import_stmt>logging<import_from_stmt>..base.calendar BaseExchangeCalendarEvent BaseExchangeCalendarService ExchangeEventOrganizer ExchangeEventResponse<import_from_stmt>..base.folder BaseExchangeFolder BaseExchangeFolderService<import_from_stmt>..base.soap ExchangeServiceSOAP<import_from_stmt>..exceptions FailedExchangeException ExchangeStaleChangeKeyException ExchangeItemNotFoundException ExchangeInternalServerTransientErrorException ExchangeIrresolvableConflictException InvalidEventType<import_from_stmt>..compat BASESTRING_TYPES<import_from_stmt>. soap_request<import_from_stmt>lxml etree<import_from_stmt>copy deepcopy<import_from_stmt>datetime date<import_stmt>warnings<line_sep>log=logging.getLogger("pyexchange")<class_stmt>Exchange2010Service(ExchangeServiceSOAP)<block_start><def_stmt>calendar self id="calendar"<block_start><return>Exchange2010CalendarService(service=self calendar_id=id)<block_end><def_stmt>mail self<block_start><raise>NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")<block_end><def_stmt>contacts self<block_start><raise>NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")<block_end><def_stmt>folder self<block_start><return>Exchange2010FolderService(service=self)<block_end><def_stmt>_send_soap_request self body headers=<none> retries=2 timeout=30 encoding="utf-8"<block_start>headers={"Accept":"text/xml" "Content-type":"text/xml; charset=%s "%encoding}<line_sep><return>super(Exchange2010Service self)._send_soap_request(body headers=headers retries=retries timeout=timeout encoding=encoding)<block_end><def_stmt>_check_for_errors self xml_tree<block_start>super(Exchange2010Service self)._check_for_errors(xml_tree)<line_sep>self._check_for_exchange_fault(xml_tree)<block_end><def_stmt>_check_for_exchange_fault self xml_tree# If the request succeeded, we should see a <m:ResponseCode>NoError</m:ResponseCode> # somewhere in the response. if we don't (a) see the tag or (b) it doesn't say "NoError" # then flip out <block_start>response_codes=xml_tree.xpath(u'//m:ResponseCode' namespaces=soap_request.NAMESPACES)<if_stmt><not>response_codes<block_start><raise>FailedExchangeException(u"Exchange server did not return a status response" <none>)<block_end># The full (massive) list of possible return responses is here. # http://msdn.microsoft.com/en-us/library/aa580757(v=exchg.140).aspx <for_stmt>code response_codes<block_start><if_stmt>code.text<eq>u"ErrorChangeKeyRequiredForWriteOperations"# change key is missing or stale. 
we can fix that, so throw a special error <block_start><raise>ExchangeStaleChangeKeyException(u"Exchange Fault (%s) from Exchange server"%code.text)<block_end><elif_stmt>code.text<eq>u"ErrorItemNotFound"# exchange_invite_key wasn't found on the server <block_start><raise>ExchangeItemNotFoundException(u"Exchange Fault (%s) from Exchange server"%code.text)<block_end><elif_stmt>code.text<eq>u"ErrorIrresolvableConflict"# tried to update an item with an old change key <block_start><raise>ExchangeIrresolvableConflictException(u"Exchange Fault (%s) from Exchange server"%code.text)<block_end><elif_stmt>code.text<eq>u"ErrorInternalServerTransientError"# temporary internal server error. throw a special error so we can retry <block_start><raise>ExchangeInternalServerTransientErrorException(u"Exchange Fault (%s) from Exchange server"%code.text)<block_end><elif_stmt>code.text<eq>u"ErrorCalendarOccurrenceIndexIsOutOfRecurrenceRange"# just means some or all of the requested instances are out of range <block_start><pass><block_end><elif_stmt>code.text<ne>u"NoError"<block_start><raise>FailedExchangeException(u"Exchange Fault (%s) from Exchange server"%code.text)<block_end><block_end><block_end><block_end><class_stmt>Exchange2010CalendarService(BaseExchangeCalendarService)<block_start><def_stmt>event self id=<none> **kwargs<block_start><return>Exchange2010CalendarEvent(service=self.service id=id **kwargs)<block_end><def_stmt>get_event self id<block_start><return>Exchange2010CalendarEvent(service=self.service id=id)<block_end><def_stmt>new_event self **properties<block_start><return>Exchange2010CalendarEvent(service=self.service calendar_id=self.calendar_id **properties)<block_end><def_stmt>list_events self start=<none> end=<none> details=<false> delegate_for=<none><block_start><return>Exchange2010CalendarEventList(service=self.service calendar_id=self.calendar_id start=start end=end details=details delegate_for=delegate_for)<block_end><block_end><class_stmt>Exchange2010CalendarEventList(object)<block_start>""" Creates & Stores a list of Exchange2010CalendarEvent items in the "self.events" variable. """<def_stmt>__init__ self service=<none> calendar_id=u'calendar' start=<none> end=<none> details=<false> delegate_for=<none><block_start>self.service=service<line_sep>self.count=0<line_sep>self.start=start<line_sep>self.end=end<line_sep>self.events=list()<line_sep>self.event_ids=list()<line_sep>self.details=details<line_sep>self.delegate_for=delegate_for<line_sep># This request uses a Calendar-specific query between two dates. body=soap_request.get_calendar_items(format=u'AllProperties' calendar_id=calendar_id start=self.start end=self.end delegate_for=self.delegate_for)<line_sep>response_xml=self.service.send(body)<line_sep>self._parse_response_for_all_events(response_xml)<line_sep># Populate the event ID list, for convenience reasons. <for_stmt>event self.events<block_start>self.event_ids.append(event._id)<block_end># If we have requested all the details, basically repeat the previous 3 steps, # but instead of start/stop, we have a list of ID fields. 
<if_stmt>self.details<block_start>log.debug(u'Received request for all details, retrieving now!')<line_sep>self.load_all_details()<block_end><return><block_end><def_stmt>_parse_response_for_all_events self response<block_start>""" This function will retrieve *most* of the event data, excluding Organizer & Attendee details """<line_sep>items=response.xpath(u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem' namespaces=soap_request.NAMESPACES)<if_stmt><not>items<block_start>items=response.xpath(u'//m:GetItemResponseMessage/m:Items/t:CalendarItem' namespaces=soap_request.NAMESPACES)<block_end><if_stmt>items<block_start>self.count=len(items)<line_sep>log.debug(u'Found %s items'%self.count)<for_stmt>item items<block_start>self._add_event(xml=soap_request.M.Items(deepcopy(item)))<block_end><block_end><else_stmt><block_start>log.debug(u'No calendar items found with search parameters.')<block_end><return>self<block_end><def_stmt>_add_event self xml=<none><block_start>log.debug(u'Adding new event to all events list.')<line_sep>event=Exchange2010CalendarEvent(service=self.service xml=xml)<line_sep>log.debug(u'Subject of new event is %s'%event.subject)<line_sep>self.events.append(event)<line_sep><return>self<block_end><def_stmt>load_all_details self<block_start>""" This function will execute all the event lookups for known events. This is intended for use when you want to have a completely populated event entry, including Organizer & Attendee details. """<line_sep>log.debug(u"Loading all details")<if_stmt>self.count<g>0# Now, empty out the events to prevent duplicates! <block_start><del_stmt>(self.events[:])<line_sep># Send the SOAP request with the list of exchange ID values. log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))<line_sep>body=soap_request.get_item(exchange_id=self.event_ids format=u'AllProperties')<line_sep>response_xml=self.service.send(body)<line_sep># Re-parse the results for all the details! 
self._parse_response_for_all_events(response_xml)<block_end><return>self<block_end><block_end><class_stmt>Exchange2010CalendarEvent(BaseExchangeCalendarEvent)<block_start><def_stmt>_init_from_service self id<block_start>log.debug(u'Creating new Exchange2010CalendarEvent object from ID')<line_sep>body=soap_request.get_item(exchange_id=id format=u'AllProperties')<line_sep>response_xml=self.service.send(body)<line_sep>properties=self._parse_response_for_get_event(response_xml)<line_sep>self._update_properties(properties)<line_sep>self._id=id<line_sep>log.debug(u'Created new event object with ID: %s'%self._id)<line_sep>self._reset_dirty_attributes()<line_sep><return>self<block_end><def_stmt>_init_from_xml self xml=<none><block_start>log.debug(u'Creating new Exchange2010CalendarEvent object from XML')<line_sep>properties=self._parse_response_for_get_event(xml)<line_sep>self._update_properties(properties)<line_sep>self._id,self._change_key=self._parse_id_and_change_key_from_response(xml)<line_sep>log.debug(u'Created new event object with ID: %s'%self._id)<line_sep>self._reset_dirty_attributes()<line_sep><return>self<block_end><def_stmt>as_json self<block_start><raise>NotImplementedError<block_end><def_stmt>validate self<block_start><if_stmt>self.recurrence<is><not><none><block_start><if_stmt><not>(isinstance(self.recurrence_end_date date))<block_start><raise>ValueError('recurrence_end_date must be of type date')<block_end><elif_stmt>(self.recurrence_end_date<l>self.start.date())<block_start><raise>ValueError('recurrence_end_date must be after start')<block_end><if_stmt>self.recurrence<eq>u'daily'<block_start><if_stmt><not>(isinstance(self.recurrence_interval int)<and>1<le>self.recurrence_interval<le>999)<block_start><raise>ValueError('recurrence_interval must be an int in the range from 1 to 999')<block_end><block_end><elif_stmt>self.recurrence<eq>u'weekly'<block_start><if_stmt><not>(isinstance(self.recurrence_interval int)<and>1<le>self.recurrence_interval<le>99)<block_start><raise>ValueError('recurrence_interval must be an int in the range from 1 to 99')<block_end><if_stmt>self.recurrence_days<is><none><block_start><raise>ValueError('recurrence_days is required')<block_end><for_stmt>day self.recurrence_days.split(' ')<block_start><if_stmt>day<not><in>self.WEEKLY_DAYS<block_start><raise>ValueError('recurrence_days received unknown value: %s'%day)<block_end><block_end><block_end><elif_stmt>self.recurrence<eq>u'monthly'<block_start><if_stmt><not>(isinstance(self.recurrence_interval int)<and>1<le>self.recurrence_interval<le>99)<block_start><raise>ValueError('recurrence_interval must be an int in the range from 1 to 99')<block_end><block_end><elif_stmt>self.recurrence<eq>u'yearly'<block_start><pass><block_end># everything is pulled from start <else_stmt><block_start><raise>ValueError('recurrence received unknown value: %s'%self.recurrence)<block_end><block_end>super(Exchange2010CalendarEvent self).validate()<block_end><def_stmt>create self<block_start>""" Creates an event in Exchange. :: event = service.calendar().new_event( subject=u"80s Movie Night", location = u"My house", ) event.create() Invitations to attendees are sent out immediately. """<line_sep>self.validate()<line_sep>body=soap_request.new_event(self)<line_sep>response_xml=self.service.send(body)<line_sep>self._id,self._change_key=self._parse_id_and_change_key_from_response(response_xml)<line_sep><return>self<block_end><def_stmt>resend_invitations self<block_start>""" Resends invites for an event. 
:: event = service.calendar().get_event(id='KEY HERE') event.resend_invitations() Anybody who has not declined this meeting will get a new invite. """<if_stmt><not>self.id<block_start><raise>TypeError(u"You can't send invites for an event that hasn't been created yet.")<block_end># Under the hood, this is just an .update() but with no attributes changed. # We're going to enforce that by checking if there are any changed attributes and bail if there are <if_stmt>self._dirty_attributes<block_start><raise>ValueError(u"There are unsaved changes to this invite - please update it first: %r"%self._dirty_attributes)<block_end>self.refresh_change_key()<line_sep>body=soap_request.update_item(self [] calendar_item_update_operation_type=u'SendOnlyToAll')<line_sep>self.service.send(body)<line_sep><return>self<block_end><def_stmt>update self calendar_item_update_operation_type=u'SendToAllAndSaveCopy' **kwargs<block_start>""" Updates an event in Exchange. :: event = service.calendar().get_event(id='KEY HERE') event.location = u'New location' event.update() If no changes to the event have been made, this method does nothing. Notification of the change event is sent to all users. If you wish to just notify people who were added, specify ``send_only_to_changed_attendees=True``. """<if_stmt><not>self.id<block_start><raise>TypeError(u"You can't update an event that hasn't been created yet.")<block_end><if_stmt>'send_only_to_changed_attendees'<in>kwargs<block_start>warnings.warn("The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead." DeprecationWarning )<line_sep># 20140502 <if_stmt>kwargs['send_only_to_changed_attendees']<block_start>calendar_item_update_operation_type=u'SendToChangedAndSaveCopy'<block_end><block_end>VALID_UPDATE_OPERATION_TYPES=(u'SendToNone' u'SendOnlyToAll' u'SendOnlyToChanged' u'SendToAllAndSaveCopy' u'SendToChangedAndSaveCopy' )<if_stmt>calendar_item_update_operation_type<not><in>VALID_UPDATE_OPERATION_TYPES<block_start><raise>ValueError('calendar_item_update_operation_type has unknown value')<block_end>self.validate()<if_stmt>self._dirty_attributes<block_start>log.debug(u"Updating these attributes: %r"%self._dirty_attributes)<line_sep>self.refresh_change_key()<line_sep>body=soap_request.update_item(self self._dirty_attributes calendar_item_update_operation_type=calendar_item_update_operation_type)<line_sep>self.service.send(body)<line_sep>self._reset_dirty_attributes()<block_end><else_stmt><block_start>log.info(u"Update was called, but there's nothing to update. Doing nothing.")<block_end><return>self<block_end><def_stmt>cancel self<block_start>""" Cancels an event in Exchange. :: event = service.calendar().get_event(id='KEY HERE') event.cancel() This will send notifications to anyone who has not declined the meeting. """<if_stmt><not>self.id<block_start><raise>TypeError(u"You can't delete an event that hasn't been created yet.")<block_end>self.refresh_change_key()<line_sep>self.service.send(soap_request.delete_event(self))<line_sep># TODO rsanders high - check return status to make sure it was actually sent <return><none><block_end><def_stmt>move_to self folder_id<block_start>""" :param str folder_id: The Calendar ID to where you want to move the event to. Moves an event to a different folder (calendar). 
:: event = service.calendar().get_event(id='KEY HERE') event.move_to(folder_id='NEW CALENDAR KEY HERE') """<if_stmt><not>folder_id<block_start><raise>TypeError(u"You can't move an event to a non-existant folder")<block_end><if_stmt><not>isinstance(folder_id BASESTRING_TYPES)<block_start><raise>TypeError(u"folder_id must be a string")<block_end><if_stmt><not>self.id<block_start><raise>TypeError(u"You can't move an event that hasn't been created yet.")<block_end>self.refresh_change_key()<line_sep>response_xml=self.service.send(soap_request.move_event(self folder_id))<line_sep>new_id,new_change_key=self._parse_id_and_change_key_from_response(response_xml)<if_stmt><not>new_id<block_start><raise>ValueError(u"MoveItem returned success but requested item not moved")<block_end>self._id=new_id<line_sep>self._change_key=new_change_key<line_sep>self.calendar_id=folder_id<line_sep><return>self<block_end><def_stmt>get_master self<block_start>""" get_master() :raises InvalidEventType: When this method is called on an event that is not a Occurrence type. This will return the master event to the occurrence. **Examples**:: event = service.calendar().get_event(id='<event_id>') print event.type # If it prints out 'Occurrence' then that means we could get the master. master = event.get_master() print master.type # Will print out 'RecurringMaster'. """<if_stmt>self.type<ne>'Occurrence'<block_start><raise>InvalidEventType("get_master method can only be called on a 'Occurrence' event type")<block_end>body=soap_request.get_master(exchange_id=self._id format=u"AllProperties")<line_sep>response_xml=self.service.send(body)<line_sep><return>Exchange2010CalendarEvent(service=self.service xml=response_xml)<block_end><def_stmt>get_occurrence self instance_index<block_start>""" get_occurrence(instance_index) :param iterable instance_index: This should be tuple or list of integers which correspond to occurrences. :raises TypeError: When instance_index is not an iterable of ints. :raises InvalidEventType: When this method is called on an event that is not a RecurringMaster type. This will return a list of occurrence events. **Examples**:: master = service.calendar().get_event(id='<event_id>') # The following will return the first 20 occurrences in the recurrence. # If there are not 20 occurrences, it will only return what it finds. occurrences = master.get_occurrence(range(1,21)) for occurrence in occurrences: print occurrence.start """<if_stmt><not>all([isinstance(i int)<for>i instance_index])<block_start><raise>TypeError("instance_index must be an interable of type int")<block_end><if_stmt>self.type<ne>'RecurringMaster'<block_start><raise>InvalidEventType("get_occurrance method can only be called on a 'RecurringMaster' event type")<block_end>body=soap_request.get_occurrence(exchange_id=self._id instance_index=instance_index format=u"AllProperties")<line_sep>response_xml=self.service.send(body)<line_sep>items=response_xml.xpath(u'//m:GetItemResponseMessage/m:Items' namespaces=soap_request.NAMESPACES)<line_sep>events=[]<for_stmt>item items<block_start>event=Exchange2010CalendarEvent(service=self.service xml=deepcopy(item))<if_stmt>event.id<block_start>events.append(event)<block_end><block_end><return>events<block_end><def_stmt>conflicting_events self<block_start>""" conflicting_events() This will return a list of conflicting events. 
**Example**:: event = service.calendar().get_event(id='<event_id>') for conflict in event.conflicting_events(): print conflict.subject """<if_stmt><not>self.conflicting_event_ids<block_start><return>[]<block_end>body=soap_request.get_item(exchange_id=self.conflicting_event_ids format="AllProperties")<line_sep>response_xml=self.service.send(body)<line_sep>items=response_xml.xpath(u'//m:GetItemResponseMessage/m:Items' namespaces=soap_request.NAMESPACES)<line_sep>events=[]<for_stmt>item items<block_start>event=Exchange2010CalendarEvent(service=self.service xml=deepcopy(item))<if_stmt>event.id<block_start>events.append(event)<block_end><block_end><return>events<block_end><def_stmt>refresh_change_key self<block_start>body=soap_request.get_item(exchange_id=self._id format=u"IdOnly")<line_sep>response_xml=self.service.send(body)<line_sep>self._id,self._change_key=self._parse_id_and_change_key_from_response(response_xml)<line_sep><return>self<block_end><def_stmt>_parse_id_and_change_key_from_response self response<block_start>id_elements=response.xpath(u'//m:Items/t:CalendarItem/t:ItemId' namespaces=soap_request.NAMESPACES)<if_stmt>id_elements<block_start>id_element=id_elements[0]<line_sep><return>id_element.get(u"Id" <none>) id_element.get(u"ChangeKey" <none>)<block_end><else_stmt><block_start><return><none> <none><block_end><block_end><def_stmt>_parse_response_for_get_event self response<block_start>result=self._parse_event_properties(response)<line_sep>organizer_properties=self._parse_event_organizer(response)<if_stmt>organizer_properties<is><not><none><block_start><if_stmt>'email'<not><in>organizer_properties<block_start>organizer_properties['email']=<none><block_end>result[u'organizer']=ExchangeEventOrganizer(**organizer_properties)<block_end>attendee_properties=self._parse_event_attendees(response)<line_sep>result[u'_attendees']=self._build_resource_dictionary([ExchangeEventResponse(**attendee)<for>attendee attendee_properties])<line_sep>resource_properties=self._parse_event_resources(response)<line_sep>result[u'_resources']=self._build_resource_dictionary([ExchangeEventResponse(**resource)<for>resource resource_properties])<line_sep>result['_conflicting_event_ids']=self._parse_event_conflicts(response)<line_sep><return>result<block_end><def_stmt>_parse_event_properties self response<block_start>property_map={u'subject':{u'xpath':u'//m:Items/t:CalendarItem/t:Subject' } u'location':{u'xpath':u'//m:Items/t:CalendarItem/t:Location' } u'availability':{u'xpath':u'//m:Items/t:CalendarItem/t:LegacyFreeBusyStatus' } u'start':{u'xpath':u'//m:Items/t:CalendarItem/t:Start' u'cast':u'datetime' } u'end':{u'xpath':u'//m:Items/t:CalendarItem/t:End' u'cast':u'datetime' } u'html_body':{u'xpath':u'//m:Items/t:CalendarItem/t:Body[@BodyType="HTML"]' } u'text_body':{u'xpath':u'//m:Items/t:CalendarItem/t:Body[@BodyType="Text"]' } u'_type':{u'xpath':u'//m:Items/t:CalendarItem/t:CalendarItemType' } u'reminder_minutes_before_start':{u'xpath':u'//m:Items/t:CalendarItem/t:ReminderMinutesBeforeStart' u'cast':u'int' } u'is_all_day':{u'xpath':u'//m:Items/t:CalendarItem/t:IsAllDayEvent' u'cast':u'bool' } u'recurrence_end_date':{u'xpath':u'//m:Items/t:CalendarItem/t:Recurrence/t:EndDateRecurrence/t:EndDate' u'cast':u'date_only_naive' } u'recurrence_interval':{u'xpath':u'//m:Items/t:CalendarItem/t:Recurrence/*/t:Interval' u'cast':u'int' } u'recurrence_days':{u'xpath':u'//m:Items/t:CalendarItem/t:Recurrence/t:WeeklyRecurrence/t:DaysOfWeek' } }<line_sep>result=self.service._xpath_to_dict(element=response 
property_map=property_map namespace_map=soap_request.NAMESPACES)<try_stmt><block_start>recurrence_node=response.xpath(u'//m:Items/t:CalendarItem/t:Recurrence' namespaces=soap_request.NAMESPACES)[0]<block_end><except_stmt>IndexError<block_start>recurrence_node=<none><block_end><if_stmt>recurrence_node<is><not><none><block_start><if_stmt>recurrence_node.find('t:DailyRecurrence' namespaces=soap_request.NAMESPACES)<is><not><none><block_start>result['recurrence']='daily'<block_end><elif_stmt>recurrence_node.find('t:WeeklyRecurrence' namespaces=soap_request.NAMESPACES)<is><not><none><block_start>result['recurrence']='weekly'<block_end><elif_stmt>recurrence_node.find('t:AbsoluteMonthlyRecurrence' namespaces=soap_request.NAMESPACES)<is><not><none><block_start>result['recurrence']='monthly'<block_end><elif_stmt>recurrence_node.find('t:AbsoluteYearlyRecurrence' namespaces=soap_request.NAMESPACES)<is><not><none><block_start>result['recurrence']='yearly'<block_end><block_end><return>result<block_end><def_stmt>_parse_event_organizer self response<block_start>organizer=response.xpath(u'//m:Items/t:CalendarItem/t:Organizer/t:Mailbox' namespaces=soap_request.NAMESPACES)<line_sep>property_map={u'name':{u'xpath':u't:Name'} u'email':{u'xpath':u't:EmailAddress'} }<if_stmt>organizer<block_start><return>self.service._xpath_to_dict(element=organizer[0] property_map=property_map namespace_map=soap_request.NAMESPACES)<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>_parse_event_resources self response<block_start>property_map={u'name':{u'xpath':u't:Mailbox/t:Name'} u'email':{u'xpath':u't:Mailbox/t:EmailAddress'} u'response':{u'xpath':u't:ResponseType'} u'last_response':{u'xpath':u't:LastResponseTime' u'cast':u'datetime'} }<line_sep>result=[]<line_sep>resources=response.xpath(u'//m:Items/t:CalendarItem/t:Resources/t:Attendee' namespaces=soap_request.NAMESPACES)<for_stmt>attendee resources<block_start>attendee_properties=self.service._xpath_to_dict(element=attendee property_map=property_map namespace_map=soap_request.NAMESPACES)<line_sep>attendee_properties[u'required']=<true><if_stmt>u'last_response'<not><in>attendee_properties<block_start>attendee_properties[u'last_response']=<none><block_end><if_stmt>u'email'<in>attendee_properties<block_start>result.append(attendee_properties)<block_end><block_end><return>result<block_end><def_stmt>_parse_event_attendees self response<block_start>property_map={u'name':{u'xpath':u't:Mailbox/t:Name'} u'email':{u'xpath':u't:Mailbox/t:EmailAddress'} u'response':{u'xpath':u't:ResponseType'} u'last_response':{u'xpath':u't:LastResponseTime' u'cast':u'datetime'} }<line_sep>result=[]<line_sep>required_attendees=response.xpath(u'//m:Items/t:CalendarItem/t:RequiredAttendees/t:Attendee' namespaces=soap_request.NAMESPACES)<for_stmt>attendee required_attendees<block_start>attendee_properties=self.service._xpath_to_dict(element=attendee property_map=property_map namespace_map=soap_request.NAMESPACES)<line_sep>attendee_properties[u'required']=<true><if_stmt>u'last_response'<not><in>attendee_properties<block_start>attendee_properties[u'last_response']=<none><block_end><if_stmt>u'email'<in>attendee_properties<block_start>result.append(attendee_properties)<block_end><block_end>optional_attendees=response.xpath(u'//m:Items/t:CalendarItem/t:OptionalAttendees/t:Attendee' namespaces=soap_request.NAMESPACES)<for_stmt>attendee optional_attendees<block_start>attendee_properties=self.service._xpath_to_dict(element=attendee property_map=property_map 
namespace_map=soap_request.NAMESPACES)<line_sep>attendee_properties[u'required']=<false><if_stmt>u'last_response'<not><in>attendee_properties<block_start>attendee_properties[u'last_response']=<none><block_end><if_stmt>u'email'<in>attendee_properties<block_start>result.append(attendee_properties)<block_end><block_end><return>result<block_end><def_stmt>_parse_event_conflicts self response<block_start>conflicting_ids=response.xpath(u'//m:Items/t:CalendarItem/t:ConflictingMeetings/t:CalendarItem/t:ItemId' namespaces=soap_request.NAMESPACES)<line_sep><return>[id_element.get(u"Id")<for>id_element conflicting_ids]<block_end><block_end><class_stmt>Exchange2010FolderService(BaseExchangeFolderService)<block_start><def_stmt>folder self id=<none> **kwargs<block_start><return>Exchange2010Folder(service=self.service id=id **kwargs)<block_end><def_stmt>get_folder self id<block_start>""" :param str id: The Exchange ID of the folder to retrieve from the Exchange store. Retrieves the folder specified by the id, from the Exchange store. **Examples**:: folder = service.folder().get_folder(id) """<line_sep><return>Exchange2010Folder(service=self.service id=id)<block_end><def_stmt>new_folder self **properties<block_start>""" new_folder(display_name=display_name, folder_type=folder_type, parent_id=parent_id) :param str display_name: The display name given to the new folder. :param str folder_type: The type of folder to create. Possible values are 'Folder', 'CalendarFolder', 'ContactsFolder', 'SearchFolder', 'TasksFolder'. :param str parent_id: The parent folder where the new folder will be created. Creates a new folder with the given properties. Not saved until you call the create() method. **Examples**:: folder = service.folder().new_folder( display_name=u"New Folder Name", folder_type="CalendarFolder", parent_id='calendar', ) folder.create() """<line_sep><return>Exchange2010Folder(service=self.service **properties)<block_end><def_stmt>find_folder self parent_id<block_start>""" find_folder(parent_id) :param str parent_id: The parent folder to list. This method will return a list of sub-folders to a given parent folder. **Examples**:: # Iterate through folders within the default 'calendar' folder. folders = service.folder().find_folder(parent_id='calendar') for folder in folders: print(folder.display_name) # Delete all folders within the 'calendar' folder. 
folders = service.folder().find_folder(parent_id='calendar') for folder in folders: folder.delete() """<line_sep>body=soap_request.find_folder(parent_id=parent_id format=u'AllProperties')<line_sep>response_xml=self.service.send(body)<line_sep><return>self._parse_response_for_find_folder(response_xml)<block_end><def_stmt>_parse_response_for_find_folder self response<block_start>result=[]<line_sep>folders=response.xpath(u'//t:Folders/t:*' namespaces=soap_request.NAMESPACES)<for_stmt>folder folders<block_start>result.append(Exchange2010Folder(service=self.service xml=etree.fromstring(etree.tostring(folder))# Might be a better way to do this ))<block_end><return>result<block_end><block_end><class_stmt>Exchange2010Folder(BaseExchangeFolder)<block_start><def_stmt>_init_from_service self id<block_start>body=soap_request.get_folder(folder_id=id format=u'AllProperties')<line_sep>response_xml=self.service.send(body)<line_sep>properties=self._parse_response_for_get_folder(response_xml)<line_sep>self._update_properties(properties)<line_sep><return>self<block_end><def_stmt>_init_from_xml self xml<block_start>properties=self._parse_response_for_get_folder(xml)<line_sep>self._update_properties(properties)<line_sep><return>self<block_end><def_stmt>create self<block_start>""" Creates a folder in Exchange. :: calendar = service.folder().new_folder( display_name=u"New Folder Name", folder_type="CalendarFolder", parent_id='calendar', ) calendar.create() """<line_sep>self.validate()<line_sep>body=soap_request.new_folder(self)<line_sep>response_xml=self.service.send(body)<line_sep>self._id,self._change_key=self._parse_id_and_change_key_from_response(response_xml)<line_sep><return>self<block_end><def_stmt>delete self<block_start>""" Deletes a folder from the Exchange store. :: folder = service.folder().get_folder(id) print("Deleting folder: %s" % folder.display_name) folder.delete() """<if_stmt><not>self.id<block_start><raise>TypeError(u"You can't delete a folder that hasn't been created yet.")<block_end>body=soap_request.delete_folder(self)<line_sep>response_xml=self.service.send(body)# noqa # TODO: verify deletion self._id=<none><line_sep>self._change_key=<none><line_sep><return><none><block_end><def_stmt>move_to self folder_id<block_start>""" :param str folder_id: The Folder ID of what will be the new parent folder, of this folder. 
Move folder to a different location, specified by folder_id:: folder = service.folder().get_folder(id) folder.move_to(folder_id="ID of new location's folder") """<if_stmt><not>folder_id<block_start><raise>TypeError(u"You can't move to a non-existent folder")<block_end><if_stmt><not>isinstance(folder_id BASESTRING_TYPES)<block_start><raise>TypeError(u"folder_id must be a string")<block_end><if_stmt><not>self.id<block_start><raise>TypeError(u"You can't move a folder that hasn't been created yet.")<block_end>response_xml=self.service.send(soap_request.move_folder(self folder_id))# noqa result_id,result_key=self._parse_id_and_change_key_from_response(response_xml)<if_stmt>self.id<ne>result_id<block_start><raise>ValueError(u"MoveFolder returned success but requested folder not moved")<block_end>self.parent_id=folder_id<line_sep><return>self<block_end><def_stmt>_parse_response_for_get_folder self response<block_start>FOLDER_PATH=u'//t:Folder | //t:CalendarFolder | //t:ContactsFolder | //t:SearchFolder | //t:TasksFolder'<line_sep>path=response.xpath(FOLDER_PATH namespaces=soap_request.NAMESPACES)[0]<line_sep>result=self._parse_folder_properties(path)<line_sep><return>result<block_end><def_stmt>_parse_folder_properties self response<block_start>property_map={u'display_name':{u'xpath':u't:DisplayName'} }<line_sep>self._id,self._change_key=self._parse_id_and_change_key_from_response(response)<line_sep>self._parent_id=self._parse_parent_id_and_change_key_from_response(response)[0]<line_sep>self.folder_type=etree.QName(response).localname<line_sep><return>self.service._xpath_to_dict(element=response property_map=property_map namespace_map=soap_request.NAMESPACES)<block_end><def_stmt>_parse_id_and_change_key_from_response self response<block_start>id_elements=response.xpath(u'//t:FolderId' namespaces=soap_request.NAMESPACES)<if_stmt>id_elements<block_start>id_element=id_elements[0]<line_sep><return>id_element.get(u"Id" <none>) id_element.get(u"ChangeKey" <none>)<block_end><else_stmt><block_start><return><none> <none><block_end><block_end><def_stmt>_parse_parent_id_and_change_key_from_response self response<block_start>id_elements=response.xpath(u'//t:ParentFolderId' namespaces=soap_request.NAMESPACES)<if_stmt>id_elements<block_start>id_element=id_elements[0]<line_sep><return>id_element.get(u"Id" <none>) id_element.get(u"ChangeKey" <none>)<block_end><else_stmt><block_start><return><none> <none><block_end><block_end><block_end>
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>paddle<import_stmt>paddle.nn<as>nn<import_stmt>paddle.nn.functional<as>F<import_stmt>math<import_from_stmt>net Word2VecLayer Word2VecInferLayer<class_stmt>StaticModel(object)<block_start><def_stmt>__init__ self config<block_start>self.cost=<none><line_sep>self.metrics={}<line_sep>self.config=config<line_sep>self._init_hyper_parameters()<block_end><def_stmt>_init_hyper_parameters self<block_start>self.sparse_feature_number=self.config.get("hyper_parameters.sparse_feature_number")<line_sep>self.sparse_feature_dim=self.config.get("hyper_parameters.sparse_feature_dim")<line_sep>self.neg_num=self.config.get("hyper_parameters.neg_num")<line_sep>self.with_shuffle_batch=self.config.get("hyper_parameters.with_shuffle_batch")<line_sep>self.learning_rate=self.config.get("hyper_parameters.optimizer.learning_rate")<line_sep>self.decay_steps=self.config.get("hyper_parameters.optimizer.decay_steps")<line_sep>self.decay_rate=self.config.get("hyper_parameters.optimizer.decay_rate")<block_end><def_stmt>create_feeds self is_infer=<false><block_start><if_stmt>is_infer<block_start>analogy_a=paddle.static.data(name="analogy_a" shape=[<none> 1] dtype='int64')<line_sep>analogy_b=paddle.static.data(name="analogy_b" shape=[<none> 1] dtype='int64')<line_sep>analogy_c=paddle.static.data(name="analogy_c" shape=[<none> 1] dtype='int64')<line_sep>#analogy_d = paddle.static.data( # name="analogy_d", shape=[None], dtype='int64') <return>[analogy_a analogy_b analogy_c]<block_end>input_word=paddle.static.data(name="input_word" shape=[<none> 1] dtype='int64')<line_sep>true_word=paddle.static.data(name='true_label' shape=[<none> 1] dtype='int64')<if_stmt>self.with_shuffle_batch<block_start><return>[input_word true_word]<block_end>neg_word=paddle.static.data(name="neg_label" shape=[<none> self.neg_num] dtype='int64')<line_sep><return>[input_word true_word neg_word]<block_end><def_stmt>net self inputs is_infer=<false><block_start>word2vec_model=Word2VecLayer(self.sparse_feature_number self.sparse_feature_dim self.neg_num emb_name="emb" emb_w_name="emb_w" emb_b_name="emb_b")<line_sep>true_logits,neg_logits=word2vec_model.forward(inputs)<line_sep>label_ones=paddle.full(shape=[paddle.shape(true_logits)[0] 1] fill_value=1.0)<line_sep>label_zeros=paddle.full(shape=[paddle.shape(true_logits)[0] self.neg_num] fill_value=0.0)<line_sep>true_logits=paddle.nn.functional.sigmoid(true_logits)<line_sep>true_xent=paddle.nn.functional.binary_cross_entropy(true_logits label_ones)<line_sep>neg_logits=paddle.nn.functional.sigmoid(neg_logits)<line_sep>neg_xent=paddle.nn.functional.binary_cross_entropy(neg_logits label_zeros)<line_sep>cost=paddle.add(true_xent neg_xent)<line_sep>avg_cost=paddle.mean(x=cost)<line_sep>self._cost=avg_cost<line_sep>fetch_dict={'loss':avg_cost}<line_sep><return>fetch_dict<block_end><def_stmt>create_optimizer self 
strategy=<none><block_start>optimizer=paddle.optimizer.SGD(learning_rate=self.learning_rate)<line_sep># learning_rate=paddle.fluid.layers.exponential_decay( # learning_rate=self.learning_rate, # decay_steps=self.decay_steps, # decay_rate=self.decay_rate, # staircase=True)) <if_stmt>strategy<ne><none><block_start><import_stmt>paddle.distributed.fleet<as>fleet<line_sep>optimizer=fleet.distributed_optimizer(optimizer strategy)<block_end><return>optimizer<block_end><def_stmt>infer_net self input#[analogy_a, analogy_b, analogy_c] = inputs <block_start>all_label=paddle.static.data(name="all_label" shape=[self.sparse_feature_number] dtype='int64')<line_sep>word2vec=Word2VecInferLayer(self.sparse_feature_number self.sparse_feature_dim "emb")<line_sep>val,pred_idx=word2vec.forward(input[0] input[1] input[2] all_label)<line_sep>fetch_dict={'pred_idx':pred_idx}<line_sep><return>fetch_dict<block_end><block_end>
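# Hedged usage sketch (not part of the original file): one way this StaticModel
# could be wired up in PaddlePaddle static-graph mode. `config` is a hypothetical
# dict-like object exposing .get(key), as assumed by _init_hyper_parameters().
import paddle

paddle.enable_static()
# config = load_yaml_config("config.yaml")     # hypothetical config loader
model = StaticModel(config)
feeds = model.create_feeds()                   # [input_word, true_word, neg_word]
fetch_dict = model.net(feeds)                  # builds the loss, returns {'loss': avg_cost}
optimizer = model.create_optimizer()
optimizer.minimize(model._cost)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())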
<import_from_stmt>sequana.tools bam_to_mapped_unmapped_fastq reverse_complement StatsBAM2Mapped<import_from_stmt>sequana sequana_data<import_from_stmt>sequana.tools bam_get_paired_distance GZLineCounter PairedFastQ<import_from_stmt>sequana.tools genbank_features_parser<def_stmt>test_StatsBAM2Mapped <block_start>data=sequana_data("test.bam" "testing")<line_sep>res=StatsBAM2Mapped(data)<line_sep>res.to_html()<block_end><def_stmt>test_bam2fastq <block_start>data=sequana_data("test.bam" "testing")<line_sep>res=bam_to_mapped_unmapped_fastq(data)<block_end><def_stmt>test_reverse_complement <block_start><assert_stmt>reverse_complement("AACCGGTTA")<eq>'TAACCGGTT'<block_end><def_stmt>test_reverse <block_start><import_from_stmt>sequana.tools reverse<assert_stmt>reverse("AACCGG")<eq>'GGCCAA'<block_end><def_stmt>test_distance <block_start>data=sequana_data("test.bam" "testing")<line_sep>distances=bam_get_paired_distance(data)<block_end><def_stmt>test_gc_content <block_start><import_from_stmt>sequana.tools gc_content<line_sep>data=sequana_data('measles.fa' "testing")<line_sep>gc_content(data 10)['chr1']<line_sep>gc_content(data 101 circular=<true>)['chr1']<block_end><def_stmt>test_genbank_features_parser <block_start>data=sequana_data("JB409847.gbk")<line_sep>genbank_features_parser(data)<block_end><def_stmt>test_gzlinecounter <block_start><assert_stmt>len(GZLineCounter(sequana_data("test.fastq.gz")))<eq>1000<block_end><def_stmt>test_paired_file <block_start>f1=sequana_data("test.fastq.gz")<line_sep>f2=sequana_data("test.fastq.gz")<assert_stmt>PairedFastQ(f1 f2).is_synchronised()<block_end>
_base_=['./rotated-detection_static.py' '../_base_/backends/tensorrt.py']<line_sep>onnx_config=dict(output_names=['dets' 'labels'] input_shape=<none> dynamic_axes={'input':{0:'batch' 2:'height' 3:'width'} 'dets':{0:'batch' 1:'num_dets' } 'labels':{0:'batch' 1:'num_dets' } } )<line_sep>backend_config=dict(common_config=dict(max_workspace_size=1<lshift>30) model_inputs=[dict(input_shapes=dict(input=dict(min_shape=[1 3 320 320] opt_shape=[1 3 1024 1024] max_shape=[1 3 1024 1024])))])<line_sep>
<import_stmt>numpy<as>np<def_stmt>load_mnist # the data, shuffled and split between train and test sets <block_start><import_from_stmt>keras.datasets mnist<line_sep>(x_train y_train),(x_test y_test)=mnist.load_data()<line_sep>x=np.concatenate((x_train x_test))<line_sep>y=np.concatenate((y_train y_test))<line_sep>x=x.reshape(-1 28 28 1).astype('float32')<line_sep>x=x/255.<line_sep>print('MNIST:' x.shape)<line_sep><return>x y<block_end><def_stmt>load_usps data_path='./data/usps'<block_start><import_stmt>os<if_stmt><not>os.path.exists(data_path+'/usps_train.jf')<block_start><if_stmt><not>os.path.exists(data_path+'/usps_train.jf.gz')<block_start>os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s'%data_path)<line_sep>os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s'%data_path)<block_end>os.system('gunzip %s/usps_train.jf.gz'%data_path)<line_sep>os.system('gunzip %s/usps_test.jf.gz'%data_path)<block_end><with_stmt>open(data_path+'/usps_train.jf')<as>f<block_start>data=f.readlines()<block_end>data=data[1:-1]<line_sep>data=[list(map(float line.split()))<for>line data]<line_sep>data=np.array(data)<line_sep>data_train,labels_train=data[: 1:] data[: 0]<with_stmt>open(data_path+'/usps_test.jf')<as>f<block_start>data=f.readlines()<block_end>data=data[1:-1]<line_sep>data=[list(map(float line.split()))<for>line data]<line_sep>data=np.array(data)<line_sep>data_test,labels_test=data[: 1:] data[: 0]<line_sep>x=np.concatenate((data_train data_test)).astype('float32')<line_sep>x<augdiv>2.0<line_sep>x=x.reshape([-1 16 16 1])<line_sep>y=np.concatenate((labels_train labels_test))<line_sep>print('USPS samples' x.shape)<line_sep><return>x y<block_end>
######### # Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>json<import_stmt>logging<import_from_stmt>cloudify.utils setup_logger<import_from_stmt>integration_tests.framework.docker execute copy_file_to_manager <import_from_stmt>integration_tests.tests.constants MANAGER_PYTHON<import_from_stmt>integration_tests.tests.utils get_resource<line_sep>logger=setup_logger('Flask Utils' logging.INFO)<line_sep>security_config=<none><line_sep>PREPARE_SCRIPT_PATH='/tmp/prepare_reset_storage.py'<line_sep>SCRIPT_PATH='/tmp/reset_storage.py'<line_sep>CONFIG_PATH='/tmp/reset_storage_config.json'<def_stmt>prepare_reset_storage_script container_id<block_start>reset_script=get_resource('scripts/reset_storage.py')<line_sep>prepare=get_resource('scripts/prepare_reset_storage.py')<line_sep>copy_file_to_manager(container_id reset_script SCRIPT_PATH)<line_sep>copy_file_to_manager(container_id prepare PREPARE_SCRIPT_PATH)<line_sep>execute(container_id [MANAGER_PYTHON PREPARE_SCRIPT_PATH '--config' CONFIG_PATH])<block_end><def_stmt>reset_storage container_id<block_start>logger.info('Resetting PostgreSQL DB')<line_sep># reset the storage by calling a script on the manager, to access # localhost-only APIs (rabbitmq management api) execute(container_id [MANAGER_PYTHON SCRIPT_PATH '--config' CONFIG_PATH])<block_end><def_stmt>set_ldap config_data<block_start>logger.info('Setting LDAP configuration')<line_sep>_prepare_set_ldap_script()<line_sep>execute("{manager_python} {script_path} --config '{cfg_data}'".format(manager_python=MANAGER_PYTHON script_path='/tmp/set_ldap.py' cfg_data=json.dumps(config_data)))<block_end><def_stmt>_prepare_set_ldap_script <block_start>set_ldap_script=get_resource('scripts/set_ldap.py')<line_sep>copy_file_to_manager(set_ldap_script '/tmp/set_ldap.py')<block_end>
<import_stmt>pytest<import_from_stmt>astropy.io fits<import_stmt>numpy<as>np<import_from_stmt>lightkurve.io.kepseismic read_kepseismic_lightcurve<import_from_stmt>lightkurve.io.detect detect_filetype<line_sep>@pytest.mark.remote_data<def_stmt>test_detect_kepseismic <block_start>"""Can we detect the correct format for KEPSEISMIC files?"""<line_sep>url="https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"<line_sep>f=fits.open(url)<assert_stmt>detect_filetype(f)<eq>"KEPSEISMIC"<block_end>@pytest.mark.remote_data<def_stmt>test_read_kepseismic <block_start>"""Can we read KEPSEISMIC files?"""<line_sep>url="https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"<with_stmt>fits.open(url mode="readonly")<as>hdulist<block_start>fluxes=hdulist[1].data["FLUX"]<block_end>lc=read_kepseismic_lightcurve(url)<line_sep>flux_lc=lc.flux.value<line_sep># print(flux_lc, fluxes) <assert_stmt>np.sum(fluxes)<eq>np.sum(flux_lc)<block_end>
# -*- coding: utf-8 -*- <import_from_stmt>django forms<import_from_stmt>django.utils.translation gettext_lazy<as>_<import_from_stmt>django.utils.encoding smart_bytes<import_from_stmt>django.utils timezone<import_from_stmt>..core utils<import_from_stmt>..core.utils.forms NestedModelChoiceField<import_from_stmt>..category.models Category<import_from_stmt>.models Topic<class_stmt>TopicForm(forms.ModelForm)<block_start>topic_hash=forms.CharField(max_length=32 widget=forms.HiddenInput required=<false>)<class_stmt>Meta<block_start>model=Topic<line_sep>fields=('title' 'category')<block_end><def_stmt>__init__ self user *args **kwargs<block_start>super(TopicForm self).__init__(*args **kwargs)<line_sep>self.user=user<line_sep>self.fields['category']=NestedModelChoiceField(queryset=Category.objects.visible().opened().ordered() related_name='category_set' parent_field='parent_id' label_field='title' label=_("Category") empty_label=_("Choose a category"))<if_stmt>self.instance.pk<and><not>user.st.is_moderator<block_start><del_stmt>self.fields['category']<block_end><block_end><def_stmt>get_category self<block_start><return>self.cleaned_data['category']<block_end><def_stmt>get_topic_hash self<block_start>topic_hash=self.cleaned_data.get('topic_hash' <none>)<if_stmt>topic_hash<block_start><return>topic_hash<block_end><return>utils.get_hash((smart_bytes(self.cleaned_data['title']) smart_bytes('category-{}'.format(self.cleaned_data['category'].pk))))<block_end><def_stmt>save self commit=<true><block_start><if_stmt><not>self.instance.pk<block_start>self.instance.user=self.user<block_end>self.instance.reindex_at=timezone.now()<line_sep><return>super(TopicForm self).save(commit)<block_end><block_end>
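# Hedged example (not part of the original module): constructing TopicForm the way
# a view would. `user` and `category` are assumed to be existing User and Category
# instances, with the category visible/opened so NestedModelChoiceField accepts it.
form = TopicForm(user, data={'title': 'Hello world', 'category': str(category.pk)})
if form.is_valid():
    topic = form.save()    # fills in topic.user and reindex_at before saving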
<import_stmt>unittest<import_stmt>os<import_from_stmt>six StringIO<import_from_stmt>package_manager util<line_sep>CHECKSUM_TXT="1915adb697103d42655711e7b00a7dbe398a33d7719d6370c01001273010d069"<line_sep>DEBIAN_JESSIE_OS_RELEASE="""PRETTY_NAME="Distroless" NAME="Debian GNU/Linux" ID="debian" VERSION_ID="8" VERSION="Debian GNU/Linux 8 (jessie)" HOME_URL="https://github.com/GoogleContainerTools/distroless" SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md" BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new" """<line_sep>DEBIAN_STRETCH_OS_RELEASE="""PRETTY_NAME="Distroless" NAME="Debian GNU/Linux" ID="debian" VERSION_ID="9" VERSION="Debian GNU/Linux 9 (stretch)" HOME_URL="https://github.com/GoogleContainerTools/distroless" SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md" BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new" """<line_sep>DEBIAN_BUSTER_OS_RELEASE="""PRETTY_NAME="Distroless" NAME="Debian GNU/Linux" ID="debian" VERSION_ID="10" VERSION="Debian GNU/Linux 10 (buster)" HOME_URL="https://github.com/GoogleContainerTools/distroless" SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md" BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new" """<line_sep># VERSION and VERSION_ID aren't set on unknown distros DEBIAN_UNKNOWN_OS_RELEASE="""PRETTY_NAME="Distroless" NAME="Debian GNU/Linux" ID="debian" HOME_URL="https://github.com/GoogleContainerTools/distroless" SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md" BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new" """<line_sep>osReleaseForDistro={"jessie":DEBIAN_JESSIE_OS_RELEASE "stretch":DEBIAN_STRETCH_OS_RELEASE "buster":DEBIAN_BUSTER_OS_RELEASE "???":DEBIAN_UNKNOWN_OS_RELEASE }<class_stmt>TestUtil(unittest.TestCase)<block_start><def_stmt>test_sha256 self<block_start>current_dir=os.path.dirname(__file__)<line_sep>filename=os.path.join(current_dir 'testdata' 'checksum.txt')<line_sep>actual=util.sha256_checksum(filename)<line_sep>self.assertEqual(CHECKSUM_TXT actual)<block_end><def_stmt>test_generate_debian_os_release self<block_start><for_stmt>distro,expected_output osReleaseForDistro.items()<block_start>output_file=StringIO()<line_sep>util.generate_os_release(distro output_file)<line_sep>self.assertEqual(expected_output output_file.getvalue())<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
"""Support for SmartHab device integration."""<import_from_stmt>datetime timedelta<import_stmt>logging<import_stmt>pysmarthab<import_from_stmt>requests.exceptions Timeout<import_from_stmt>homeassistant.components.light LightEntity<import_from_stmt>. DATA_HUB DOMAIN<line_sep>_LOGGER=logging.getLogger(__name__)<line_sep>SCAN_INTERVAL=timedelta(seconds=60)<async_keyword><def_stmt>async_setup_entry hass config_entry async_add_entities<block_start>"""Set up SmartHab lights from a config entry."""<line_sep>hub=hass.data[DOMAIN][config_entry.entry_id][DATA_HUB]<line_sep>entities=(SmartHabLight(light)<for>light <await>hub.async_get_device_list()<if>isinstance(light pysmarthab.Light))<line_sep>async_add_entities(entities <true>)<block_end><class_stmt>SmartHabLight(LightEntity)<block_start>"""Representation of a SmartHab Light."""<def_stmt>__init__ self light<block_start>"""Initialize a SmartHabLight."""<line_sep>self._light=light<block_end>@property<def_stmt>unique_id self<arrow>str<block_start>"""Return a unique ID."""<line_sep><return>self._light.device_id<block_end>@property<def_stmt>name self<arrow>str<block_start>"""Return the display name of this light."""<line_sep><return>self._light.label<block_end>@property<def_stmt>is_on self<arrow>bool<block_start>"""Return true if light is on."""<line_sep><return>self._light.state<block_end><async_keyword><def_stmt>async_turn_on self **kwargs<block_start>"""Instruct the light to turn on."""<line_sep><await>self._light.async_turn_on()<block_end><async_keyword><def_stmt>async_turn_off self **kwargs<block_start>"""Instruct the light to turn off."""<line_sep><await>self._light.async_turn_off()<block_end><async_keyword><def_stmt>async_update self<block_start>"""Fetch new state data for this light."""<try_stmt><block_start><await>self._light.async_update()<block_end><except_stmt>Timeout<block_start>_LOGGER.error("Reached timeout while updating light %s from API" self.entity_id)<block_end><block_end><block_end>
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). <import_from_future_stmt> annotations<import_from_stmt>textwrap dedent<import_from_stmt>typing Any ContextManager<import_stmt>pytest<import_from_stmt>pants.backend.docker.goals package_image<import_from_stmt>pants.backend.docker.subsystems dockerfile_parser<import_from_stmt>pants.backend.docker.subsystems.dockerfile_parser DockerfileInfo<import_from_stmt>pants.backend.docker.target_types DockerImageTarget<import_from_stmt>pants.backend.docker.util_rules dependencies docker_binary docker_build_args docker_build_context docker_build_env dockerfile <import_from_stmt>pants.backend.docker.util_rules.docker_build_args DockerBuildArgs<import_from_stmt>pants.backend.docker.util_rules.docker_build_context DockerBuildContext DockerBuildContextRequest <import_from_stmt>pants.backend.docker.util_rules.docker_build_env DockerBuildEnvironment<import_from_stmt>pants.backend.docker.value_interpolation DockerBuildArgsInterpolationValue DockerInterpolationContext DockerInterpolationValue <import_from_stmt>pants.backend.python target_types_rules<import_from_stmt>pants.backend.python.goals package_pex_binary<import_from_stmt>pants.backend.python.goals.package_pex_binary PexBinaryFieldSet<import_from_stmt>pants.backend.python.target_types PexBinary<import_from_stmt>pants.backend.python.util_rules pex_from_targets<import_from_stmt>pants.backend.shell.target_types ShellSourcesGeneratorTarget ShellSourceTarget<import_from_stmt>pants.backend.shell.target_types rules<as>shell_target_types_rules<import_from_stmt>pants.core.goals.package BuiltPackage<import_from_stmt>pants.core.target_types FilesGeneratorTarget<import_from_stmt>pants.core.target_types rules<as>core_target_types_rules<import_from_stmt>pants.engine.addresses Address<import_from_stmt>pants.engine.fs EMPTY_DIGEST EMPTY_SNAPSHOT Snapshot<import_from_stmt>pants.engine.internals.scheduler ExecutionError<import_from_stmt>pants.testutil.pytest_util no_exception<import_from_stmt>pants.testutil.rule_runner QueryRule RuleRunner<def_stmt>create_rule_runner <arrow>RuleRunner<block_start>rule_runner=RuleRunner(rules=[*core_target_types_rules() *dependencies.rules() *docker_binary.rules() *docker_build_args.rules() *docker_build_context.rules() *docker_build_env.rules() *dockerfile.rules() *dockerfile_parser.rules() *package_image.rules() *package_pex_binary.rules() *pex_from_targets.rules() *shell_target_types_rules() *target_types_rules.rules() QueryRule(BuiltPackage [PexBinaryFieldSet]) QueryRule(DockerBuildContext (DockerBuildContextRequest )) ] target_types=[DockerImageTarget FilesGeneratorTarget PexBinary ShellSourcesGeneratorTarget ShellSourceTarget ] )<line_sep><return>rule_runner<block_end>@pytest.fixture<def_stmt>rule_runner <arrow>RuleRunner<block_start><return>create_rule_runner()<block_end><def_stmt>assert_build_context rule_runner:RuleRunner address:Address * build_upstream_images:bool=<false> expected_files:list[str] expected_interpolation_context:dict[str dict[str str]|DockerInterpolationValue]|<none>=<none> pants_args:list[str]|<none>=<none> runner_options:dict[str Any]|<none>=<none> <arrow>DockerBuildContext<block_start><if_stmt>runner_options<is><none><block_start>runner_options={}<block_end>runner_options.setdefault("env_inherit" set()).update({"PATH" "PYENV_ROOT" "HOME"})<line_sep>rule_runner.set_options(pants_args<or>[] **runner_options)<line_sep>context=rule_runner.request(DockerBuildContext 
[DockerBuildContextRequest(address=address build_upstream_images=build_upstream_images )] )<line_sep>snapshot=rule_runner.request(Snapshot [context.digest])<assert_stmt>sorted(expected_files)<eq>sorted(snapshot.files)<if_stmt>expected_interpolation_context<is><not><none><block_start><if_stmt>"build_args"<in>expected_interpolation_context<block_start>expected_interpolation_context["build_args"]=DockerBuildArgsInterpolationValue(expected_interpolation_context["build_args"])<block_end><assert_stmt>context.interpolation_context<eq>DockerInterpolationContext.from_dict(expected_interpolation_context)<block_end><return>context<block_end><def_stmt>test_file_dependencies rule_runner:RuleRunner<arrow><none><block_start>rule_runner.write_files({# img_A -> files_A # img_A -> img_B "src/a/BUILD":dedent("""\ docker_image(name="img_A", dependencies=[":files_A", "src/b:img_B"]) files(name="files_A", sources=["files/**"]) """) "src/a/Dockerfile":"FROM base" "src/a/files/a01":"" "src/a/files/a02":"" # img_B -> files_B "src/b/BUILD":dedent("""\ docker_image(name="img_B", dependencies=[":files_B"]) files(name="files_B", sources=["files/**"]) """) "src/b/Dockerfile":"FROM base" "src/b/files/b01":"" "src/b/files/b02":"" # Mixed "src/c/BUILD":dedent("""\ docker_image(name="img_C", dependencies=["src/a:files_A", "src/b:files_B"]) """) "src/c/Dockerfile":"FROM base" })<line_sep># We want files_B in build context for img_B assert_build_context(rule_runner Address("src/b" target_name="img_B") expected_files=["src/b/Dockerfile" "src/b/files/b01" "src/b/files/b02"] )<line_sep># We want files_A in build context for img_A, but not files_B assert_build_context(rule_runner Address("src/a" target_name="img_A") expected_files=["src/a/Dockerfile" "src/a/files/a01" "src/a/files/a02"] )<line_sep># Mixed. assert_build_context(rule_runner Address("src/c" target_name="img_C") expected_files=["src/c/Dockerfile" "src/a/files/a01" "src/a/files/a02" "src/b/files/b01" "src/b/files/b02" ] )<block_end><def_stmt>test_from_image_build_arg_dependency rule_runner:RuleRunner<arrow><none><block_start>rule_runner.write_files({"src/upstream/BUILD":dedent("""\ docker_image( name="image", repository="upstream/{name}", instructions=["FROM alpine"], ) """) "src/downstream/BUILD":"docker_image(name='image')" "src/downstream/Dockerfile":dedent("""\ ARG BASE_IMAGE=src/upstream:image FROM $BASE_IMAGE """) })<line_sep>assert_build_context(rule_runner Address("src/downstream" target_name="image") expected_files=["src/downstream/Dockerfile"] build_upstream_images=<true> expected_interpolation_context={"baseimage":{"tag":"latest"} "stage0":{"tag":"latest"} "build_args":{"BASE_IMAGE":"upstream/image:latest" } } )<block_end><def_stmt>test_files_out_of_tree rule_runner:RuleRunner<arrow><none># src/a:img_A -> res/static:files <block_start>rule_runner.write_files({"src/a/BUILD":dedent("""\ docker_image(name="img_A", dependencies=["res/static:files"]) """) "res/static/BUILD":dedent("""\ files(name="files", sources=["!BUILD", "**/*"]) """) "src/a/Dockerfile":"FROM base" "res/static/s01":"" "res/static/s02":"" "res/static/sub/s03":"" })<line_sep>assert_build_context(rule_runner Address("src/a" target_name="img_A") expected_files=["src/a/Dockerfile" "res/static/s01" "res/static/s02" "res/static/sub/s03" ] )<block_end><def_stmt>test_packaged_pex_path rule_runner:RuleRunner<arrow><none># This test is here to ensure that we catch if there is any change in the generated path where # built pex binaries go, as we rely on that for dependency inference in the Dockerfile. 
<block_start>rule_runner.write_files({"src/docker/BUILD":"""docker_image(dependencies=["src/python/proj/cli:bin"])""" "src/docker/Dockerfile":"""FROM python""" "src/python/proj/cli/BUILD":"""pex_binary(name="bin", entry_point="main.py")""" "src/python/proj/cli/main.py":"""print("cli main")""" })<line_sep>assert_build_context(rule_runner Address("src/docker" target_name="docker") expected_files=["src/docker/Dockerfile" "src.python.proj.cli/bin.pex"] )<block_end><def_stmt>test_interpolation_context_from_dockerfile rule_runner:RuleRunner<arrow><none><block_start>rule_runner.write_files({"src/docker/BUILD":"docker_image()" "src/docker/Dockerfile":dedent("""\ FROM python:3.8 FROM alpine as interim FROM interim FROM scratch:1-1 as output """) })<line_sep>assert_build_context(rule_runner Address("src/docker") expected_files=["src/docker/Dockerfile"] expected_interpolation_context={"baseimage":{"tag":"3.8"} "stage0":{"tag":"3.8"} "interim":{"tag":"latest"} "stage2":{"tag":"latest"} "output":{"tag":"1-1"} "build_args":{} } )<block_end><def_stmt>test_synthetic_dockerfile rule_runner:RuleRunner<arrow><none><block_start>rule_runner.write_files({"src/docker/BUILD":dedent("""\ docker_image( instructions=[ "FROM python:3.8", "FROM alpine as interim", "FROM interim", "FROM scratch:1-1 as output", ] ) """) })<line_sep>assert_build_context(rule_runner Address("src/docker") expected_files=["src/docker/Dockerfile.docker"] expected_interpolation_context={"baseimage":{"tag":"3.8"} "stage0":{"tag":"3.8"} "interim":{"tag":"latest"} "stage2":{"tag":"latest"} "output":{"tag":"1-1"} "build_args":{} } )<block_end><def_stmt>test_shell_source_dependencies rule_runner:RuleRunner<arrow><none><block_start>rule_runner.write_files({"src/docker/BUILD":dedent("""\ docker_image(dependencies=[":entrypoint", ":shell"]) shell_source(name="entrypoint", source="entrypoint.sh") shell_sources(name="shell", sources=["scripts/**/*.sh"]) """) "src/docker/Dockerfile":"FROM base" "src/docker/entrypoint.sh":"" "src/docker/scripts/s01.sh":"" "src/docker/scripts/s02.sh":"" "src/docker/scripts/random.file":"" })<line_sep>assert_build_context(rule_runner Address("src/docker") expected_files=["src/docker/Dockerfile" "src/docker/entrypoint.sh" "src/docker/scripts/s01.sh" "src/docker/scripts/s02.sh" ] )<block_end><def_stmt>test_build_arg_defaults_from_dockerfile rule_runner:RuleRunner<arrow><none># Test that only explicitly defined build args in the BUILD file or pants configuraiton use the # environment for its values. <block_start>rule_runner.write_files({"src/docker/BUILD":dedent("""\ docker_image( extra_build_args=[ "base_version", ] ) """) "src/docker/Dockerfile":dedent("""\ ARG base_name=python ARG base_version=3.8 FROM ${base_name}:${base_version} ARG NO_DEF ENV opt=${NO_DEF} """) })<line_sep>assert_build_context(rule_runner Address("src/docker") runner_options={"env":{"base_name":"no-effect" "base_version":"3.9" } } expected_files=["src/docker/Dockerfile"] expected_interpolation_context={"baseimage":{"tag":"${base_version}"} "stage0":{"tag":"${base_version}"} "build_args":{# `base_name` is not listed here, as it was not an explicitly defined build arg. 
"base_version":"3.9" } } )<block_end>@pytest.mark.parametrize("dockerfile_arg_value, extra_build_arg_value, expect" [pytest.param(<none> <none> no_exception() id="No args defined") pytest.param(<none> "" pytest.raises(ExecutionError match=r"variable 'MY_ARG' is undefined") id="No default value for build arg" ) pytest.param(<none> "some default value" no_exception() id="Default value for build arg") pytest.param("" <none> no_exception() id="No build arg defined, and ARG without default") pytest.param("" "" pytest.raises(ExecutionError match=r"variable 'MY_ARG' is undefined") id="No default value from ARG" ) pytest.param("" "some default value" no_exception() id="Default value for build arg, ARG present") pytest.param("some default value" <none> no_exception() id="No build arg defined, only ARG") pytest.param("some default value" "" no_exception() id="Default value from ARG") pytest.param("some default value" "some other default" no_exception() id="Default value for build arg, ARG default" ) ] )<def_stmt>test_undefined_env_var_behavior rule_runner:RuleRunner dockerfile_arg_value:str|<none> extra_build_arg_value:str|<none> expect:ContextManager <arrow><none><block_start>dockerfile_arg=""<if_stmt>dockerfile_arg_value<is><not><none><block_start>dockerfile_arg="ARG MY_ARG"<if_stmt>dockerfile_arg_value<block_start>dockerfile_arg<augadd>f"={dockerfile_arg_value}"<block_end><block_end>extra_build_args=""<if_stmt>extra_build_arg_value<is><not><none><block_start>extra_build_args='extra_build_args=["MY_ARG'<if_stmt>extra_build_arg_value<block_start>extra_build_args<augadd>f"={extra_build_arg_value}"<block_end>extra_build_args<augadd>'"],'<block_end>rule_runner.write_files({"src/docker/BUILD":dedent(f"""\ docker_image( {extra_build_args} ) """) "src/docker/Dockerfile":dedent(f"""\ FROM python:3.8 {dockerfile_arg} """) })<with_stmt>expect<block_start>assert_build_context(rule_runner Address("src/docker") expected_files=["src/docker/Dockerfile"] )<block_end><block_end>@pytest.fixture(scope="session")<def_stmt>build_context <arrow>DockerBuildContext<block_start>rule_runner=create_rule_runner()<line_sep>rule_runner.write_files({"src/docker/BUILD":dedent("""\ docker_image( extra_build_args=["DEF_ARG"], instructions=[ "FROM python:3.8", "ARG MY_ARG", "ARG DEF_ARG=some-value", ], ) """) })<line_sep><return>assert_build_context(rule_runner Address("src/docker") expected_files=["src/docker/Dockerfile.docker"] )<block_end>@pytest.mark.parametrize("fmt_string, result, expectation" [pytest.param("{build_args.MY_ARG}" <none> pytest.raises(ValueError match=(r"The build arg 'MY_ARG' is undefined\. 
Defined build args are: DEF_ARG\.") ) id="ARG_NAME" ) pytest.param("{build_args.DEF_ARG}" "some-value" no_exception() id="DEF_ARG" ) ] )<def_stmt>test_build_arg_behavior build_context:DockerBuildContext fmt_string:str result:str|<none> expectation:ContextManager <arrow><none><block_start><with_stmt>expectation<block_start><assert_stmt>fmt_string.format(**build_context.interpolation_context)<eq>result<block_end><block_end><def_stmt>test_create_docker_build_context <arrow><none><block_start>context=DockerBuildContext.create(build_args=DockerBuildArgs.from_strings("ARGNAME=value1") snapshot=EMPTY_SNAPSHOT build_env=DockerBuildEnvironment.create({"ENVNAME":"value2"}) dockerfile_info=DockerfileInfo(address=Address("test") digest=EMPTY_DIGEST source="test/Dockerfile" putative_target_addresses=() version_tags=("base latest" "stage1 1.2" "dev 2.0" "prod 2.0") build_args=DockerBuildArgs.from_strings() from_image_build_arg_names=() copy_sources=() ) )<assert_stmt>list(context.build_args)<eq>["ARGNAME=value1"]<assert_stmt>dict(context.build_env.environment)<eq>{"ENVNAME":"value2"}<assert_stmt>context.dockerfile<eq>"test/Dockerfile"<assert_stmt>context.stages<eq>("base" "dev" "prod")<block_end>
<import_from_stmt>.model DeepLabResNetModel<import_from_stmt>.hc_deeplab HyperColumn_Deeplabv2<import_from_stmt>.image_reader ImageReader read_data_list get_indicator_mat get_batch_1chunk read_an_image_from_disk tf_wrap_get_patch get_batch<import_from_stmt>.utils decode_labels inv_preprocess prepare_label<line_sep>
# coding: utf-8 """ joplin-web """<import_from_stmt>django.conf settings<import_from_stmt>django.http.response JsonResponse<import_from_stmt>django.urls reverse<import_from_stmt>joplin_api JoplinApiSync<import_from_stmt>joplin_web.utils nb_notes_by_tag nb_notes_by_folder<import_stmt>logging<import_from_stmt>rich console<line_sep>console=console.Console()<line_sep>logger=logging.getLogger("joplin_web.app")<line_sep>joplin=JoplinApiSync(token=settings.JOPLIN_WEBCLIPPER_TOKEN)<def_stmt>get_folders request<block_start>""" all the folders :param request :return: json """<line_sep>res=joplin.get_folders()<line_sep>json_data=sorted(res.json() key=<lambda>k:k['title'])<line_sep>data=nb_notes_by_folder(json_data)<line_sep>logger.debug(data)<line_sep><return>JsonResponse(data safe=<false>)<block_end><def_stmt>get_tags request<block_start>res=joplin.get_tags()<line_sep>json_data=sorted(res.json() key=<lambda>k:k['title'])<line_sep>data=nb_notes_by_tag(json_data)<line_sep><return>JsonResponse(data safe=<false>)<block_end>
<import_from_stmt>.base *<class_stmt>Input(Layer)<block_start><def_stmt>__init__ self input_shape:Union[List Tuple] **kwargs<block_start>super(Input self).__init__(input_shape=input_shape **kwargs)<line_sep>self._shape=input_shape<block_end><def_stmt>call self x:F.Tensor *args **kwargs<arrow>F.Tensor<block_start>self._data=x<line_sep><return>self._data<block_end><block_end><class_stmt>Reshape(Layer)<block_start><def_stmt>__init__ self shape:Tuple **kwargs<block_start>super().__init__(shape=shape **kwargs)<block_end><def_stmt>call self x:F.Tensor *args **kwargs<arrow>F.Tensor<block_start>self._data=F.view(x (-1 )+self._shape self._data)<line_sep><return>self._data<block_end><def_stmt>compute_output_shape self input_shape:Union[List Tuple]=<none><arrow>Union[List Tuple]<block_start><return>self._shape<block_end><block_end><class_stmt>ZeroPadding2D(Layer)<block_start><def_stmt>__init__ self padding **kwargs<block_start>self.padding=padding<line_sep>super(ZeroPadding2D self).__init__(**kwargs)<block_end><def_stmt>call self x:F.Tensor *args **kwargs<arrow>F.Tensor<block_start>self._data=F.pad2d(x self.padding self._data)<line_sep><return>self._data<block_end><def_stmt>compute_output_shape self input_shape:Union[List Tuple]=<none><arrow>Union[List Tuple]<block_start>self._shape=(input_shape[0] input_shape[1]+2<times>self.padding[0] input_shape[2]+2<times>self.padding[1])<line_sep><return>self._shape<block_end><block_end><class_stmt>Add(Layer)<block_start><def_stmt>__call__ self inbounds:List[Layer] *args **kwargs<block_start><for_stmt>inbound inbounds<block_start>self._in_bounds.append(inbound)<line_sep>inbound.add_out_bounds(self)<line_sep>self._shape=inbound.shape<block_end><return>self<block_end><def_stmt>init_layer_out_tensor self x:F.Tensor=<none><block_start>x=self._in_bounds[0].data<if>x<is><none><else>x<if_stmt>self._data<is><none><or>x.shape[0]<g>self._data.shape_capacity[0]<block_start>self._data=Zeros()((x.shape[0] )+self.shape requires_grad=self.trainable)<line_sep>self._data.to('static')<for_stmt>in_bound self._in_bounds<block_start>self._data.add_in_bounds(in_bound.data)<block_end><block_end><elif_stmt>x.shape[0]<l>self._data.shape_capacity[0]<block_start><if_stmt>GLOBAL.TRAINING<block_start>self._data.slices(slice(<none> x.shape[0] <none>))<block_end><else_stmt><block_start>self._data=Zeros()((x.shape[0] )+self.shape requires_grad=self.trainable)<line_sep>self._data.to('static')<for_stmt>in_bound self._in_bounds<block_start>self._data.add_in_bounds(in_bound.data)<block_end><block_end><block_end><else_stmt><block_start>self._data.slices(slice(<none> <none> <none>))<block_end><block_end><def_stmt>forward self x:F.Tensor=<none> *args **kwargs<arrow>F.Tensor<block_start>self._data.zero_()<for_stmt>in_bound self._in_bounds<block_start>GLOBAL.np.add(self._data.eval in_bound.data.eval out=self._data.eval)<if_stmt>GLOBAL.TRAINING<and>in_bound.data.requires_grad<block_start>initialize_ops_grad(in_bound.data)<block_end>self._data.requires_grad=self._data.requires_grad<or>in_bound.data.requires_grad<block_end><return>self._data<block_end><def_stmt>compute_output_shape self input_shape:Union[List Tuple]=<none><arrow>Union[List Tuple]<block_start><return>self._shape<block_end><def_stmt>backward self gradients:F.Tensor=<none><block_start><for_stmt>in_bound self._in_bounds<block_start><if_stmt>in_bound.data.requires_grad<block_start>GLOBAL.np.add(in_bound.data.grad.eval self._data.grad.eval out=in_bound.data.grad.eval)<block_end><block_end>self._data.zero_grad()<block_end><block_end>
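# Hedged sketch only: the base Layer class lives in `.base` and is not shown here,
# so the single-inbound chaining syntax below is an assumption modelled on
# Keras-style functional APIs. What is visible from this file: Add() is called
# with a *list* of inbound layers and keeps their shared output shape.
inp = Input(input_shape=(1, 28, 28))
branch_a = Reshape(shape=(784,))(inp)      # assumed Layer.__call__ for one inbound
branch_b = Reshape(shape=(784,))(inp)
merged = Add()([branch_a, branch_b])       # element-wise sum of the two branches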
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. <import_stmt>torchx.examples.apps.lightning_classy_vision.component<as>lightning_classy_vision<import_from_stmt>torchx.components.component_test_base ComponentTestCase<class_stmt>DistributedComponentTest(ComponentTestCase)<block_start><def_stmt>test_trainer self<arrow><none><block_start>self.validate(lightning_classy_vision "trainer")<block_end><def_stmt>test_interpret self<arrow><none><block_start>self.validate(lightning_classy_vision "interpret")<block_end><block_end>
<import_from_stmt>os.path join dirname<import_stmt>numpy<as>np<import_from_stmt>.text put_text<import_from_stmt>.. const<import_from_stmt>..os makedirs<import_from_stmt>..imprt preset_import<import_from_stmt>..log get_logger<line_sep>logger=get_logger()<def_stmt>make_video imgs fps=24 outpath=<none> method='matplotlib' dpi=96 bitrate=-1<block_start>"""Writes a list of images into a grayscale or color video. Args: imgs (list(numpy.ndarray)): Each image should be of type ``uint8`` or ``uint16`` and of shape H-by-W (grayscale) or H-by-W-by-3 (RGB). fps (int, optional): Frame rate. outpath (str, optional): Where to write the video to (a .mp4 file). ``None`` means ``os.path.join(const.Dir.tmp, 'make_video.mp4')``. method (str, optional): Method to use: ``'matplotlib'``, ``'opencv'``, ``'video_api'``. dpi (int, optional): Dots per inch when using ``matplotlib``. bitrate (int, optional): Bit rate in kilobits per second when using ``matplotlib``; reasonable values include 7200. Writes - A video of the images. """<if_stmt>outpath<is><none><block_start>outpath=join(const.Dir.tmp 'make_video.mp4')<block_end>makedirs(dirname(outpath))<assert_stmt>imgs "Frame list is empty"<for_stmt>frame imgs<block_start><assert_stmt>np.issubdtype(frame.dtype np.unsignedinteger) "Image type must be unsigned integer"<block_end>h,w=imgs[0].shape[:2]<for_stmt>frame imgs[1:]<block_start><assert_stmt>frame.shape[:2]<eq>(h w) "All frames must have the same shape"<block_end><if_stmt>method<eq>'matplotlib'<block_start><import_stmt>matplotlib<line_sep>matplotlib.use('Agg')<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>matplotlib animation<line_sep>w_in,h_in=w/dpi h/dpi<line_sep>fig=plt.figure(figsize=(w_in h_in))<line_sep>Writer=animation.writers['ffmpeg']# may require you to specify path writer=Writer(fps=fps bitrate=bitrate)<def_stmt>img_plt arr<block_start>img_plt_=plt.imshow(arr)<line_sep>ax=plt.gca()<line_sep>ax.set_position([0 0 1 1])<line_sep>ax.set_axis_off()<line_sep><return>img_plt_<block_end>anim=animation.ArtistAnimation(fig [(img_plt(x) )<for>x imgs])<line_sep>anim.save(outpath writer=writer)<line_sep># If obscure error like "ValueError: Invalid file object: <_io.Buff..." 
# occurs, consider upgrading matplotlib so that it prints out the real, # underlying ffmpeg error plt.close('all')<block_end><elif_stmt>method<eq>'opencv'<block_start>cv2=preset_import('cv2' assert_success=<true>)<line_sep># TODO: debug codecs (see http://www.fourcc.org/codecs.php) <if_stmt>outpath.endswith('.mp4')# fourcc = cv2.VideoWriter_fourcc(*'MJPG') # fourcc = cv2.VideoWriter_fourcc(*'X264') <block_start>fourcc=cv2.VideoWriter_fourcc(*'H264')<line_sep># fourcc = 0x00000021 <block_end><elif_stmt>outpath.endswith('.avi')<block_start>fourcc=cv2.VideoWriter_fourcc(*'XVID')<block_end><else_stmt><block_start><raise>NotImplementedError("Video type of\n\t%s"%outpath)<block_end>vw=cv2.VideoWriter(outpath fourcc fps (w h))<for_stmt>frame imgs<block_start><if_stmt>frame.ndim<eq>3<block_start>frame=frame[: : ::-1]# cv2 uses BGR <block_end>vw.write(frame)<block_end>vw.release()<block_end><elif_stmt>method<eq>'video_api'<block_start>video_api=preset_import('video_api' assert_success=<true>)<assert_stmt>outpath.endswith('.webm') "`video_api` requires .webm"<with_stmt>video_api.write(outpath fps=fps)<as>h<block_start><for_stmt>frame imgs<block_start><if_stmt>frame.ndim<eq>3<and>frame.shape[2]<eq>4<block_start>frame=frame[: : :3]<block_end>#frame = frame.astype(np.ubyte) h.add_frame(frame)<block_end><block_end><block_end><else_stmt><block_start><raise>ValueError(method)<block_end>logger.debug("Images written as a video to:\n%s" outpath)<block_end><def_stmt>make_comparison_video imgs1 imgs2 bar_width=4 bar_color=(1 0 0) sweep_vertically=<false> sweeps=1 label1='' label2='' font_size=<none> font_ttf=<none> label1_top_left_xy=<none> label2_top_left_xy=<none> **make_video_kwargs<block_start>"""Writes two lists of images into a comparison video that toggles between two videos with a sweeping bar. Args: imgs? (list(numpy.ndarray)): Each image should be of type ``uint8`` or ``uint16`` and of shape H-by-W (grayscale) or H-by-W-by-3 (RGB). bar_width (int, optional): Width of the sweeping bar. bar_color (tuple(float), optional): Bar and label RGB, normalized to :math:`[0,1]`. Defaults to red. sweep_vertically (bool, optional): Whether to sweep vertically or horizontally. sweeps (int, optional): Number of sweeps. label? (str, optional): Label for each video. font_size (int, optional): Font size. font_ttf (str, optional): Path to the .ttf font file. Defaults to Arial. label?_top_left_xy (tuple(int), optional): The XY coordinate of the label's top left corner. make_video_kwargs (dict, optional): Keyword arguments for :func:`make_video`. Writes - A comparison video. 
"""<line_sep># Bar is perpendicular to sweep-along sweep_along=0<if>sweep_vertically<else>1<line_sep>bar_along=1<if>sweep_vertically<else>0<line_sep># Number of frames n_frames=len(imgs1)<assert_stmt>n_frames<eq>len(imgs2) "Videos to be compared have different numbers of frames"<line_sep>img_shape=imgs1[0].shape<line_sep># Bar color according to image dtype img_dtype=imgs1[0].dtype<line_sep>bar_color=np.array(bar_color dtype=img_dtype)<if_stmt>np.issubdtype(img_dtype np.integer)<block_start>bar_color<augmul>np.iinfo(img_dtype).max<block_end># Map from frame index to bar location, considering possibly multiple trips bar_locs=[]<for_stmt>i range(sweeps)<block_start>ind=np.arange(0 img_shape[sweep_along])<if_stmt>i%2<eq>1# reverse every other trip <block_start>ind=ind[::-1]<block_end>bar_locs.append(ind)<block_end>bar_locs=np.hstack(bar_locs)# all possible locations ind=np.linspace(0 len(bar_locs)-1 num=n_frames endpoint=<true>)<line_sep>bar_locs=[bar_locs[int(x)]<for>x ind]# uniformly sampled # Label locations <if_stmt>label1_top_left_xy<is><none># Label 1 at top left corner <block_start>label1_top_left_xy=(int(0.1<times>img_shape[1]) int(0.05<times>img_shape[0]))<block_end><if_stmt>label2_top_left_xy<is><none><block_start><if_stmt>sweep_vertically# Label 2 at bottom left corner <block_start>label2_top_left_xy=(int(0.1<times>img_shape[1]) int(0.75<times>img_shape[0]))<block_end><else_stmt># Label 2 at top right corner <block_start>label2_top_left_xy=(int(0.7<times>img_shape[1]) int(0.05<times>img_shape[0]))<block_end><block_end>frames=[]<for_stmt>i,(img1 img2) enumerate(zip(imgs1 imgs2))<block_start><assert_stmt>img1.shape<eq>img_shape f"`imgs1[{i}]` has a differnet shape"<assert_stmt>img2.shape<eq>img_shape f"`imgs2[{i}]` has a differnet shape"<assert_stmt>img1.dtype<eq>img_dtype f"`imgs1[{i}]` has a differnet dtype"<assert_stmt>img2.dtype<eq>img_dtype f"`imgs2[{i}]` has a differnet dtype"<line_sep># Label the two images img1=put_text(img1 label1 label_top_left_xy=label1_top_left_xy font_size=font_size font_color=bar_color font_ttf=font_ttf)<line_sep>img2=put_text(img2 label2 label_top_left_xy=label2_top_left_xy font_size=font_size font_color=bar_color font_ttf=font_ttf)<line_sep># Bar start and end bar_loc=bar_locs[i]<line_sep>bar_width_half=bar_width<floordiv>2<line_sep>bar_start=max(0 bar_loc-bar_width_half)<line_sep>bar_end=min(bar_loc+bar_width_half img_shape[sweep_along])<line_sep># Up to bar start, we show Image 1; bar end onwards, Image 2 img1=np.take(img1 range(bar_start) axis=sweep_along)<line_sep>img2=np.take(img2 range(bar_end img_shape[sweep_along]) axis=sweep_along)<line_sep># Between the two images, we show the bar actual_bar_width=img_shape[sweep_along]-img1.shape[sweep_along]-img2.shape[sweep_along]<line_sep>reps=[1 1 1]<line_sep>reps[sweep_along]=actual_bar_width<line_sep>reps[bar_along]=img_shape[bar_along]<line_sep>bar_img=np.tile(bar_color reps)<line_sep>frame=np.concatenate((img1 bar_img img2) axis=sweep_along)<line_sep>frames.append(frame)<block_end>make_video(frames **make_video_kwargs)<block_end>
<import_stmt>logging<class_stmt>BaseConfig(object)<block_start>"""BaseConfig provides a common interface for nested access for all Config objects in CCI."""<line_sep>defaults={}<def_stmt>__init__ self config=<none> keychain=<none><block_start><if_stmt>config<is><none><block_start>self.config={}<block_end><else_stmt><block_start>self.config=config<block_end>self._init_logger()<line_sep>self._load_config()<block_end><def_stmt>_init_logger self<block_start>"""Initializes self.logger"""<line_sep>self.logger=logging.getLogger(__name__)<block_end><def_stmt>_load_config self<block_start>"""Subclasses may override this method to initialize :py:attr:`~config`"""<line_sep><pass><block_end><def_stmt>__getattr__ self name<block_start>tree=name.split("__")<if_stmt>name.startswith("_")<block_start><raise>AttributeError(f"Attribute {name} not found")<block_end>value=<none><line_sep>value_found=<false><line_sep>config=self.config<if_stmt>len(tree)<g>1# Walk through the config dictionary using __ as a delimiter <block_start><for_stmt>key tree[:-1]<block_start>config=config.get(key)<if_stmt>config<is><none><block_start><break><block_end><block_end><block_end><if_stmt>config<and>tree[-1]<in>config<block_start>value=config[tree[-1]]<line_sep>value_found=<true><block_end><if_stmt>value_found<block_start><return>value<block_end><else_stmt><block_start><return>self.defaults.get(name)<block_end><block_end><block_end>
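# Illustrative example (not from the original module): __getattr__ splits the
# attribute name on "__", walks the nested config dict, and falls back to the
# class-level `defaults` mapping when a key is missing.
cfg = BaseConfig({'project': {'name': 'demo', 'package': {'api_version': 52.0}}})
assert cfg.project__name == 'demo'
assert cfg.project__package__api_version == 52.0
assert cfg.project__missing is None    # not in the config and not in defaults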
# -*- coding: utf-8 -*-
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>numpy<as>np<line_sep># Func1: change density map into count map
# density map: batch size * 1 * w * h
<def_stmt>get_local_count density_map psize pstride<block_start>IF_gpu=torch.cuda.is_available()# run the convolution on GPU if one is available
IF_ret_gpu=(density_map.device.type<eq>'cuda')<line_sep>psize,pstride=int(psize) int(pstride)<line_sep>density_map=density_map.cpu().type(torch.float32)<line_sep>conv_kernel=torch.ones(1 1 psize psize dtype=torch.float32)<if_stmt>IF_gpu<block_start>density_map,conv_kernel=density_map.cuda() conv_kernel.cuda()<block_end>count_map=F.conv2d(density_map conv_kernel stride=pstride)<if_stmt><not>IF_ret_gpu<block_start>count_map=count_map.cpu()<block_end><return>count_map<block_end># Func2: convert count to class (0->c-1)
<def_stmt>Count2Class count_map label_indice<block_start><if_stmt>isinstance(label_indice np.ndarray)<block_start>label_indice=torch.from_numpy(label_indice)<block_end>IF_gpu=torch.cuda.is_available()<line_sep>IF_ret_gpu=(count_map.device.type<eq>'cuda')<line_sep>label_indice=label_indice.cpu().type(torch.float32)<line_sep>cls_num=len(label_indice)+1<line_sep>cls_map=torch.zeros(count_map.size()).type(torch.LongTensor)<if_stmt>IF_gpu<block_start>count_map,label_indice,cls_map=count_map.cuda() label_indice.cuda() cls_map.cuda()<block_end><for_stmt>i range(cls_num-1)<block_start><if_stmt>IF_gpu<block_start>cls_map=cls_map+(count_map<ge>label_indice[i]).cpu().type(torch.LongTensor).cuda()<block_end><else_stmt><block_start>cls_map=cls_map+(count_map<ge>label_indice[i]).cpu().type(torch.LongTensor)<block_end><block_end><if_stmt><not>IF_ret_gpu<block_start>cls_map=cls_map.cpu()<block_end><return>cls_map<block_end># Func3: convert class (0->c-1) to count number
<def_stmt>Class2Count pre_cls label_indice<block_start>'''
    # --Input:
    # 1.pre_cls is a class label map with values in [0,1,2,...,C-1]
    # 2.label_indice does not include 0, only the other interval boundary points
    # --Output:
    # 1.count value map, the same size as pre_cls
    '''<if_stmt>isinstance(label_indice np.ndarray)<block_start>label_indice=torch.from_numpy(label_indice)<block_end>label_indice=label_indice.squeeze()<line_sep>IF_gpu=torch.cuda.is_available()<line_sep>IF_ret_gpu=(pre_cls.device.type<eq>'cuda')<line_sep># transform the class intervals into a count value lookup table
label2count=[0.0]<for_stmt>(i item) enumerate(label_indice)<block_start><if_stmt>i<l>label_indice.size()[0]-1<block_start>tmp_count=(label_indice[i]+label_indice[i+1])/2<block_end><else_stmt><block_start>tmp_count=label_indice[i]<block_end>label2count.append(tmp_count)<block_end>label2count=torch.tensor(label2count)<line_sep>label2count=label2count.type(torch.FloatTensor)<line_sep>#outputs = outputs.max(dim=1)[1].cpu().data
ORI_SIZE=pre_cls.size()<line_sep>pre_cls=pre_cls.reshape(-1).cpu()<line_sep>pre_counts=torch.index_select(label2count 0 pre_cls.cpu().type(torch.LongTensor))<line_sep>pre_counts=pre_counts.reshape(ORI_SIZE)<if_stmt>IF_ret_gpu<block_start>pre_counts=pre_counts.cuda()<block_end><return>pre_counts<block_end><if_stmt>__name__<eq>'__main__'<block_start>pre_cls=torch.Tensor([[0 1 2] [3 4 4]])<line_sep>label_indice=torch.Tensor([0.5 1 1.5 2])<line_sep>pre_counts=Class2Count(pre_cls label_indice)<line_sep>print(pre_cls)<line_sep>print(label_indice)<line_sep>print(pre_counts)<line_sep>pre_cls=Count2Class(pre_counts label_indice)<line_sep>print(pre_cls)<block_end>
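# NOTE: hedged usage sketch added for illustration; it relies only on the functions and imports above. With psize equal to pstride the patches tile the density map exactly, so the per-patch counts must sum to the total count.
density_map=torch.rand(1 1 64 64)<line_sep>count_map=get_local_count(density_map psize=16 pstride=16)# shape (1, 1, 4, 4): one count per non-overlapping 16x16 patch
print(count_map.shape)<line_sep>print(torch.allclose(count_map.sum() density_map.sum() atol=1e-4))# True up to floating point error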
<import_stmt>a<as>b<import_stmt>b.c<as>e<line_sep>b.foo(1)<line_sep>e.baz(1)<line_sep>
"""Issue #712"""<import_from_stmt>nbformat.v4.nbbase new_code_cell new_notebook<import_from_stmt>jupytext reads writes<import_from_stmt>jupytext.cell_to_text three_backticks_or_more<import_from_stmt>jupytext.compare compare compare_notebooks<import_from_stmt>.utils requires_myst<def_stmt>test_three_backticks_or_more <block_start><assert_stmt>three_backticks_or_more([""])<eq>"```"<assert_stmt>three_backticks_or_more(["``"])<eq>"```"<assert_stmt>three_backticks_or_more(["```python"])<eq>"````"<assert_stmt>three_backticks_or_more(["```"])<eq>"````"<assert_stmt>three_backticks_or_more(["`````python"])<eq>"``````"<assert_stmt>three_backticks_or_more(["`````"])<eq>"``````"<block_end><def_stmt>test_triple_backticks_in_code_cell no_jupytext_version_number nb=new_notebook(metadata={"main_language":"python"} cells=[new_code_cell('''a = """ ``` foo ``` """''')] ) text='''--- jupyter: jupytext: main_language: python --- ````python a = """ ``` foo ``` """ ```` ''' <block_start>actual_text=writes(nb fmt="md")<line_sep>compare(actual_text text)<line_sep>actual_nb=reads(text fmt="md")<line_sep>compare_notebooks(actual_nb nb)<block_end>@requires_myst<def_stmt>test_triple_backticks_in_code_cell_myst no_jupytext_version_number nb=new_notebook(metadata={"main_language":"python"} cells=[new_code_cell('''a = """ ``` foo ``` """''')] ) text='''--- jupytext: main_language: python --- ````{code-cell} a = """ ``` foo ``` """ ```` ''' <block_start>actual_text=writes(nb fmt="md:myst")<line_sep>compare(actual_text text)<line_sep>actual_nb=reads(text fmt="md:myst")<line_sep>compare_notebooks(actual_nb nb)<block_end><def_stmt>test_alternate_tree_four_five_backticks no_jupytext_version_number nb=new_notebook(metadata={"main_language":"python"} cells=[new_code_cell('a = """\n```\n"""') new_code_cell("b = 2") new_code_cell('c = """\n````\n"""') ] ) text='''--- jupyter: jupytext: main_language: python --- ````python a = """ ``` """ ```` ```python b = 2 ``` `````python c = """ ```` """ ````` ''' <block_start>actual_text=writes(nb fmt="md")<line_sep>compare(actual_text text)<line_sep>actual_nb=reads(text fmt="md")<line_sep>compare_notebooks(actual_nb nb)<block_end>
''' This file provides a wrapper class for Fast_AT (https://github.com/locuslab/fast_adversarial) model for CIFAR-10 dataset. '''<import_stmt>sys<import_stmt>os<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>tensorflow<as>tf<import_from_stmt>ares.model.pytorch_wrapper pytorch_classifier_with_logits<import_from_stmt>ares.utils get_res_path<line_sep>MODEL_PATH=get_res_path('./cifar10/cifar_model_weights_30_epochs.pth')<def_stmt>load _<block_start>model=Fast_AT()<line_sep>model.load()<line_sep><return>model<block_end>@pytorch_classifier_with_logits(n_class=10 x_min=0.0 x_max=1.0 x_shape=(32 32 3) x_dtype=tf.float32 y_dtype=tf.int32)<class_stmt>Fast_AT(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>torch.nn.Module.__init__(self)<line_sep>self.model=PreActResNet18().cuda()<line_sep>self._mean_torch=torch.tensor((0.4914 0.4822 0.4465)).view(3 1 1).cuda()<line_sep>self._std_torch=torch.tensor((0.2471 0.2435 0.2616)).view(3 1 1).cuda()<block_end><def_stmt>forward self x<block_start>x=x.transpose(1 2).transpose(1 3).contiguous()<line_sep>input_var=(x.cuda()-self._mean_torch)/self._std_torch<line_sep>labels=self.model(input_var)<line_sep><return>labels.cpu()<block_end><def_stmt>load self<block_start>checkpoint=torch.load(MODEL_PATH)<line_sep>self.model.load_state_dict(checkpoint)<line_sep>self.model.float()<line_sep>self.model.eval()<block_end><block_end><class_stmt>PreActBlock(nn.Module)<block_start>'''Pre-activation version of the BasicBlock.'''<line_sep>expansion=1<def_stmt>__init__ self in_planes planes stride=1<block_start>super(PreActBlock self).__init__()<line_sep>self.bn1=nn.BatchNorm2d(in_planes)<line_sep>self.conv1=nn.Conv2d(in_planes planes kernel_size=3 stride=stride padding=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(planes)<line_sep>self.conv2=nn.Conv2d(planes planes kernel_size=3 stride=1 padding=1 bias=<false>)<if_stmt>stride<ne>1<or>in_planes<ne>self.expansion<times>planes<block_start>self.shortcut=nn.Sequential(nn.Conv2d(in_planes self.expansion<times>planes kernel_size=1 stride=stride bias=<false>))<block_end><block_end><def_stmt>forward self x<block_start>out=F.relu(self.bn1(x))<line_sep>shortcut=self.shortcut(x)<if>hasattr(self 'shortcut')<else>x<line_sep>out=self.conv1(out)<line_sep>out=self.conv2(F.relu(self.bn2(out)))<line_sep>out<augadd>shortcut<line_sep><return>out<block_end><block_end><class_stmt>PreActBottleneck(nn.Module)<block_start>'''Pre-activation version of the original Bottleneck module.'''<line_sep>expansion=4<def_stmt>__init__ self in_planes planes stride=1<block_start>super(PreActBottleneck self).__init__()<line_sep>self.bn1=nn.BatchNorm2d(in_planes)<line_sep>self.conv1=nn.Conv2d(in_planes planes kernel_size=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(planes)<line_sep>self.conv2=nn.Conv2d(planes planes kernel_size=3 stride=stride padding=1 bias=<false>)<line_sep>self.bn3=nn.BatchNorm2d(planes)<line_sep>self.conv3=nn.Conv2d(planes self.expansion<times>planes kernel_size=1 bias=<false>)<if_stmt>stride<ne>1<or>in_planes<ne>self.expansion<times>planes<block_start>self.shortcut=nn.Sequential(nn.Conv2d(in_planes self.expansion<times>planes kernel_size=1 stride=stride bias=<false>))<block_end><block_end><def_stmt>forward self x<block_start>out=F.relu(self.bn1(x))<line_sep>shortcut=self.shortcut(out)<if>hasattr(self 
'shortcut')<else>x<line_sep>out=self.conv1(out)<line_sep>out=self.conv2(F.relu(self.bn2(out)))<line_sep>out=self.conv3(F.relu(self.bn3(out)))<line_sep>out<augadd>shortcut<line_sep><return>out<block_end><block_end><class_stmt>PreActResNet(nn.Module)<block_start><def_stmt>__init__ self block num_blocks num_classes=10<block_start>super(PreActResNet self).__init__()<line_sep>self.in_planes=64<line_sep>self.conv1=nn.Conv2d(3 64 kernel_size=3 stride=1 padding=1 bias=<false>)<line_sep>self.layer1=self._make_layer(block 64 num_blocks[0] stride=1)<line_sep>self.layer2=self._make_layer(block 128 num_blocks[1] stride=2)<line_sep>self.layer3=self._make_layer(block 256 num_blocks[2] stride=2)<line_sep>self.layer4=self._make_layer(block 512 num_blocks[3] stride=2)<line_sep>self.bn=nn.BatchNorm2d(512<times>block.expansion)<line_sep>self.linear=nn.Linear(512<times>block.expansion num_classes)<block_end><def_stmt>_make_layer self block planes num_blocks stride<block_start>strides=[stride]+[1]<times>(num_blocks-1)<line_sep>layers=[]<for_stmt>stride strides<block_start>layers.append(block(self.in_planes planes stride))<line_sep>self.in_planes=planes<times>block.expansion<block_end><return>nn.Sequential(*layers)<block_end><def_stmt>forward self x<block_start>out=self.conv1(x)<line_sep>out=self.layer1(out)<line_sep>out=self.layer2(out)<line_sep>out=self.layer3(out)<line_sep>out=self.layer4(out)<line_sep>out=F.relu(self.bn(out))<line_sep>out=F.avg_pool2d(out 4)<line_sep>out=out.view(out.size(0) -1)<line_sep>out=self.linear(out)<line_sep><return>out<block_end><block_end><def_stmt>PreActResNet18 <block_start><return>PreActResNet(PreActBlock [2 2 2 2])<block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt><not>os.path.exists(MODEL_PATH)<block_start><if_stmt><not>os.path.exists(os.path.dirname(MODEL_PATH))<block_start>os.makedirs(os.path.dirname(MODEL_PATH) exist_ok=<true>)<block_end>url='https://drive.google.com/file/d/1XM-v4hqi9u8EDrQ2xdCo37XXcM9q-R07/view'<line_sep>print('Please download "{}" to "{}".'.format(url MODEL_PATH))<block_end><block_end>
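# NOTE: hedged usage sketch, not part of the wrapper above. It only instantiates the bare PreActResNet18 backbone on CPU and checks that CIFAR-10 sized inputs map to 10 logits; evaluating the actual Fast_AT defense still requires a GPU and the checkpoint at MODEL_PATH, as handled by Fast_AT.load above.
net=PreActResNet18()<line_sep>net.eval()<line_sep>logits=net(torch.zeros(2 3 32 32))<line_sep>print(logits.shape)# torch.Size([2, 10])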
<import_from_stmt>mayan.apps.testing.tests.base GenericViewTestCase<import_from_stmt>..events event_smart_link_edited<import_from_stmt>..permissions permission_smart_link_edit<import_from_stmt>.mixins SmartLinkConditionViewTestMixin SmartLinkTestMixin SmartLinkViewTestMixin <class_stmt>SmartLinkConditionViewTestCase(SmartLinkConditionViewTestMixin SmartLinkTestMixin SmartLinkViewTestMixin GenericViewTestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self._create_test_smart_link()<block_end><def_stmt>test_smart_link_condition_create_view_no_permission self<block_start>condition_count=self.test_smart_link.conditions.count()<line_sep>self._clear_events()<line_sep>response=self._request_test_smart_link_condition_create_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>self.assertEqual(self.test_smart_link.conditions.count() condition_count)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_smart_link_condition_create_view_with_access self<block_start>self.grant_access(obj=self.test_smart_link permission=permission_smart_link_edit)<line_sep>condition_count=self.test_smart_link.conditions.count()<line_sep>self._clear_events()<line_sep>response=self._request_test_smart_link_condition_create_view()<line_sep>self.assertEqual(response.status_code 302)<line_sep>self.assertEqual(self.test_smart_link.conditions.count() condition_count+1)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 1)<line_sep>self.assertEqual(events[0].action_object self.test_smart_link_condition)<line_sep>self.assertEqual(events[0].actor self._test_case_user)<line_sep>self.assertEqual(events[0].target self.test_smart_link)<line_sep>self.assertEqual(events[0].verb event_smart_link_edited.id)<block_end><def_stmt>test_smart_link_condition_delete_view_no_permission self<block_start>self._create_test_smart_link_condition()<line_sep>condition_count=self.test_smart_link.conditions.count()<line_sep>self._clear_events()<line_sep>response=self._request_test_smart_link_condition_delete_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>self.assertEqual(self.test_smart_link.conditions.count() condition_count)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_smart_link_condition_delete_view_with_access self<block_start>self._create_test_smart_link_condition()<line_sep>self.grant_access(obj=self.test_smart_link permission=permission_smart_link_edit)<line_sep>condition_count=self.test_smart_link.conditions.count()<line_sep>self._clear_events()<line_sep>response=self._request_test_smart_link_condition_delete_view()<line_sep>self.assertEqual(response.status_code 302)<line_sep>self.assertEqual(self.test_smart_link.conditions.count() condition_count-1)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 1)<line_sep>self.assertEqual(events[0].action_object <none>)<line_sep>self.assertEqual(events[0].actor self._test_case_user)<line_sep>self.assertEqual(events[0].target self.test_smart_link)<line_sep>self.assertEqual(events[0].verb event_smart_link_edited.id)<block_end><def_stmt>test_smart_link_condition_edit_view_no_permission 
self<block_start>self._create_test_smart_link_condition()<line_sep>instance_values=self._model_instance_to_dictionary(instance=self.test_smart_link_condition)<line_sep>self._clear_events()<line_sep>response=self._request_test_smart_link_condition_edit_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>self.test_smart_link_condition.refresh_from_db()<line_sep>self.assertEqual(self._model_instance_to_dictionary(instance=self.test_smart_link_condition) instance_values)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_smart_link_condition_edit_view_with_access self<block_start>self._create_test_smart_link_condition()<line_sep>self.grant_access(obj=self.test_smart_link permission=permission_smart_link_edit)<line_sep>instance_values=self._model_instance_to_dictionary(instance=self.test_smart_link_condition)<line_sep>self._clear_events()<line_sep>response=self._request_test_smart_link_condition_edit_view()<line_sep>self.assertEqual(response.status_code 302)<line_sep>self.test_smart_link_condition.refresh_from_db()<line_sep>self.assertNotEqual(self._model_instance_to_dictionary(instance=self.test_smart_link_condition) instance_values)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 1)<line_sep>self.assertEqual(events[0].action_object self.test_smart_link_condition)<line_sep>self.assertEqual(events[0].actor self._test_case_user)<line_sep>self.assertEqual(events[0].target self.test_smart_link)<line_sep>self.assertEqual(events[0].verb event_smart_link_edited.id)<block_end><def_stmt>test_smart_link_condition_list_view_no_permission self<block_start>self._create_test_smart_link_condition()<line_sep>self._clear_events()<line_sep>response=self._request_test_smart_link_condition_list_view()<line_sep>self.assertNotContains(response=response status_code=404 text=self.test_smart_link_condition.smart_link.label)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_smart_link_condition_list_view_with_access self<block_start>self._create_test_smart_link_condition()<line_sep>self.grant_access(obj=self.test_smart_link permission=permission_smart_link_edit)<line_sep>self._clear_events()<line_sep>response=self._request_test_smart_link_condition_list_view()<line_sep>self.assertContains(response=response status_code=200 text=self.test_smart_link_condition.smart_link.label)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><block_end>
<import_from_stmt>container.base TimeBase<import_from_stmt>container.array TimeArray TimeDtype<import_from_stmt>container.timeseries TimeSeries<import_from_stmt>container.timeframe TimeFrame<line_sep>