content
stringlengths
0
1.55M
<import_stmt>collections<def_stmt>canonicalize json_obj preserve_sequence_order=<true><block_start>""" This function canonicalizes a Python object that will be serialized as JSON. Example usage: json.dumps(canonicalize(my_obj)) Args: json_obj (object): the Python object that will later be serialized as JSON. Returns: object: json_obj now sorted to its canonical form. """<if_stmt>isinstance(json_obj collections.MutableMapping)<block_start>sorted_obj=sorted({key:canonicalize(val preserve_sequence_order)<for>key,val json_obj.items()}.items())<line_sep><return>collections.OrderedDict(sorted_obj)<block_end><elif_stmt>isinstance(json_obj (list tuple))<block_start>seq=[canonicalize(val preserve_sequence_order)<for>val json_obj]<line_sep><return>seq<if>preserve_sequence_order<else>sorted(seq)<block_end><return>json_obj<block_end>
# Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # <try_stmt><block_start><import_stmt>e2e.fixtures<import_from_stmt>e2e.conftest_utils *# noqa <import_from_stmt>e2e.conftest_utils pytest_addoption<as>_e2e_pytest_addoption# noqa <import_from_stmt>e2e config# noqa <import_from_stmt>e2e.utils get_plugins_from_packages<line_sep>pytest_plugins=get_plugins_from_packages([e2e])<block_end><except_stmt>ImportError<block_start>_e2e_pytest_addoption=<none><line_sep><pass><block_end><import_stmt>config<import_stmt>pytest<import_from_stmt>ote_sdk.test_suite.pytest_insertions *<import_from_stmt>ote_sdk.test_suite.training_tests_common REALLIFE_USECASE_CONSTANT<line_sep>pytest_plugins=get_pytest_plugins_from_ote()<line_sep>ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm')<line_sep>@pytest.fixture<def_stmt>ote_test_domain_fx <block_start><return>'model-preparation-algorithm'<block_end>@pytest.fixture<def_stmt>ote_test_scenario_fx current_test_parameters_fx<block_start><assert_stmt>isinstance(current_test_parameters_fx dict)<if_stmt>current_test_parameters_fx.get('usecase')<eq>REALLIFE_USECASE_CONSTANT<block_start><return>'performance'<block_end><else_stmt><block_start><return>'integration'<block_end><block_end>@pytest.fixture(scope='session')<def_stmt>ote_templates_root_dir_fx <block_start><import_stmt>os.path<as>osp<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<line_sep>root=osp.dirname(osp.dirname(osp.realpath(__file__)))<line_sep>root=f'{root}/configs/'<line_sep>logger.debug(f'overloaded ote_templates_root_dir_fx: return {root}')<line_sep><return>root<block_end>@pytest.fixture(scope='session')<def_stmt>ote_reference_root_dir_fx <block_start><import_stmt>os.path<as>osp<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<line_sep>root=osp.dirname(osp.dirname(osp.realpath(__file__)))<line_sep>root=f'{root}/tests/reference/'<line_sep>logger.debug(f'overloaded 
ote_reference_root_dir_fx: return {root}')<line_sep><return>root<block_end># pytest magic <def_stmt>pytest_generate_tests metafunc<block_start>ote_pytest_generate_tests_insertion(metafunc)<block_end><def_stmt>pytest_addoption parser<block_start>ote_pytest_addoption_insertion(parser)<block_end>
"""Validation for UDFs. Warning: This is an experimental module and API here can change without notice. DO NOT USE DIRECTLY. """<import_from_stmt>inspect Parameter Signature signature<import_from_stmt>typing Any Callable List<import_stmt>ibis.common.exceptions<as>com<import_from_stmt>ibis.expr.datatypes DataType<def_stmt>_parameter_count funcsig:Signature<arrow>int<block_start>"""Get the number of positional-or-keyword or position-only parameters in a function signature. Parameters ---------- funcsig : inspect.Signature A UDF signature Returns ------- int The number of parameters """<line_sep><return>sum(param.kind<in>{param.POSITIONAL_OR_KEYWORD param.POSITIONAL_ONLY}<for>param funcsig.parameters.values()<if>param.default<is>Parameter.empty)<block_end><def_stmt>validate_input_type input_type:List[DataType] func:Callable<arrow>Signature<block_start>"""Check that the declared number of inputs (the length of `input_type`) and the number of inputs to `func` are equal. If the signature of `func` uses *args, then no check is done (since no check can be done). Parameters ---------- input_type : List[DataType] func : callable Returns ------- inspect.Signature """<line_sep>funcsig=signature(func)<line_sep>params=funcsig.parameters.values()<line_sep># We can only do validation if all the positional arguments are explicit # (i.e. no *args) <if_stmt><not>any(param.kind<is>Parameter.VAR_POSITIONAL<for>param params)<block_start>declared_parameter_count=len(input_type)<line_sep>function_parameter_count=_parameter_count(funcsig)<if_stmt>declared_parameter_count<ne>function_parameter_count<block_start><raise>TypeError('Function signature {!r} has {:d} parameters, '<concat>'input_type has {:d}. These must match. 
Non-column '<concat>'parameters must be defined as keyword only, i.e., '<concat>'def foo(col, *, function_param).'.format(func.__name__ function_parameter_count declared_parameter_count ))<block_end><block_end><return>funcsig<block_end><def_stmt>validate_output_type output_type:Any<arrow><none><block_start>"""Check that the output type is a single datatype."""<if_stmt>isinstance(output_type list)<block_start><raise>com.IbisTypeError('The output type of a UDF must be a single datatype.')<block_end><block_end>
<import_from_stmt>datetime datetime<line_sep># ensure an rpc peer is added <def_stmt>addpeer p rpcpeer<block_start>pid=rpcpeer['id']<if_stmt>pid<not><in>p.persist['peerstate']<block_start>p.persist['peerstate'][pid]={'connected':rpcpeer['connected'] 'last_seen':datetime.now()<if>rpcpeer['connected']<else><none> 'avail':1.0<if>rpcpeer['connected']<else>0.0}<block_end><block_end># exponetially smooth online/offline states of peers <def_stmt>trace_availability p rpcpeers<block_start>p.persist['availcount']<augadd>1<line_sep>leadwin=max(min(p.avail_window p.persist['availcount']<times>p.avail_interval) p.avail_interval)<line_sep>samples=leadwin/p.avail_interval<line_sep>alpha=1.0/samples<line_sep>beta=1.0-alpha<for_stmt>rpcpeer rpcpeers['peers']<block_start>pid=rpcpeer['id']<line_sep>addpeer(p rpcpeer)<if_stmt>rpcpeer['connected']<block_start>p.persist['peerstate'][pid]['last_seen']=datetime.now()<line_sep>p.persist['peerstate'][pid]['connected']=<true><line_sep>p.persist['peerstate'][pid]['avail']=1.0<times>alpha+p.persist['peerstate'][pid]['avail']<times>beta<block_end><else_stmt><block_start>p.persist['peerstate'][pid]['connected']=<false><line_sep>p.persist['peerstate'][pid]['avail']=0.0<times>alpha+p.persist['peerstate'][pid]['avail']<times>beta<block_end><block_end><block_end>
<import_stmt>sys<import_stmt>scipy.stats<line_sep>normal=scipy.stats.norm(0 1)<def_stmt>phi_major x<block_start><return>normal.cdf(x)<block_end><def_stmt>phi_minor x<block_start><return>normal.pdf(x)<block_end><def_stmt>v x t<block_start>xt=x-t<line_sep>denom=phi_major(xt)<line_sep><return>-xt<if>(denom<l>sys.float_info.epsilon)<else>phi_minor(xt)/denom<block_end><def_stmt>w x t<block_start>xt=x-t<line_sep>denom=phi_major(xt)<if_stmt>denom<l>sys.float_info.epsilon<block_start><return>1<if>(x<l>0)<else>0<block_end><return>v(x t)<times>(v(x t)+xt)<block_end><def_stmt>vt x t<block_start>xx=abs(x)<line_sep>b=phi_major(t-xx)-phi_major(-t-xx)<if_stmt>b<l>1e-5<block_start><if_stmt>x<l>0<block_start><return>-x-t<block_end><return>-x+t<block_end>a=phi_minor(-t-xx)-phi_minor(t-xx)<line_sep><return>(-a<if>x<l>0<else>a)/b<block_end><def_stmt>wt x t<block_start>xx=abs(x)<line_sep>b=phi_major(t-xx)-phi_major(-t-xx)<if_stmt>b<l>sys.float_info.epsilon<block_start><return>1.0<block_end><return>((t-xx)<times>phi_minor(t-xx)+(t+xx)<times>phi_minor(-t-xx))/b+vt(x t)<times>vt(x t)<block_end>
# Status: Being ported by Steven Watanabe # Base revision: 47077 # # Copyright (c) 2005 <NAME>. # Copyright 2006 <NAME> # Copyright (c) 2008 <NAME> # # Use, modification and distribution is subject to the Boost Software # License Version 1.0. (See accompanying file LICENSE_1_0.txt or # http://www.boost.org/LICENSE_1_0.txt) ##### Using Precompiled Headers (Quick Guide) ##### # # Make precompiled mypch.hpp: # # import pch ; # # cpp-pch mypch # : # sources # mypch.hpp # : # requiremnts # <toolset>msvc:<source>mypch.cpp # ; # # Add cpp-pch to sources: # # exe hello # : main.cpp hello.cpp mypch # ; <import_from_stmt>b2.build type feature generators<import_from_stmt>b2.tools builtin<line_sep>type.register('PCH' ['pch'])<line_sep>type.register('C_PCH' [] 'PCH')<line_sep>type.register('CPP_PCH' [] 'PCH')<line_sep># Control precompiled header (PCH) generation. feature.feature('pch' ['on' 'off'] ['propagated'])<line_sep>feature.feature('pch-header' [] ['free' 'dependency'])<line_sep>feature.feature('pch-file' [] ['free' 'dependency'])<class_stmt>PchGenerator(generators.Generator)<block_start>""" Base PCH generator. The 'run' method has the logic to prevent this generator from being run unless it's being used for a top-level PCH target. """<def_stmt>action_class self<block_start><return>builtin.CompileAction<block_end><def_stmt>run self project name prop_set sources<block_start><if_stmt><not>name# Unless this generator is invoked as the top-most generator for a # main target, fail. This allows using 'H' type as input type for # this generator, while preventing Boost.Build to try this generator # when not explicitly asked for. # # One bad example is msvc, where pch generator produces both PCH # target and OBJ target, so if there's any header generated (like by # bison, or by msidl), we'd try to use pch generator to get OBJ from # that H, which is completely wrong. By restricting this generator # only to pch main target, such problem is solved. 
<block_start><pass><block_end><else_stmt><block_start>r=self.run_pch(project name prop_set.add_raw(['<define>BOOST_BUILD_PCH_ENABLED']) sources)<line_sep><return>generators.add_usage_requirements(r ['<define>BOOST_BUILD_PCH_ENABLED'])<block_end><block_end># This rule must be overridden by the derived classes. <def_stmt>run_pch self project name prop_set sources<block_start><pass><block_end><block_end># NOTE: requirements are empty, default pch generator can be applied when # pch=off. generators.register(builtin.DummyGenerator("pch.default-c-pch-generator" <false> [] ['C_PCH'] []))<line_sep>generators.register(builtin.DummyGenerator("pch.default-cpp-pch-generator" <false> [] ['CPP_PCH'] []))<line_sep>
# -*- coding: utf-8 -*- # The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>. <import_stmt>pytest<import_from_stmt>skidl netlist_to_skidl<import_from_stmt>.setup_teardown get_filename setup_function teardown_function<def_stmt>test_parser_1 <block_start>netlist_to_skidl(get_filename("Arduino_Uno_R3_From_Scratch.net"))<block_end>
"""Implementations of algorithms for continuous control."""<import_stmt>functools<import_from_stmt>typing Optional Sequence Tuple<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<import_stmt>optax<import_from_stmt>jaxrl.agents.sac temperature<import_from_stmt>jaxrl.agents.sac.actor update<as>update_actor<import_from_stmt>jaxrl.agents.sac.critic target_update<import_from_stmt>jaxrl.agents.sac_v1.critic update_q update_v<import_from_stmt>jaxrl.datasets Batch<import_from_stmt>jaxrl.networks critic_net policies<import_from_stmt>jaxrl.networks.common InfoDict Model PRNGKey<line_sep>@functools.partial(jax.jit static_argnames=('update_target'))<def_stmt>_update_jit rng:PRNGKey actor:Model critic:Model value:Model target_value:Model temp:Model batch:Batch discount:float tau:float target_entropy:float update_target:bool<arrow>Tuple[PRNGKey Model Model Model Model Model InfoDict]<block_start>new_critic,critic_info=update_q(critic target_value batch discount)<line_sep>rng,key=jax.random.split(rng)<line_sep>new_actor,actor_info=update_actor(key actor new_critic temp batch)<line_sep>rng,key=jax.random.split(rng)<line_sep>new_value,value_info=update_v(key new_actor new_critic value temp batch <true>)<if_stmt>update_target<block_start>new_target_value=target_update(new_value target_value tau)<block_end><else_stmt><block_start>new_target_value=target_value<block_end>new_temp,alpha_info=temperature.update(temp actor_info['entropy'] target_entropy)<line_sep><return>rng new_actor new_critic new_value new_target_value new_temp {**critic_info **value_info **actor_info **alpha_info}<block_end><class_stmt>SACV1Learner(object)<block_start><def_stmt>__init__ self seed:int observations:jnp.ndarray actions:jnp.ndarray actor_lr:float=3e-4 value_lr:float=3e-4 critic_lr:float=3e-4 temp_lr:float=3e-4 hidden_dims:Sequence[int]=(256 256) discount:float=0.99 tau:float=0.005 target_update_period:int=1 target_entropy:Optional[float]=<none> 
init_temperature:float=1.0<block_start>""" An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1801.01290 """<line_sep>action_dim=actions.shape[-1]<if_stmt>target_entropy<is><none><block_start>self.target_entropy=-action_dim/2<block_end><else_stmt><block_start>self.target_entropy=target_entropy<block_end>self.tau=tau<line_sep>self.target_update_period=target_update_period<line_sep>self.discount=discount<line_sep>rng=jax.random.PRNGKey(seed)<line_sep>rng,actor_key,critic_key,temp_key=jax.random.split(rng 4)<line_sep>actor_def=policies.NormalTanhPolicy(hidden_dims action_dim)<line_sep>actor=Model.create(actor_def inputs=[actor_key observations] tx=optax.adam(learning_rate=actor_lr))<line_sep>critic_def=critic_net.DoubleCritic(hidden_dims)<line_sep>critic=Model.create(critic_def inputs=[critic_key observations actions] tx=optax.adam(learning_rate=critic_lr))<line_sep>value_def=critic_net.ValueCritic(hidden_dims)<line_sep>value=Model.create(value_def inputs=[critic_key observations] tx=optax.adam(learning_rate=value_lr))<line_sep>target_value=Model.create(value_def inputs=[critic_key observations])<line_sep>temp=Model.create(temperature.Temperature(init_temperature) inputs=[temp_key] tx=optax.adam(learning_rate=temp_lr))<line_sep>self.actor=actor<line_sep>self.critic=critic<line_sep>self.value=value<line_sep>self.target_value=target_value<line_sep>self.temp=temp<line_sep>self.rng=rng<line_sep>self.step=1<block_end><def_stmt>sample_actions self observations:np.ndarray temperature:float=1.0<arrow>jnp.ndarray<block_start>rng,actions=policies.sample_actions(self.rng self.actor.apply_fn self.actor.params observations temperature)<line_sep>self.rng=rng<line_sep>actions=np.asarray(actions)<line_sep><return>np.clip(actions -1 1)<block_end><def_stmt>update self batch:Batch<arrow>InfoDict<block_start>self.step<augadd>1<line_sep>new_rng,new_actor,new_critic,new_value,new_target_value,new_temp,info=_update_jit(self.rng self.actor self.critic 
self.value self.target_value self.temp batch self.discount self.tau self.target_entropy self.step%self.target_update_period<eq>0)<line_sep>self.rng=new_rng<line_sep>self.actor=new_actor<line_sep>self.critic=new_critic<line_sep>self.value=new_value<line_sep>self.target_value=new_target_value<line_sep>self.temp=new_temp<line_sep><return>info<block_end><block_end>
"""Example revision Revision ID: fdf0cf6487a3 Revises: Create Date: 2021-08-09 17:55:19.491713 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<line_sep># revision identifiers, used by Alembic. revision="<KEY>"<line_sep>down_revision=<none><line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.create_table("example" sa.Column("example_id" sa.Integer() nullable=<false>) )<line_sep># ### end Alembic commands ### <block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.drop_table("measurements")<line_sep># ### end Alembic commands ### <block_end>
# -*- Python -*- <import_stmt>os<line_sep># Setup config name. config.name='MemorySanitizer'+getattr(config 'name_suffix' 'default')<line_sep># Setup source root. config.test_source_root=os.path.dirname(__file__)<line_sep># Setup default compiler flags used with -fsanitize=memory option. clang_msan_cflags=(["-fsanitize=memory" "-mno-omit-leaf-frame-pointer" "-fno-omit-frame-pointer" "-fno-optimize-sibling-calls"]+[config.target_cflags]+config.debug_info_flags)<line_sep># Some Msan tests leverage backtrace() which requires libexecinfo on FreeBSD. <if_stmt>config.host_os<eq>'FreeBSD'<block_start>clang_msan_cflags<augadd>["-lexecinfo" "-fPIC"]<block_end>clang_msan_cxxflags=config.cxx_mode_flags+clang_msan_cflags<line_sep># Flags for KMSAN invocation. This is C-only, we're not interested in C++. clang_kmsan_cflags=(["-fsanitize=kernel-memory"]+[config.target_cflags]+config.debug_info_flags)<def_stmt>build_invocation compile_flags<block_start><return>" "+" ".join([config.clang]+compile_flags)+" "<block_end>config.substitutions.append(("%clang_msan " build_invocation(clang_msan_cflags)))<line_sep>config.substitutions.append(("%clangxx_msan " build_invocation(clang_msan_cxxflags)))<line_sep>config.substitutions.append(("%clang_kmsan " build_invocation(clang_kmsan_cflags)))<line_sep># Default test suffixes. config.suffixes=['.c' '.cc' '.cpp']<if_stmt>config.host_os<not><in>['Linux' 'NetBSD' 'FreeBSD']<block_start>config.unsupported=<true><block_end># For mips64, mips64el we have forced store_context_size to 1 because these # archs use slow unwinder which is not async signal safe. Therefore we only # check the first frame since store_context size is 1. <if_stmt>config.host_arch<in>['mips64' 'mips64el']<block_start>config.substitutions.append(('CHECK-%short-stack' 'CHECK-SHORT-STACK'))<block_end><else_stmt><block_start>config.substitutions.append(('CHECK-%short-stack' 'CHECK-FULL-STACK'))<block_end>
## -*- encoding: utf-8 -*- """ This file (./domaines_doctest.sage) was *autogenerated* from ./domaines.tex, with sagetex.sty version 2011/05/27 v2.3.1. It contains the contents of all the sageexample environments from this file. You should be able to doctest this file with: sage -t ./domaines_doctest.sage It is always safe to delete this file; it is not used in typesetting your document. Sage example in ./domaines.tex, line 10:: sage: x = var('x') Sage example in ./domaines.tex, line 69:: sage: o = 12/35 sage: type(o) <... 'sage.rings.rational.Rational'> Sage example in ./domaines.tex, line 82:: sage: type(12/35) <... 'sage.rings.rational.Rational'> Sage example in ./domaines.tex, line 131:: sage: o = 720 sage: o.factor() 2^4 * 3^2 * 5 Sage example in ./domaines.tex, line 142:: sage: type(o).factor(o) 2^4 * 3^2 * 5 Sage example in ./domaines.tex, line 157:: sage: 720.factor() 2^4 * 3^2 * 5 Sage example in ./domaines.tex, line 166:: sage: o = 720 / 133 sage: o.numerator().factor() 2^4 * 3^2 * 5 Sage example in ./domaines.tex, line 253:: sage: 3 * 7 21 Sage example in ./domaines.tex, line 261:: sage: (2/3) * (6/5) 4/5 Sage example in ./domaines.tex, line 267:: sage: (1 + I) * (1 - I) 2 Sage example in ./domaines.tex, line 274:: sage: (x + 2) * (x + 1) (x + 2)*(x + 1) sage: (x + 1) * (x + 2) (x + 2)*(x + 1) Sage example in ./domaines.tex, line 308:: sage: def fourth_power(a): ....: a = a * a ....: a = a * a ....: return a Sage example in ./domaines.tex, line 330:: sage: fourth_power(2) 16 sage: fourth_power(3/2) 81/16 sage: fourth_power(I) 1 sage: fourth_power(x+1) (x + 1)^4 sage: M = matrix([[0,-1],[1,0]]); M [ 0 -1] [ 1 0] sage: fourth_power(M) [1 0] [0 1] Sage example in ./domaines.tex, line 375:: sage: t = type(5/1); t <... 
'sage.rings.rational.Rational'> sage: t == type(5) False Sage example in ./domaines.tex, line 476:: sage: a = 5; a 5 sage: a.is_unit() False Sage example in ./domaines.tex, line 484:: sage: a = 5/1; a 5 sage: a.is_unit() True Sage example in ./domaines.tex, line 507:: sage: parent(5) Integer Ring sage: parent(5/1) Rational Field Sage example in ./domaines.tex, line 515:: sage: ZZ Integer Ring sage: QQ Rational Field Sage example in ./domaines.tex, line 525:: sage: QQ(5).parent() Rational Field sage: ZZ(5/1).parent() Integer Ring sage: ZZ(1/5) Traceback (most recent call last): ... TypeError: no conversion of this rational to integer Sage example in ./domaines.tex, line 543:: sage: ZZ(1), QQ(1), RR(1), CC(1) (1, 1, 1.00000000000000, 1.00000000000000) Sage example in ./domaines.tex, line 568:: sage: cartesian_product([QQ, QQ]) The Cartesian product of (Rational Field, Rational Field) Sage example in ./domaines.tex, line 574:: sage: ZZ.fraction_field() Rational Field Sage example in ./domaines.tex, line 580:: sage: ZZ['x'] Univariate Polynomial Ring in x over Integer Ring Sage example in ./domaines.tex, line 591:: sage: Z5 = GF(5); Z5 Finite Field of size 5 sage: P = Z5['x']; P Univariate Polynomial Ring in x over Finite Field of size 5 sage: M = MatrixSpace(P, 3, 3); M Full MatrixSpace of 3 by 3 dense matrices over Univariate Polynomial Ring in x over Finite Field of size 5 Sage example in ./domaines.tex, line 602:: sage: M.random_element() # random [2*x^2 + 3*x + 4 4*x^2 + 2*x + 2 4*x^2 + 2*x] [ 3*x 2*x^2 + x + 3 3*x^2 + 4*x] [ 4*x^2 + 3 3*x^2 + 2*x + 4 2*x + 4] Sage example in ./domaines.tex, line 697:: sage: QQ.category() Join of Category of number fields and Category of quotient fields and Category of metric spaces Sage example in ./domaines.tex, line 704:: sage: QQ in Fields() True Sage example in ./domaines.tex, line 712:: sage: QQ in CommutativeAdditiveGroups() True Sage example in ./domaines.tex, line 718:: sage: QQ['x'] in EuclideanDomains() True Sage 
example in ./domaines.tex, line 859:: sage: 5.parent() Integer Ring Sage example in ./domaines.tex, line 872:: sage: type(factor(4)) <class 'sage.structure.factorization_integer.IntegerFactorization'> Sage example in ./domaines.tex, line 895:: sage: int(5) 5 sage: type(int(5)) <... 'int'> Sage example in ./domaines.tex, line 909:: sage: Integer(5) 5 sage: type(Integer(5)) <... 'sage.rings.integer.Integer'> Sage example in ./domaines.tex, line 926:: sage: factorial(99) / factorial(100) - 1 / 50 -1/100 Sage example in ./domaines.tex, line 974:: sage: 72/53 - 5/3 * 2.7 -3.14150943396227 Sage example in ./domaines.tex, line 982:: sage: cos(1), cos(1.) (cos(1), 0.540302305868140) Sage example in ./domaines.tex, line 1000:: sage: pi.n(digits=50) # variant: n(pi,digits=50) 3.1415926535897932384626433832795028841971693993751 Sage example in ./domaines.tex, line 1020:: sage: z = CC(1,2); z.arg() 1.10714871779409 Sage example in ./domaines.tex, line 1036:: sage: I.parent() Number Field in I with defining polynomial x^2 + 1 with I = 1*I Sage example in ./domaines.tex, line 1043:: sage: (1.+2.*I).parent() Complex Field with 53 bits of precision sage: (1.+2.*SR(I)).parent() Symbolic Ring Sage example in ./domaines.tex, line 1064:: sage: z = 3 * exp(I*pi/4) sage: z.real(), z.imag(), z.abs().canonicalize_radical() (3/2*sqrt(2), 3/2*sqrt(2), 3) Sage example in ./domaines.tex, line 1094:: sage: a, b, c = 0, 2, 3 sage: a == 1 or (b == 2 and c == 3) True Sage example in ./domaines.tex, line 1147:: sage: x, y = var('x, y') sage: bool( (x-y)*(x+y) == x^2-y^2 ) True Sage example in ./domaines.tex, line 1171:: sage: Z4 = IntegerModRing(4); Z4 Ring of integers modulo 4 sage: m = Z4(7); m 3 Sage example in ./domaines.tex, line 1184:: sage: 3 * m + 1 2 Sage example in ./domaines.tex, line 1191:: sage: Z3 = GF(3); Z3 Finite Field of size 3 Sage example in ./domaines.tex, line 1243:: sage: a = matrix(QQ, [[1,2,3],[2,4,8],[3,9,27]]) sage: (a^2 + 1) * a^(-1) [ -5 13/2 7/3] [ 7 1 25/3] [ 2 19/2 
27] Sage example in ./domaines.tex, line 1259:: sage: M = MatrixSpace(QQ,3,3); M Full MatrixSpace of 3 by 3 dense matrices over Rational Field sage: a = M([[1,2,3],[2,4,8],[3,9,27]]) sage: (a^2 + 1) * a^(-1) [ -5 13/2 7/3] [ 7 1 25/3] [ 2 19/2 27] Sage example in ./domaines.tex, line 1283:: sage: P = ZZ['x']; P Univariate Polynomial Ring in x over Integer Ring sage: F = P.fraction_field(); F Fraction Field of Univariate Polynomial Ring in x over Integer Ring sage: p = P(x+1) * P(x); p x^2 + x sage: p + 1/p (x^4 + 2*x^3 + x^2 + 1)/(x^2 + x) sage: parent(p + 1/p) Fraction Field of Univariate Polynomial Ring in x over Integer Ring Sage example in ./domaines.tex, line 1382:: sage: k.<a> = NumberField(x^3 + x + 1); a^3; a^4+3*a -a - 1 -a^2 + 2*a Sage example in ./domaines.tex, line 1416:: sage: parent(sin(x)) Symbolic Ring Sage example in ./domaines.tex, line 1422:: sage: SR Symbolic Ring Sage example in ./domaines.tex, line 1428:: sage: SR.category() Category of fields Sage example in ./domaines.tex, line 1482:: sage: R = QQ['x1,x2,x3,x4']; R Multivariate Polynomial Ring in x1, x2, x3, x4 over Rational Field sage: x1, x2, x3, x4 = R.gens() Sage example in ./domaines.tex, line 1489:: sage: x1 * (x2 - x3) x1*x2 - x1*x3 Sage example in ./domaines.tex, line 1496:: sage: (x1+x2)*(x1-x2) - (x1^2 - x2^2) 0 Sage example in ./domaines.tex, line 1509:: sage: P = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) ); P * P.lc() x1^3*x2^2*x3 - x1^2*x2^3*x3 - x1^3*x2*x3^2 + x1*x2^3*x3^2 + x1^2*x2*x3^3 - x1*x2^2*x3^3 - x1^3*x2^2*x4 + x1^2*x2^3*x4 + x1^3*x3^2*x4 - x2^3*x3^2*x4 - x1^2*x3^3*x4 + x2^2*x3^3*x4 + x1^3*x2*x4^2 - x1*x2^3*x4^2 - x1^3*x3*x4^2 + x2^3*x3*x4^2 + x1*x3^3*x4^2 - x2*x3^3*x4^2 - x1^2*x2*x4^3 + x1*x2^2*x4^3 + x1^2*x3*x4^3 - x2^2*x3*x4^3 - x1*x3^2*x4^3 + x2*x3^2*x4^3 Sage example in ./domaines.tex, line 1531:: sage: x1, x2, x3, x4 = SR.var('x1, x2, x3, x4') sage: got = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) ) sage: expected1 = -(x1 - x2)*(x1 - x3)*(x1 - 
x4)*(x2 - x3)*(x2 - x4)*(x3 - x4) sage: expected2 = (x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4) sage: bool(got == expected1 or got == expected2) True Sage example in ./domaines.tex, line 1581:: sage: x = var('x') sage: p = 54*x^4+36*x^3-102*x^2-72*x-12 sage: factor(p) 6*(x^2 - 2)*(3*x + 1)^2 Sage example in ./domaines.tex, line 1616:: sage: R = ZZ['x']; R Univariate Polynomial Ring in x over Integer Ring Sage example in ./domaines.tex, line 1622:: sage: q = R(p); q 54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12 Sage example in ./domaines.tex, line 1629:: sage: parent(q) Univariate Polynomial Ring in x over Integer Ring Sage example in ./domaines.tex, line 1635:: sage: factor(q) 2 * 3 * (3*x + 1)^2 * (x^2 - 2) Sage example in ./domaines.tex, line 1642:: sage: R = QQ['x']; R Univariate Polynomial Ring in x over Rational Field sage: q = R(p); q 54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12 sage: factor(q) (54) * (x + 1/3)^2 * (x^2 - 2) Sage example in ./domaines.tex, line 1665:: sage: R = ComplexField(16)['x']; R Univariate Polynomial Ring in x over Complex Field with 16 bits of precision sage: q = R(p); q 54.00*x^4 + 36.00*x^3 - 102.0*x^2 - 72.00*x - 12.00 sage: factor(q) (54.00) * (x - 1.414) * (x + 0.3333)^2 * (x + 1.414) Sage example in ./domaines.tex, line 1685:: sage: R = QQ[sqrt(2)]['x']; R Univariate Polynomial Ring in x over Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095? sage: q = R(p); q 54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12 sage: factor(q) (54) * (x - sqrt2) * (x + sqrt2) * (x + 1/3)^2 Sage example in ./domaines.tex, line 1698:: sage: R = GF(5)['x']; R Univariate Polynomial Ring in x over Finite Field of size 5 sage: q = R(p); q 4*x^4 + x^3 + 3*x^2 + 3*x + 3 sage: factor(q) (4) * (x + 2)^2 * (x^2 + 3) """<line_sep>
# Copyright 1996-2021 Cyberbotics Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Webots GPS device wrapper for ROS2."""<import_from_stmt>rclpy.qos QoSReliabilityPolicy qos_profile_sensor_data<import_from_stmt>std_msgs.msg Float32<import_from_stmt>sensor_msgs.msg NavSatFix NavSatStatus<import_from_stmt>geometry_msgs.msg PointStamped<import_from_stmt>.sensor_device SensorDevice<import_from_stmt>controller GPS<class_stmt>GpsDevice(SensorDevice)<block_start>""" ROS2 wrapper for Webots GPS node. Creates suitable ROS2 interface based on Webots [GPS](https://cyberbotics.com/doc/reference/gps) node instance: It allows the following functinalities: - Publishes position measurements of type `sensor_msgs::NavSatFix` if WGS84 - Publishes position measurements of type `geometry_msgs::PointStamped` if LOCAL Args: ---- node (WebotsNode): The ROS2 node. device_key (str): Unique identifier of the device used for configuration. wb_device (Gps): Webots node of type GPS. 
Kwargs: params (dict): Inherited from `SensorDevice` + the following:: dict: { 'timestep': int, # Publish period in ms (default 128ms) } """<def_stmt>__init__ self node device_key wb_device params=<none><block_start>super().__init__(node device_key wb_device params)<line_sep>self.__speed_publisher=<none><line_sep>self.__gps_publisher=<none><line_sep>self.__coordinate_system=self._wb_device.getCoordinateSystem()<line_sep># Exit if disabled <if_stmt>self._disable<block_start><return><block_end># Change default timestep self._timestep=128<line_sep>qos_sensor_reliable=qos_profile_sensor_data<line_sep>qos_sensor_reliable.reliability=QoSReliabilityPolicy.RELIABLE<line_sep># Create topics self.__speed_publisher=node.create_publisher(Float32 self._topic_name+'/speed' qos_sensor_reliable)<if_stmt>self.__coordinate_system<eq>GPS.WGS84<block_start>self.__gps_publisher=node.create_publisher(NavSatFix self._topic_name+'/gps' qos_sensor_reliable)<block_end><else_stmt><block_start>self.__gps_publisher=node.create_publisher(PointStamped self._topic_name+'/gps' qos_sensor_reliable)<block_end><block_end><def_stmt>step 
self<block_start>stamp=super().step()<if_stmt><not>stamp<block_start><return><block_end><if_stmt>self.__gps_publisher.get_subscription_count()<g>0<or>self.__speed_publisher.get_subscription_count()<g>0<or>self._always_publish<block_start>self._wb_device.enable(self._timestep)<line_sep>msg=Float32()<line_sep>msg.data=self._wb_device.getSpeed()<line_sep>self.__speed_publisher.publish(msg)<if_stmt>self.__coordinate_system<eq>GPS.WGS84<block_start>msg=NavSatFix()<line_sep>msg.header.stamp=stamp<line_sep>msg.header.frame_id=self._frame_id<line_sep>msg.latitude=self._wb_device.getValues()[0]<line_sep>msg.longitude=self._wb_device.getValues()[1]<line_sep>msg.altitude=self._wb_device.getValues()[2]<line_sep>msg.position_covariance_type=NavSatFix.COVARIANCE_TYPE_UNKNOWN<line_sep>msg.status.service=NavSatStatus.SERVICE_GPS<line_sep>self.__gps_publisher.publish(msg)<block_end><else_stmt><block_start>msg=PointStamped()<line_sep>msg.header.stamp=stamp<line_sep>msg.header.frame_id=self._frame_id<line_sep>msg.point.x=self._wb_device.getValues()[0]<line_sep>msg.point.y=self._wb_device.getValues()[1]<line_sep>msg.point.z=self._wb_device.getValues()[2]<line_sep>self.__gps_publisher.publish(msg)<block_end><block_end><else_stmt><block_start>self._wb_device.disable()<block_end><block_end><block_end>
<import_from_stmt>django.shortcuts render<import_from_stmt>django.views View<line_sep># Create your views here. <def_stmt>simple request<block_start><return>render(request 'tmpl/simple.html')<block_end><def_stmt>guess request<block_start>context={'zap':'42'}<line_sep><return>render(request 'tmpl/guess.html' context)<block_end><def_stmt>special request<block_start>context={'txt':'<b>bold</b>' 'zap':'42'}<line_sep><return>render(request 'tmpl/special.html' context)<block_end><def_stmt>loop request<block_start>f=['Apple' 'Orange' 'Banana' 'Lychee']<line_sep>n=['peanut' 'cashew']<line_sep>x={'fruits':f 'nuts':n 'zap':'42'}<line_sep><return>render(request 'tmpl/loop.html' x)<block_end><def_stmt>cond request<block_start>x={'guess':'42'}<line_sep><return>render(request 'tmpl/cond.html' x)<block_end><def_stmt>nested request<block_start>x={'outer':{'inner':'42'}}<line_sep><return>render(request 'tmpl/nested.html' x)<block_end># Call this with a parameter number <class_stmt>GameView(View)<block_start><def_stmt>get self request guess<block_start>x={'guess':int(guess)}<line_sep><return>render(request 'tmpl/cond.html' x)<block_end><block_end># Using inheritance (extend) <class_stmt>Game2View(View)<block_start><def_stmt>get self request guess<block_start>x={'guess':int(guess)}<line_sep><return>render(request 'tmpl/cond2.html' x)<block_end><block_end>
import requests
import jsonpickle
from requests_oauthlib import OAuth1
from urllib.parse import parse_qs, urlencode
import cherrypy
import json
import os
import re
from collections import defaultdict

# For readable serializations
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)


class LocalCache(object):
    """
    Generic class for encapsulating twitter credential caching
    """
    # File-name templates: one backup file for server state, one per user.
    server_data_template = "{}.server"
    user_data_template = "{0}.user.{1}"

    def __init__(self, backup="tmp/twitter.cache"):
        self.backup = backup  # Unique identifier for the backup of this cache
        self.memcache = {"users": defaultdict(lambda: {}),
                         "server": defaultdict(lambda: {})}
        self.deserialize()

    def users(self):
        """Return the mapping of user_id -> per-user state dict."""
        return self.memcache['users']

    def set_user_state(self, user_id, state):
        self.memcache['users'][user_id] = state

    def update_user_state(self, user_id, state=None):
        # BUG FIX: the default was a shared mutable dict (state={}).
        self.memcache['users'][user_id].update(state or {})

    def get_user_state(self, user_id):
        return self.memcache['users'][user_id]

    def clear_user_state(self, user_id):
        return self.memcache['users'][user_id].clear()

    def update_server_state(self, state_dict):
        self.memcache['server'].update(state_dict)

    def get_server_state(self):
        return self.memcache['server']

    def clear_server_state(self):
        return self.memcache['server'].clear()

    def initialize_user_queue(self, user_id, queue):
        self.memcache['users'][user_id]['user_queue'] = ReadableQueue(queue)

    def user_queue(self, user_id):
        """Return the user's ReadableQueue, or None if not initialized."""
        if 'user_queue' in self.memcache['users'][user_id]:
            return self.memcache['users'][user_id]['user_queue']

    def server_fname(self):
        return self.server_data_template.format(self.backup)

    def user_fname(self, user):
        return self.user_data_template.format(self.backup, user)

    def deserialize(self):
        """Reload server state and as much per-user state as possible."""
        cache_loaded = False
        if os.path.exists(self.server_fname()) and not os.path.isdir(self.backup):
            try:
                # BUG FIX: the original reset self.memcache to plain dicts
                # here, dropping the defaultdict behaviour the rest of the
                # class relies on (get_user_state of an unknown user would
                # then raise KeyError instead of returning a fresh dict).
                self.memcache = {"server": defaultdict(lambda: {}),
                                 "users": defaultdict(lambda: {})}
                with open(self.server_fname()) as backupfile:
                    print("Attempting to reload cache")
                    self.memcache['server'].update(
                        jsonpickle.decode(backupfile.read()))
                print("Server cache loaded", json.dumps(self.memcache, indent=4))
                for user in self.memcache['server']['user_list']:
                    # Try to load as much user data as possible
                    if os.path.exists(self.user_fname(user)):
                        print("found path for user", user)
                        with open(self.user_fname(user)) as userfile:
                            user_data = jsonpickle.decode(userfile.read())
                        self.memcache['users'][user] = user_data
                cache_loaded = True
            except Exception as e:
                print("Cache file corrupted...")
                raise e
        if not cache_loaded:
            print("Cache could not be loaded")
        else:
            print("CACHE LOADED SUCCESSFULLY!")

    def serialize(self):
        """Write server state plus one file per known user to disk."""
        json_to_serialize = self.memcache['server']
        user_list = list(self.users().keys())
        json_to_serialize.update({"user_list": user_list})
        with open(self.server_fname(), 'w') as backup_server:
            # Serialize Server:
            backup_server.write(jsonpickle.encode(json_to_serialize))
        for user in user_list:
            user_data = self.get_user_state(user)
            with open(self.user_fname(user), 'w') as userfile:
                userfile.write(jsonpickle.encode(user_data))


class ReadableQueue(object):
    """A positional cursor over a list of read_out()-able items (Tweets).

    Internally stores (index, item) pairs plus the cursor position.
    """

    def __init__(self, queue=None, pos=0):
        # BUG FIX: the default was a shared mutable list (queue=[]).
        queue = queue or []
        self.hashmap = {"queue": [(i, e) for i, e in enumerate(queue)],
                        "pos": pos}

    def queue(self):
        return self.hashmap['queue']

    def is_empty(self):
        return len(self.queue()) == 0

    def is_finished(self):
        return self.pos() == len(self.queue())

    def pos(self):
        return self.hashmap['pos']

    def set_pos(self, val):
        self.hashmap['pos'] = val

    def get_next(self, offset=1):
        """Advance the cursor and return up to `offset` (index, item) pairs."""
        if self.pos() < len(self.queue()):
            temp_queue = self.queue()[self.pos():self.pos() + offset]
            self.set_pos(self.pos() + offset)
            if self.pos() > len(self.queue()):
                self.set_pos(len(self.queue()))
            return temp_queue

    def read_out_next(self, offset=1):
        return " ".join([readable.read_out(index)
                         for index, readable in self.get_next(offset)])

    def has_prev(self):
        return self.pos() > 0

    def get_prev(self, offset=1):
        """Step the cursor back and return up to `offset` (index, item) pairs."""
        if self.pos() > 0:
            self.set_pos(self.pos() - offset)
            if self.pos() < 0:
                # Fewer than `offset` items before the cursor: clamp.
                # [1, current(2), 3] get_prev(offset=3)
                # pos :=> -2, offset :=> 3-2 = 1, pos :=> 0, then read 0 to 1
                offset = offset + self.pos()
                self.set_pos(0)
            # BUG FIX: the original returned queue()[pos():offset], an empty
            # slice whenever the new position >= offset (e.g. pos=2, offset=1
            # read queue[1:1]). Read `offset` items starting at the new pos.
            return self.queue()[self.pos():self.pos() + offset]
        return None

    def read_out_prev(self, offset=1):
        # BUG FIX: queue entries are (index, readable) pairs and
        # Tweet.read_out requires the index; the original iterated the raw
        # pairs and called read_out() with no argument (TypeError).
        return " ".join([readable.read_out(index)
                         for index, readable in self.get_prev(offset)])


# Local cache caches tokens for different users
local_cache = LocalCache()


def strip_html(text):
    """ Get rid of ugly twitter html """

    def reply_to(text):
        replying_to = []
        split_text = text.split()
        # BUG FIX: `message` was only assigned inside the loop's else branch,
        # so text that is empty or consists solely of @mentions raised
        # UnboundLocalError below.
        message = []
        for index, token in enumerate(split_text):
            if token.startswith('@'):
                replying_to.append(token[1:])
            else:
                message = split_text[index:]
                break
        rply_msg = ""
        if len(replying_to) > 0:
            rply_msg = "Replying to "
            for token in replying_to[:-1]:
                rply_msg += token + ","
            if len(replying_to) > 1:
                rply_msg += 'and '
            rply_msg += replying_to[-1] + ". "
        return rply_msg + " ".join(message)

    text = reply_to(text)
    text = text.replace('@', ' ')
    return " ".join([token for token in text.split()
                     if ('http:' not in token) and ('https:' not in token)])


class Tweet(object):
    """Thin wrapper over a decoded tweet JSON object."""

    def __init__(self, json_obj):
        self.tweet = json_obj

    def get_id(self):
        return self.tweet['id']

    def get_raw_text(self):
        return self.tweet['text']

    def _process_text(self):
        """Return speakable text: HTML stripped, handles replaced by names."""
        text = strip_html(self.tweet['text'])
        user_mentions = self.tweet['entities']['user_mentions']
        text = text.replace('@', 'at ')
        for user in user_mentions:
            text = text.replace(user['screen_name'], user['name'])
        return text

    def get_screen_name(self):
        return self.tweet['user']['screen_name']

    def get_user_name(self):
        return self.tweet['user']['name']

    def read_out(self, index):
        text = self._process_text()
        return "tweet number {num} by {user} : {text} ,".format(
            num=index + 1, user=self.get_user_name(), text=text)

    def detailed_description(self):
        """Build a long spoken description of the tweet and its author."""
        response_builder = [
            "This tweet was posted by {user_name} whose twitter handle is "
            "{screen_name} the account description reads: {description}.".format(
                screen_name=self.tweet['user']['screen_name'],
                user_name=self.tweet['user']['name'],
                description=self.tweet['user']['description'])]
        if self.tweet['retweeted']:
            response_builder += ["It's been retweeted {} times.".format(
                self.tweet['retweet_count'])]
        if self.tweet['favorited']:
            # NOTE(review): reads 'favorites_count'; Twitter's payload usually
            # calls this 'favorite_count' — confirm against the API response.
            response_builder += ["{} people have favorited it.".format(
                self.tweet['favorites_count'])]
        if self.tweet["in_reply_to_screen_name"]:
            response_builder += ["it was posted in response to user {}.".format(
                self.tweet['in_reply_to_screen_name'])]
        response_builder += ["the text of the tweet is, {}.".format(
            self._process_text())]
        return " ".join(response_builder)

    def user_mentions(self):
        # NOTE(review): mentions normally live under entities.user_mentions
        # (as _process_text reads them) — verify this top-level key exists.
        return self.tweet['user_mentions']


def get_cached_access_pair(uid):
    """Return (access_token, access_secret) for uid or raise ValueError."""
    if uid in local_cache.users():
        access_token = local_cache.get_user_state(uid)['access_token']
        access_secret = local_cache.get_user_state(uid)['access_secret']
        return access_token, access_secret
    else:
        raise ValueError


def get_request_token(callback_url=None):
    """Fetch an OAuth1 request token/secret pair and cache it server-side."""
    url = "https://api.twitter.com/oauth/request_token"
    consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
    auth = OAuth1(consumer_key, consumer_secret)
    params = {"oauth_callback": callback_url}
    r = requests.post(url, auth=auth, params=params)
    response_obj = parse_qs(r.text)
    local_cache.update_server_state(
        {"request_token": response_obj['oauth_token'][0],
         "request_secret": response_obj['oauth_token_secret'][0]})
    return response_obj['oauth_token_secret'], response_obj['oauth_token']


def authenticate_user_page(callback_url="", metadata=None):
    """Return the HTML of Twitter's authenticate page for a fresh token."""
    url = "https://api.twitter.com/oauth/authenticate"
    oauth_secret, oauth_token = get_request_token(callback_url)
    local_cache.update_server_state({'metadata': metadata})
    params = {"force_login": True, "oauth_token": oauth_token}
    r = requests.get(url, params=params)
    return r.text


def post_tweet(user_id, message, additional_params=None):
    """ Helper function to post a tweet """
    url = "https://api.twitter.com/1.1/statuses/update.json"
    params = {"status": message}
    # BUG FIX: the default was a shared mutable dict (additional_params={}).
    params.update(additional_params or {})
    r = make_twitter_request(url, user_id, params, request_type='POST')
    print(r.text)
    return "Successfully posted a tweet {}".format(message)


def get_access_token(oauth_token, oauth_verifier):
    """Exchange a verified request token for an access token; cache it."""
    url = "https://api.twitter.com/oauth/access_token"
    params = {"oauth_verifier": oauth_verifier}
    server_state = local_cache.get_server_state()
    request_token = server_state['request_token']
    request_secret = server_state['request_secret']
    consumer_key, consumer_secret = server_state['twitter_keys']
    auth = OAuth1(consumer_key, consumer_secret, request_token, request_secret)
    r = requests.post(url, params=params, auth=auth)
    response_obj = parse_qs(r.text)
    uid = response_obj['oauth_token'][0]
    print("Access token", uid)
    local_cache.set_user_state(
        user_id=uid,
        state={"access_token": response_obj['oauth_token'][0],
               "access_secret": response_obj['oauth_token_secret'][0],
               'twitter_user_id': response_obj['user_id'][0],
               'screen_name': response_obj['screen_name'][0]})
    local_cache.serialize()
    fragments = {"state": local_cache.get_server_state()['metadata']['state'],
                 "access_token": uid,
                 "token_type": "Bearer"}
    return urlencode(fragments)


def get_twitter_auth(user_id):
    """Build an OAuth1 signer for the given cached user."""
    consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
    access_token, access_secret = get_cached_access_pair(user_id)
    return OAuth1(consumer_key, consumer_secret, access_token, access_secret)


def process_tweets(tweet_list):
    """ Clean tweets and enumerate, preserving only things that we are interested in """
    return [Tweet(tweet) for tweet in tweet_list]


def make_twitter_request(url, user_id, params=None, request_type='GET'):
    """ Generically make a request to twitter API using a particular user's authorization """
    params = params or {}
    if request_type == "GET":
        return requests.get(url, auth=get_twitter_auth(user_id), params=params)
    elif request_type == "POST":
        return requests.post(url, auth=get_twitter_auth(user_id), params=params)


def get_user_twitter_details(user_id, params=None):
    """Look up the cached twitter account behind user_id."""
    url = "https://api.twitter.com/1.1/users/lookup.json"
    # BUG FIX: params.update() on a mutable default ({}) polluted the default
    # dict across calls; copy into a fresh dict instead.
    params = dict(params or {})
    user_cache = local_cache.get_user_state(user_id)
    params.update({"user_id": user_cache['twitter_user_id']})
    response = make_twitter_request(url, user_id, params)
    return response.json()


def geo_search(user_id, search_location):
    """ Search for a location - free form """
    url = "https://api.twitter.com/1.1/geo/search.json"
    params = {"query": search_location}
    return make_twitter_request(url, user_id, params).json()


def closest_trend_search(user_id, params=None):
    # url = "https://api.twitter.com/1.1/trends/place.json"
    url = "https://api.twitter.com/1.1/trends/closest.json"
    return make_twitter_request(url, user_id, params or {}).json()


def list_trends(user_id, woe_id):
    url = "https://api.twitter.com/1.1/trends/place.json"
    params = {"id": woe_id}
    return make_twitter_request(url, user_id, params).json()


def read_out_tweets(processed_tweets, speech_convertor=None):
    """ Input - list of processed 'Tweets' output - list of spoken responses """
    return ["tweet number {num} by {user}. {text}.".format(
                num=index + 1, user=user, text=text)
            for index, (user, text) in enumerate(processed_tweets)]


def request_tweet_list(url, user_id, params=None):
    # BUG FIX: `params` was accepted but never forwarded, so callers such as
    # get_user_latest_tweets silently lost their query parameters.
    return process_tweets(make_twitter_request(url, user_id, params or {}).json())


def get_home_tweets(user_id, input_params=None):
    url = "https://api.twitter.com/1.1/statuses/home_timeline.json"
    print("Trying to get home tweets")
    return request_tweet_list(url, user_id)


def get_retweets_of_me(user_id, input_params=None):
    """ returns recently retweeted tweets """
    url = "https://api.twitter.com/1.1/statuses/retweets_of_me.json"
    print("trying to get retweets")
    return request_tweet_list(url, user_id)


def get_my_favourite_tweets(user_id, input_params=None):
    """ Returns a user's favourite tweets """
    url = "https://api.twitter.com/1.1/favorites/list.json"
    return request_tweet_list(url, user_id)


def get_user_latest_tweets(user_id, params=None):
    url = "https://api.twitter.com/1.1/statuses/user_timeline.json?"
    return request_tweet_list(url, user_id, params or {})


def get_latest_twitter_mentions(user_id):
    url = "https://api.twitter.com/1.1/statuses/mentions_timeline.json"
    return request_tweet_list(url, user_id)


def search_for_tweets_about(user_id, params):
    """ Search twitter API """
    url = "https://api.twitter.com/1.1/search/tweets.json"
    response = make_twitter_request(url, user_id, params)
    return process_tweets(response.json()["statuses"])
import copy
import pickle

import tcod


def test_tcod_random() -> None:
    # CMWC generator: bounded draws plus the gaussian helpers.
    rng = tcod.random.Random(tcod.random.COMPLEMENTARY_MULTIPLY_WITH_CARRY)
    assert 0 <= rng.randint(0, 100) <= 100
    assert 0 <= rng.uniform(0, 100) <= 100
    # These are tcod's own (historically misspelled) API method names.
    rng.guass(0, 1)
    rng.inverse_guass(0, 1)


def test_tcod_random_copy() -> None:
    # A copied generator must replay an identical stream.
    rng = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
    clone = copy.copy(rng)
    for _ in range(3):
        assert rng.uniform(0, 1) == clone.uniform(0, 1)


def test_tcod_random_pickle() -> None:
    # Round-tripping through pickle must preserve generator state too.
    rng = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
    clone = pickle.loads(pickle.dumps(rng))
    for _ in range(3):
        assert rng.uniform(0, 1) == clone.uniform(0, 1)
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Viewlet.

Zope 2 (Five) flavored re-exports of zope.viewlet.viewlet, swapping in
Five's ViewPageTemplateFile so templates acquire Zope 2 security context.
"""

import os

import zope.viewlet.viewlet

from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile


class ViewletBase(zope.viewlet.viewlet.ViewletBase):
    pass


class SimpleAttributeViewlet(zope.viewlet.viewlet.SimpleAttributeViewlet):
    pass


class simple(zope.viewlet.viewlet.simple):
    # We need to ensure that the proper __init__ is called.
    __init__ = ViewletBase.__init__


def SimpleViewletClass(template, bases=(), attributes=None, name=''):
    """A function that can be used to generate a viewlet from a set of
    information.

    Args:
        template: path of the page template rendered by the viewlet.
        bases: extra base classes prepended to (simple, ViewletBase).
        attributes: optional extra class attributes.
        name: value stored as the generated class's __name__.
    """
    # Create the base class hierarchy
    bases += (simple, ViewletBase)

    attrs = {'index': ViewPageTemplateFile(template),
             '__name__': name}
    if attributes:
        attrs.update(attributes)

    # Generate a derived view class.
    class_ = type("SimpleViewletClass from %s" % template, bases, attrs)

    return class_


class ResourceViewletBase(zope.viewlet.viewlet.ResourceViewletBase):
    pass


def JavaScriptViewlet(path):
    """Create a viewlet that can simply insert a javascript link."""
    src = os.path.join(os.path.dirname(__file__), 'javascript_viewlet.pt')
    klass = type('JavaScriptViewlet',
                 (ResourceViewletBase, ViewletBase),
                 {'index': ViewPageTemplateFile(src),
                  '_path': path})
    return klass


class CSSResourceViewletBase(zope.viewlet.viewlet.CSSResourceViewletBase):
    pass


def CSSViewlet(path, media="all", rel="stylesheet"):
    # BUG FIX (docs): the docstring was copy-pasted from JavaScriptViewlet
    # and wrongly claimed this inserts a javascript link.
    """Create a viewlet that can simply insert a CSS stylesheet link."""
    src = os.path.join(os.path.dirname(__file__), 'css_viewlet.pt')
    klass = type('CSSViewlet',
                 (CSSResourceViewletBase, ViewletBase),
                 {'index': ViewPageTemplateFile(src),
                  '_path': path,
                  '_media': media,
                  '_rel': rel})
    return klass
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from ... import opcodes
from ... import tensor as mt
from ...core import OutputType, recursive_tile
from ...core.operand import OperandStage
from ...serialization.serializables import KeyField, Int32Field
from ...tensor.array_utils import as_same_device, device
from ...tensor.core import TensorOrder
from ...tensor.random import RandomStateField
from ...utils import has_unknown_shape
from ..metrics import euclidean_distances
from ..operands import LearnOperand, LearnOperandMixin


def _kmeans_plus_plus_init(X,
                           x_squared_norms,
                           random_state,
                           n_clusters: int,
                           n_local_trials: int = None):
    """Tensor-graph (lazy) k-means++ seeding: pick `n_clusters` rows of X."""
    n_samples, n_features = X.shape
    centers = mt.empty((n_clusters, n_features), dtype=X.dtype)

    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if X.issparse():  # pragma: no cover
        centers[0] = X[center_id].todense()
    else:
        centers[0] = X[center_id]

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0, mt.newaxis], X, Y_norm_squared=x_squared_norms,
        squared=True)
    current_pot = closest_dist_sq.sum()

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = mt.searchsorted(closest_dist_sq.cumsum(), rand_vals)
        # XXX: numerical imprecision can result in a candidate_id out of range
        candidate_ids = mt.clip(candidate_ids, None, closest_dist_sq.size - 1)

        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)

        # update closest distances squared and potential for each candidate
        distance_to_candidates = mt.minimum(closest_dist_sq,
                                            distance_to_candidates)
        candidates_pot = distance_to_candidates.sum(axis=1)

        # Decide which candidate is the best
        best_candidate = mt.argmin(candidates_pot)
        current_pot = candidates_pot[best_candidate]
        closest_dist_sq = distance_to_candidates[best_candidate]
        best_candidate = candidate_ids[best_candidate]

        # Permanently add best center candidate found in local tries
        if X.issparse():  # pragma: no cover
            c_center = X[best_candidate].todense()
        else:
            c_center = X[best_candidate]
        centers[c] = c_center

    return centers


class KMeansPlusPlusInit(LearnOperand, LearnOperandMixin):
    # Operand wrapping k-means++ seeding; single-chunk inputs run the
    # sklearn kernel directly, multi-chunk inputs tile the lazy graph above.
    _op_type_ = opcodes.KMEANS_PLUS_PLUS_INIT

    _x = KeyField('x')                        # data tensor (n_samples, n_features)
    _n_clusters = Int32Field('n_clusters')
    _x_squared_norms = KeyField('x_squared_norms')
    _state = RandomStateField('state')
    _n_local_trials = Int32Field('n_local_trials')

    def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
                 state=None, n_local_trials=None, output_types=None, **kw):
        super().__init__(_x=x, _n_clusters=n_clusters,
                         _x_squared_norms=x_squared_norms,
                         _state=state, _n_local_trials=n_local_trials,
                         _output_types=output_types, **kw)
        if self._output_types is None:
            self._output_types = [OutputType.tensor]

    @property
    def x(self):
        return self._x

    @property
    def n_clusters(self):
        return self._n_clusters

    @property
    def x_squared_norms(self):
        return self._x_squared_norms

    @property
    def state(self):
        return self._state

    @property
    def n_local_trials(self):
        return self._n_local_trials

    def _set_inputs(self, inputs):
        # Re-bind key fields after graph substitution: x is always first,
        # x_squared_norms always last.
        super()._set_inputs(inputs)
        self._x = self._inputs[0]
        self._x_squared_norms = self._inputs[-1]

    def __call__(self):
        # Output is the (n_clusters, n_features) centers tensor.
        inputs = [self._x, self._x_squared_norms]
        kw = {'shape': (self._n_clusters, self._x.shape[1]),
              'dtype': self._x.dtype,
              'order': TensorOrder.C_ORDER}
        return self.new_tileable(inputs, kws=[kw])

    @classmethod
    def _tile_one_chunk(cls, op: "KMeansPlusPlusInit"):
        # Fast path: everything fits in one chunk, execute() runs sklearn.
        out = op.outputs[0]
        chunk_op = op.copy().reset_key()
        chunk_kw = out.params.copy()
        chunk_kw['index'] = (0, 0)
        chunk_inputs = [op.x.chunks[0], op.x_squared_norms.chunks[0]]
        chunk = chunk_op.new_chunk(chunk_inputs, kws=[chunk_kw])

        kw = out.params
        kw['chunks'] = [chunk]
        kw['nsplits'] = tuple((s,) for s in out.shape)
        new_op = op.copy()
        return new_op.new_tileables(op.inputs, kws=[kw])

    @classmethod
    def tile(cls, op: "KMeansPlusPlusInit"):
        if len(op.x.chunks) == 1:
            assert len(op.x_squared_norms.chunks) == 1
            return cls._tile_one_chunk(op)
        else:
            return (yield from cls._tile_k_init(op))

    @classmethod
    def _tile_k_init(cls, op: "KMeansPlusPlusInit"):
        # Multi-chunk path: build the lazy k-means++ graph and tile it.
        X = op.x
        n_clusters = op.n_clusters
        x_squared_norms = op.x_squared_norms
        random_state = op.state
        n_local_trials = op.n_local_trials

        centers = _kmeans_plus_plus_init(X, x_squared_norms, random_state,
                                         n_clusters, n_local_trials)
        return (yield from recursive_tile(centers))

    @classmethod
    def execute(cls, ctx, op: "KMeansPlusPlusInit"):
        try:
            from sklearn.cluster._kmeans import _kmeans_plusplus
        except ImportError:  # pragma: no cover
            # Older sklearn versions expose _k_init under different modules;
            # adapt it to the newer (_kmeans_plusplus) return convention.
            try:
                from sklearn.cluster._kmeans import _k_init
            except ImportError:
                from sklearn.cluster.k_means_ import _k_init

            def _kmeans_plusplus(*args, **kwargs):
                return _k_init(*args, **kwargs), None

        (x, x_squared_norms), device_id, _ = as_same_device(
            [ctx[inp.key] for inp in op.inputs], device=op.device,
            ret_extra=True)

        with device(device_id):
            ctx[op.outputs[0].key] = _kmeans_plusplus(
                x, op.n_clusters, x_squared_norms=x_squared_norms,
                random_state=op.state, n_local_trials=op.n_local_trials)[0]


###############################################################################
# Initialization heuristic


def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++

    Parameters
    ----------
    X : array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).

    n_clusters : integer
        The number of seeds to choose

    x_squared_norms : array, shape (n_samples,)
        Squared Euclidean norm of each data point.

    random_state : int, RandomState instance
        The generator used to initialize the centers. Use an int to make the
        randomness deterministic.
        See :term:`Glossary <random_state>`.

    n_local_trials : integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.

    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    op = KMeansPlusPlusInit(x=X, n_clusters=n_clusters,
                            x_squared_norms=x_squared_norms,
                            state=random_state, n_local_trials=n_local_trials)
    return op()


class KMeansScalablePlusPlusInit(LearnOperand, LearnOperandMixin):
    # Scalable (k-means||) seeding: oversample candidate centers over
    # several iterations, then weight and re-cluster them down to
    # n_clusters with a local KMeans run in the reduce stage.
    _op_type_ = opcodes.KMEANS_SCALABLE_PLUS_PLUS_INIT

    _x = KeyField('x')
    _n_clusters = Int32Field('n_clusters')
    _x_squared_norms = KeyField('x_squared_norms')
    _state = RandomStateField('state')
    _init_iter = Int32Field('init_iter')
    _oversampling_factor = Int32Field('oversampling_factor')

    def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
                 state=None, init_iter=None, oversampling_factor=None,
                 output_types=None, **kw):
        super().__init__(_x=x, _n_clusters=n_clusters,
                         _x_squared_norms=x_squared_norms, _state=state,
                         _init_iter=init_iter,
                         _oversampling_factor=oversampling_factor,
                         _output_types=output_types, **kw)
        if self._output_types is None:
            self._output_types = [OutputType.tensor]

    @property
    def x(self):
        return self._x

    @property
    def n_clusters(self):
        return self._n_clusters

    @property
    def x_squared_norms(self):
        return self._x_squared_norms

    @property
    def state(self):
        return self._state

    @property
    def init_iter(self):
        return self._init_iter

    @property
    def oversampling_factor(self):
        return self._oversampling_factor

    def _set_inputs(self, inputs):
        # Map/combine/reduce stage chunks carry no x / x_squared_norms, so
        # only re-bind the fields that are actually set.
        super()._set_inputs(inputs)
        if self._x is not None:
            self._x = self._inputs[0]
        if self._x_squared_norms is not None:
            self._x_squared_norms = self._inputs[-1]

    def __call__(self):
        inputs = [self._x, self._x_squared_norms]
        kw = {'shape': (self._n_clusters, self._x.shape[1]),
              'dtype': self._x.dtype,
              'order': TensorOrder.C_ORDER}
        return self.new_tileable(inputs, kws=[kw])

    @classmethod
    def tile(cls, op: "KMeansScalablePlusPlusInit"):
        if has_unknown_shape(*op.inputs):
            yield

        x = mt.tensor(op.x)
        x_squared_norms = mt.atleast_2d(op.x_squared_norms)
        out = op.outputs[0]

        random_state = op.state
        rs = mt.random.RandomState.from_numpy(random_state)

        n_samples, n_features = x.shape
        n_clusters = op.n_clusters

        # step 1, sample a centroid
        centers = x[random_state.randint(n_samples, size=1)]

        for _ in range(op.init_iter):
            distances = euclidean_distances(
                x, centers, X_norm_squared=x_squared_norms, squared=True)

            # calculate the cost of data with respect to current centers
            cost = mt.sum(mt.min(distances, axis=1))

            # calculate the distribution to sample new centers
            distribution = mt.full(len(distances), 1 / len(distances))
            mt.true_divide(mt.min(distances, axis=1), cost,
                           where=cost != 0, out=distribution)

            # pick new centers
            new_centers_size = op.oversampling_factor * n_clusters
            new_centers = x[rs.choice(n_samples, new_centers_size,
                                      p=distribution)]

            centers = mt.concatenate([centers, new_centers])

        # rechunk centers into one chunk
        centers = (yield from recursive_tile(centers)).rechunk(centers.shape)

        distances = yield from recursive_tile(euclidean_distances(
            x, centers, X_norm_squared=x_squared_norms, squared=True))

        map_index_to_chunks = {}
        # calculate weight for each chunk
        for c in distances.chunks:
            map_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.map)
            map_chunk_kw = {'shape': (len(centers),),
                            'dtype': np.dtype(np.int64),
                            'order': TensorOrder.C_ORDER,
                            'index': c.index}
            map_chunk = map_chunk_op.new_chunk([c], kws=[map_chunk_kw])
            map_index_to_chunks[c.index] = map_chunk

        combine_chunks = []
        for i in range(distances.chunk_shape[0]):
            # Combine all column-chunks of one row of distance chunks.
            map_chunks = [map_index_to_chunks[i, j]
                          for j in range(distances.chunk_shape[1])]
            combine_chunk_op = KMeansScalablePlusPlusInit(
                stage=OperandStage.combine)
            combine_chunk_kw = {'shape': (len(centers),),
                                'dtype': np.dtype(np.int64),
                                'order': TensorOrder.C_ORDER,
                                'index': (i,)}
            combine_chunk = combine_chunk_op.new_chunk(
                map_chunks, kws=[combine_chunk_kw])
            combine_chunks.append(combine_chunk)

        reduce_chunk_op = KMeansScalablePlusPlusInit(
            n_clusters=op.n_clusters, state=random_state,
            stage=OperandStage.reduce)
        reduce_chunk_kw = out.params
        reduce_chunk_kw['index'] = (0, 0)
        reduce_chunk = reduce_chunk_op.new_chunk(
            [centers.chunks[0]] + combine_chunks, kws=[reduce_chunk_kw])

        new_op = op.copy()
        kw = out.params
        kw['chunks'] = [reduce_chunk]
        kw['nsplits'] = tuple((s,) for s in out.shape)
        return new_op.new_tileables(op.inputs, kws=[kw])

    @classmethod
    def _execute_map(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        # Per-chunk: closest candidate center and its distance for each row.
        distances = ctx[op.inputs[0].key]
        min_distance_ids = np.argmin(distances, axis=1)
        min_distances = distances[range(len(distances)), min_distance_ids]
        ctx[op.outputs[0].key] = (min_distances, min_distance_ids)

    @classmethod
    def _execute_combine(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        # Merge the per-column partial minima, then count how many points
        # picked each candidate center.
        out = op.outputs[0]
        all_distances, all_min_distance_ids = tuple(
            zip(*(ctx[inp.key] for inp in op.inputs)))
        distances = np.stack(all_distances).T
        min_distance_ids = np.stack(all_min_distance_ids).T

        combined_min_distance_id = np.argmin(distances, axis=1)
        min_distance_ids = min_distance_ids[
            range(len(distances)), combined_min_distance_id]
        count = np.bincount(min_distance_ids)
        result = np.zeros(out.shape[0], dtype=np.int64)
        result[:len(count)] = count
        ctx[out.key] = result

    @classmethod
    def _execute_reduce(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        # Weight each candidate center by its point count and re-cluster
        # the candidates down to n_clusters with a local KMeans.
        from sklearn.cluster import KMeans

        inputs = [ctx[inp.key] for inp in op.inputs]

        count = np.zeros(inputs[1].shape[0], dtype=np.int64)
        for inp in inputs[1:]:
            count += inp
        weight = count / count.sum()

        centers = inputs[0]
        kmeans = KMeans(n_clusters=op.n_clusters, n_init=1,
                        random_state=op.state)
        kmeans.fit(centers, sample_weight=weight)
        ctx[op.outputs[0].key] = kmeans.cluster_centers_

    @classmethod
    def execute(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        if op.stage == OperandStage.map:
            return cls._execute_map(ctx, op)
        elif op.stage == OperandStage.combine:
            return cls._execute_combine(ctx, op)
        else:
            return cls._execute_reduce(ctx, op)


def _scalable_k_init(X, n_clusters, x_squared_norms, random_state,
                     oversampling_factor=2, init_iter=5):
    """Entry point for scalable k-means|| initialization (see class above)."""
    op = KMeansScalablePlusPlusInit(x=X, n_clusters=n_clusters,
                                    x_squared_norms=x_squared_norms,
                                    state=random_state, init_iter=init_iter,
                                    oversampling_factor=oversampling_factor)
    return op()
"""Manual HTTP test probe: send one canned request (chosen by argv[1])
to a local server and dump whatever comes back."""
import sys
import socket

conn = socket.create_connection(('0.0.0.0', 8080))
msgs = [
    # 0 Keep-Alive, Transfer-Encoding chunked
    'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n',
    # 1,2,3 Close, EOF "encoding"
    'GET / HTTP/1.1\r\n\r\n',
    'GET / HTTP/1.1\r\nConnection: close\r\n\r\n',
    'GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n',
    # 4 Bad Request
    'GET /%20%20% HTTP/1.1\r\n\r\n',
    # 5 Bug #14
    'GET /%20abc HTTP/1.0\r\n\r\n',
    # 6 Content-{Length, Type}
    'GET / HTTP/1.0\r\nContent-Length: 11\r\n'
    'Content-Type: text/blah\r\nContent-Fype: bla\r\n'
    'Content-Tength: bla\r\n\r\nhello world',
    # 7 POST memory leak
    'POST / HTTP/1.0\r\nContent-Length: 1000\r\n\r\n%s' % ('a' * 1000),
    # 8,9 CVE-2015-0219
    'GET / HTTP/1.1\r\nFoo_Bar: bad\r\n\r\n',
    'GET / HTTP/1.1\r\nFoo-Bar: good\r\nFoo_Bar: bad\r\n\r\n',
]
conn.send(msgs[int(sys.argv[1])].encode())
while 1:
    data = conn.recv(100)
    if not data:
        break
    print(repr(data))
    if data.endswith(b'0\r\n\r\n'):
        # BUG FIX: the script is Python 3 (print(), bytes literals, .encode())
        # but used Python-2-only raw_input(); use input() instead.  Also use
        # sys.exit() rather than the site-provided exit() helper.
        if input('new request? Y/n') == 'n':
            sys.exit()
        conn.send(b'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n')
# Copyright (c) 2010-2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import os
import codecs

from simpleparse import parser

from .Newline import Newline
from .Indent import Indent
from .Dedent import Dedent
from .util import error

# Load the EBNF grammar that sits next to this module.
_ebnf_file = os.path.join(os.path.dirname(__file__), 'syntax.ebnf')
with open(_ebnf_file) as _thefile:
    _ebnf = _thefile.read()


class Parser(parser.Parser):
    """simpleparse-based parser with offside-rule (indentation) tokens."""

    def __init__(self):
        self.indent = 0
        # Prebuilt productions implementing NEWLINE/INDENT/DEDENT so the
        # grammar can express indentation-sensitive structure.
        offside = (
            ("NEWLINE", Newline(self).table()),
            ("INDENT", Indent(self).table()),
            ("DEDENT", Dedent(self).table()),
        )
        parser.Parser.__init__(self, _ebnf, 'root', prebuilts=offside)

    def parse_string(self, input, compiler):
        """Parse *input* with *compiler* and return the compiler context.

        Reports an error if the whole input was not consumed or if the
        mandatory "input" grammar is missing afterwards.
        """
        compiler.reset()
        start, _, end = parser.Parser.parse(self, input, processor=compiler)
        if end < len(input):
            error(input, end)
        if 'input' not in compiler.context.grammars:
            error(input, end, 'Required grammar "input" not found.')
        return compiler.context

    def parse(self, filename, compiler, encoding='utf8'):
        """Read *filename* with the given *encoding* and parse it."""
        with codecs.open(filename, 'r', encoding=encoding) as input_file:
            return self.parse_string(input_file.read(), compiler)
# -*- coding: utf-8 -*-
"""Render formiko.svg into PNG icons at the standard icon-theme sizes."""
from gi.repository.GdkPixbuf import Pixbuf
from os import makedirs


def main():
    """Rasterize formiko.svg into <size>x<size>/formiko.png for each size."""
    for size in (16, 22, 24, 32, 48, 64, 128, 256, 512):
        icon = Pixbuf.new_from_file_at_scale("formiko.svg", size, size, True)
        dirname = "%dx%d" % (size, size)
        # ROBUSTNESS: exist_ok lets the script be re-run without first
        # deleting the previously generated directories.
        makedirs(dirname, exist_ok=True)
        icon.savev("%s/formiko.png" % dirname, "png", [], [])


if __name__ == "__main__":
    main()
### ### This file was automatically generated ### <import_from_stmt>archinfo.arch register_arch Endness Register<import_from_stmt>.common ArchPcode<class_stmt>ArchPcode_PowerPC_LE_32_QUICC(ArchPcode)<block_start>name='PowerPC:LE:32:QUICC'<line_sep>pcode_arch='PowerPC:LE:32:QUICC'<line_sep>description='PowerQUICC-III 32-bit little endian family'<line_sep>bits=32<line_sep>ip_offset=0x780<line_sep>sp_offset=0x4<line_sep>bp_offset=sp_offset<line_sep>instruction_endness=Endness.LE<line_sep>register_list=[Register('r0' 4 0x0) Register('r1' 4 0x4) Register('r2' 4 0x8) Register('r3' 4 0xc) Register('r4' 4 0x10) Register('r5' 4 0x14) Register('r6' 4 0x18) Register('r7' 4 0x1c) Register('r8' 4 0x20) Register('r9' 4 0x24) Register('r10' 4 0x28) Register('r11' 4 0x2c) Register('r12' 4 0x30) Register('r13' 4 0x34) Register('r14' 4 0x38) Register('r15' 4 0x3c) Register('r16' 4 0x40) Register('r17' 4 0x44) Register('r18' 4 0x48) Register('r19' 4 0x4c) Register('r20' 4 0x50) Register('r21' 4 0x54) Register('r22' 4 0x58) Register('r23' 4 0x5c) Register('r24' 4 0x60) Register('r25' 4 0x64) Register('r26' 4 0x68) Register('r27' 4 0x6c) Register('r28' 4 0x70) Register('r29' 4 0x74) Register('r30' 4 0x78) Register('r31' 4 0x7c) Register('xer_so' 1 0x400) Register('xer_ov' 1 0x401) Register('xer_ov32' 1 0x402) Register('xer_ca' 1 0x403) Register('xer_ca32' 1 0x404) Register('xer_count' 1 0x405) Register('fp_fx' 1 0x500) Register('fp_fex' 1 0x501) Register('fp_vx' 1 0x502) Register('fp_ox' 1 0x503) Register('fp_ux' 1 0x504) Register('fp_zx' 1 0x505) Register('fp_xx' 1 0x506) Register('fp_vxsnan' 1 0x507) Register('fp_vxisi' 1 0x508) Register('fp_vxidi' 1 0x509) Register('fp_vxzdz' 1 0x50a) Register('fp_vximz' 1 0x50b) Register('fp_vxvc' 1 0x50c) Register('fp_fr' 1 0x50d) Register('fp_fi' 1 0x50e) Register('fp_c' 1 0x50f) Register('fp_cc0' 1 0x510) Register('fp_cc1' 1 0x511) Register('fp_cc2' 1 0x512) Register('fp_cc3' 1 0x513) Register('fp_reserve1' 1 0x514) Register('fp_vxsoft' 1 0x515) 
Register('fp_vxsqrt' 1 0x516) Register('fp_vxcvi' 1 0x517) Register('fp_ve' 1 0x518) Register('fp_oe' 1 0x519) Register('fp_ue' 1 0x51a) Register('fp_ze' 1 0x51b) Register('fp_xe' 1 0x51c) Register('fp_ni' 1 0x51d) Register('fp_rn0' 1 0x51e) Register('fp_rn1' 1 0x51f) Register('msr' 4 0x700) Register('reserve_address' 4 0x720) Register('reserve' 1 0x728) Register('reserve_length' 1 0x730) Register('pc' 4 0x780 alias_names=('ip' )) Register('sr0' 4 0x800) Register('sr1' 4 0x804) Register('sr2' 4 0x808) Register('sr3' 4 0x80c) Register('sr4' 4 0x810) Register('sr5' 4 0x814) Register('sr6' 4 0x818) Register('sr7' 4 0x81c) Register('sr8' 4 0x820) Register('sr9' 4 0x824) Register('sr10' 4 0x828) Register('sr11' 4 0x82c) Register('sr12' 4 0x830) Register('sr13' 4 0x834) Register('sr14' 4 0x838) Register('sr15' 4 0x83c) Register('crall' 8 0x900) Register('cr0' 1 0x900) Register('cr1' 1 0x901) Register('cr2' 1 0x902) Register('cr3' 1 0x903) Register('cr4' 1 0x904) Register('cr5' 1 0x905) Register('cr6' 1 0x906) Register('cr7' 1 0x907) Register('tea' 4 0x980) Register('r2save' 4 0x988) Register('spr000' 4 0x1000) Register('xer' 4 0x1004) Register('spr002' 4 0x1008) Register('spr003' 4 0x100c) Register('spr004' 4 0x1010) Register('spr005' 4 0x1014) Register('spr006' 4 0x1018) Register('spr007' 4 0x101c) Register('lr' 4 0x1020) Register('ctr' 4 0x1024) Register('spr00a' 4 0x1028) Register('spr00b' 4 0x102c) Register('spr00c' 4 0x1030) Register('spr00d' 4 0x1034) Register('spr00e' 4 0x1038) Register('spr00f' 4 0x103c) Register('spr010' 4 0x1040) Register('spr011' 4 0x1044) Register('spr012' 4 0x1048) Register('spr013' 4 0x104c) Register('spr014' 4 0x1050) Register('spr015' 4 0x1054) Register('spr016' 4 0x1058) Register('spr017' 4 0x105c) Register('spr018' 4 0x1060) Register('spr019' 4 0x1064) Register('srr0' 4 0x1068) Register('srr1' 4 0x106c) Register('spr01c' 4 0x1070) Register('spr01d' 4 0x1074) Register('spr01e' 4 0x1078) Register('spr01f' 4 0x107c) Register('spr020' 4 
0x1080) Register('spr021' 4 0x1084) Register('spr022' 4 0x1088) Register('spr023' 4 0x108c) Register('spr024' 4 0x1090) Register('spr025' 4 0x1094) Register('spr026' 4 0x1098) Register('spr027' 4 0x109c) Register('spr028' 4 0x10a0) Register('spr029' 4 0x10a4) Register('spr02a' 4 0x10a8) Register('spr02b' 4 0x10ac) Register('spr02c' 4 0x10b0) Register('spr02d' 4 0x10b4) Register('spr02e' 4 0x10b8) Register('spr02f' 4 0x10bc) Register('spr030' 4 0x10c0) Register('spr031' 4 0x10c4) Register('spr032' 4 0x10c8) Register('spr033' 4 0x10cc) Register('spr034' 4 0x10d0) Register('spr035' 4 0x10d4) Register('spr036' 4 0x10d8) Register('spr037' 4 0x10dc) Register('spr038' 4 0x10e0) Register('spr039' 4 0x10e4) Register('spr03a' 4 0x10e8) Register('spr03b' 4 0x10ec) Register('spr03c' 4 0x10f0) Register('spr03d' 4 0x10f4) Register('spr03e' 4 0x10f8) Register('spr03f' 4 0x10fc) Register('spr040' 4 0x1100) Register('spr041' 4 0x1104) Register('spr042' 4 0x1108) Register('spr043' 4 0x110c) Register('spr044' 4 0x1110) Register('spr045' 4 0x1114) Register('spr046' 4 0x1118) Register('spr047' 4 0x111c) Register('spr048' 4 0x1120) Register('spr049' 4 0x1124) Register('spr04a' 4 0x1128) Register('spr04b' 4 0x112c) Register('spr04c' 4 0x1130) Register('spr04d' 4 0x1134) Register('spr04e' 4 0x1138) Register('spr04f' 4 0x113c) Register('spr050' 4 0x1140) Register('spr051' 4 0x1144) Register('spr052' 4 0x1148) Register('spr053' 4 0x114c) Register('spr054' 4 0x1150) Register('spr055' 4 0x1154) Register('spr056' 4 0x1158) Register('spr057' 4 0x115c) Register('spr058' 4 0x1160) Register('spr059' 4 0x1164) Register('spr05a' 4 0x1168) Register('spr05b' 4 0x116c) Register('spr05c' 4 0x1170) Register('spr05d' 4 0x1174) Register('spr05e' 4 0x1178) Register('spr05f' 4 0x117c) Register('spr060' 4 0x1180) Register('spr061' 4 0x1184) Register('spr062' 4 0x1188) Register('spr063' 4 0x118c) Register('spr064' 4 0x1190) Register('spr065' 4 0x1194) Register('spr066' 4 0x1198) Register('spr067' 4 0x119c) 
Register('spr068' 4 0x11a0) Register('spr069' 4 0x11a4) Register('spr06a' 4 0x11a8) Register('spr06b' 4 0x11ac) Register('spr06c' 4 0x11b0) Register('spr06d' 4 0x11b4) Register('spr06e' 4 0x11b8) Register('spr06f' 4 0x11bc) Register('spr070' 4 0x11c0) Register('spr071' 4 0x11c4) Register('spr072' 4 0x11c8) Register('spr073' 4 0x11cc) Register('spr074' 4 0x11d0) Register('spr075' 4 0x11d4) Register('spr076' 4 0x11d8) Register('spr077' 4 0x11dc) Register('spr078' 4 0x11e0) Register('spr079' 4 0x11e4) Register('spr07a' 4 0x11e8) Register('spr07b' 4 0x11ec) Register('spr07c' 4 0x11f0) Register('spr07d' 4 0x11f4) Register('spr07e' 4 0x11f8) Register('spr07f' 4 0x11fc) Register('spr080' 4 0x1200) Register('spr081' 4 0x1204) Register('spr082' 4 0x1208) Register('spr083' 4 0x120c) Register('spr084' 4 0x1210) Register('spr085' 4 0x1214) Register('spr086' 4 0x1218) Register('spr087' 4 0x121c) Register('spr088' 4 0x1220) Register('spr089' 4 0x1224) Register('spr08a' 4 0x1228) Register('spr08b' 4 0x122c) Register('spr08c' 4 0x1230) Register('spr08d' 4 0x1234) Register('spr08e' 4 0x1238) Register('spr08f' 4 0x123c) Register('spr090' 4 0x1240) Register('spr091' 4 0x1244) Register('spr092' 4 0x1248) Register('spr093' 4 0x124c) Register('spr094' 4 0x1250) Register('spr095' 4 0x1254) Register('spr096' 4 0x1258) Register('spr097' 4 0x125c) Register('spr098' 4 0x1260) Register('spr099' 4 0x1264) Register('spr09a' 4 0x1268) Register('spr09b' 4 0x126c) Register('spr09c' 4 0x1270) Register('spr09d' 4 0x1274) Register('spr09e' 4 0x1278) Register('spr09f' 4 0x127c) Register('spr0a0' 4 0x1280) Register('spr0a1' 4 0x1284) Register('spr0a2' 4 0x1288) Register('spr0a3' 4 0x128c) Register('spr0a4' 4 0x1290) Register('spr0a5' 4 0x1294) Register('spr0a6' 4 0x1298) Register('spr0a7' 4 0x129c) Register('spr0a8' 4 0x12a0) Register('spr0a9' 4 0x12a4) Register('spr0aa' 4 0x12a8) Register('spr0ab' 4 0x12ac) Register('spr0ac' 4 0x12b0) Register('spr0ad' 4 0x12b4) Register('spr0ae' 4 0x12b8) 
Register('spr0af' 4 0x12bc) Register('spr0b0' 4 0x12c0) Register('spr0b1' 4 0x12c4) Register('spr0b2' 4 0x12c8) Register('spr0b3' 4 0x12cc) Register('spr0b4' 4 0x12d0) Register('spr0b5' 4 0x12d4) Register('spr0b6' 4 0x12d8) Register('spr0b7' 4 0x12dc) Register('spr0b8' 4 0x12e0) Register('spr0b9' 4 0x12e4) Register('spr0ba' 4 0x12e8) Register('spr0bb' 4 0x12ec) Register('spr0bc' 4 0x12f0) Register('spr0bd' 4 0x12f4) Register('spr0be' 4 0x12f8) Register('spr0bf' 4 0x12fc) Register('spr0c0' 4 0x1300) Register('spr0c1' 4 0x1304) Register('spr0c2' 4 0x1308) Register('spr0c3' 4 0x130c) Register('spr0c4' 4 0x1310) Register('spr0c5' 4 0x1314) Register('spr0c6' 4 0x1318) Register('spr0c7' 4 0x131c) Register('spr0c8' 4 0x1320) Register('spr0c9' 4 0x1324) Register('spr0ca' 4 0x1328) Register('spr0cb' 4 0x132c) Register('spr0cc' 4 0x1330) Register('spr0cd' 4 0x1334) Register('spr0ce' 4 0x1338) Register('spr0cf' 4 0x133c) Register('spr0d0' 4 0x1340) Register('spr0d1' 4 0x1344) Register('spr0d2' 4 0x1348) Register('spr0d3' 4 0x134c) Register('spr0d4' 4 0x1350) Register('spr0d5' 4 0x1354) Register('spr0d6' 4 0x1358) Register('spr0d7' 4 0x135c) Register('spr0d8' 4 0x1360) Register('spr0d9' 4 0x1364) Register('spr0da' 4 0x1368) Register('spr0db' 4 0x136c) Register('spr0dc' 4 0x1370) Register('spr0dd' 4 0x1374) Register('spr0de' 4 0x1378) Register('spr0df' 4 0x137c) Register('spr0e0' 4 0x1380) Register('spr0e1' 4 0x1384) Register('spr0e2' 4 0x1388) Register('spr0e3' 4 0x138c) Register('spr0e4' 4 0x1390) Register('spr0e5' 4 0x1394) Register('spr0e6' 4 0x1398) Register('spr0e7' 4 0x139c) Register('spr0e8' 4 0x13a0) Register('spr0e9' 4 0x13a4) Register('spr0ea' 4 0x13a8) Register('spr0eb' 4 0x13ac) Register('spr0ec' 4 0x13b0) Register('spr0ed' 4 0x13b4) Register('spr0ee' 4 0x13b8) Register('spr0ef' 4 0x13bc) Register('spr0f0' 4 0x13c0) Register('spr0f1' 4 0x13c4) Register('spr0f2' 4 0x13c8) Register('spr0f3' 4 0x13cc) Register('spr0f4' 4 0x13d0) Register('spr0f5' 4 0x13d4) 
Register('spr0f6' 4 0x13d8) Register('spr0f7' 4 0x13dc) Register('spr0f8' 4 0x13e0) Register('spr0f9' 4 0x13e4) Register('spr0fa' 4 0x13e8) Register('spr0fb' 4 0x13ec) Register('spr0fc' 4 0x13f0) Register('spr0fd' 4 0x13f4) Register('spr0fe' 4 0x13f8) Register('spr0ff' 4 0x13fc) Register('spr100' 4 0x1400) Register('spr101' 4 0x1404) Register('spr102' 4 0x1408) Register('spr103' 4 0x140c) Register('spr104' 4 0x1410) Register('spr105' 4 0x1414) Register('spr106' 4 0x1418) Register('spr107' 4 0x141c) Register('spr108' 4 0x1420) Register('spr109' 4 0x1424) Register('spr10a' 4 0x1428) Register('spr10b' 4 0x142c) Register('tblr' 4 0x1430) Register('tbur' 4 0x1434) Register('spr10e' 4 0x1438) Register('spr10f' 4 0x143c) Register('spr110' 4 0x1440) Register('spr111' 4 0x1444) Register('spr112' 4 0x1448) Register('spr113' 4 0x144c) Register('spr114' 4 0x1450) Register('spr115' 4 0x1454) Register('spr116' 4 0x1458) Register('spr117' 4 0x145c) Register('spr118' 4 0x1460) Register('spr119' 4 0x1464) Register('spr11a' 4 0x1468) Register('spr11b' 4 0x146c) Register('tblw' 4 0x1470) Register('tbuw' 4 0x1474) Register('spr11e' 4 0x1478) Register('spr11f' 4 0x147c) Register('spr120' 4 0x1480) Register('spr121' 4 0x1484) Register('spr122' 4 0x1488) Register('spr123' 4 0x148c) Register('spr124' 4 0x1490) Register('spr125' 4 0x1494) Register('spr126' 4 0x1498) Register('spr127' 4 0x149c) Register('spr128' 4 0x14a0) Register('spr129' 4 0x14a4) Register('spr12a' 4 0x14a8) Register('spr12b' 4 0x14ac) Register('spr12c' 4 0x14b0) Register('spr12d' 4 0x14b4) Register('spr12e' 4 0x14b8) Register('spr12f' 4 0x14bc) Register('spr130' 4 0x14c0) Register('spr131' 4 0x14c4) Register('spr132' 4 0x14c8) Register('spr133' 4 0x14cc) Register('spr134' 4 0x14d0) Register('spr135' 4 0x14d4) Register('spr136' 4 0x14d8) Register('spr137' 4 0x14dc) Register('spr138' 4 0x14e0) Register('spr139' 4 0x14e4) Register('spr13a' 4 0x14e8) Register('spr13b' 4 0x14ec) Register('spr13c' 4 0x14f0) Register('spr13d' 4 
0x14f4) Register('spr13e' 4 0x14f8) Register('spr13f' 4 0x14fc) Register('spr140' 4 0x1500) Register('spr141' 4 0x1504) Register('spr142' 4 0x1508) Register('spr143' 4 0x150c) Register('spr144' 4 0x1510) Register('spr145' 4 0x1514) Register('spr146' 4 0x1518) Register('spr147' 4 0x151c) Register('spr148' 4 0x1520) Register('spr149' 4 0x1524) Register('spr14a' 4 0x1528) Register('spr14b' 4 0x152c) Register('spr14c' 4 0x1530) Register('spr14d' 4 0x1534) Register('spr14e' 4 0x1538) Register('spr14f' 4 0x153c) Register('spr150' 4 0x1540) Register('spr151' 4 0x1544) Register('spr152' 4 0x1548) Register('spr153' 4 0x154c) Register('spr154' 4 0x1550) Register('spr155' 4 0x1554) Register('spr156' 4 0x1558) Register('spr157' 4 0x155c) Register('spr158' 4 0x1560) Register('spr159' 4 0x1564) Register('spr15a' 4 0x1568) Register('spr15b' 4 0x156c) Register('spr15c' 4 0x1570) Register('spr15d' 4 0x1574) Register('spr15e' 4 0x1578) Register('spr15f' 4 0x157c) Register('spr160' 4 0x1580) Register('spr161' 4 0x1584) Register('spr162' 4 0x1588) Register('spr163' 4 0x158c) Register('spr164' 4 0x1590) Register('spr165' 4 0x1594) Register('spr166' 4 0x1598) Register('spr167' 4 0x159c) Register('spr168' 4 0x15a0) Register('spr169' 4 0x15a4) Register('spr16a' 4 0x15a8) Register('spr16b' 4 0x15ac) Register('spr16c' 4 0x15b0) Register('spr16d' 4 0x15b4) Register('spr16e' 4 0x15b8) Register('spr16f' 4 0x15bc) Register('spr170' 4 0x15c0) Register('spr171' 4 0x15c4) Register('spr172' 4 0x15c8) Register('spr173' 4 0x15cc) Register('spr174' 4 0x15d0) Register('spr175' 4 0x15d4) Register('spr176' 4 0x15d8) Register('spr177' 4 0x15dc) Register('spr178' 4 0x15e0) Register('spr179' 4 0x15e4) Register('spr17a' 4 0x15e8) Register('spr17b' 4 0x15ec) Register('spr17c' 4 0x15f0) Register('spr17d' 4 0x15f4) Register('spr17e' 4 0x15f8) Register('spr17f' 4 0x15fc) Register('spr180' 4 0x1600) Register('spr181' 4 0x1604) Register('spr182' 4 0x1608) Register('spr183' 4 0x160c) Register('spr184' 4 0x1610) 
Register('spr185' 4 0x1614) Register('spr186' 4 0x1618) Register('spr187' 4 0x161c) Register('spr188' 4 0x1620) Register('spr189' 4 0x1624) Register('spr18a' 4 0x1628) Register('spr18b' 4 0x162c) Register('spr18c' 4 0x1630) Register('spr18d' 4 0x1634) Register('spr18e' 4 0x1638) Register('spr18f' 4 0x163c) Register('spr190' 4 0x1640) Register('spr191' 4 0x1644) Register('spr192' 4 0x1648) Register('spr193' 4 0x164c) Register('spr194' 4 0x1650) Register('spr195' 4 0x1654) Register('spr196' 4 0x1658) Register('spr197' 4 0x165c) Register('spr198' 4 0x1660) Register('spr199' 4 0x1664) Register('spr19a' 4 0x1668) Register('spr19b' 4 0x166c) Register('spr19c' 4 0x1670) Register('spr19d' 4 0x1674) Register('spr19e' 4 0x1678) Register('spr19f' 4 0x167c) Register('spr1a0' 4 0x1680) Register('spr1a1' 4 0x1684) Register('spr1a2' 4 0x1688) Register('spr1a3' 4 0x168c) Register('spr1a4' 4 0x1690) Register('spr1a5' 4 0x1694) Register('spr1a6' 4 0x1698) Register('spr1a7' 4 0x169c) Register('spr1a8' 4 0x16a0) Register('spr1a9' 4 0x16a4) Register('spr1aa' 4 0x16a8) Register('spr1ab' 4 0x16ac) Register('spr1ac' 4 0x16b0) Register('spr1ad' 4 0x16b4) Register('spr1ae' 4 0x16b8) Register('spr1af' 4 0x16bc) Register('spr1b0' 4 0x16c0) Register('spr1b1' 4 0x16c4) Register('spr1b2' 4 0x16c8) Register('spr1b3' 4 0x16cc) Register('spr1b4' 4 0x16d0) Register('spr1b5' 4 0x16d4) Register('spr1b6' 4 0x16d8) Register('spr1b7' 4 0x16dc) Register('spr1b8' 4 0x16e0) Register('spr1b9' 4 0x16e4) Register('spr1ba' 4 0x16e8) Register('spr1bb' 4 0x16ec) Register('spr1bc' 4 0x16f0) Register('spr1bd' 4 0x16f4) Register('spr1be' 4 0x16f8) Register('spr1bf' 4 0x16fc) Register('spr1c0' 4 0x1700) Register('spr1c1' 4 0x1704) Register('spr1c2' 4 0x1708) Register('spr1c3' 4 0x170c) Register('spr1c4' 4 0x1710) Register('spr1c5' 4 0x1714) Register('spr1c6' 4 0x1718) Register('spr1c7' 4 0x171c) Register('spr1c8' 4 0x1720) Register('spr1c9' 4 0x1724) Register('spr1ca' 4 0x1728) Register('spr1cb' 4 0x172c) 
Register('spr1cc' 4 0x1730) Register('spr1cd' 4 0x1734) Register('spr1ce' 4 0x1738) Register('spr1cf' 4 0x173c) Register('spr1d0' 4 0x1740) Register('spr1d1' 4 0x1744) Register('spr1d2' 4 0x1748) Register('spr1d3' 4 0x174c) Register('spr1d4' 4 0x1750) Register('spr1d5' 4 0x1754) Register('spr1d6' 4 0x1758) Register('spr1d7' 4 0x175c) Register('spr1d8' 4 0x1760) Register('spr1d9' 4 0x1764) Register('spr1da' 4 0x1768) Register('spr1db' 4 0x176c) Register('spr1dc' 4 0x1770) Register('spr1dd' 4 0x1774) Register('spr1de' 4 0x1778) Register('spr1df' 4 0x177c) Register('spr1e0' 4 0x1780) Register('spr1e1' 4 0x1784) Register('spr1e2' 4 0x1788) Register('spr1e3' 4 0x178c) Register('spr1e4' 4 0x1790) Register('spr1e5' 4 0x1794) Register('spr1e6' 4 0x1798) Register('spr1e7' 4 0x179c) Register('spr1e8' 4 0x17a0) Register('spr1e9' 4 0x17a4) Register('spr1ea' 4 0x17a8) Register('spr1eb' 4 0x17ac) Register('spr1ec' 4 0x17b0) Register('spr1ed' 4 0x17b4) Register('spr1ee' 4 0x17b8) Register('spr1ef' 4 0x17bc) Register('spr1f0' 4 0x17c0) Register('spr1f1' 4 0x17c4) Register('spr1f2' 4 0x17c8) Register('spr1f3' 4 0x17cc) Register('spr1f4' 4 0x17d0) Register('spr1f5' 4 0x17d4) Register('spr1f6' 4 0x17d8) Register('spr1f7' 4 0x17dc) Register('spr1f8' 4 0x17e0) Register('spr1f9' 4 0x17e4) Register('spr1fa' 4 0x17e8) Register('spr1fb' 4 0x17ec) Register('spr1fc' 4 0x17f0) Register('spr1fd' 4 0x17f4) Register('spr1fe' 4 0x17f8) Register('spr1ff' 4 0x17fc) Register('spr200' 4 0x1800) Register('spr201' 4 0x1804) Register('spr202' 4 0x1808) Register('spr203' 4 0x180c) Register('spr204' 4 0x1810) Register('spr205' 4 0x1814) Register('spr206' 4 0x1818) Register('spr207' 4 0x181c) Register('spr208' 4 0x1820) Register('spr209' 4 0x1824) Register('spr20a' 4 0x1828) Register('spr20b' 4 0x182c) Register('spr20c' 4 0x1830) Register('spr20d' 4 0x1834) Register('spr20e' 4 0x1838) Register('spr20f' 4 0x183c) Register('spr210' 4 0x1840) Register('spr211' 4 0x1844) Register('spr212' 4 0x1848) 
Register('spr213' 4 0x184c) Register('spr214' 4 0x1850) Register('spr215' 4 0x1854) Register('spr216' 4 0x1858) Register('spr217' 4 0x185c) Register('spr218' 4 0x1860) Register('spr219' 4 0x1864) Register('spr21a' 4 0x1868) Register('spr21b' 4 0x186c) Register('spr21c' 4 0x1870) Register('spr21d' 4 0x1874) Register('spr21e' 4 0x1878) Register('spr21f' 4 0x187c) Register('spr220' 4 0x1880) Register('spr221' 4 0x1884) Register('spr222' 4 0x1888) Register('spr223' 4 0x188c) Register('spr224' 4 0x1890) Register('spr225' 4 0x1894) Register('spr226' 4 0x1898) Register('spr227' 4 0x189c) Register('spr228' 4 0x18a0) Register('spr229' 4 0x18a4) Register('spr22a' 4 0x18a8) Register('spr22b' 4 0x18ac) Register('spr22c' 4 0x18b0) Register('spr22d' 4 0x18b4) Register('spr22e' 4 0x18b8) Register('spr22f' 4 0x18bc) Register('spr230' 4 0x18c0) Register('spr231' 4 0x18c4) Register('spr232' 4 0x18c8) Register('spr233' 4 0x18cc) Register('spr234' 4 0x18d0) Register('spr235' 4 0x18d4) Register('spr236' 4 0x18d8) Register('spr237' 4 0x18dc) Register('spr238' 4 0x18e0) Register('spr239' 4 0x18e4) Register('spr23a' 4 0x18e8) Register('spr23b' 4 0x18ec) Register('spr23c' 4 0x18f0) Register('spr23d' 4 0x18f4) Register('spr23e' 4 0x18f8) Register('spr23f' 4 0x18fc) Register('spr240' 4 0x1900) Register('spr241' 4 0x1904) Register('spr242' 4 0x1908) Register('spr243' 4 0x190c) Register('spr244' 4 0x1910) Register('spr245' 4 0x1914) Register('spr246' 4 0x1918) Register('spr247' 4 0x191c) Register('spr248' 4 0x1920) Register('spr249' 4 0x1924) Register('spr24a' 4 0x1928) Register('spr24b' 4 0x192c) Register('spr24c' 4 0x1930) Register('spr24d' 4 0x1934) Register('spr24e' 4 0x1938) Register('spr24f' 4 0x193c) Register('spr250' 4 0x1940) Register('spr251' 4 0x1944) Register('spr252' 4 0x1948) Register('spr253' 4 0x194c) Register('spr254' 4 0x1950) Register('spr255' 4 0x1954) Register('spr256' 4 0x1958) Register('spr257' 4 0x195c) Register('spr258' 4 0x1960) Register('spr259' 4 0x1964) 
Register('spr25a' 4 0x1968) Register('spr25b' 4 0x196c) Register('spr25c' 4 0x1970) Register('spr25d' 4 0x1974) Register('spr25e' 4 0x1978) Register('spr25f' 4 0x197c) Register('spr260' 4 0x1980) Register('spr261' 4 0x1984) Register('spr262' 4 0x1988) Register('spr263' 4 0x198c) Register('spr264' 4 0x1990) Register('spr265' 4 0x1994) Register('spr266' 4 0x1998) Register('spr267' 4 0x199c) Register('spr268' 4 0x19a0) Register('spr269' 4 0x19a4) Register('spr26a' 4 0x19a8) Register('spr26b' 4 0x19ac) Register('spr26c' 4 0x19b0) Register('spr26d' 4 0x19b4) Register('spr26e' 4 0x19b8) Register('spr26f' 4 0x19bc) Register('spr270' 4 0x19c0) Register('spr271' 4 0x19c4) Register('spr272' 4 0x19c8) Register('spr273' 4 0x19cc) Register('spr274' 4 0x19d0) Register('spr275' 4 0x19d4) Register('spr276' 4 0x19d8) Register('spr277' 4 0x19dc) Register('spr278' 4 0x19e0) Register('spr279' 4 0x19e4) Register('spr27a' 4 0x19e8) Register('spr27b' 4 0x19ec) Register('spr27c' 4 0x19f0) Register('spr27d' 4 0x19f4) Register('spr27e' 4 0x19f8) Register('spr27f' 4 0x19fc) Register('spr280' 4 0x1a00) Register('spr281' 4 0x1a04) Register('spr282' 4 0x1a08) Register('spr283' 4 0x1a0c) Register('spr284' 4 0x1a10) Register('spr285' 4 0x1a14) Register('spr286' 4 0x1a18) Register('spr287' 4 0x1a1c) Register('spr288' 4 0x1a20) Register('spr289' 4 0x1a24) Register('spr28a' 4 0x1a28) Register('spr28b' 4 0x1a2c) Register('spr28c' 4 0x1a30) Register('spr28d' 4 0x1a34) Register('spr28e' 4 0x1a38) Register('spr28f' 4 0x1a3c) Register('spr290' 4 0x1a40) Register('spr291' 4 0x1a44) Register('spr292' 4 0x1a48) Register('spr293' 4 0x1a4c) Register('spr294' 4 0x1a50) Register('spr295' 4 0x1a54) Register('spr296' 4 0x1a58) Register('spr297' 4 0x1a5c) Register('spr298' 4 0x1a60) Register('spr299' 4 0x1a64) Register('spr29a' 4 0x1a68) Register('spr29b' 4 0x1a6c) Register('spr29c' 4 0x1a70) Register('spr29d' 4 0x1a74) Register('spr29e' 4 0x1a78) Register('spr29f' 4 0x1a7c) Register('spr2a0' 4 0x1a80) 
Register('spr2a1' 4 0x1a84) Register('spr2a2' 4 0x1a88) Register('spr2a3' 4 0x1a8c) Register('spr2a4' 4 0x1a90) Register('spr2a5' 4 0x1a94) Register('spr2a6' 4 0x1a98) Register('spr2a7' 4 0x1a9c) Register('spr2a8' 4 0x1aa0) Register('spr2a9' 4 0x1aa4) Register('spr2aa' 4 0x1aa8) Register('spr2ab' 4 0x1aac) Register('spr2ac' 4 0x1ab0) Register('spr2ad' 4 0x1ab4) Register('spr2ae' 4 0x1ab8) Register('spr2af' 4 0x1abc) Register('spr2b0' 4 0x1ac0) Register('spr2b1' 4 0x1ac4) Register('spr2b2' 4 0x1ac8) Register('spr2b3' 4 0x1acc) Register('spr2b4' 4 0x1ad0) Register('spr2b5' 4 0x1ad4) Register('spr2b6' 4 0x1ad8) Register('spr2b7' 4 0x1adc) Register('spr2b8' 4 0x1ae0) Register('spr2b9' 4 0x1ae4) Register('spr2ba' 4 0x1ae8) Register('spr2bb' 4 0x1aec) Register('spr2bc' 4 0x1af0) Register('spr2bd' 4 0x1af4) Register('spr2be' 4 0x1af8) Register('spr2bf' 4 0x1afc) Register('spr2c0' 4 0x1b00) Register('spr2c1' 4 0x1b04) Register('spr2c2' 4 0x1b08) Register('spr2c3' 4 0x1b0c) Register('spr2c4' 4 0x1b10) Register('spr2c5' 4 0x1b14) Register('spr2c6' 4 0x1b18) Register('spr2c7' 4 0x1b1c) Register('spr2c8' 4 0x1b20) Register('spr2c9' 4 0x1b24) Register('spr2ca' 4 0x1b28) Register('spr2cb' 4 0x1b2c) Register('spr2cc' 4 0x1b30) Register('spr2cd' 4 0x1b34) Register('spr2ce' 4 0x1b38) Register('spr2cf' 4 0x1b3c) Register('spr2d0' 4 0x1b40) Register('spr2d1' 4 0x1b44) Register('spr2d2' 4 0x1b48) Register('spr2d3' 4 0x1b4c) Register('spr2d4' 4 0x1b50) Register('spr2d5' 4 0x1b54) Register('spr2d6' 4 0x1b58) Register('spr2d7' 4 0x1b5c) Register('spr2d8' 4 0x1b60) Register('spr2d9' 4 0x1b64) Register('spr2da' 4 0x1b68) Register('spr2db' 4 0x1b6c) Register('spr2dc' 4 0x1b70) Register('spr2dd' 4 0x1b74) Register('spr2de' 4 0x1b78) Register('spr2df' 4 0x1b7c) Register('spr2e0' 4 0x1b80) Register('spr2e1' 4 0x1b84) Register('spr2e2' 4 0x1b88) Register('spr2e3' 4 0x1b8c) Register('spr2e4' 4 0x1b90) Register('spr2e5' 4 0x1b94) Register('spr2e6' 4 0x1b98) Register('spr2e7' 4 0x1b9c) 
Register('spr2e8' 4 0x1ba0) Register('spr2e9' 4 0x1ba4) Register('spr2ea' 4 0x1ba8) Register('spr2eb' 4 0x1bac) Register('spr2ec' 4 0x1bb0) Register('spr2ed' 4 0x1bb4) Register('spr2ee' 4 0x1bb8) Register('spr2ef' 4 0x1bbc) Register('spr2f0' 4 0x1bc0) Register('spr2f1' 4 0x1bc4) Register('spr2f2' 4 0x1bc8) Register('spr2f3' 4 0x1bcc) Register('spr2f4' 4 0x1bd0) Register('spr2f5' 4 0x1bd4) Register('spr2f6' 4 0x1bd8) Register('spr2f7' 4 0x1bdc) Register('spr2f8' 4 0x1be0) Register('spr2f9' 4 0x1be4) Register('spr2fa' 4 0x1be8) Register('spr2fb' 4 0x1bec) Register('spr2fc' 4 0x1bf0) Register('spr2fd' 4 0x1bf4) Register('spr2fe' 4 0x1bf8) Register('spr2ff' 4 0x1bfc) Register('spr300' 4 0x1c00) Register('spr301' 4 0x1c04) Register('spr302' 4 0x1c08) Register('spr303' 4 0x1c0c) Register('spr304' 4 0x1c10) Register('spr305' 4 0x1c14) Register('spr306' 4 0x1c18) Register('spr307' 4 0x1c1c) Register('spr308' 4 0x1c20) Register('spr309' 4 0x1c24) Register('spr30a' 4 0x1c28) Register('spr30b' 4 0x1c2c) Register('spr30c' 4 0x1c30) Register('spr30d' 4 0x1c34) Register('spr30e' 4 0x1c38) Register('spr30f' 4 0x1c3c) Register('spr310' 4 0x1c40) Register('spr311' 4 0x1c44) Register('spr312' 4 0x1c48) Register('spr313' 4 0x1c4c) Register('spr314' 4 0x1c50) Register('spr315' 4 0x1c54) Register('spr316' 4 0x1c58) Register('spr317' 4 0x1c5c) Register('spr318' 4 0x1c60) Register('spr319' 4 0x1c64) Register('spr31a' 4 0x1c68) Register('spr31b' 4 0x1c6c) Register('spr31c' 4 0x1c70) Register('spr31d' 4 0x1c74) Register('spr31e' 4 0x1c78) Register('spr31f' 4 0x1c7c) Register('spr320' 4 0x1c80) Register('spr321' 4 0x1c84) Register('spr322' 4 0x1c88) Register('spr323' 4 0x1c8c) Register('spr324' 4 0x1c90) Register('spr325' 4 0x1c94) Register('spr326' 4 0x1c98) Register('spr327' 4 0x1c9c) Register('spr328' 4 0x1ca0) Register('spr329' 4 0x1ca4) Register('spr32a' 4 0x1ca8) Register('spr32b' 4 0x1cac) Register('spr32c' 4 0x1cb0) Register('spr32d' 4 0x1cb4) Register('spr32e' 4 0x1cb8) 
Register('tar' 4 0x1cbc) Register('spr330' 4 0x1cc0) Register('spr331' 4 0x1cc4) Register('spr332' 4 0x1cc8) Register('spr333' 4 0x1ccc) Register('spr334' 4 0x1cd0) Register('spr335' 4 0x1cd4) Register('spr336' 4 0x1cd8) Register('spr337' 4 0x1cdc) Register('spr338' 4 0x1ce0) Register('spr339' 4 0x1ce4) Register('spr33a' 4 0x1ce8) Register('spr33b' 4 0x1cec) Register('spr33c' 4 0x1cf0) Register('spr33d' 4 0x1cf4) Register('spr33e' 4 0x1cf8) Register('spr33f' 4 0x1cfc) Register('spr340' 4 0x1d00) Register('spr341' 4 0x1d04) Register('spr342' 4 0x1d08) Register('spr343' 4 0x1d0c) Register('spr344' 4 0x1d10) Register('spr345' 4 0x1d14) Register('spr346' 4 0x1d18) Register('spr347' 4 0x1d1c) Register('spr348' 4 0x1d20) Register('spr349' 4 0x1d24) Register('spr34a' 4 0x1d28) Register('spr34b' 4 0x1d2c) Register('spr34c' 4 0x1d30) Register('spr34d' 4 0x1d34) Register('spr34e' 4 0x1d38) Register('spr34f' 4 0x1d3c) Register('spr350' 4 0x1d40) Register('spr351' 4 0x1d44) Register('spr352' 4 0x1d48) Register('spr353' 4 0x1d4c) Register('spr354' 4 0x1d50) Register('spr355' 4 0x1d54) Register('spr356' 4 0x1d58) Register('spr357' 4 0x1d5c) Register('spr358' 4 0x1d60) Register('spr359' 4 0x1d64) Register('spr35a' 4 0x1d68) Register('spr35b' 4 0x1d6c) Register('spr35c' 4 0x1d70) Register('spr35d' 4 0x1d74) Register('spr35e' 4 0x1d78) Register('spr35f' 4 0x1d7c) Register('spr360' 4 0x1d80) Register('spr361' 4 0x1d84) Register('spr362' 4 0x1d88) Register('spr363' 4 0x1d8c) Register('spr364' 4 0x1d90) Register('spr365' 4 0x1d94) Register('spr366' 4 0x1d98) Register('spr367' 4 0x1d9c) Register('spr368' 4 0x1da0) Register('spr369' 4 0x1da4) Register('spr36a' 4 0x1da8) Register('spr36b' 4 0x1dac) Register('spr36c' 4 0x1db0) Register('spr36d' 4 0x1db4) Register('spr36e' 4 0x1db8) Register('spr36f' 4 0x1dbc) Register('spr370' 4 0x1dc0) Register('spr371' 4 0x1dc4) Register('spr372' 4 0x1dc8) Register('spr373' 4 0x1dcc) Register('spr374' 4 0x1dd0) Register('spr375' 4 0x1dd4) 
Register('spr376' 4 0x1dd8) Register('spr377' 4 0x1ddc) Register('spr378' 4 0x1de0) Register('spr379' 4 0x1de4) Register('spr37a' 4 0x1de8) Register('spr37b' 4 0x1dec) Register('spr37c' 4 0x1df0) Register('spr37d' 4 0x1df4) Register('spr37e' 4 0x1df8) Register('spr37f' 4 0x1dfc) Register('spr380' 4 0x1e00) Register('spr381' 4 0x1e04) Register('spr382' 4 0x1e08) Register('spr383' 4 0x1e0c) Register('spr384' 4 0x1e10) Register('spr385' 4 0x1e14) Register('spr386' 4 0x1e18) Register('spr387' 4 0x1e1c) Register('spr388' 4 0x1e20) Register('spr389' 4 0x1e24) Register('spr38a' 4 0x1e28) Register('spr38b' 4 0x1e2c) Register('spr38c' 4 0x1e30) Register('spr38d' 4 0x1e34) Register('spr38e' 4 0x1e38) Register('spr38f' 4 0x1e3c) Register('spr390' 4 0x1e40) Register('spr391' 4 0x1e44) Register('spr392' 4 0x1e48) Register('spr393' 4 0x1e4c) Register('spr394' 4 0x1e50) Register('spr395' 4 0x1e54) Register('spr396' 4 0x1e58) Register('spr397' 4 0x1e5c) Register('spr398' 4 0x1e60) Register('spr399' 4 0x1e64) Register('spr39a' 4 0x1e68) Register('spr39b' 4 0x1e6c) Register('spr39c' 4 0x1e70) Register('spr39d' 4 0x1e74) Register('spr39e' 4 0x1e78) Register('spr39f' 4 0x1e7c) Register('spr3a0' 4 0x1e80) Register('spr3a1' 4 0x1e84) Register('spr3a2' 4 0x1e88) Register('spr3a3' 4 0x1e8c) Register('spr3a4' 4 0x1e90) Register('spr3a5' 4 0x1e94) Register('spr3a6' 4 0x1e98) Register('spr3a7' 4 0x1e9c) Register('spr3a8' 4 0x1ea0) Register('spr3a9' 4 0x1ea4) Register('spr3aa' 4 0x1ea8) Register('spr3ab' 4 0x1eac) Register('spr3ac' 4 0x1eb0) Register('spr3ad' 4 0x1eb4) Register('spr3ae' 4 0x1eb8) Register('spr3af' 4 0x1ebc) Register('spr3b0' 4 0x1ec0) Register('spr3b1' 4 0x1ec4) Register('spr3b2' 4 0x1ec8) Register('spr3b3' 4 0x1ecc) Register('spr3b4' 4 0x1ed0) Register('spr3b5' 4 0x1ed4) Register('spr3b6' 4 0x1ed8) Register('spr3b7' 4 0x1edc) Register('spr3b8' 4 0x1ee0) Register('spr3b9' 4 0x1ee4) Register('spr3ba' 4 0x1ee8) Register('spr3bb' 4 0x1eec) Register('spr3bc' 4 0x1ef0) 
Register('spr3bd' 4 0x1ef4) Register('spr3be' 4 0x1ef8) Register('spr3bf' 4 0x1efc) Register('spr3c0' 4 0x1f00) Register('spr3c1' 4 0x1f04) Register('spr3c2' 4 0x1f08) Register('spr3c3' 4 0x1f0c) Register('spr3c4' 4 0x1f10) Register('spr3c5' 4 0x1f14) Register('spr3c6' 4 0x1f18) Register('spr3c7' 4 0x1f1c) Register('spr3c8' 4 0x1f20) Register('spr3c9' 4 0x1f24) Register('spr3ca' 4 0x1f28) Register('spr3cb' 4 0x1f2c) Register('spr3cc' 4 0x1f30) Register('spr3cd' 4 0x1f34) Register('spr3ce' 4 0x1f38) Register('spr3cf' 4 0x1f3c) Register('spr3d0' 4 0x1f40) Register('spr3d1' 4 0x1f44) Register('spr3d2' 4 0x1f48) Register('spr3d3' 4 0x1f4c) Register('spr3d4' 4 0x1f50) Register('spr3d5' 4 0x1f54) Register('spr3d6' 4 0x1f58) Register('spr3d7' 4 0x1f5c) Register('spr3d8' 4 0x1f60) Register('spr3d9' 4 0x1f64) Register('spr3da' 4 0x1f68) Register('spr3db' 4 0x1f6c) Register('spr3dc' 4 0x1f70) Register('spr3dd' 4 0x1f74) Register('spr3de' 4 0x1f78) Register('spr3df' 4 0x1f7c) Register('spr3e0' 4 0x1f80) Register('spr3e1' 4 0x1f84) Register('spr3e2' 4 0x1f88) Register('spr3e3' 4 0x1f8c) Register('spr3e4' 4 0x1f90) Register('spr3e5' 4 0x1f94) Register('spr3e6' 4 0x1f98) Register('spr3e7' 4 0x1f9c) Register('spr3e8' 4 0x1fa0) Register('spr3e9' 4 0x1fa4) Register('spr3ea' 4 0x1fa8) Register('spr3eb' 4 0x1fac) Register('spr3ec' 4 0x1fb0) Register('spr3ed' 4 0x1fb4) Register('spr3ee' 4 0x1fb8) Register('spr3ef' 4 0x1fbc) Register('spr3f0' 4 0x1fc0) Register('spr3f1' 4 0x1fc4) Register('spr3f2' 4 0x1fc8) Register('spr3f3' 4 0x1fcc) Register('spr3f4' 4 0x1fd0) Register('spr3f5' 4 0x1fd4) Register('spr3f6' 4 0x1fd8) Register('spr3f7' 4 0x1fdc) Register('spr3f8' 4 0x1fe0) Register('spr3f9' 4 0x1fe4) Register('spr3fa' 4 0x1fe8) Register('spr3fb' 4 0x1fec) Register('spr3fc' 4 0x1ff0) Register('spr3fd' 4 0x1ff4) Register('spr3fe' 4 0x1ff8) Register('spr3ff' 4 0x1ffc) Register('vs0' 16 0x4000) Register('f0' 8 0x4008) Register('vs1' 16 0x4010) Register('f1' 8 0x4018) Register('vs2' 16 
0x4020) Register('f2' 8 0x4028) Register('vs3' 16 0x4030) Register('f3' 8 0x4038) Register('vs4' 16 0x4040) Register('f4' 8 0x4048) Register('vs5' 16 0x4050) Register('f5' 8 0x4058) Register('vs6' 16 0x4060) Register('f6' 8 0x4068) Register('vs7' 16 0x4070) Register('f7' 8 0x4078) Register('vs8' 16 0x4080) Register('f8' 8 0x4088) Register('vs9' 16 0x4090) Register('f9' 8 0x4098) Register('vs10' 16 0x40a0) Register('f10' 8 0x40a8) Register('vs11' 16 0x40b0) Register('f11' 8 0x40b8) Register('vs12' 16 0x40c0) Register('f12' 8 0x40c8) Register('vs13' 16 0x40d0) Register('f13' 8 0x40d8) Register('vs14' 16 0x40e0) Register('f14' 8 0x40e8) Register('vs15' 16 0x40f0) Register('f15' 8 0x40f8) Register('vs16' 16 0x4100) Register('f16' 8 0x4108) Register('vs17' 16 0x4110) Register('f17' 8 0x4118) Register('vs18' 16 0x4120) Register('f18' 8 0x4128) Register('vs19' 16 0x4130) Register('f19' 8 0x4138) Register('vs20' 16 0x4140) Register('f20' 8 0x4148) Register('vs21' 16 0x4150) Register('f21' 8 0x4158) Register('vs22' 16 0x4160) Register('f22' 8 0x4168) Register('vs23' 16 0x4170) Register('f23' 8 0x4178) Register('vs24' 16 0x4180) Register('f24' 8 0x4188) Register('vs25' 16 0x4190) Register('f25' 8 0x4198) Register('vs26' 16 0x41a0) Register('f26' 8 0x41a8) Register('vs27' 16 0x41b0) Register('f27' 8 0x41b8) Register('vs28' 16 0x41c0) Register('f28' 8 0x41c8) Register('vs29' 16 0x41d0) Register('f29' 8 0x41d8) Register('vs30' 16 0x41e0) Register('f30' 8 0x41e8) Register('vs31' 16 0x41f0) Register('f31' 8 0x41f8) Register('vs32' 16 0x4200) Register('vr0_64_1' 8 0x4200) Register('vr0_32_3' 4 0x4200) Register('vr0_16_7' 2 0x4200) Register('vr0_8_15' 1 0x4200) Register('vr0_8_14' 1 0x4201) Register('vr0_16_6' 2 0x4202) Register('vr0_8_13' 1 0x4202) Register('vr0_8_12' 1 0x4203) Register('vr0_32_2' 4 0x4204) Register('vr0_16_5' 2 0x4204) Register('vr0_8_11' 1 0x4204) Register('vr0_8_10' 1 0x4205) Register('vr0_16_4' 2 0x4206) Register('vr0_8_9' 1 0x4206) Register('vr0_8_8' 1 
0x4207) Register('vr0_64_0' 8 0x4208) Register('vr0_32_1' 4 0x4208) Register('vr0_16_3' 2 0x4208) Register('vr0_8_7' 1 0x4208) Register('vr0_8_6' 1 0x4209) Register('vr0_16_2' 2 0x420a) Register('vr0_8_5' 1 0x420a) Register('vr0_8_4' 1 0x420b) Register('vr0_32_0' 4 0x420c) Register('vr0_16_1' 2 0x420c) Register('vr0_8_3' 1 0x420c) Register('vr0_8_2' 1 0x420d) Register('vr0_16_0' 2 0x420e) Register('vr0_8_1' 1 0x420e) Register('vr0_8_0' 1 0x420f) Register('vs33' 16 0x4210) Register('vr1_64_1' 8 0x4210) Register('vr1_32_3' 4 0x4210) Register('vr1_16_7' 2 0x4210) Register('vr1_8_15' 1 0x4210) Register('vr1_8_14' 1 0x4211) Register('vr1_16_6' 2 0x4212) Register('vr1_8_13' 1 0x4212) Register('vr1_8_12' 1 0x4213) Register('vr1_32_2' 4 0x4214) Register('vr1_16_5' 2 0x4214) Register('vr1_8_11' 1 0x4214) Register('vr1_8_10' 1 0x4215) Register('vr1_16_4' 2 0x4216) Register('vr1_8_9' 1 0x4216) Register('vr1_8_8' 1 0x4217) Register('vr1_64_0' 8 0x4218) Register('vr1_32_1' 4 0x4218) Register('vr1_16_3' 2 0x4218) Register('vr1_8_7' 1 0x4218) Register('vr1_8_6' 1 0x4219) Register('vr1_16_2' 2 0x421a) Register('vr1_8_5' 1 0x421a) Register('vr1_8_4' 1 0x421b) Register('vr1_32_0' 4 0x421c) Register('vr1_16_1' 2 0x421c) Register('vr1_8_3' 1 0x421c) Register('vr1_8_2' 1 0x421d) Register('vr1_16_0' 2 0x421e) Register('vr1_8_1' 1 0x421e) Register('vr1_8_0' 1 0x421f) Register('vs34' 16 0x4220) Register('vr2_64_1' 8 0x4220) Register('vr2_32_3' 4 0x4220) Register('vr2_16_7' 2 0x4220) Register('vr2_8_15' 1 0x4220) Register('vr2_8_14' 1 0x4221) Register('vr2_16_6' 2 0x4222) Register('vr2_8_13' 1 0x4222) Register('vr2_8_12' 1 0x4223) Register('vr2_32_2' 4 0x4224) Register('vr2_16_5' 2 0x4224) Register('vr2_8_11' 1 0x4224) Register('vr2_8_10' 1 0x4225) Register('vr2_16_4' 2 0x4226) Register('vr2_8_9' 1 0x4226) Register('vr2_8_8' 1 0x4227) Register('vr2_64_0' 8 0x4228) Register('vr2_32_1' 4 0x4228) Register('vr2_16_3' 2 0x4228) Register('vr2_8_7' 1 0x4228) Register('vr2_8_6' 1 0x4229) 
Register('vr2_16_2' 2 0x422a) Register('vr2_8_5' 1 0x422a) Register('vr2_8_4' 1 0x422b) Register('vr2_32_0' 4 0x422c) Register('vr2_16_1' 2 0x422c) Register('vr2_8_3' 1 0x422c) Register('vr2_8_2' 1 0x422d) Register('vr2_16_0' 2 0x422e) Register('vr2_8_1' 1 0x422e) Register('vr2_8_0' 1 0x422f) Register('vs35' 16 0x4230) Register('vr3_64_1' 8 0x4230) Register('vr3_32_3' 4 0x4230) Register('vr3_16_7' 2 0x4230) Register('vr3_8_15' 1 0x4230) Register('vr3_8_14' 1 0x4231) Register('vr3_16_6' 2 0x4232) Register('vr3_8_13' 1 0x4232) Register('vr3_8_12' 1 0x4233) Register('vr3_32_2' 4 0x4234) Register('vr3_16_5' 2 0x4234) Register('vr3_8_11' 1 0x4234) Register('vr3_8_10' 1 0x4235) Register('vr3_16_4' 2 0x4236) Register('vr3_8_9' 1 0x4236) Register('vr3_8_8' 1 0x4237) Register('vr3_64_0' 8 0x4238) Register('vr3_32_1' 4 0x4238) Register('vr3_16_3' 2 0x4238) Register('vr3_8_7' 1 0x4238) Register('vr3_8_6' 1 0x4239) Register('vr3_16_2' 2 0x423a) Register('vr3_8_5' 1 0x423a) Register('vr3_8_4' 1 0x423b) Register('vr3_32_0' 4 0x423c) Register('vr3_16_1' 2 0x423c) Register('vr3_8_3' 1 0x423c) Register('vr3_8_2' 1 0x423d) Register('vr3_16_0' 2 0x423e) Register('vr3_8_1' 1 0x423e) Register('vr3_8_0' 1 0x423f) Register('vs36' 16 0x4240) Register('vr4_64_1' 8 0x4240) Register('vr4_32_3' 4 0x4240) Register('vr4_16_7' 2 0x4240) Register('vr4_8_15' 1 0x4240) Register('vr4_8_14' 1 0x4241) Register('vr4_16_6' 2 0x4242) Register('vr4_8_13' 1 0x4242) Register('vr4_8_12' 1 0x4243) Register('vr4_32_2' 4 0x4244) Register('vr4_16_5' 2 0x4244) Register('vr4_8_11' 1 0x4244) Register('vr4_8_10' 1 0x4245) Register('vr4_16_4' 2 0x4246) Register('vr4_8_9' 1 0x4246) Register('vr4_8_8' 1 0x4247) Register('vr4_64_0' 8 0x4248) Register('vr4_32_1' 4 0x4248) Register('vr4_16_3' 2 0x4248) Register('vr4_8_7' 1 0x4248) Register('vr4_8_6' 1 0x4249) Register('vr4_16_2' 2 0x424a) Register('vr4_8_5' 1 0x424a) Register('vr4_8_4' 1 0x424b) Register('vr4_32_0' 4 0x424c) Register('vr4_16_1' 2 0x424c) 
Register('vr4_8_3' 1 0x424c) Register('vr4_8_2' 1 0x424d) Register('vr4_16_0' 2 0x424e) Register('vr4_8_1' 1 0x424e) Register('vr4_8_0' 1 0x424f) Register('vs37' 16 0x4250) Register('vr5_64_1' 8 0x4250) Register('vr5_32_3' 4 0x4250) Register('vr5_16_7' 2 0x4250) Register('vr5_8_15' 1 0x4250) Register('vr5_8_14' 1 0x4251) Register('vr5_16_6' 2 0x4252) Register('vr5_8_13' 1 0x4252) Register('vr5_8_12' 1 0x4253) Register('vr5_32_2' 4 0x4254) Register('vr5_16_5' 2 0x4254) Register('vr5_8_11' 1 0x4254) Register('vr5_8_10' 1 0x4255) Register('vr5_16_4' 2 0x4256) Register('vr5_8_9' 1 0x4256) Register('vr5_8_8' 1 0x4257) Register('vr5_64_0' 8 0x4258) Register('vr5_32_1' 4 0x4258) Register('vr5_16_3' 2 0x4258) Register('vr5_8_7' 1 0x4258) Register('vr5_8_6' 1 0x4259) Register('vr5_16_2' 2 0x425a) Register('vr5_8_5' 1 0x425a) Register('vr5_8_4' 1 0x425b) Register('vr5_32_0' 4 0x425c) Register('vr5_16_1' 2 0x425c) Register('vr5_8_3' 1 0x425c) Register('vr5_8_2' 1 0x425d) Register('vr5_16_0' 2 0x425e) Register('vr5_8_1' 1 0x425e) Register('vr5_8_0' 1 0x425f) Register('vs38' 16 0x4260) Register('vr6_64_1' 8 0x4260) Register('vr6_32_3' 4 0x4260) Register('vr6_16_7' 2 0x4260) Register('vr6_8_15' 1 0x4260) Register('vr6_8_14' 1 0x4261) Register('vr6_16_6' 2 0x4262) Register('vr6_8_13' 1 0x4262) Register('vr6_8_12' 1 0x4263) Register('vr6_32_2' 4 0x4264) Register('vr6_16_5' 2 0x4264) Register('vr6_8_11' 1 0x4264) Register('vr6_8_10' 1 0x4265) Register('vr6_16_4' 2 0x4266) Register('vr6_8_9' 1 0x4266) Register('vr6_8_8' 1 0x4267) Register('vr6_64_0' 8 0x4268) Register('vr6_32_1' 4 0x4268) Register('vr6_16_3' 2 0x4268) Register('vr6_8_7' 1 0x4268) Register('vr6_8_6' 1 0x4269) Register('vr6_16_2' 2 0x426a) Register('vr6_8_5' 1 0x426a) Register('vr6_8_4' 1 0x426b) Register('vr6_32_0' 4 0x426c) Register('vr6_16_1' 2 0x426c) Register('vr6_8_3' 1 0x426c) Register('vr6_8_2' 1 0x426d) Register('vr6_16_0' 2 0x426e) Register('vr6_8_1' 1 0x426e) Register('vr6_8_0' 1 0x426f) Register('vs39' 16 
0x4270) Register('vr7_64_1' 8 0x4270) Register('vr7_32_3' 4 0x4270) Register('vr7_16_7' 2 0x4270) Register('vr7_8_15' 1 0x4270) Register('vr7_8_14' 1 0x4271) Register('vr7_16_6' 2 0x4272) Register('vr7_8_13' 1 0x4272) Register('vr7_8_12' 1 0x4273) Register('vr7_32_2' 4 0x4274) Register('vr7_16_5' 2 0x4274) Register('vr7_8_11' 1 0x4274) Register('vr7_8_10' 1 0x4275) Register('vr7_16_4' 2 0x4276) Register('vr7_8_9' 1 0x4276) Register('vr7_8_8' 1 0x4277) Register('vr7_64_0' 8 0x4278) Register('vr7_32_1' 4 0x4278) Register('vr7_16_3' 2 0x4278) Register('vr7_8_7' 1 0x4278) Register('vr7_8_6' 1 0x4279) Register('vr7_16_2' 2 0x427a) Register('vr7_8_5' 1 0x427a) Register('vr7_8_4' 1 0x427b) Register('vr7_32_0' 4 0x427c) Register('vr7_16_1' 2 0x427c) Register('vr7_8_3' 1 0x427c) Register('vr7_8_2' 1 0x427d) Register('vr7_16_0' 2 0x427e) Register('vr7_8_1' 1 0x427e) Register('vr7_8_0' 1 0x427f) Register('vs40' 16 0x4280) Register('vr8_64_1' 8 0x4280) Register('vr8_32_3' 4 0x4280) Register('vr8_16_7' 2 0x4280) Register('vr8_8_15' 1 0x4280) Register('vr8_8_14' 1 0x4281) Register('vr8_16_6' 2 0x4282) Register('vr8_8_13' 1 0x4282) Register('vr8_8_12' 1 0x4283) Register('vr8_32_2' 4 0x4284) Register('vr8_16_5' 2 0x4284) Register('vr8_8_11' 1 0x4284) Register('vr8_8_10' 1 0x4285) Register('vr8_16_4' 2 0x4286) Register('vr8_8_9' 1 0x4286) Register('vr8_8_8' 1 0x4287) Register('vr8_64_0' 8 0x4288) Register('vr8_32_1' 4 0x4288) Register('vr8_16_3' 2 0x4288) Register('vr8_8_7' 1 0x4288) Register('vr8_8_6' 1 0x4289) Register('vr8_16_2' 2 0x428a) Register('vr8_8_5' 1 0x428a) Register('vr8_8_4' 1 0x428b) Register('vr8_32_0' 4 0x428c) Register('vr8_16_1' 2 0x428c) Register('vr8_8_3' 1 0x428c) Register('vr8_8_2' 1 0x428d) Register('vr8_16_0' 2 0x428e) Register('vr8_8_1' 1 0x428e) Register('vr8_8_0' 1 0x428f) Register('vs41' 16 0x4290) Register('vr9_64_1' 8 0x4290) Register('vr9_32_3' 4 0x4290) Register('vr9_16_7' 2 0x4290) Register('vr9_8_15' 1 0x4290) Register('vr9_8_14' 1 0x4291) 
Register('vr9_16_6' 2 0x4292) Register('vr9_8_13' 1 0x4292) Register('vr9_8_12' 1 0x4293) Register('vr9_32_2' 4 0x4294) Register('vr9_16_5' 2 0x4294) Register('vr9_8_11' 1 0x4294) Register('vr9_8_10' 1 0x4295) Register('vr9_16_4' 2 0x4296) Register('vr9_8_9' 1 0x4296) Register('vr9_8_8' 1 0x4297) Register('vr9_64_0' 8 0x4298) Register('vr9_32_1' 4 0x4298) Register('vr9_16_3' 2 0x4298) Register('vr9_8_7' 1 0x4298) Register('vr9_8_6' 1 0x4299) Register('vr9_16_2' 2 0x429a) Register('vr9_8_5' 1 0x429a) Register('vr9_8_4' 1 0x429b) Register('vr9_32_0' 4 0x429c) Register('vr9_16_1' 2 0x429c) Register('vr9_8_3' 1 0x429c) Register('vr9_8_2' 1 0x429d) Register('vr9_16_0' 2 0x429e) Register('vr9_8_1' 1 0x429e) Register('vr9_8_0' 1 0x429f) Register('vs42' 16 0x42a0) Register('vr10_64_1' 8 0x42a0) Register('vr10_32_3' 4 0x42a0) Register('vr10_16_7' 2 0x42a0) Register('vr10_8_15' 1 0x42a0) Register('vr10_8_14' 1 0x42a1) Register('vr10_16_6' 2 0x42a2) Register('vr10_8_13' 1 0x42a2) Register('vr10_8_12' 1 0x42a3) Register('vr10_32_2' 4 0x42a4) Register('vr10_16_5' 2 0x42a4) Register('vr10_8_11' 1 0x42a4) Register('vr10_8_10' 1 0x42a5) Register('vr10_16_4' 2 0x42a6) Register('vr10_8_9' 1 0x42a6) Register('vr10_8_8' 1 0x42a7) Register('vr10_64_0' 8 0x42a8) Register('vr10_32_1' 4 0x42a8) Register('vr10_16_3' 2 0x42a8) Register('vr10_8_7' 1 0x42a8) Register('vr10_8_6' 1 0x42a9) Register('vr10_16_2' 2 0x42aa) Register('vr10_8_5' 1 0x42aa) Register('vr10_8_4' 1 0x42ab) Register('vr10_32_0' 4 0x42ac) Register('vr10_16_1' 2 0x42ac) Register('vr10_8_3' 1 0x42ac) Register('vr10_8_2' 1 0x42ad) Register('vr10_16_0' 2 0x42ae) Register('vr10_8_1' 1 0x42ae) Register('vr10_8_0' 1 0x42af) Register('vs43' 16 0x42b0) Register('vr11_64_1' 8 0x42b0) Register('vr11_32_3' 4 0x42b0) Register('vr11_16_7' 2 0x42b0) Register('vr11_8_15' 1 0x42b0) Register('vr11_8_14' 1 0x42b1) Register('vr11_16_6' 2 0x42b2) Register('vr11_8_13' 1 0x42b2) Register('vr11_8_12' 1 0x42b3) Register('vr11_32_2' 4 0x42b4) 
Register('vr11_16_5' 2 0x42b4) Register('vr11_8_11' 1 0x42b4) Register('vr11_8_10' 1 0x42b5) Register('vr11_16_4' 2 0x42b6) Register('vr11_8_9' 1 0x42b6) Register('vr11_8_8' 1 0x42b7) Register('vr11_64_0' 8 0x42b8) Register('vr11_32_1' 4 0x42b8) Register('vr11_16_3' 2 0x42b8) Register('vr11_8_7' 1 0x42b8) Register('vr11_8_6' 1 0x42b9) Register('vr11_16_2' 2 0x42ba) Register('vr11_8_5' 1 0x42ba) Register('vr11_8_4' 1 0x42bb) Register('vr11_32_0' 4 0x42bc) Register('vr11_16_1' 2 0x42bc) Register('vr11_8_3' 1 0x42bc) Register('vr11_8_2' 1 0x42bd) Register('vr11_16_0' 2 0x42be) Register('vr11_8_1' 1 0x42be) Register('vr11_8_0' 1 0x42bf) Register('vs44' 16 0x42c0) Register('vr12_64_1' 8 0x42c0) Register('vr12_32_3' 4 0x42c0) Register('vr12_16_7' 2 0x42c0) Register('vr12_8_15' 1 0x42c0) Register('vr12_8_14' 1 0x42c1) Register('vr12_16_6' 2 0x42c2) Register('vr12_8_13' 1 0x42c2) Register('vr12_8_12' 1 0x42c3) Register('vr12_32_2' 4 0x42c4) Register('vr12_16_5' 2 0x42c4) Register('vr12_8_11' 1 0x42c4) Register('vr12_8_10' 1 0x42c5) Register('vr12_16_4' 2 0x42c6) Register('vr12_8_9' 1 0x42c6) Register('vr12_8_8' 1 0x42c7) Register('vr12_64_0' 8 0x42c8) Register('vr12_32_1' 4 0x42c8) Register('vr12_16_3' 2 0x42c8) Register('vr12_8_7' 1 0x42c8) Register('vr12_8_6' 1 0x42c9) Register('vr12_16_2' 2 0x42ca) Register('vr12_8_5' 1 0x42ca) Register('vr12_8_4' 1 0x42cb) Register('vr12_32_0' 4 0x42cc) Register('vr12_16_1' 2 0x42cc) Register('vr12_8_3' 1 0x42cc) Register('vr12_8_2' 1 0x42cd) Register('vr12_16_0' 2 0x42ce) Register('vr12_8_1' 1 0x42ce) Register('vr12_8_0' 1 0x42cf) Register('vs45' 16 0x42d0) Register('vr13_64_1' 8 0x42d0) Register('vr13_32_3' 4 0x42d0) Register('vr13_16_7' 2 0x42d0) Register('vr13_8_15' 1 0x42d0) Register('vr13_8_14' 1 0x42d1) Register('vr13_16_6' 2 0x42d2) Register('vr13_8_13' 1 0x42d2) Register('vr13_8_12' 1 0x42d3) Register('vr13_32_2' 4 0x42d4) Register('vr13_16_5' 2 0x42d4) Register('vr13_8_11' 1 0x42d4) Register('vr13_8_10' 1 0x42d5) 
Register('vr13_16_4' 2 0x42d6) Register('vr13_8_9' 1 0x42d6) Register('vr13_8_8' 1 0x42d7) Register('vr13_64_0' 8 0x42d8) Register('vr13_32_1' 4 0x42d8) Register('vr13_16_3' 2 0x42d8) Register('vr13_8_7' 1 0x42d8) Register('vr13_8_6' 1 0x42d9) Register('vr13_16_2' 2 0x42da) Register('vr13_8_5' 1 0x42da) Register('vr13_8_4' 1 0x42db) Register('vr13_32_0' 4 0x42dc) Register('vr13_16_1' 2 0x42dc) Register('vr13_8_3' 1 0x42dc) Register('vr13_8_2' 1 0x42dd) Register('vr13_16_0' 2 0x42de) Register('vr13_8_1' 1 0x42de) Register('vr13_8_0' 1 0x42df) Register('vs46' 16 0x42e0) Register('vr14_64_1' 8 0x42e0) Register('vr14_32_3' 4 0x42e0) Register('vr14_16_7' 2 0x42e0) Register('vr14_8_15' 1 0x42e0) Register('vr14_8_14' 1 0x42e1) Register('vr14_16_6' 2 0x42e2) Register('vr14_8_13' 1 0x42e2) Register('vr14_8_12' 1 0x42e3) Register('vr14_32_2' 4 0x42e4) Register('vr14_16_5' 2 0x42e4) Register('vr14_8_11' 1 0x42e4) Register('vr14_8_10' 1 0x42e5) Register('vr14_16_4' 2 0x42e6) Register('vr14_8_9' 1 0x42e6) Register('vr14_8_8' 1 0x42e7) Register('vr14_64_0' 8 0x42e8) Register('vr14_32_1' 4 0x42e8) Register('vr14_16_3' 2 0x42e8) Register('vr14_8_7' 1 0x42e8) Register('vr14_8_6' 1 0x42e9) Register('vr14_16_2' 2 0x42ea) Register('vr14_8_5' 1 0x42ea) Register('vr14_8_4' 1 0x42eb) Register('vr14_32_0' 4 0x42ec) Register('vr14_16_1' 2 0x42ec) Register('vr14_8_3' 1 0x42ec) Register('vr14_8_2' 1 0x42ed) Register('vr14_16_0' 2 0x42ee) Register('vr14_8_1' 1 0x42ee) Register('vr14_8_0' 1 0x42ef) Register('vs47' 16 0x42f0) Register('vr15_64_1' 8 0x42f0) Register('vr15_32_3' 4 0x42f0) Register('vr15_16_7' 2 0x42f0) Register('vr15_8_15' 1 0x42f0) Register('vr15_8_14' 1 0x42f1) Register('vr15_16_6' 2 0x42f2) Register('vr15_8_13' 1 0x42f2) Register('vr15_8_12' 1 0x42f3) Register('vr15_32_2' 4 0x42f4) Register('vr15_16_5' 2 0x42f4) Register('vr15_8_11' 1 0x42f4) Register('vr15_8_10' 1 0x42f5) Register('vr15_16_4' 2 0x42f6) Register('vr15_8_9' 1 0x42f6) Register('vr15_8_8' 1 0x42f7) 
Register('vr15_64_0' 8 0x42f8) Register('vr15_32_1' 4 0x42f8) Register('vr15_16_3' 2 0x42f8) Register('vr15_8_7' 1 0x42f8) Register('vr15_8_6' 1 0x42f9) Register('vr15_16_2' 2 0x42fa) Register('vr15_8_5' 1 0x42fa) Register('vr15_8_4' 1 0x42fb) Register('vr15_32_0' 4 0x42fc) Register('vr15_16_1' 2 0x42fc) Register('vr15_8_3' 1 0x42fc) Register('vr15_8_2' 1 0x42fd) Register('vr15_16_0' 2 0x42fe) Register('vr15_8_1' 1 0x42fe) Register('vr15_8_0' 1 0x42ff) Register('vs48' 16 0x4300) Register('vr16_64_1' 8 0x4300) Register('vr16_32_3' 4 0x4300) Register('vr16_16_7' 2 0x4300) Register('vr16_8_15' 1 0x4300) Register('vr16_8_14' 1 0x4301) Register('vr16_16_6' 2 0x4302) Register('vr16_8_13' 1 0x4302) Register('vr16_8_12' 1 0x4303) Register('vr16_32_2' 4 0x4304) Register('vr16_16_5' 2 0x4304) Register('vr16_8_11' 1 0x4304) Register('vr16_8_10' 1 0x4305) Register('vr16_16_4' 2 0x4306) Register('vr16_8_9' 1 0x4306) Register('vr16_8_8' 1 0x4307) Register('vr16_64_0' 8 0x4308) Register('vr16_32_1' 4 0x4308) Register('vr16_16_3' 2 0x4308) Register('vr16_8_7' 1 0x4308) Register('vr16_8_6' 1 0x4309) Register('vr16_16_2' 2 0x430a) Register('vr16_8_5' 1 0x430a) Register('vr16_8_4' 1 0x430b) Register('vr16_32_0' 4 0x430c) Register('vr16_16_1' 2 0x430c) Register('vr16_8_3' 1 0x430c) Register('vr16_8_2' 1 0x430d) Register('vr16_16_0' 2 0x430e) Register('vr16_8_1' 1 0x430e) Register('vr16_8_0' 1 0x430f) Register('vs49' 16 0x4310) Register('vr17_64_1' 8 0x4310) Register('vr17_32_3' 4 0x4310) Register('vr17_16_7' 2 0x4310) Register('vr17_8_15' 1 0x4310) Register('vr17_8_14' 1 0x4311) Register('vr17_16_6' 2 0x4312) Register('vr17_8_13' 1 0x4312) Register('vr17_8_12' 1 0x4313) Register('vr17_32_2' 4 0x4314) Register('vr17_16_5' 2 0x4314) Register('vr17_8_11' 1 0x4314) Register('vr17_8_10' 1 0x4315) Register('vr17_16_4' 2 0x4316) Register('vr17_8_9' 1 0x4316) Register('vr17_8_8' 1 0x4317) Register('vr17_64_0' 8 0x4318) Register('vr17_32_1' 4 0x4318) Register('vr17_16_3' 2 0x4318) 
Register('vr17_8_7' 1 0x4318) Register('vr17_8_6' 1 0x4319) Register('vr17_16_2' 2 0x431a) Register('vr17_8_5' 1 0x431a) Register('vr17_8_4' 1 0x431b) Register('vr17_32_0' 4 0x431c) Register('vr17_16_1' 2 0x431c) Register('vr17_8_3' 1 0x431c) Register('vr17_8_2' 1 0x431d) Register('vr17_16_0' 2 0x431e) Register('vr17_8_1' 1 0x431e) Register('vr17_8_0' 1 0x431f) Register('vs50' 16 0x4320) Register('vr18_64_1' 8 0x4320) Register('vr18_32_3' 4 0x4320) Register('vr18_16_7' 2 0x4320) Register('vr18_8_15' 1 0x4320) Register('vr18_8_14' 1 0x4321) Register('vr18_16_6' 2 0x4322) Register('vr18_8_13' 1 0x4322) Register('vr18_8_12' 1 0x4323) Register('vr18_32_2' 4 0x4324) Register('vr18_16_5' 2 0x4324) Register('vr18_8_11' 1 0x4324) Register('vr18_8_10' 1 0x4325) Register('vr18_16_4' 2 0x4326) Register('vr18_8_9' 1 0x4326) Register('vr18_8_8' 1 0x4327) Register('vr18_64_0' 8 0x4328) Register('vr18_32_1' 4 0x4328) Register('vr18_16_3' 2 0x4328) Register('vr18_8_7' 1 0x4328) Register('vr18_8_6' 1 0x4329) Register('vr18_16_2' 2 0x432a) Register('vr18_8_5' 1 0x432a) Register('vr18_8_4' 1 0x432b) Register('vr18_32_0' 4 0x432c) Register('vr18_16_1' 2 0x432c) Register('vr18_8_3' 1 0x432c) Register('vr18_8_2' 1 0x432d) Register('vr18_16_0' 2 0x432e) Register('vr18_8_1' 1 0x432e) Register('vr18_8_0' 1 0x432f) Register('vs51' 16 0x4330) Register('vr19_64_1' 8 0x4330) Register('vr19_32_3' 4 0x4330) Register('vr19_16_7' 2 0x4330) Register('vr19_8_15' 1 0x4330) Register('vr19_8_14' 1 0x4331) Register('vr19_16_6' 2 0x4332) Register('vr19_8_13' 1 0x4332) Register('vr19_8_12' 1 0x4333) Register('vr19_32_2' 4 0x4334) Register('vr19_16_5' 2 0x4334) Register('vr19_8_11' 1 0x4334) Register('vr19_8_10' 1 0x4335) Register('vr19_16_4' 2 0x4336) Register('vr19_8_9' 1 0x4336) Register('vr19_8_8' 1 0x4337) Register('vr19_64_0' 8 0x4338) Register('vr19_32_1' 4 0x4338) Register('vr19_16_3' 2 0x4338) Register('vr19_8_7' 1 0x4338) Register('vr19_8_6' 1 0x4339) Register('vr19_16_2' 2 0x433a) 
Register('vr19_8_5' 1 0x433a) Register('vr19_8_4' 1 0x433b) Register('vr19_32_0' 4 0x433c) Register('vr19_16_1' 2 0x433c) Register('vr19_8_3' 1 0x433c) Register('vr19_8_2' 1 0x433d) Register('vr19_16_0' 2 0x433e) Register('vr19_8_1' 1 0x433e) Register('vr19_8_0' 1 0x433f) Register('vs52' 16 0x4340) Register('vr20_64_1' 8 0x4340) Register('vr20_32_3' 4 0x4340) Register('vr20_16_7' 2 0x4340) Register('vr20_8_15' 1 0x4340) Register('vr20_8_14' 1 0x4341) Register('vr20_16_6' 2 0x4342) Register('vr20_8_13' 1 0x4342) Register('vr20_8_12' 1 0x4343) Register('vr20_32_2' 4 0x4344) Register('vr20_16_5' 2 0x4344) Register('vr20_8_11' 1 0x4344) Register('vr20_8_10' 1 0x4345) Register('vr20_16_4' 2 0x4346) Register('vr20_8_9' 1 0x4346) Register('vr20_8_8' 1 0x4347) Register('vr20_64_0' 8 0x4348) Register('vr20_32_1' 4 0x4348) Register('vr20_16_3' 2 0x4348) Register('vr20_8_7' 1 0x4348) Register('vr20_8_6' 1 0x4349) Register('vr20_16_2' 2 0x434a) Register('vr20_8_5' 1 0x434a) Register('vr20_8_4' 1 0x434b) Register('vr20_32_0' 4 0x434c) Register('vr20_16_1' 2 0x434c) Register('vr20_8_3' 1 0x434c) Register('vr20_8_2' 1 0x434d) Register('vr20_16_0' 2 0x434e) Register('vr20_8_1' 1 0x434e) Register('vr20_8_0' 1 0x434f) Register('vs53' 16 0x4350) Register('vr21_64_1' 8 0x4350) Register('vr21_32_3' 4 0x4350) Register('vr21_16_7' 2 0x4350) Register('vr21_8_15' 1 0x4350) Register('vr21_8_14' 1 0x4351) Register('vr21_16_6' 2 0x4352) Register('vr21_8_13' 1 0x4352) Register('vr21_8_12' 1 0x4353) Register('vr21_32_2' 4 0x4354) Register('vr21_16_5' 2 0x4354) Register('vr21_8_11' 1 0x4354) Register('vr21_8_10' 1 0x4355) Register('vr21_16_4' 2 0x4356) Register('vr21_8_9' 1 0x4356) Register('vr21_8_8' 1 0x4357) Register('vr21_64_0' 8 0x4358) Register('vr21_32_1' 4 0x4358) Register('vr21_16_3' 2 0x4358) Register('vr21_8_7' 1 0x4358) Register('vr21_8_6' 1 0x4359) Register('vr21_16_2' 2 0x435a) Register('vr21_8_5' 1 0x435a) Register('vr21_8_4' 1 0x435b) Register('vr21_32_0' 4 0x435c) 
Register('vr21_16_1' 2 0x435c) Register('vr21_8_3' 1 0x435c) Register('vr21_8_2' 1 0x435d) Register('vr21_16_0' 2 0x435e) Register('vr21_8_1' 1 0x435e) Register('vr21_8_0' 1 0x435f) Register('vs54' 16 0x4360) Register('vr22_64_1' 8 0x4360) Register('vr22_32_3' 4 0x4360) Register('vr22_16_7' 2 0x4360) Register('vr22_8_15' 1 0x4360) Register('vr22_8_14' 1 0x4361) Register('vr22_16_6' 2 0x4362) Register('vr22_8_13' 1 0x4362) Register('vr22_8_12' 1 0x4363) Register('vr22_32_2' 4 0x4364) Register('vr22_16_5' 2 0x4364) Register('vr22_8_11' 1 0x4364) Register('vr22_8_10' 1 0x4365) Register('vr22_16_4' 2 0x4366) Register('vr22_8_9' 1 0x4366) Register('vr22_8_8' 1 0x4367) Register('vr22_64_0' 8 0x4368) Register('vr22_32_1' 4 0x4368) Register('vr22_16_3' 2 0x4368) Register('vr22_8_7' 1 0x4368) Register('vr22_8_6' 1 0x4369) Register('vr22_16_2' 2 0x436a) Register('vr22_8_5' 1 0x436a) Register('vr22_8_4' 1 0x436b) Register('vr22_32_0' 4 0x436c) Register('vr22_16_1' 2 0x436c) Register('vr22_8_3' 1 0x436c) Register('vr22_8_2' 1 0x436d) Register('vr22_16_0' 2 0x436e) Register('vr22_8_1' 1 0x436e) Register('vr22_8_0' 1 0x436f) Register('vs55' 16 0x4370) Register('vr23_64_1' 8 0x4370) Register('vr23_32_3' 4 0x4370) Register('vr23_16_7' 2 0x4370) Register('vr23_8_15' 1 0x4370) Register('vr23_8_14' 1 0x4371) Register('vr23_16_6' 2 0x4372) Register('vr23_8_13' 1 0x4372) Register('vr23_8_12' 1 0x4373) Register('vr23_32_2' 4 0x4374) Register('vr23_16_5' 2 0x4374) Register('vr23_8_11' 1 0x4374) Register('vr23_8_10' 1 0x4375) Register('vr23_16_4' 2 0x4376) Register('vr23_8_9' 1 0x4376) Register('vr23_8_8' 1 0x4377) Register('vr23_64_0' 8 0x4378) Register('vr23_32_1' 4 0x4378) Register('vr23_16_3' 2 0x4378) Register('vr23_8_7' 1 0x4378) Register('vr23_8_6' 1 0x4379) Register('vr23_16_2' 2 0x437a) Register('vr23_8_5' 1 0x437a) Register('vr23_8_4' 1 0x437b) Register('vr23_32_0' 4 0x437c) Register('vr23_16_1' 2 0x437c) Register('vr23_8_3' 1 0x437c) Register('vr23_8_2' 1 0x437d) 
Register('vr23_16_0' 2 0x437e) Register('vr23_8_1' 1 0x437e) Register('vr23_8_0' 1 0x437f) Register('vs56' 16 0x4380) Register('vr24_64_1' 8 0x4380) Register('vr24_32_3' 4 0x4380) Register('vr24_16_7' 2 0x4380) Register('vr24_8_15' 1 0x4380) Register('vr24_8_14' 1 0x4381) Register('vr24_16_6' 2 0x4382) Register('vr24_8_13' 1 0x4382) Register('vr24_8_12' 1 0x4383) Register('vr24_32_2' 4 0x4384) Register('vr24_16_5' 2 0x4384) Register('vr24_8_11' 1 0x4384) Register('vr24_8_10' 1 0x4385) Register('vr24_16_4' 2 0x4386) Register('vr24_8_9' 1 0x4386) Register('vr24_8_8' 1 0x4387) Register('vr24_64_0' 8 0x4388) Register('vr24_32_1' 4 0x4388) Register('vr24_16_3' 2 0x4388) Register('vr24_8_7' 1 0x4388) Register('vr24_8_6' 1 0x4389) Register('vr24_16_2' 2 0x438a) Register('vr24_8_5' 1 0x438a) Register('vr24_8_4' 1 0x438b) Register('vr24_32_0' 4 0x438c) Register('vr24_16_1' 2 0x438c) Register('vr24_8_3' 1 0x438c) Register('vr24_8_2' 1 0x438d) Register('vr24_16_0' 2 0x438e) Register('vr24_8_1' 1 0x438e) Register('vr24_8_0' 1 0x438f) Register('vs57' 16 0x4390) Register('vr25_64_1' 8 0x4390) Register('vr25_32_3' 4 0x4390) Register('vr25_16_7' 2 0x4390) Register('vr25_8_15' 1 0x4390) Register('vr25_8_14' 1 0x4391) Register('vr25_16_6' 2 0x4392) Register('vr25_8_13' 1 0x4392) Register('vr25_8_12' 1 0x4393) Register('vr25_32_2' 4 0x4394) Register('vr25_16_5' 2 0x4394) Register('vr25_8_11' 1 0x4394) Register('vr25_8_10' 1 0x4395) Register('vr25_16_4' 2 0x4396) Register('vr25_8_9' 1 0x4396) Register('vr25_8_8' 1 0x4397) Register('vr25_64_0' 8 0x4398) Register('vr25_32_1' 4 0x4398) Register('vr25_16_3' 2 0x4398) Register('vr25_8_7' 1 0x4398) Register('vr25_8_6' 1 0x4399) Register('vr25_16_2' 2 0x439a) Register('vr25_8_5' 1 0x439a) Register('vr25_8_4' 1 0x439b) Register('vr25_32_0' 4 0x439c) Register('vr25_16_1' 2 0x439c) Register('vr25_8_3' 1 0x439c) Register('vr25_8_2' 1 0x439d) Register('vr25_16_0' 2 0x439e) Register('vr25_8_1' 1 0x439e) Register('vr25_8_0' 1 0x439f) 
Register('vs58' 16 0x43a0) Register('vr26_64_1' 8 0x43a0) Register('vr26_32_3' 4 0x43a0) Register('vr26_16_7' 2 0x43a0) Register('vr26_8_15' 1 0x43a0) Register('vr26_8_14' 1 0x43a1) Register('vr26_16_6' 2 0x43a2) Register('vr26_8_13' 1 0x43a2) Register('vr26_8_12' 1 0x43a3) Register('vr26_32_2' 4 0x43a4) Register('vr26_16_5' 2 0x43a4) Register('vr26_8_11' 1 0x43a4) Register('vr26_8_10' 1 0x43a5) Register('vr26_16_4' 2 0x43a6) Register('vr26_8_9' 1 0x43a6) Register('vr26_8_8' 1 0x43a7) Register('vr26_64_0' 8 0x43a8) Register('vr26_32_1' 4 0x43a8) Register('vr26_16_3' 2 0x43a8) Register('vr26_8_7' 1 0x43a8) Register('vr26_8_6' 1 0x43a9) Register('vr26_16_2' 2 0x43aa) Register('vr26_8_5' 1 0x43aa) Register('vr26_8_4' 1 0x43ab) Register('vr26_32_0' 4 0x43ac) Register('vr26_16_1' 2 0x43ac) Register('vr26_8_3' 1 0x43ac) Register('vr26_8_2' 1 0x43ad) Register('vr26_16_0' 2 0x43ae) Register('vr26_8_1' 1 0x43ae) Register('vr26_8_0' 1 0x43af) Register('vs59' 16 0x43b0) Register('vr27_64_1' 8 0x43b0) Register('vr27_32_3' 4 0x43b0) Register('vr27_16_7' 2 0x43b0) Register('vr27_8_15' 1 0x43b0) Register('vr27_8_14' 1 0x43b1) Register('vr27_16_6' 2 0x43b2) Register('vr27_8_13' 1 0x43b2) Register('vr27_8_12' 1 0x43b3) Register('vr27_32_2' 4 0x43b4) Register('vr27_16_5' 2 0x43b4) Register('vr27_8_11' 1 0x43b4) Register('vr27_8_10' 1 0x43b5) Register('vr27_16_4' 2 0x43b6) Register('vr27_8_9' 1 0x43b6) Register('vr27_8_8' 1 0x43b7) Register('vr27_64_0' 8 0x43b8) Register('vr27_32_1' 4 0x43b8) Register('vr27_16_3' 2 0x43b8) Register('vr27_8_7' 1 0x43b8) Register('vr27_8_6' 1 0x43b9) Register('vr27_16_2' 2 0x43ba) Register('vr27_8_5' 1 0x43ba) Register('vr27_8_4' 1 0x43bb) Register('vr27_32_0' 4 0x43bc) Register('vr27_16_1' 2 0x43bc) Register('vr27_8_3' 1 0x43bc) Register('vr27_8_2' 1 0x43bd) Register('vr27_16_0' 2 0x43be) Register('vr27_8_1' 1 0x43be) Register('vr27_8_0' 1 0x43bf) Register('vs60' 16 0x43c0) Register('vr28_64_1' 8 0x43c0) Register('vr28_32_3' 4 0x43c0) 
Register('vr28_16_7' 2 0x43c0) Register('vr28_8_15' 1 0x43c0) Register('vr28_8_14' 1 0x43c1) Register('vr28_16_6' 2 0x43c2) Register('vr28_8_13' 1 0x43c2) Register('vr28_8_12' 1 0x43c3) Register('vr28_32_2' 4 0x43c4) Register('vr28_16_5' 2 0x43c4) Register('vr28_8_11' 1 0x43c4) Register('vr28_8_10' 1 0x43c5) Register('vr28_16_4' 2 0x43c6) Register('vr28_8_9' 1 0x43c6) Register('vr28_8_8' 1 0x43c7) Register('vr28_64_0' 8 0x43c8) Register('vr28_32_1' 4 0x43c8) Register('vr28_16_3' 2 0x43c8) Register('vr28_8_7' 1 0x43c8) Register('vr28_8_6' 1 0x43c9) Register('vr28_16_2' 2 0x43ca) Register('vr28_8_5' 1 0x43ca) Register('vr28_8_4' 1 0x43cb) Register('vr28_32_0' 4 0x43cc) Register('vr28_16_1' 2 0x43cc) Register('vr28_8_3' 1 0x43cc) Register('vr28_8_2' 1 0x43cd) Register('vr28_16_0' 2 0x43ce) Register('vr28_8_1' 1 0x43ce) Register('vr28_8_0' 1 0x43cf) Register('vs61' 16 0x43d0) Register('vr29_64_1' 8 0x43d0) Register('vr29_32_3' 4 0x43d0) Register('vr29_16_7' 2 0x43d0) Register('vr29_8_15' 1 0x43d0) Register('vr29_8_14' 1 0x43d1) Register('vr29_16_6' 2 0x43d2) Register('vr29_8_13' 1 0x43d2) Register('vr29_8_12' 1 0x43d3) Register('vr29_32_2' 4 0x43d4) Register('vr29_16_5' 2 0x43d4) Register('vr29_8_11' 1 0x43d4) Register('vr29_8_10' 1 0x43d5) Register('vr29_16_4' 2 0x43d6) Register('vr29_8_9' 1 0x43d6) Register('vr29_8_8' 1 0x43d7) Register('vr29_64_0' 8 0x43d8) Register('vr29_32_1' 4 0x43d8) Register('vr29_16_3' 2 0x43d8) Register('vr29_8_7' 1 0x43d8) Register('vr29_8_6' 1 0x43d9) Register('vr29_16_2' 2 0x43da) Register('vr29_8_5' 1 0x43da) Register('vr29_8_4' 1 0x43db) Register('vr29_32_0' 4 0x43dc) Register('vr29_16_1' 2 0x43dc) Register('vr29_8_3' 1 0x43dc) Register('vr29_8_2' 1 0x43dd) Register('vr29_16_0' 2 0x43de) Register('vr29_8_1' 1 0x43de) Register('vr29_8_0' 1 0x43df) Register('vs62' 16 0x43e0) Register('vr30_64_1' 8 0x43e0) Register('vr30_32_3' 4 0x43e0) Register('vr30_16_7' 2 0x43e0) Register('vr30_8_15' 1 0x43e0) Register('vr30_8_14' 1 0x43e1) 
Register('vr30_16_6' 2 0x43e2) Register('vr30_8_13' 1 0x43e2) Register('vr30_8_12' 1 0x43e3) Register('vr30_32_2' 4 0x43e4) Register('vr30_16_5' 2 0x43e4) Register('vr30_8_11' 1 0x43e4) Register('vr30_8_10' 1 0x43e5) Register('vr30_16_4' 2 0x43e6) Register('vr30_8_9' 1 0x43e6) Register('vr30_8_8' 1 0x43e7) Register('vr30_64_0' 8 0x43e8) Register('vr30_32_1' 4 0x43e8) Register('vr30_16_3' 2 0x43e8) Register('vr30_8_7' 1 0x43e8) Register('vr30_8_6' 1 0x43e9) Register('vr30_16_2' 2 0x43ea) Register('vr30_8_5' 1 0x43ea) Register('vr30_8_4' 1 0x43eb) Register('vr30_32_0' 4 0x43ec) Register('vr30_16_1' 2 0x43ec) Register('vr30_8_3' 1 0x43ec) Register('vr30_8_2' 1 0x43ed) Register('vr30_16_0' 2 0x43ee) Register('vr30_8_1' 1 0x43ee) Register('vr30_8_0' 1 0x43ef) Register('vs63' 16 0x43f0) Register('vr31_64_1' 8 0x43f0) Register('vr31_32_3' 4 0x43f0) Register('vr31_16_7' 2 0x43f0) Register('vr31_8_15' 1 0x43f0) Register('vr31_8_14' 1 0x43f1) Register('vr31_16_6' 2 0x43f2) Register('vr31_8_13' 1 0x43f2) Register('vr31_8_12' 1 0x43f3) Register('vr31_32_2' 4 0x43f4) Register('vr31_16_5' 2 0x43f4) Register('vr31_8_11' 1 0x43f4) Register('vr31_8_10' 1 0x43f5) Register('vr31_16_4' 2 0x43f6) Register('vr31_8_9' 1 0x43f6) Register('vr31_8_8' 1 0x43f7) Register('vr31_64_0' 8 0x43f8) Register('vr31_32_1' 4 0x43f8) Register('vr31_16_3' 2 0x43f8) Register('vr31_8_7' 1 0x43f8) Register('vr31_8_6' 1 0x43f9) Register('vr31_16_2' 2 0x43fa) Register('vr31_8_5' 1 0x43fa) Register('vr31_8_4' 1 0x43fb) Register('vr31_32_0' 4 0x43fc) Register('vr31_16_1' 2 0x43fc) Register('vr31_8_3' 1 0x43fc) Register('vr31_8_2' 1 0x43fd) Register('vr31_16_0' 2 0x43fe) Register('vr31_8_1' 1 0x43fe) Register('vr31_8_0' 1 0x43ff) Register('contextreg' 4 0x6000) Register('dcr000' 4 0x7000) Register('dcr001' 4 0x7004) Register('dcr002' 4 0x7008) Register('dcr003' 4 0x700c) Register('dcr004' 4 0x7010) Register('dcr005' 4 0x7014) Register('dcr006' 4 0x7018) Register('dcr007' 4 0x701c) Register('dcr008' 4 0x7020) 
Register('dcr009' 4 0x7024) Register('dcr00a' 4 0x7028) Register('dcr00b' 4 0x702c) Register('dcr00c' 4 0x7030) Register('dcr00d' 4 0x7034) Register('dcr00e' 4 0x7038) Register('dcr00f' 4 0x703c) Register('dcr010' 4 0x7040) Register('dcr011' 4 0x7044) Register('dcr012' 4 0x7048) Register('dcr013' 4 0x704c) Register('dcr014' 4 0x7050) Register('dcr015' 4 0x7054) Register('dcr016' 4 0x7058) Register('dcr017' 4 0x705c) Register('dcr018' 4 0x7060) Register('dcr019' 4 0x7064) Register('dcr01a' 4 0x7068) Register('dcr01b' 4 0x706c) Register('dcr01c' 4 0x7070) Register('dcr01d' 4 0x7074) Register('dcr01e' 4 0x7078) Register('dcr01f' 4 0x707c) Register('dcr020' 4 0x7080) Register('dcr021' 4 0x7084) Register('dcr022' 4 0x7088) Register('dcr023' 4 0x708c) Register('dcr024' 4 0x7090) Register('dcr025' 4 0x7094) Register('dcr026' 4 0x7098) Register('dcr027' 4 0x709c) Register('dcr028' 4 0x70a0) Register('dcr029' 4 0x70a4) Register('dcr02a' 4 0x70a8) Register('dcr02b' 4 0x70ac) Register('dcr02c' 4 0x70b0) Register('dcr02d' 4 0x70b4) Register('dcr02e' 4 0x70b8) Register('dcr02f' 4 0x70bc) Register('dcr030' 4 0x70c0) Register('dcr031' 4 0x70c4) Register('dcr032' 4 0x70c8) Register('dcr033' 4 0x70cc) Register('dcr034' 4 0x70d0) Register('dcr035' 4 0x70d4) Register('dcr036' 4 0x70d8) Register('dcr037' 4 0x70dc) Register('dcr038' 4 0x70e0) Register('dcr039' 4 0x70e4) Register('dcr03a' 4 0x70e8) Register('dcr03b' 4 0x70ec) Register('dcr03c' 4 0x70f0) Register('dcr03d' 4 0x70f4) Register('dcr03e' 4 0x70f8) Register('dcr03f' 4 0x70fc) Register('dcr040' 4 0x7100) Register('dcr041' 4 0x7104) Register('dcr042' 4 0x7108) Register('dcr043' 4 0x710c) Register('dcr044' 4 0x7110) Register('dcr045' 4 0x7114) Register('dcr046' 4 0x7118) Register('dcr047' 4 0x711c) Register('dcr048' 4 0x7120) Register('dcr049' 4 0x7124) Register('dcr04a' 4 0x7128) Register('dcr04b' 4 0x712c) Register('dcr04c' 4 0x7130) Register('dcr04d' 4 0x7134) Register('dcr04e' 4 0x7138) Register('dcr04f' 4 0x713c) 
Register('dcr050' 4 0x7140) Register('dcr051' 4 0x7144) Register('dcr052' 4 0x7148) Register('dcr053' 4 0x714c) Register('dcr054' 4 0x7150) Register('dcr055' 4 0x7154) Register('dcr056' 4 0x7158) Register('dcr057' 4 0x715c) Register('dcr058' 4 0x7160) Register('dcr059' 4 0x7164) Register('dcr05a' 4 0x7168) Register('dcr05b' 4 0x716c) Register('dcr05c' 4 0x7170) Register('dcr05d' 4 0x7174) Register('dcr05e' 4 0x7178) Register('dcr05f' 4 0x717c) Register('dcr060' 4 0x7180) Register('dcr061' 4 0x7184) Register('dcr062' 4 0x7188) Register('dcr063' 4 0x718c) Register('dcr064' 4 0x7190) Register('dcr065' 4 0x7194) Register('dcr066' 4 0x7198) Register('dcr067' 4 0x719c) Register('dcr068' 4 0x71a0) Register('dcr069' 4 0x71a4) Register('dcr06a' 4 0x71a8) Register('dcr06b' 4 0x71ac) Register('dcr06c' 4 0x71b0) Register('dcr06d' 4 0x71b4) Register('dcr06e' 4 0x71b8) Register('dcr06f' 4 0x71bc) Register('dcr070' 4 0x71c0) Register('dcr071' 4 0x71c4) Register('dcr072' 4 0x71c8) Register('dcr073' 4 0x71cc) Register('dcr074' 4 0x71d0) Register('dcr075' 4 0x71d4) Register('dcr076' 4 0x71d8) Register('dcr077' 4 0x71dc) Register('dcr078' 4 0x71e0) Register('dcr079' 4 0x71e4) Register('dcr07a' 4 0x71e8) Register('dcr07b' 4 0x71ec) Register('dcr07c' 4 0x71f0) Register('dcr07d' 4 0x71f4) Register('dcr07e' 4 0x71f8) Register('dcr07f' 4 0x71fc) Register('dcr080' 4 0x7200) Register('dcr081' 4 0x7204) Register('dcr082' 4 0x7208) Register('dcr083' 4 0x720c) Register('dcr084' 4 0x7210) Register('dcr085' 4 0x7214) Register('dcr086' 4 0x7218) Register('dcr087' 4 0x721c) Register('dcr088' 4 0x7220) Register('dcr089' 4 0x7224) Register('dcr08a' 4 0x7228) Register('dcr08b' 4 0x722c) Register('dcr08c' 4 0x7230) Register('dcr08d' 4 0x7234) Register('dcr08e' 4 0x7238) Register('dcr08f' 4 0x723c) Register('dcr090' 4 0x7240) Register('dcr091' 4 0x7244) Register('dcr092' 4 0x7248) Register('dcr093' 4 0x724c) Register('dcr094' 4 0x7250) Register('dcr095' 4 0x7254) Register('dcr096' 4 0x7258) 
Register('dcr097' 4 0x725c) Register('dcr098' 4 0x7260) Register('dcr099' 4 0x7264) Register('dcr09a' 4 0x7268) Register('dcr09b' 4 0x726c) Register('dcr09c' 4 0x7270) Register('dcr09d' 4 0x7274) Register('dcr09e' 4 0x7278) Register('dcr09f' 4 0x727c) Register('dcr0a0' 4 0x7280) Register('dcr0a1' 4 0x7284) Register('dcr0a2' 4 0x7288) Register('dcr0a3' 4 0x728c) Register('dcr0a4' 4 0x7290) Register('dcr0a5' 4 0x7294) Register('dcr0a6' 4 0x7298) Register('dcr0a7' 4 0x729c) Register('dcr0a8' 4 0x72a0) Register('dcr0a9' 4 0x72a4) Register('dcr0aa' 4 0x72a8) Register('dcr0ab' 4 0x72ac) Register('dcr0ac' 4 0x72b0) Register('dcr0ad' 4 0x72b4) Register('dcr0ae' 4 0x72b8) Register('dcr0af' 4 0x72bc) Register('dcr0b0' 4 0x72c0) Register('dcr0b1' 4 0x72c4) Register('dcr0b2' 4 0x72c8) Register('dcr0b3' 4 0x72cc) Register('dcr0b4' 4 0x72d0) Register('dcr0b5' 4 0x72d4) Register('dcr0b6' 4 0x72d8) Register('dcr0b7' 4 0x72dc) Register('dcr0b8' 4 0x72e0) Register('dcr0b9' 4 0x72e4) Register('dcr0ba' 4 0x72e8) Register('dcr0bb' 4 0x72ec) Register('dcr0bc' 4 0x72f0) Register('dcr0bd' 4 0x72f4) Register('dcr0be' 4 0x72f8) Register('dcr0bf' 4 0x72fc) Register('dcr0c0' 4 0x7300) Register('dcr0c1' 4 0x7304) Register('dcr0c2' 4 0x7308) Register('dcr0c3' 4 0x730c) Register('dcr0c4' 4 0x7310) Register('dcr0c5' 4 0x7314) Register('dcr0c6' 4 0x7318) Register('dcr0c7' 4 0x731c) Register('dcr0c8' 4 0x7320) Register('dcr0c9' 4 0x7324) Register('dcr0ca' 4 0x7328) Register('dcr0cb' 4 0x732c) Register('dcr0cc' 4 0x7330) Register('dcr0cd' 4 0x7334) Register('dcr0ce' 4 0x7338) Register('dcr0cf' 4 0x733c) Register('dcr0d0' 4 0x7340) Register('dcr0d1' 4 0x7344) Register('dcr0d2' 4 0x7348) Register('dcr0d3' 4 0x734c) Register('dcr0d4' 4 0x7350) Register('dcr0d5' 4 0x7354) Register('dcr0d6' 4 0x7358) Register('dcr0d7' 4 0x735c) Register('dcr0d8' 4 0x7360) Register('dcr0d9' 4 0x7364) Register('dcr0da' 4 0x7368) Register('dcr0db' 4 0x736c) Register('dcr0dc' 4 0x7370) Register('dcr0dd' 4 0x7374) 
Register('dcr0de' 4 0x7378) Register('dcr0df' 4 0x737c) Register('dcr0e0' 4 0x7380) Register('dcr0e1' 4 0x7384) Register('dcr0e2' 4 0x7388) Register('dcr0e3' 4 0x738c) Register('dcr0e4' 4 0x7390) Register('dcr0e5' 4 0x7394) Register('dcr0e6' 4 0x7398) Register('dcr0e7' 4 0x739c) Register('dcr0e8' 4 0x73a0) Register('dcr0e9' 4 0x73a4) Register('dcr0ea' 4 0x73a8) Register('dcr0eb' 4 0x73ac) Register('dcr0ec' 4 0x73b0) Register('dcr0ed' 4 0x73b4) Register('dcr0ee' 4 0x73b8) Register('dcr0ef' 4 0x73bc) Register('dcr0f0' 4 0x73c0) Register('dcr0f1' 4 0x73c4) Register('dcr0f2' 4 0x73c8) Register('dcr0f3' 4 0x73cc) Register('dcr0f4' 4 0x73d0) Register('dcr0f5' 4 0x73d4) Register('dcr0f6' 4 0x73d8) Register('dcr0f7' 4 0x73dc) Register('dcr0f8' 4 0x73e0) Register('dcr0f9' 4 0x73e4) Register('dcr0fa' 4 0x73e8) Register('dcr0fb' 4 0x73ec) Register('dcr0fc' 4 0x73f0) Register('dcr0fd' 4 0x73f4) Register('dcr0fe' 4 0x73f8) Register('dcr0ff' 4 0x73fc) Register('dcr100' 4 0x7400) Register('dcr101' 4 0x7404) Register('dcr102' 4 0x7408) Register('dcr103' 4 0x740c) Register('dcr104' 4 0x7410) Register('dcr105' 4 0x7414) Register('dcr106' 4 0x7418) Register('dcr107' 4 0x741c) Register('dcr108' 4 0x7420) Register('dcr109' 4 0x7424) Register('dcr10a' 4 0x7428) Register('dcr10b' 4 0x742c) Register('dcr10c' 4 0x7430) Register('dcr10d' 4 0x7434) Register('dcr10e' 4 0x7438) Register('dcr10f' 4 0x743c) Register('dcr110' 4 0x7440) Register('dcr111' 4 0x7444) Register('dcr112' 4 0x7448) Register('dcr113' 4 0x744c) Register('dcr114' 4 0x7450) Register('dcr115' 4 0x7454) Register('dcr116' 4 0x7458) Register('dcr117' 4 0x745c) Register('dcr118' 4 0x7460) Register('dcr119' 4 0x7464) Register('dcr11a' 4 0x7468) Register('dcr11b' 4 0x746c) Register('dcr11c' 4 0x7470) Register('dcr11d' 4 0x7474) Register('dcr11e' 4 0x7478) Register('dcr11f' 4 0x747c) Register('dcr120' 4 0x7480) Register('dcr121' 4 0x7484) Register('dcr122' 4 0x7488) Register('dcr123' 4 0x748c) Register('dcr124' 4 0x7490) 
Register('dcr125' 4 0x7494) Register('dcr126' 4 0x7498) Register('dcr127' 4 0x749c) Register('dcr128' 4 0x74a0) Register('dcr129' 4 0x74a4) Register('dcr12a' 4 0x74a8) Register('dcr12b' 4 0x74ac) Register('dcr12c' 4 0x74b0) Register('dcr12d' 4 0x74b4) Register('dcr12e' 4 0x74b8) Register('dcr12f' 4 0x74bc) Register('dcr130' 4 0x74c0) Register('dcr131' 4 0x74c4) Register('dcr132' 4 0x74c8) Register('dcr133' 4 0x74cc) Register('dcr134' 4 0x74d0) Register('dcr135' 4 0x74d4) Register('dcr136' 4 0x74d8) Register('dcr137' 4 0x74dc) Register('dcr138' 4 0x74e0) Register('dcr139' 4 0x74e4) Register('dcr13a' 4 0x74e8) Register('dcr13b' 4 0x74ec) Register('dcr13c' 4 0x74f0) Register('dcr13d' 4 0x74f4) Register('dcr13e' 4 0x74f8) Register('dcr13f' 4 0x74fc) Register('dcr140' 4 0x7500) Register('dcr141' 4 0x7504) Register('dcr142' 4 0x7508) Register('dcr143' 4 0x750c) Register('dcr144' 4 0x7510) Register('dcr145' 4 0x7514) Register('dcr146' 4 0x7518) Register('dcr147' 4 0x751c) Register('dcr148' 4 0x7520) Register('dcr149' 4 0x7524) Register('dcr14a' 4 0x7528) Register('dcr14b' 4 0x752c) Register('dcr14c' 4 0x7530) Register('dcr14d' 4 0x7534) Register('dcr14e' 4 0x7538) Register('dcr14f' 4 0x753c) Register('dcr150' 4 0x7540) Register('dcr151' 4 0x7544) Register('dcr152' 4 0x7548) Register('dcr153' 4 0x754c) Register('dcr154' 4 0x7550) Register('dcr155' 4 0x7554) Register('dcr156' 4 0x7558) Register('dcr157' 4 0x755c) Register('dcr158' 4 0x7560) Register('dcr159' 4 0x7564) Register('dcr15a' 4 0x7568) Register('dcr15b' 4 0x756c) Register('dcr15c' 4 0x7570) Register('dcr15d' 4 0x7574) Register('dcr15e' 4 0x7578) Register('dcr15f' 4 0x757c) Register('dcr160' 4 0x7580) Register('dcr161' 4 0x7584) Register('dcr162' 4 0x7588) Register('dcr163' 4 0x758c) Register('dcr164' 4 0x7590) Register('dcr165' 4 0x7594) Register('dcr166' 4 0x7598) Register('dcr167' 4 0x759c) Register('dcr168' 4 0x75a0) Register('dcr169' 4 0x75a4) Register('dcr16a' 4 0x75a8) Register('dcr16b' 4 0x75ac) 
Register('dcr16c' 4 0x75b0) Register('dcr16d' 4 0x75b4) Register('dcr16e' 4 0x75b8) Register('dcr16f' 4 0x75bc) Register('dcr170' 4 0x75c0) Register('dcr171' 4 0x75c4) Register('dcr172' 4 0x75c8) Register('dcr173' 4 0x75cc) Register('dcr174' 4 0x75d0) Register('dcr175' 4 0x75d4) Register('dcr176' 4 0x75d8) Register('dcr177' 4 0x75dc) Register('dcr178' 4 0x75e0) Register('dcr179' 4 0x75e4) Register('dcr17a' 4 0x75e8) Register('dcr17b' 4 0x75ec) Register('dcr17c' 4 0x75f0) Register('dcr17d' 4 0x75f4) Register('dcr17e' 4 0x75f8) Register('dcr17f' 4 0x75fc) Register('dcr180' 4 0x7600) Register('dcr181' 4 0x7604) Register('dcr182' 4 0x7608) Register('dcr183' 4 0x760c) Register('dcr184' 4 0x7610) Register('dcr185' 4 0x7614) Register('dcr186' 4 0x7618) Register('dcr187' 4 0x761c) Register('dcr188' 4 0x7620) Register('dcr189' 4 0x7624) Register('dcr18a' 4 0x7628) Register('dcr18b' 4 0x762c) Register('dcr18c' 4 0x7630) Register('dcr18d' 4 0x7634) Register('dcr18e' 4 0x7638) Register('dcr18f' 4 0x763c) Register('dcr190' 4 0x7640) Register('dcr191' 4 0x7644) Register('dcr192' 4 0x7648) Register('dcr193' 4 0x764c) Register('dcr194' 4 0x7650) Register('dcr195' 4 0x7654) Register('dcr196' 4 0x7658) Register('dcr197' 4 0x765c) Register('dcr198' 4 0x7660) Register('dcr199' 4 0x7664) Register('dcr19a' 4 0x7668) Register('dcr19b' 4 0x766c) Register('dcr19c' 4 0x7670) Register('dcr19d' 4 0x7674) Register('dcr19e' 4 0x7678) Register('dcr19f' 4 0x767c) Register('dcr1a0' 4 0x7680) Register('dcr1a1' 4 0x7684) Register('dcr1a2' 4 0x7688) Register('dcr1a3' 4 0x768c) Register('dcr1a4' 4 0x7690) Register('dcr1a5' 4 0x7694) Register('dcr1a6' 4 0x7698) Register('dcr1a7' 4 0x769c) Register('dcr1a8' 4 0x76a0) Register('dcr1a9' 4 0x76a4) Register('dcr1aa' 4 0x76a8) Register('dcr1ab' 4 0x76ac) Register('dcr1ac' 4 0x76b0) Register('dcr1ad' 4 0x76b4) Register('dcr1ae' 4 0x76b8) Register('dcr1af' 4 0x76bc) Register('dcr1b0' 4 0x76c0) Register('dcr1b1' 4 0x76c4) Register('dcr1b2' 4 0x76c8) 
Register('dcr1b3' 4 0x76cc) Register('dcr1b4' 4 0x76d0) Register('dcr1b5' 4 0x76d4) Register('dcr1b6' 4 0x76d8) Register('dcr1b7' 4 0x76dc) Register('dcr1b8' 4 0x76e0) Register('dcr1b9' 4 0x76e4) Register('dcr1ba' 4 0x76e8) Register('dcr1bb' 4 0x76ec) Register('dcr1bc' 4 0x76f0) Register('dcr1bd' 4 0x76f4) Register('dcr1be' 4 0x76f8) Register('dcr1bf' 4 0x76fc) Register('dcr1c0' 4 0x7700) Register('dcr1c1' 4 0x7704) Register('dcr1c2' 4 0x7708) Register('dcr1c3' 4 0x770c) Register('dcr1c4' 4 0x7710) Register('dcr1c5' 4 0x7714) Register('dcr1c6' 4 0x7718) Register('dcr1c7' 4 0x771c) Register('dcr1c8' 4 0x7720) Register('dcr1c9' 4 0x7724) Register('dcr1ca' 4 0x7728) Register('dcr1cb' 4 0x772c) Register('dcr1cc' 4 0x7730) Register('dcr1cd' 4 0x7734) Register('dcr1ce' 4 0x7738) Register('dcr1cf' 4 0x773c) Register('dcr1d0' 4 0x7740) Register('dcr1d1' 4 0x7744) Register('dcr1d2' 4 0x7748) Register('dcr1d3' 4 0x774c) Register('dcr1d4' 4 0x7750) Register('dcr1d5' 4 0x7754) Register('dcr1d6' 4 0x7758) Register('dcr1d7' 4 0x775c) Register('dcr1d8' 4 0x7760) Register('dcr1d9' 4 0x7764) Register('dcr1da' 4 0x7768) Register('dcr1db' 4 0x776c) Register('dcr1dc' 4 0x7770) Register('dcr1dd' 4 0x7774) Register('dcr1de' 4 0x7778) Register('dcr1df' 4 0x777c) Register('dcr1e0' 4 0x7780) Register('dcr1e1' 4 0x7784) Register('dcr1e2' 4 0x7788) Register('dcr1e3' 4 0x778c) Register('dcr1e4' 4 0x7790) Register('dcr1e5' 4 0x7794) Register('dcr1e6' 4 0x7798) Register('dcr1e7' 4 0x779c) Register('dcr1e8' 4 0x77a0) Register('dcr1e9' 4 0x77a4) Register('dcr1ea' 4 0x77a8) Register('dcr1eb' 4 0x77ac) Register('dcr1ec' 4 0x77b0) Register('dcr1ed' 4 0x77b4) Register('dcr1ee' 4 0x77b8) Register('dcr1ef' 4 0x77bc) Register('dcr1f0' 4 0x77c0) Register('dcr1f1' 4 0x77c4) Register('dcr1f2' 4 0x77c8) Register('dcr1f3' 4 0x77cc) Register('dcr1f4' 4 0x77d0) Register('dcr1f5' 4 0x77d4) Register('dcr1f6' 4 0x77d8) Register('dcr1f7' 4 0x77dc) Register('dcr1f8' 4 0x77e0) Register('dcr1f9' 4 0x77e4) 
Register('dcr1fa' 4 0x77e8) Register('dcr1fb' 4 0x77ec) Register('dcr1fc' 4 0x77f0) Register('dcr1fd' 4 0x77f4) Register('dcr1fe' 4 0x77f8) Register('dcr1ff' 4 0x77fc) Register('dcr200' 4 0x7800) Register('dcr201' 4 0x7804) Register('dcr202' 4 0x7808) Register('dcr203' 4 0x780c) Register('dcr204' 4 0x7810) Register('dcr205' 4 0x7814) Register('dcr206' 4 0x7818) Register('dcr207' 4 0x781c) Register('dcr208' 4 0x7820) Register('dcr209' 4 0x7824) Register('dcr20a' 4 0x7828) Register('dcr20b' 4 0x782c) Register('dcr20c' 4 0x7830) Register('dcr20d' 4 0x7834) Register('dcr20e' 4 0x7838) Register('dcr20f' 4 0x783c) Register('dcr210' 4 0x7840) Register('dcr211' 4 0x7844) Register('dcr212' 4 0x7848) Register('dcr213' 4 0x784c) Register('dcr214' 4 0x7850) Register('dcr215' 4 0x7854) Register('dcr216' 4 0x7858) Register('dcr217' 4 0x785c) Register('dcr218' 4 0x7860) Register('dcr219' 4 0x7864) Register('dcr21a' 4 0x7868) Register('dcr21b' 4 0x786c) Register('dcr21c' 4 0x7870) Register('dcr21d' 4 0x7874) Register('dcr21e' 4 0x7878) Register('dcr21f' 4 0x787c) Register('dcr220' 4 0x7880) Register('dcr221' 4 0x7884) Register('dcr222' 4 0x7888) Register('dcr223' 4 0x788c) Register('dcr224' 4 0x7890) Register('dcr225' 4 0x7894) Register('dcr226' 4 0x7898) Register('dcr227' 4 0x789c) Register('dcr228' 4 0x78a0) Register('dcr229' 4 0x78a4) Register('dcr22a' 4 0x78a8) Register('dcr22b' 4 0x78ac) Register('dcr22c' 4 0x78b0) Register('dcr22d' 4 0x78b4) Register('dcr22e' 4 0x78b8) Register('dcr22f' 4 0x78bc) Register('dcr230' 4 0x78c0) Register('dcr231' 4 0x78c4) Register('dcr232' 4 0x78c8) Register('dcr233' 4 0x78cc) Register('dcr234' 4 0x78d0) Register('dcr235' 4 0x78d4) Register('dcr236' 4 0x78d8) Register('dcr237' 4 0x78dc) Register('dcr238' 4 0x78e0) Register('dcr239' 4 0x78e4) Register('dcr23a' 4 0x78e8) Register('dcr23b' 4 0x78ec) Register('dcr23c' 4 0x78f0) Register('dcr23d' 4 0x78f4) Register('dcr23e' 4 0x78f8) Register('dcr23f' 4 0x78fc) Register('dcr240' 4 0x7900) 
Register('dcr241' 4 0x7904) Register('dcr242' 4 0x7908) Register('dcr243' 4 0x790c) Register('dcr244' 4 0x7910) Register('dcr245' 4 0x7914) Register('dcr246' 4 0x7918) Register('dcr247' 4 0x791c) Register('dcr248' 4 0x7920) Register('dcr249' 4 0x7924) Register('dcr24a' 4 0x7928) Register('dcr24b' 4 0x792c) Register('dcr24c' 4 0x7930) Register('dcr24d' 4 0x7934) Register('dcr24e' 4 0x7938) Register('dcr24f' 4 0x793c) Register('dcr250' 4 0x7940) Register('dcr251' 4 0x7944) Register('dcr252' 4 0x7948) Register('dcr253' 4 0x794c) Register('dcr254' 4 0x7950) Register('dcr255' 4 0x7954) Register('dcr256' 4 0x7958) Register('dcr257' 4 0x795c) Register('dcr258' 4 0x7960) Register('dcr259' 4 0x7964) Register('dcr25a' 4 0x7968) Register('dcr25b' 4 0x796c) Register('dcr25c' 4 0x7970) Register('dcr25d' 4 0x7974) Register('dcr25e' 4 0x7978) Register('dcr25f' 4 0x797c) Register('dcr260' 4 0x7980) Register('dcr261' 4 0x7984) Register('dcr262' 4 0x7988) Register('dcr263' 4 0x798c) Register('dcr264' 4 0x7990) Register('dcr265' 4 0x7994) Register('dcr266' 4 0x7998) Register('dcr267' 4 0x799c) Register('dcr268' 4 0x79a0) Register('dcr269' 4 0x79a4) Register('dcr26a' 4 0x79a8) Register('dcr26b' 4 0x79ac) Register('dcr26c' 4 0x79b0) Register('dcr26d' 4 0x79b4) Register('dcr26e' 4 0x79b8) Register('dcr26f' 4 0x79bc) Register('dcr270' 4 0x79c0) Register('dcr271' 4 0x79c4) Register('dcr272' 4 0x79c8) Register('dcr273' 4 0x79cc) Register('dcr274' 4 0x79d0) Register('dcr275' 4 0x79d4) Register('dcr276' 4 0x79d8) Register('dcr277' 4 0x79dc) Register('dcr278' 4 0x79e0) Register('dcr279' 4 0x79e4) Register('dcr27a' 4 0x79e8) Register('dcr27b' 4 0x79ec) Register('dcr27c' 4 0x79f0) Register('dcr27d' 4 0x79f4) Register('dcr27e' 4 0x79f8) Register('dcr27f' 4 0x79fc) Register('dcr280' 4 0x7a00) Register('dcr281' 4 0x7a04) Register('dcr282' 4 0x7a08) Register('dcr283' 4 0x7a0c) Register('dcr284' 4 0x7a10) Register('dcr285' 4 0x7a14) Register('dcr286' 4 0x7a18) Register('dcr287' 4 0x7a1c) 
Register('dcr288' 4 0x7a20) Register('dcr289' 4 0x7a24) Register('dcr28a' 4 0x7a28) Register('dcr28b' 4 0x7a2c) Register('dcr28c' 4 0x7a30) Register('dcr28d' 4 0x7a34) Register('dcr28e' 4 0x7a38) Register('dcr28f' 4 0x7a3c) Register('dcr290' 4 0x7a40) Register('dcr291' 4 0x7a44) Register('dcr292' 4 0x7a48) Register('dcr293' 4 0x7a4c) Register('dcr294' 4 0x7a50) Register('dcr295' 4 0x7a54) Register('dcr296' 4 0x7a58) Register('dcr297' 4 0x7a5c) Register('dcr298' 4 0x7a60) Register('dcr299' 4 0x7a64) Register('dcr29a' 4 0x7a68) Register('dcr29b' 4 0x7a6c) Register('dcr29c' 4 0x7a70) Register('dcr29d' 4 0x7a74) Register('dcr29e' 4 0x7a78) Register('dcr29f' 4 0x7a7c) Register('dcr2a0' 4 0x7a80) Register('dcr2a1' 4 0x7a84) Register('dcr2a2' 4 0x7a88) Register('dcr2a3' 4 0x7a8c) Register('dcr2a4' 4 0x7a90) Register('dcr2a5' 4 0x7a94) Register('dcr2a6' 4 0x7a98) Register('dcr2a7' 4 0x7a9c) Register('dcr2a8' 4 0x7aa0) Register('dcr2a9' 4 0x7aa4) Register('dcr2aa' 4 0x7aa8) Register('dcr2ab' 4 0x7aac) Register('dcr2ac' 4 0x7ab0) Register('dcr2ad' 4 0x7ab4) Register('dcr2ae' 4 0x7ab8) Register('dcr2af' 4 0x7abc) Register('dcr2b0' 4 0x7ac0) Register('dcr2b1' 4 0x7ac4) Register('dcr2b2' 4 0x7ac8) Register('dcr2b3' 4 0x7acc) Register('dcr2b4' 4 0x7ad0) Register('dcr2b5' 4 0x7ad4) Register('dcr2b6' 4 0x7ad8) Register('dcr2b7' 4 0x7adc) Register('dcr2b8' 4 0x7ae0) Register('dcr2b9' 4 0x7ae4) Register('dcr2ba' 4 0x7ae8) Register('dcr2bb' 4 0x7aec) Register('dcr2bc' 4 0x7af0) Register('dcr2bd' 4 0x7af4) Register('dcr2be' 4 0x7af8) Register('dcr2bf' 4 0x7afc) Register('dcr2c0' 4 0x7b00) Register('dcr2c1' 4 0x7b04) Register('dcr2c2' 4 0x7b08) Register('dcr2c3' 4 0x7b0c) Register('dcr2c4' 4 0x7b10) Register('dcr2c5' 4 0x7b14) Register('dcr2c6' 4 0x7b18) Register('dcr2c7' 4 0x7b1c) Register('dcr2c8' 4 0x7b20) Register('dcr2c9' 4 0x7b24) Register('dcr2ca' 4 0x7b28) Register('dcr2cb' 4 0x7b2c) Register('dcr2cc' 4 0x7b30) Register('dcr2cd' 4 0x7b34) Register('dcr2ce' 4 0x7b38) 
Register('dcr2cf' 4 0x7b3c) Register('dcr2d0' 4 0x7b40) Register('dcr2d1' 4 0x7b44) Register('dcr2d2' 4 0x7b48) Register('dcr2d3' 4 0x7b4c) Register('dcr2d4' 4 0x7b50) Register('dcr2d5' 4 0x7b54) Register('dcr2d6' 4 0x7b58) Register('dcr2d7' 4 0x7b5c) Register('dcr2d8' 4 0x7b60) Register('dcr2d9' 4 0x7b64) Register('dcr2da' 4 0x7b68) Register('dcr2db' 4 0x7b6c) Register('dcr2dc' 4 0x7b70) Register('dcr2dd' 4 0x7b74) Register('dcr2de' 4 0x7b78) Register('dcr2df' 4 0x7b7c) Register('dcr2e0' 4 0x7b80) Register('dcr2e1' 4 0x7b84) Register('dcr2e2' 4 0x7b88) Register('dcr2e3' 4 0x7b8c) Register('dcr2e4' 4 0x7b90) Register('dcr2e5' 4 0x7b94) Register('dcr2e6' 4 0x7b98) Register('dcr2e7' 4 0x7b9c) Register('dcr2e8' 4 0x7ba0) Register('dcr2e9' 4 0x7ba4) Register('dcr2ea' 4 0x7ba8) Register('dcr2eb' 4 0x7bac) Register('dcr2ec' 4 0x7bb0) Register('dcr2ed' 4 0x7bb4) Register('dcr2ee' 4 0x7bb8) Register('dcr2ef' 4 0x7bbc) Register('dcr2f0' 4 0x7bc0) Register('dcr2f1' 4 0x7bc4) Register('dcr2f2' 4 0x7bc8) Register('dcr2f3' 4 0x7bcc) Register('dcr2f4' 4 0x7bd0) Register('dcr2f5' 4 0x7bd4) Register('dcr2f6' 4 0x7bd8) Register('dcr2f7' 4 0x7bdc) Register('dcr2f8' 4 0x7be0) Register('dcr2f9' 4 0x7be4) Register('dcr2fa' 4 0x7be8) Register('dcr2fb' 4 0x7bec) Register('dcr2fc' 4 0x7bf0) Register('dcr2fd' 4 0x7bf4) Register('dcr2fe' 4 0x7bf8) Register('dcr2ff' 4 0x7bfc) Register('dcr300' 4 0x7c00) Register('dcr301' 4 0x7c04) Register('dcr302' 4 0x7c08) Register('dcr303' 4 0x7c0c) Register('dcr304' 4 0x7c10) Register('dcr305' 4 0x7c14) Register('dcr306' 4 0x7c18) Register('dcr307' 4 0x7c1c) Register('dcr308' 4 0x7c20) Register('dcr309' 4 0x7c24) Register('dcr30a' 4 0x7c28) Register('dcr30b' 4 0x7c2c) Register('dcr30c' 4 0x7c30) Register('dcr30d' 4 0x7c34) Register('dcr30e' 4 0x7c38) Register('dcr30f' 4 0x7c3c) Register('dcr310' 4 0x7c40) Register('dcr311' 4 0x7c44) Register('dcr312' 4 0x7c48) Register('dcr313' 4 0x7c4c) Register('dcr314' 4 0x7c50) Register('dcr315' 4 0x7c54) 
Register('dcr316' 4 0x7c58) Register('dcr317' 4 0x7c5c) Register('dcr318' 4 0x7c60) Register('dcr319' 4 0x7c64) Register('dcr31a' 4 0x7c68) Register('dcr31b' 4 0x7c6c) Register('dcr31c' 4 0x7c70) Register('dcr31d' 4 0x7c74) Register('dcr31e' 4 0x7c78) Register('dcr31f' 4 0x7c7c) Register('dcr320' 4 0x7c80) Register('dcr321' 4 0x7c84) Register('dcr322' 4 0x7c88) Register('dcr323' 4 0x7c8c) Register('dcr324' 4 0x7c90) Register('dcr325' 4 0x7c94) Register('dcr326' 4 0x7c98) Register('dcr327' 4 0x7c9c) Register('dcr328' 4 0x7ca0) Register('dcr329' 4 0x7ca4) Register('dcr32a' 4 0x7ca8) Register('dcr32b' 4 0x7cac) Register('dcr32c' 4 0x7cb0) Register('dcr32d' 4 0x7cb4) Register('dcr32e' 4 0x7cb8) Register('dcr32f' 4 0x7cbc) Register('dcr330' 4 0x7cc0) Register('dcr331' 4 0x7cc4) Register('dcr332' 4 0x7cc8) Register('dcr333' 4 0x7ccc) Register('dcr334' 4 0x7cd0) Register('dcr335' 4 0x7cd4) Register('dcr336' 4 0x7cd8) Register('dcr337' 4 0x7cdc) Register('dcr338' 4 0x7ce0) Register('dcr339' 4 0x7ce4) Register('dcr33a' 4 0x7ce8) Register('dcr33b' 4 0x7cec) Register('dcr33c' 4 0x7cf0) Register('dcr33d' 4 0x7cf4) Register('dcr33e' 4 0x7cf8) Register('dcr33f' 4 0x7cfc) Register('dcr340' 4 0x7d00) Register('dcr341' 4 0x7d04) Register('dcr342' 4 0x7d08) Register('dcr343' 4 0x7d0c) Register('dcr344' 4 0x7d10) Register('dcr345' 4 0x7d14) Register('dcr346' 4 0x7d18) Register('dcr347' 4 0x7d1c) Register('dcr348' 4 0x7d20) Register('dcr349' 4 0x7d24) Register('dcr34a' 4 0x7d28) Register('dcr34b' 4 0x7d2c) Register('dcr34c' 4 0x7d30) Register('dcr34d' 4 0x7d34) Register('dcr34e' 4 0x7d38) Register('dcr34f' 4 0x7d3c) Register('dcr350' 4 0x7d40) Register('dcr351' 4 0x7d44) Register('dcr352' 4 0x7d48) Register('dcr353' 4 0x7d4c) Register('dcr354' 4 0x7d50) Register('dcr355' 4 0x7d54) Register('dcr356' 4 0x7d58) Register('dcr357' 4 0x7d5c) Register('dcr358' 4 0x7d60) Register('dcr359' 4 0x7d64) Register('dcr35a' 4 0x7d68) Register('dcr35b' 4 0x7d6c) Register('dcr35c' 4 0x7d70) 
Register('dcr35d' 4 0x7d74) Register('dcr35e' 4 0x7d78) Register('dcr35f' 4 0x7d7c) Register('dcr360' 4 0x7d80) Register('dcr361' 4 0x7d84) Register('dcr362' 4 0x7d88) Register('dcr363' 4 0x7d8c) Register('dcr364' 4 0x7d90) Register('dcr365' 4 0x7d94) Register('dcr366' 4 0x7d98) Register('dcr367' 4 0x7d9c) Register('dcr368' 4 0x7da0) Register('dcr369' 4 0x7da4) Register('dcr36a' 4 0x7da8) Register('dcr36b' 4 0x7dac) Register('dcr36c' 4 0x7db0) Register('dcr36d' 4 0x7db4) Register('dcr36e' 4 0x7db8) Register('dcr36f' 4 0x7dbc) Register('dcr370' 4 0x7dc0) Register('dcr371' 4 0x7dc4) Register('dcr372' 4 0x7dc8) Register('dcr373' 4 0x7dcc) Register('dcr374' 4 0x7dd0) Register('dcr375' 4 0x7dd4) Register('dcr376' 4 0x7dd8) Register('dcr377' 4 0x7ddc) Register('dcr378' 4 0x7de0) Register('dcr379' 4 0x7de4) Register('dcr37a' 4 0x7de8) Register('dcr37b' 4 0x7dec) Register('dcr37c' 4 0x7df0) Register('dcr37d' 4 0x7df4) Register('dcr37e' 4 0x7df8) Register('dcr37f' 4 0x7dfc) Register('dcr380' 4 0x7e00) Register('dcr381' 4 0x7e04) Register('dcr382' 4 0x7e08) Register('dcr383' 4 0x7e0c) Register('dcr384' 4 0x7e10) Register('dcr385' 4 0x7e14) Register('dcr386' 4 0x7e18) Register('dcr387' 4 0x7e1c) Register('dcr388' 4 0x7e20) Register('dcr389' 4 0x7e24) Register('dcr38a' 4 0x7e28) Register('dcr38b' 4 0x7e2c) Register('dcr38c' 4 0x7e30) Register('dcr38d' 4 0x7e34) Register('dcr38e' 4 0x7e38) Register('dcr38f' 4 0x7e3c) Register('dcr390' 4 0x7e40) Register('dcr391' 4 0x7e44) Register('dcr392' 4 0x7e48) Register('dcr393' 4 0x7e4c) Register('dcr394' 4 0x7e50) Register('dcr395' 4 0x7e54) Register('dcr396' 4 0x7e58) Register('dcr397' 4 0x7e5c) Register('dcr398' 4 0x7e60) Register('dcr399' 4 0x7e64) Register('dcr39a' 4 0x7e68) Register('dcr39b' 4 0x7e6c) Register('dcr39c' 4 0x7e70) Register('dcr39d' 4 0x7e74) Register('dcr39e' 4 0x7e78) Register('dcr39f' 4 0x7e7c) Register('dcr3a0' 4 0x7e80) Register('dcr3a1' 4 0x7e84) Register('dcr3a2' 4 0x7e88) Register('dcr3a3' 4 0x7e8c) 
Register('dcr3a4' 4 0x7e90) Register('dcr3a5' 4 0x7e94) Register('dcr3a6' 4 0x7e98) Register('dcr3a7' 4 0x7e9c) Register('dcr3a8' 4 0x7ea0) Register('dcr3a9' 4 0x7ea4) Register('dcr3aa' 4 0x7ea8) Register('dcr3ab' 4 0x7eac) Register('dcr3ac' 4 0x7eb0) Register('dcr3ad' 4 0x7eb4) Register('dcr3ae' 4 0x7eb8) Register('dcr3af' 4 0x7ebc) Register('dcr3b0' 4 0x7ec0) Register('dcr3b1' 4 0x7ec4) Register('dcr3b2' 4 0x7ec8) Register('dcr3b3' 4 0x7ecc) Register('dcr3b4' 4 0x7ed0) Register('dcr3b5' 4 0x7ed4) Register('dcr3b6' 4 0x7ed8) Register('dcr3b7' 4 0x7edc) Register('dcr3b8' 4 0x7ee0) Register('dcr3b9' 4 0x7ee4) Register('dcr3ba' 4 0x7ee8) Register('dcr3bb' 4 0x7eec) Register('dcr3bc' 4 0x7ef0) Register('dcr3bd' 4 0x7ef4) Register('dcr3be' 4 0x7ef8) Register('dcr3bf' 4 0x7efc) Register('dcr3c0' 4 0x7f00) Register('dcr3c1' 4 0x7f04) Register('dcr3c2' 4 0x7f08) Register('dcr3c3' 4 0x7f0c) Register('dcr3c4' 4 0x7f10) Register('dcr3c5' 4 0x7f14) Register('dcr3c6' 4 0x7f18) Register('dcr3c7' 4 0x7f1c) Register('dcr3c8' 4 0x7f20) Register('dcr3c9' 4 0x7f24) Register('dcr3ca' 4 0x7f28) Register('dcr3cb' 4 0x7f2c) Register('dcr3cc' 4 0x7f30) Register('dcr3cd' 4 0x7f34) Register('dcr3ce' 4 0x7f38) Register('dcr3cf' 4 0x7f3c) Register('dcr3d0' 4 0x7f40) Register('dcr3d1' 4 0x7f44) Register('dcr3d2' 4 0x7f48) Register('dcr3d3' 4 0x7f4c) Register('dcr3d4' 4 0x7f50) Register('dcr3d5' 4 0x7f54) Register('dcr3d6' 4 0x7f58) Register('dcr3d7' 4 0x7f5c) Register('dcr3d8' 4 0x7f60) Register('dcr3d9' 4 0x7f64) Register('dcr3da' 4 0x7f68) Register('dcr3db' 4 0x7f6c) Register('dcr3dc' 4 0x7f70) Register('dcr3dd' 4 0x7f74) Register('dcr3de' 4 0x7f78) Register('dcr3df' 4 0x7f7c) Register('dcr3e0' 4 0x7f80) Register('dcr3e1' 4 0x7f84) Register('dcr3e2' 4 0x7f88) Register('dcr3e3' 4 0x7f8c) Register('dcr3e4' 4 0x7f90) Register('dcr3e5' 4 0x7f94) Register('dcr3e6' 4 0x7f98) Register('dcr3e7' 4 0x7f9c) Register('dcr3e8' 4 0x7fa0) Register('dcr3e9' 4 0x7fa4) Register('dcr3ea' 4 0x7fa8) 
Register('dcr3eb' 4 0x7fac) Register('dcr3ec' 4 0x7fb0) Register('dcr3ed' 4 0x7fb4) Register('dcr3ee' 4 0x7fb8) Register('dcr3ef' 4 0x7fbc) Register('dcr3f0' 4 0x7fc0) Register('dcr3f1' 4 0x7fc4) Register('dcr3f2' 4 0x7fc8) Register('dcr3f3' 4 0x7fcc) Register('dcr3f4' 4 0x7fd0) Register('dcr3f5' 4 0x7fd4) Register('dcr3f6' 4 0x7fd8) Register('dcr3f7' 4 0x7fdc) Register('dcr3f8' 4 0x7fe0) Register('dcr3f9' 4 0x7fe4) Register('dcr3fa' 4 0x7fe8) Register('dcr3fb' 4 0x7fec) Register('dcr3fc' 4 0x7ff0) Register('dcr3fd' 4 0x7ff4) Register('dcr3fe' 4 0x7ff8) Register('dcr3ff' 4 0x7ffc) Register('acc' 8 0x10000)]<block_end>register_arch(['powerpc:le:32:quicc'] 32 Endness.LE ArchPcode_PowerPC_LE_32_QUICC)<line_sep>
from importlib import _bootstrap
from . import util
import collections
import imp
import sys
import unittest


class PathHookTests(unittest.TestCase):

    """Test the path hook for extension modules."""

    # XXX Should it only succeed for pre-existing directories?
    # XXX Should it only work for directories containing an extension module?

    def hook(self, entry):
        # Delegate to the import machinery's file-based path hook.
        return _bootstrap._file_path_hook(entry)

    def test_success(self):
        # The finder produced for a directory that holds a known extension
        # module must expose the find_module() API.
        finder = self.hook(util.PATH)
        self.assertTrue(hasattr(finder, 'find_module'))


def test_main():
    from test.support import run_unittest
    run_unittest(PathHookTests)


if __name__ == '__main__':
    test_main()
load("//tools/bzl:maven_jar.bzl", "maven_jar")

def external_plugin_deps(omit_commons_codec = True):
    """Declare the external Maven jars this plugin builds against.

    Args:
      omit_commons_codec: when True (the default), do not fetch the
        commons-codec jar (assumed to be provided by the host build).
    """
    jackson_vers = "2.10.2"

    maven_jar(
        name = "scribejava-core",
        artifact = "com.github.scribejava:scribejava-core:6.9.0",
        sha1 = "ed761f450d8382f75787e8fee9ae52e7ec768747",
    )

    maven_jar(
        name = "jackson-annotations",
        artifact = "com.fasterxml.jackson.core:jackson-annotations:" + jackson_vers,
        sha1 = "3a13b6105946541b8d4181a0506355b5fae63260",
    )

    # jackson-databind depends on the annotations jar declared above.
    maven_jar(
        name = "jackson-databind",
        artifact = "com.fasterxml.jackson.core:jackson-databind:" + jackson_vers,
        sha1 = "0528de95f198afafbcfb0c09d2e43b6e0ea663ec",
        deps = ["@jackson-annotations//jar"],
    )

    if not omit_commons_codec:
        maven_jar(
            name = "commons-codec",
            artifact = "commons-codec:commons-codec:1.4",
            sha1 = "4216af16d38465bbab0f3dff8efa14204f7a399a",
        )
#
# This file is part of LiteX.
#
# Copyright (c) 2014-2019 <NAME> <<EMAIL>>
# Copyright (c) 2019 msloniewski <<EMAIL>>
# Copyright (c) 2019 vytautasb <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause

import os
import subprocess
import sys
import math

from shutil import which

from migen.fhdl.structure import _Fragment

from litex.build.generic_platform import Pins, IOStandard, Misc
from litex.build import tools

# IO/Placement Constraints (.qsf) ------------------------------------------------------------------

def _format_constraint(c, signame, fmt_r):
    """Translate one constraint object into a single .qsf assignment line.

    :param c: the constraint (Pins, IOStandard or Misc instance).
    :param signame: design signal name the constraint applies to.
    :param fmt_r: human-readable resource name, emitted as the -comment field.
    :return: the .qsf line, or None for unhandled constraint types.
    """
    # IO location constraints
    if isinstance(c, Pins):
        tpl = "set_location_assignment -comment \"{name}\" -to {signame} Pin_{pin}"
        return tpl.format(signame=signame, name=fmt_r, pin=c.identifiers[0])

    # IO standard constraints
    elif isinstance(c, IOStandard):
        tpl = "set_instance_assignment -name io_standard -comment \"{name}\" \"{std}\" -to {signame}"
        return tpl.format(signame=signame, name=fmt_r, std=c.name)

    # Others constraints
    elif isinstance(c, Misc):
        # A 2-element Misc is a (name, value) pair; otherwise it is a bare name.
        if not isinstance(c.misc, str) and len(c.misc) == 2:
            tpl = "set_instance_assignment -comment \"{name}\" -name {misc[0]} \"{misc[1]}\" -to {signame}"
            return tpl.format(signame=signame, name=fmt_r, misc=c.misc)
        else:
            tpl = "set_instance_assignment -comment \"{name}\" -name {misc} -to {signame}"
            return tpl.format(signame=signame, name=fmt_r, misc=c.misc)

def _format_qsf_constraint(signame, pin, others, resname):
    """Build all .qsf lines (pin location + extra constraints) for one signal pin."""
    fmt_r = "{}:{}".format(*resname[:2])
    if resname[2] is not None:
        fmt_r += "." + resname[2]
    fmt_c = [_format_constraint(c, signame, fmt_r) for c in ([Pins(pin)] + others)]
    return '\n'.join(fmt_c)

def _build_qsf_constraints(named_sc, named_pc):
    """Generate the IO/placement constraint section of the .qsf file.

    :param named_sc: resolved signal constraints (sig, pins, others, resname).
    :param named_pc: raw platform command strings, appended verbatim.
    :return: the constraint section as a single string.
    """
    qsf = []
    for sig, pins, others, resname in named_sc:
        if len(pins) > 1:
            # Vector signal: constrain each bit individually as sig[i].
            for i, p in enumerate(pins):
                qsf.append(_format_qsf_constraint("{}[{}]".format(sig, i), p, others, resname))
        else:
            qsf.append(_format_qsf_constraint(sig, pins[0], others, resname))
    if named_pc:
        qsf.append("\n\n".join(named_pc))
    return "\n".join(qsf)

# Timing Constraints (.sdc) ------------------------------------------------------------------------

def _build_sdc(clocks, false_paths, vns, named_sc, build_name, additional_sdc_commands):
    """Generate the design timing constraints file (<build_name>.sdc)."""
    sdc = []

    # Clock constraints: a clock that is also an IO pad is constrained on the
    # port, an internally generated one on the net.
    for clk, period in sorted(clocks.items(), key=lambda x: x[0].duid):
        is_port = False
        for sig, pins, others, resname in named_sc:
            if sig == vns.get_name(clk):
                is_port = True
        if is_port:
            tpl = "create_clock -name {clk} -period {period} [get_ports {{{clk}}}]"
            sdc.append(tpl.format(clk=vns.get_name(clk), period=str(period)))
        else:
            tpl = "create_clock -name {clk} -period {period} [get_nets {{{clk}}}]"
            sdc.append(tpl.format(clk=vns.get_name(clk), period=str(period)))

    # False path constraints
    for from_, to in sorted(false_paths, key=lambda x: (x[0].duid, x[1].duid)):
        tpl = "set_false_path -from [get_clocks {{{from_}}}] -to [get_clocks {{{to}}}]"
        sdc.append(tpl.format(from_=vns.get_name(from_), to=vns.get_name(to)))

    # Add additional commands
    sdc += additional_sdc_commands

    # Generate .sdc
    tools.write_to_file("{}.sdc".format(build_name), "\n".join(sdc))

# Project (.qsf) -----------------------------------------------------------------------------------

def _build_qsf(device, ips, sources, vincpaths, named_sc, named_pc, build_name, additional_qsf_commands):
    """Generate the Quartus project/settings file (<build_name>.qsf)."""
    qsf = []

    # Set device
    qsf.append("set_global_assignment -name DEVICE {}".format(device))

    # Add sources
    for filename, language, library in sources:
        if language == "verilog":
            language = "systemverilog" # Enforce use of SystemVerilog
        tpl = "set_global_assignment -name {lang}_FILE {path} -library {lib}"
        # Do not add None type files
        if language is not None:
            qsf.append(tpl.format(lang=language.upper(), path=filename.replace("\\", "/"), lib=library))
        # Check if the file is a header. Those should not be explicitly added to qsf,
        # but rather included in include search_path
        else:
            if filename.endswith(".svh") or filename.endswith(".vh"):
                fpath = os.path.dirname(filename)
                if fpath not in vincpaths:
                    vincpaths.append(fpath)

    # Add ips
    # Fix: the template had lost its {filename} placeholder and str.replace()
    # was being called with a keyword argument, which raises TypeError as soon
    # as the design has any IP file; use str.format() on a proper template.
    tpl = "set_global_assignment -name QSYS_FILE {filename}"
    for filename in ips:
        qsf.append(tpl.format(filename=filename.replace("\\", "/")))

    # Add include paths
    for path in vincpaths:
        qsf.append("set_global_assignment -name SEARCH_PATH {}".format(path.replace("\\", "/")))

    # Set top level
    qsf.append("set_global_assignment -name top_level_entity " + build_name)

    # Add io, placement constraints
    qsf.append(_build_qsf_constraints(named_sc, named_pc))

    # Set timing constraints
    qsf.append("set_global_assignment -name SDC_FILE {}.sdc".format(build_name))

    # Add additional commands
    qsf += additional_qsf_commands

    # Generate .qsf
    tools.write_to_file("{}.qsf".format(build_name), "\n".join(qsf))

# Script -------------------------------------------------------------------------------------------

def _build_script(build_name, create_rbf):
    """Generate the platform-specific script that runs the Quartus flow.

    :return: the generated script filename.
    """
    if sys.platform in ["win32", "cygwin"]:
        script_contents = "REM Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
        script_file = "build_" + build_name + ".bat"
    else:
        script_contents = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
        script_file = "build_" + build_name + ".sh"
    script_contents += """
quartus_map --read_settings_files=on  --write_settings_files=off {build_name} -c {build_name}
quartus_fit --read_settings_files=off --write_settings_files=off {build_name} -c {build_name}
quartus_asm --read_settings_files=off --write_settings_files=off {build_name} -c {build_name}
quartus_sta {build_name} -c {build_name}"""
    if create_rbf:
        script_contents += """
if [ -f "{build_name}.sof" ]
then
    quartus_cpf -c {build_name}.sof {build_name}.rbf
fi
"""
    script_contents = script_contents.format(build_name=build_name)
    tools.write_to_file(script_file, script_contents, force_unix=True)
    return script_file

def _run_script(script):
    """Execute the generated build script, checking that Quartus is installed."""
    if sys.platform in ["win32", "cygwin"]:
        shell = ["cmd", "/c"]
    else:
        shell = ["bash"]

    if which("quartus_map") is None:
        msg = "Unable to find Quartus toolchain, please:\n"
        msg += "- Add Quartus toolchain to your $PATH."
        raise OSError(msg)

    if subprocess.call(shell + [script]) != 0:
        raise OSError("Error occured during Quartus's script execution.")

# AlteraQuartusToolchain ---------------------------------------------------------------------------

class AlteraQuartusToolchain:
    """Build backend driving Intel/Altera Quartus from a LiteX platform."""

    attr_translate = {}

    def __init__(self):
        self.clocks      = dict()
        self.false_paths = set()
        self.additional_sdc_commands = []
        self.additional_qsf_commands = []

    def build(self, platform, fragment,
        build_dir  = "build",
        build_name = "top",
        run        = True,
        **kwargs):
        """Generate project/constraint files and (optionally) run Quartus.

        :param platform: the LiteX platform describing the target device.
        :param fragment: the design fragment (or module providing one).
        :param build_dir: directory where all files are generated.
        :param build_name: base name for the generated files and top level.
        :param run: when True, execute the generated build script.
        :return: the signal namespace of the generated verilog.
        """

        # Create build directory
        cwd = os.getcwd()
        os.makedirs(build_dir, exist_ok=True)
        os.chdir(build_dir)

        # Finalize design
        if not isinstance(fragment, _Fragment):
            fragment = fragment.get_fragment()
        platform.finalize(fragment)

        # Generate verilog
        v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
        named_sc, named_pc = platform.resolve_signals(v_output.ns)
        v_file = build_name + ".v"
        v_output.write(v_file)
        platform.add_source(v_file)

        # Generate design timing constraints file (.sdc)
        _build_sdc(
            clocks                  = self.clocks,
            false_paths             = self.false_paths,
            vns                     = v_output.ns,
            named_sc                = named_sc,
            build_name              = build_name,
            additional_sdc_commands = self.additional_sdc_commands)

        # Generate design project and location constraints file (.qsf)
        _build_qsf(
            device                  = platform.device,
            ips                     = platform.ips,
            sources                 = platform.sources,
            vincpaths               = platform.verilog_include_paths,
            named_sc                = named_sc,
            named_pc                = named_pc,
            build_name              = build_name,
            additional_qsf_commands = self.additional_qsf_commands)

        # Generate build script
        script = _build_script(build_name, platform.create_rbf)

        # Run
        if run:
            _run_script(script)

        os.chdir(cwd)

        return v_output.ns

    def add_period_constraint(self, platform, clk, period):
        """Register a clock period constraint; re-constraining with a
        different period raises ValueError."""
        clk.attr.add("keep")
        period = math.floor(period*1e3)/1e3 # round to lowest picosecond
        if clk in self.clocks:
            if period != self.clocks[clk]:
                raise ValueError("Clock already constrained to {:.2f}ns, new constraint to {:.2f}ns"
                    .format(self.clocks[clk], period))
        self.clocks[clk] = period

    def add_false_path_constraint(self, platform, from_, to):
        """Register a false path between two clocks (stored once per pair)."""
        from_.attr.add("keep")
        to.attr.add("keep")
        if (to, from_) not in self.false_paths:
            self.false_paths.add((from_, to))
import Crypto.Random.random as rand
import itertools
import math  # for log
import sys


def decomposition(i):
    """Yield random positive parts summing to i (from stack exchange;
    the resulting distribution is probably not uniform)."""
    while i > 0:
        n = rand.randint(1, i)
        yield n
        i -= n


def Decomposition(i):
    """Draw random decompositions of i until one with all-distinct parts
    is found, and return it as a list."""
    while True:
        l = list(decomposition(i))
        if len(set(l)) == len(l):
            return l


def decomposition2(n, s, d, k):
    """Home-brewed decomposition with no duplicate parts that uses the
    value d as a cut point between "send" and "change" amounts.

    :param n: total to decompose.
    :param s: number of random cut points to draw (one slot is reserved for d).
    :param d: the send total; always included as a cut point.
    :param k: minimum number of send parts (the send list must exceed k).
    :return: (send_parts, change_parts), both sorted ascending.
    """
    s = s - 1
    while True:
        a = [d]
        nn = n
        for i in range(0, s):
            a.append(rand.randint(0, n))
        a.sort()
        b = []  # parts carved out at or above the cut point d (change)
        c = []  # parts carved out below the cut point d (send)
        while len(a) > 0:
            t = a.pop()
            if t >= d:
                b.append(nn - t)
            else:
                c.append(nn - t)
            nn = t
        c.append(nn)
        tot = b[:] + c[:]
        # Accept only when all parts are distinct (the set sums back to n)
        # and there are more than k send parts.
        if sum(set(tot)) == n and len(c) > int(k):
            return sorted(c), sorted(b)


def decomposition3(n, s, d, k):
    """Combination of both methods, designed to get some smaller values:
    any part larger than roughly n/s is re-split with Decomposition().

    Fix: iterate over snapshots of the lists — the original removed
    elements from the list it was iterating, which silently skipped the
    element following each removal.
    """
    send, change = decomposition2(n, s, d, k)
    for i in list(send):
        if i > n / s:
            send.remove(i)
            send.extend(Decomposition(i))
    for i in list(change):
        if i > n / (s - 1):
            change.remove(i)
            change.extend(Decomposition(i))
    return send, change


def divv(l, m):
    """Scale every element of l down by m, returning floats."""
    return [a / float(m) for a in l]


def frexp10(x):
    """Return (mantissa, exponent) of x in base 10, i.e. x = m * 10**exp."""
    exp = int(math.log10(x))
    return x / 10 ** exp, exp


def decideAmounts(totalInputs, toSend, Partitions, k, fuzz):
    """Pick and print candidate output denominations for a transaction.

    fuzz is an optional amount to fuzz the transaction by, so if you
    start with a big obvious number like 2000, it might be fuzzed by up
    to "fuzz" amount.
    """
    fz = rand.randint(0, int(fuzz * 1000)) / 1000.0
    toSend += fz
    # Choose a power-of-ten multiplier so the decomposition works on integers.
    _, ii = frexp10(totalInputs)
    ii = 10 ** (-1 * min(ii - 2, 0))
    print("ii", ii)
    M = 10 ** (int(math.log(2 ** Partitions) / math.log(10))) * ii
    print("multiplier:", M)
    totalInputs = int(totalInputs * M)
    toSend = int(toSend * M)
    send_amounts, change_amounts = decomposition3(totalInputs, Partitions, toSend, k)
    all_amounts = send_amounts[:] + change_amounts[:]
    rand.shuffle(all_amounts)
    print("")
    print("change amounts:", divv(change_amounts, M))
    print("send amounts:", divv(send_amounts, M))
    print("now from the following, how much is sent?")
    print("all amounts:", sorted(divv(all_amounts, M)))
    print("possible sent amounts:")
    # Every subset of the outputs is a potential "sent" total.
    amounts = []
    for L in range(0, len(all_amounts) + 1):
        for subset in itertools.combinations(all_amounts, L):
            amounts.append(sum(subset))
    print("number of possible sent amounts:")
    print(len(amounts))
    print("2^N:", 2 ** len(all_amounts))
    print("number of possible sent amounts duplicates removed:")
    print(len(list(set(amounts))))


if len(sys.argv) > 2:
    parts = 7
    kk = rand.randint(1, int(parts / 4))  # how many sends to demand
    fuzz = 1
    decideAmounts(float(sys.argv[1]), float(sys.argv[2]), parts, kk, fuzz)
'''OpenGL extension ARB.transform_feedback_instanced

This module customises the behaviour of the
OpenGL.raw.GL.ARB.transform_feedback_instanced to provide a more
Python-friendly API

Overview (from the spec)

	Multiple instances of geometry may be specified to the GL by calling
	functions such as DrawArraysInstanced and DrawElementsInstanced.  Further,
	the results of a transform feedback operation may be returned to the GL
	by calling DrawTransformFeedback, or DrawTransformFeedbackStream.  However,
	it is not presently possible to draw multiple instances of data
	transform feedback without using a query and the resulting round trip from
	server to client.

	This extension adds functionality to draw multiple instances of the result
	of a transform feedback operation.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/transform_feedback_instanced.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback_instanced import *
from OpenGL.raw.GL.ARB.transform_feedback_instanced import _EXTENSION_NAME


def glInitTransformFeedbackInstancedARB():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available

### END AUTOGENERATED SECTION
# -*- coding: utf-8 -*-
"""
Geometrical functions
---------------------

References
----------
.. [W1] Wikipedia: https://de.wikipedia.org/wiki/Ellipse#Ellipsengleichung_(Parameterform)
.. [WAE] Wolfram Alpha: Ellipse. (http://mathworld.wolfram.com/Ellipse.html)

"""
import numpy as np
from typing import Union


def circle(center: Union[list, np.ndarray], radius: float = 1.0, num_points: int = 20) -> np.ndarray:
    """
    Build a circle with the given characteristics.

    Parameters
    ----------
    center : array-like
        2D Coordinates of center.
    radius : float
        Radius of the circle. Defaults to 1.0.
    num_points : int
        Number of segments. The returned polyline is closed (first point
        repeated at the end), so it contains ``num_points + 1`` points.
        Defaults to 20.

    Returns
    -------
    points : numpy.ndarray
        (num_points + 1)-by-2 array with the coordinates of the circle.
    """
    R = np.linspace(0.0, 2.0 * np.pi, num_points + 1)
    x = center[0] + radius * np.cos(R)
    y = center[1] + radius * np.sin(R)
    return np.array([x, y]).transpose()


def ellipse(center: Union[list, np.ndarray], phi: float,
            axes: Union[list, np.ndarray], num_points: int = 20) -> np.ndarray:
    """
    Build an ellipse with the given characteristics.

    Parameters
    ----------
    center : array-like
        2D Coordinates of center.
    phi : float
        Angle, in radians, of the major axis w.r.t. the X-axis
    axes : array-like
        Lengths of major and minor axes, respectively.
    num_points : int
        Number of segments. The returned polyline is closed (first point
        repeated at the end), so it contains ``num_points + 1`` points.
        Defaults to 20.

    Returns
    -------
    points : numpy.ndarray
        (num_points + 1)-by-2 array with the coordinates of the ellipse.
    """
    R = np.linspace(0.0, 2.0 * np.pi, num_points + 1)
    a, b = axes
    # Parametric form of a rotated ellipse, see [W1]/[WAE].
    x = center[0] + a * np.cos(R) * np.cos(phi) - b * np.sin(R) * np.sin(phi)
    y = center[1] + a * np.cos(R) * np.sin(phi) + b * np.sin(R) * np.cos(phi)
    return np.array([x, y]).transpose()
import dataclasses
import inspect
from dataclasses import dataclass, field
from pprint import pprint

import attr


class ManualComment:
    """Hand-written equivalent of the Comment dataclass below."""

    def __init__(self, id: int, text: str):
        self.id: int = id
        self.text: str = text

    def __repr__(self):
        return "{}(id={}, text={})".format(self.__class__.__name__, self.id, self.text)

    def __eq__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return (self.id, self.text) == (other.id, other.text)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return NotImplemented
        return not result

    def __hash__(self):
        return hash((self.__class__, self.id, self.text))

    def __lt__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return (self.id, self.text) < (other.id, other.text)

    def __le__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return (self.id, self.text) <= (other.id, other.text)

    def __gt__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return (self.id, self.text) > (other.id, other.text)

    def __ge__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return (self.id, self.text) >= (other.id, other.text)


@dataclass(frozen=True, order=True)
class Comment:
    id: int
    text: str = ""
    # replies participates in neither repr nor comparisons.
    replies: list[int] = field(default_factory=list, repr=False, compare=False)


@attr.s(frozen=True, order=True, slots=True)
class AttrComment:
    id: int = 0
    text: str = ""


def main():
    comment = Comment(1, "I just subscribed!")
    # comment.id = 3  # frozen dataclass: assignment raises FrozenInstanceError
    print(comment)
    print(dataclasses.astuple(comment))
    print(dataclasses.asdict(comment))

    copy = dataclasses.replace(comment, id=3)
    print(copy)

    pprint(inspect.getmembers(Comment, inspect.isfunction))


if __name__ == '__main__':
    main()
#
# Copyright (C) 2001,2002,2003 <NAME> and Rational Discovery LLC
#
""" Functionality for ranking bits using info gains

**Definitions used in this module**

  - *sequence*: an object capable of containing other objects which supports
    __getitem__() and __len__().  Examples of these include lists, tuples, and
    Numeric arrays.

  - *IntVector*: an object containing integers which supports __getitem__() and
    __len__(). Examples include lists, tuples, Numeric Arrays, and BitVects.

**NOTE**: Neither *sequences* nor *IntVectors* need to support item assignment.
  It is perfectly acceptable for them to be read-only, so long as they are
  random-access.

"""
import numpy

from rdkit.ML.InfoTheory import entropy


def FormCounts(bitVects, actVals, whichBit, nPossibleActs, nPossibleBitVals=2):
  """ generates the counts matrix for a particular bit

  **Arguments**

    - bitVects: a *sequence* containing *IntVectors*

    - actVals: a *sequence*

    - whichBit: an integer, the bit number to use.

    - nPossibleActs: the (integer) number of possible activity values.

    - nPossibleBitVals: (optional) if specified, this integer provides the maximum
      value attainable by the (increasingly inaccurately named) bits in _bitVects_.

  **Returns**

    a Numeric array with the counts

  **Notes**

    This is really intended for internal use.

  """
  if len(bitVects) != len(actVals):
    raise ValueError('var and activity lists should be the same length')
  # numpy.integer is an abstract type and is not accepted as a dtype by
  # modern numpy; use the concrete int64 instead.
  res = numpy.zeros((nPossibleBitVals, nPossibleActs), numpy.int64)
  for i in range(len(bitVects)):
    res[bitVects[i][whichBit], actVals[i]] += 1
  return res


def CalcInfoGains(bitVects, actVals, nPossibleActs, nPossibleBitVals=2):
  """ Calculates the information gain for a set of points and activity values

  **Arguments**

    - bitVects: a *sequence* containing *IntVectors*

    - actVals: a *sequence*

    - nPossibleActs: the (integer) number of possible activity values.

    - nPossibleBitVals: (optional) if specified, this integer provides the maximum
      value attainable by the (increasingly inaccurately named) bits in _bitVects_.

  **Returns**

    a list of floats

  """
  if len(bitVects) != len(actVals):
    raise ValueError('var and activity lists should be the same length')
  nBits = len(bitVects[0])
  # numpy.float was removed in numpy 1.24; use the concrete float64 dtype.
  res = numpy.zeros(nBits, numpy.float64)
  for bit in range(nBits):
    counts = FormCounts(bitVects, actVals, bit, nPossibleActs, nPossibleBitVals=nPossibleBitVals)
    res[bit] = entropy.InfoGain(counts)
  return res


def RankBits(bitVects, actVals, nPossibleBitVals=2, metricFunc=CalcInfoGains):
  """ Rank a set of bits according to a metric function

  **Arguments**

    - bitVects: a *sequence* containing *IntVectors*

    - actVals: a *sequence*

    - nPossibleBitVals: (optional) if specified, this integer provides the maximum
      value attainable by the (increasingly inaccurately named) bits in _bitVects_.

    - metricFunc: (optional) the metric function to be used.  See _CalcInfoGains()_
      for a description of the signature of this function.

  **Returns**

    A 2-tuple containing:

      - the relative order of the bits (a list of ints)

      - the metric calculated for each bit (a list of floats)

  """
  nPossibleActs = max(actVals) + 1
  metrics = metricFunc(bitVects, actVals, nPossibleActs, nPossibleBitVals=nPossibleBitVals)
  # argsort is ascending; reverse so the most informative bit comes first.
  bitOrder = list(numpy.argsort(metrics))
  bitOrder.reverse()
  return bitOrder, metrics


def AnalyzeSparseVects(bitVects, actVals):
  """ #DOC

  **Arguments**

    - bitVects: a *sequence* containing SBVs

    - actVals: a *sequence*

  **Returns**

    a list of floats

  **Notes**

    - these need to be bit vects and binary activities

  """
  nPts = len(bitVects)
  if nPts != len(actVals):
    raise ValueError('var and activity lists should be the same length')
  nBits = bitVects[0].GetSize()

  # Per-bit on-counts, split by whether the example was active.
  actives = numpy.zeros(nBits, numpy.int64)
  inactives = numpy.zeros(nBits, numpy.int64)
  nActives, nInactives = 0, 0
  for i in range(nPts):
    sig, act = bitVects[i], actVals[i]
    onBitList = sig.GetOnBits()
    if act:
      for bit in onBitList:
        actives[bit] += 1
      nActives += 1
    else:
      for bit in onBitList:
        inactives[bit] += 1
      nInactives += 1
  resTbl = numpy.zeros((2, 2), numpy.int64)
  res = []
  gains = []
  for bit in range(nBits):
    nAct, nInact = actives[bit], inactives[bit]
    # Skip bits that are never set; they carry no information.
    if nAct or nInact:
      resTbl[0, 0] = nAct
      resTbl[1, 0] = nPts - nAct
      resTbl[0, 1] = nInact
      resTbl[1, 1] = nPts - nInact
      gain = entropy.InfoGain(resTbl)
      gains.append(gain)
      res.append((bit, gain, nAct, nInact))
  return res, gains


def SparseRankBits(bitVects, actVals, metricFunc=AnalyzeSparseVects):
  """ Rank a set of bits according to a metric function

  **Arguments**

    - bitVects: a *sequence* containing SBVs

    - actVals: a *sequence*

    - metricFunc: (optional) the metric function to be used.  See
      _SparseCalcInfoGains()_ for a description of the signature of this function.

  **Returns**

    A 2-tuple containing:

      - the relative order of the bits (a list of ints)

      - the metric calculated for each bit (a list of floats)

  **Notes**

    - these need to be bit vects and binary activities

  """
  info, metrics = metricFunc(bitVects, actVals)
  bitOrder = list(numpy.argsort(metrics))
  bitOrder.reverse()
  return bitOrder, info
<import_stmt>copy<import_stmt>math<import_stmt>logging<import_from_stmt>typing Dict List Optional Tuple Union<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.jit<as>jit<import_stmt>torch.autograd<import_stmt>contextlib<import_stmt>glob<import_from_stmt>eight_mile.utils listify Offsets is_sequence str2bool get_alibi_slopes<import_from_stmt>eight_mile.utils transition_mask<as>transition_mask_np<line_sep>MASK_FALSE=<false><line_sep>logger=logging.getLogger("mead.layers")<def_stmt>sequence_mask lengths:torch.Tensor max_len:int=-1<arrow>torch.Tensor<block_start>"""Generate a sequence mask of shape `BxT` based on the given lengths :param lengths: A `B` tensor containing the lengths of each example :param max_len: The maximum width (length) allowed in this mask (default to None) :return: A mask """<line_sep>lens=lengths.cpu()<if_stmt>max_len<l>0<block_start>max_len_v=torch.max(lens)<block_end><else_stmt><block_start>max_len_v=max_len<block_end># 1 x T row=torch.arange(0 max_len_v).type_as(lens).view(1 -1)<line_sep># B x 1 col=lens.view(-1 1)<line_sep># Broadcast to B x T, compares increasing number to max mask=row<l>col<line_sep><return>mask<block_end><def_stmt>sequence_mask_mxlen lengths:torch.Tensor max_len:int<arrow>torch.Tensor<block_start>"""Generate a sequence mask of shape `BxT` based on the given lengths, with a maximum value This function primarily exists to make ONNX tracing work better :param lengths: A `B` tensor containing the lengths of each example :param max_len: The maximum width (length) allowed in this mask (default to None) :return: A mask """<line_sep>lens=lengths.cpu()<line_sep>max_len_v=max_len<line_sep># 1 x T row=torch.arange(0 max_len_v).type_as(lens).view(1 -1)<line_sep># B x 1 col=lens.view(-1 1)<line_sep># Broadcast to B x T, compares increasing number to max 
mask=row<l>col<line_sep><return>mask<block_end>@torch.jit.script<def_stmt>truncate_mask_over_time mask:torch.Tensor x:torch.Tensor<arrow>torch.Tensor<block_start>Tout=x.shape[1]<line_sep>mask=mask[: :Tout]<line_sep>#mask = mask.narrow(1, 0, arcs_h.shape[1]) <return>mask<block_end><def_stmt>vec_log_sum_exp vec:torch.Tensor dim:int<arrow>torch.Tensor<block_start>"""Vectorized version of log-sum-exp :param vec: Vector :param dim: What dimension to operate on :return: """<line_sep>max_scores,idx=torch.max(vec dim keepdim=<true>)<line_sep>max_scores_broadcast=max_scores.expand_as(vec)<line_sep><return>max_scores+torch.log(torch.sum(torch.exp(vec-max_scores_broadcast) dim keepdim=<true>))<block_end><def_stmt>unsort_batch batch:torch.Tensor perm_idx:torch.Tensor<arrow>torch.Tensor<block_start>"""Undo the sort on a batch of tensors done for packing the data in the RNN. :param batch: The batch of data batch first `[B, ...]` :param perm_idx: The permutation index returned from the torch.sort. :returns: The batch in the original order. """<line_sep># Add ones to the shape of the perm_idx until it can broadcast to the batch perm_idx=perm_idx.to(batch.device)<line_sep>diff=len(batch.shape)-len(perm_idx.shape)<line_sep>extra_dims=[1]<times>diff<line_sep>perm_idx=perm_idx.view([-1]+extra_dims)<line_sep><return>torch.scatter(torch.zeros_like(batch) 0 perm_idx.expand_as(batch) batch)<block_end><def_stmt>infer_lengths tensor dim=1<block_start>"""Infer the lengths of an input based on the idea the Offsets.PAD was used as the padding token. 
:param tensor: The data to infer the length of, should be either [B, T] or [T, B] :param dim: The dimension which contains the sequential signal :returns: A Tensor of shape `[B]` that has the lengths for example item in the batch """<if_stmt>len(tensor.shape)<ne>2<block_start><raise>ValueError(f"infer_lengths only works with tensors wit two dims right now, got {len(tensor.shape)}")<block_end>offsets=torch.arange(1 tensor.shape[dim]+1 device=tensor.device dtype=tensor.dtype).unsqueeze(1-dim)<line_sep>non_pad_loc=(tensor<ne>Offsets.PAD).to(tensor.dtype)<line_sep><return>torch.argmax(non_pad_loc<times>offsets dim=dim)+1<block_end><def_stmt>tensor_and_lengths inputs<arrow>Tuple[torch.Tensor Optional[torch.Tensor]]<block_start>"""Return either the unpacked inputs (2), or a `Tuple` of the input with None TODO: this function should probably be changed to always return the lengths second. To do this, we just need a sentinel value, e.g. <PAD> (0). The problem with doing this is that it might be possible to generate <PAD> in the middle of the tensor which would make that length invalid. 
:param inputs: Either a sequence of the `(tensor, length)` or just the `tensor` :return: A `Tuple` of `(tensor, length)` or `(tensor, None)` """<if_stmt>isinstance(inputs (list tuple))<block_start>in_tensor,lengths=inputs<block_end><else_stmt><block_start>in_tensor=inputs<line_sep>lengths=<none><block_end><return>in_tensor lengths<block_end><class_stmt>VariationalDropout(nn.Module)<block_start>"""Inverted dropout that applies the same mask at each time step."""<def_stmt>__init__ self pdrop:float=0.5 batch_first:bool=<false><block_start>"""Variational Dropout :param pdrop: the percentage to drop """<line_sep>super().__init__()<line_sep>self.pdrop=pdrop<line_sep>self.batch_first=batch_first<block_end><def_stmt>extra_repr self<block_start><return>"p=%.1f"%self.pdrop<block_end><def_stmt>forward self input:torch.Tensor<arrow>torch.Tensor<block_start><if_stmt><not>self.training<block_start><return>input<block_end># Create a mask that covers a single time step <if_stmt>self.batch_first<block_start>dim0=input.size(0)<line_sep>dim1=1<block_end><else_stmt><block_start>dim0=1<line_sep>dim1=input.size(1)<block_end>mask=torch.zeros(dim0 dim1 input.size(2)).bernoulli_(1-self.pdrop).to(input.device)<line_sep>mask=mask/self.pdrop<line_sep># Broadcast the mask over the sequence <return>mask<times>input<block_end><block_end><class_stmt>SequenceLoss(nn.Module)<block_start>"""Computes the loss over a sequence"""<def_stmt>__init__ self LossFn:nn.Module=nn.NLLLoss avg:str="token"<block_start>"""A class that applies a Loss function to sequence via the folding trick. 
:param LossFn: A loss function to apply (defaults to `nn.NLLLoss`) :param avg: A divisor to apply, valid values are `token` and `batch` """<line_sep>super().__init__()<line_sep>self.avg=avg<if_stmt>avg<eq>"token"<block_start>self.crit=LossFn(ignore_index=Offsets.PAD reduction="mean")<line_sep>self._norm=self._no_norm<block_end><else_stmt><block_start>self.crit=LossFn(ignore_index=Offsets.PAD reduction="sum")<line_sep>self._norm=self._batch_norm<block_end><block_end><def_stmt>_batch_norm self loss inputs<block_start><return>loss/inputs.size()[0]<block_end><def_stmt>_no_norm self loss inputs<block_start><return>loss<block_end><def_stmt>forward self inputs:torch.Tensor targets:torch.Tensor<arrow>torch.Tensor<block_start>"""Evaluate some loss over a sequence. :param inputs: torch.FloatTensor, [B, .., C] The scores from the model. Batch First :param targets: torch.LongTensor, The labels. :returns: torch.FloatTensor, The loss. """<line_sep>total_sz=targets.nelement()<line_sep>loss=self.crit(inputs.view(total_sz -1) targets.view(total_sz))<line_sep><return>self._norm(loss inputs)<block_end><def_stmt>extra_repr self<block_start><return>f"reduction={self.avg}"<block_end><block_end><class_stmt>LabelSmoothingLoss(nn.Module)<block_start><def_stmt>__init__ self label_smoothing ignore_index=0 reduction="none"<block_start>"""Use Label smoothing from `Szegedy et. al., 2015`_ to temper model confidence. Implements add-gamma smoothing where the probability mass of the gold label distribution is smoothed across classes. This implementation is based on `OpenNMT-py`_ but has been adapted to not require the vocabulary size up front. .. _Szegedy et. al., 2015: https://arxiv.org/abs/1512.00567 .. 
class LabelSmoothingLoss(nn.Module):
    def __init__(self, label_smoothing, ignore_index=0, reduction="none"):
        """Use Label smoothing from `Szegedy et. al., 2015`_ to temper model confidence.

        Implements add-gamma smoothing where the probability mass of the gold label distribution
        is smoothed across classes.

        This implementation is based on `OpenNMT-py`_ but has been adapted to not require the
        vocabulary size up front.

        .. _Szegedy et. al., 2015: https://arxiv.org/abs/1512.00567
        .. _OpenNMT-py: https://github.com/OpenNMT/OpenNMT-py/blob/938a4f561b07f4d468647823fab761cfb51f21da/onmt/utils/loss.py#L194

        :param label_smoothing: The amount of probability mass moved off the gold label, in (0, 1]
        :param ignore_index: The padding index whose rows contribute no loss
        :param reduction: A `kl_div`-style reduction (`mean` is mapped to `batchmean`)
        :raises ValueError: If `label_smoothing` is outside (0, 1]
        """
        if not (0.0 < label_smoothing <= 1.0):
            raise ValueError(f"`label_smoothing` must be between 0.0 and 1.0, got {label_smoothing}")
        super().__init__()
        self.ignore_index = ignore_index
        self.label_smoothing = label_smoothing
        self.confidence = 1.0 - label_smoothing
        # `kl_div` calls its batch-averaged reduction "batchmean"
        self.reduction = reduction if reduction != "mean" else "batchmean"

    def forward(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        :param output: The model outputs (log-probabilities), [B, V]
        :param target: The target labels, [B]
        """
        B, V = output.size()
        # Spread the smoothing mass over every class except the gold label and
        # the pad index, hence the `V - 2` divisor.
        # BUGFIX: match dtype/device of the model output so this also works on
        # GPU and half-precision inputs (the old code always built on CPU fp32).
        smoothed = torch.full((B, V), self.label_smoothing / (V - 2), dtype=output.dtype, device=output.device)
        smoothed[:, self.ignore_index] = 0
        smoothed = torch.scatter(smoothed, 1, target.unsqueeze(1), self.confidence)
        # Rows whose target is the pad index contribute nothing to the loss
        smoothed = smoothed.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
        return F.kl_div(output, smoothed, reduction=self.reduction)

    def extra_repr(self):
        return f"label_smoothing={self.label_smoothing}"
"""<line_sep>super().__init__()<line_sep>self.batch_first=batch_first<line_sep>self.reduction_dim=1<if>self.batch_first<else>0<line_sep>self.output_dim=outsz<line_sep>self.requires_length=<true><block_end><def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>"""Apply mean pooling on the valid inputs :param inputs: A tuple of `(input, lengths)` :return: Pooled output """<line_sep>tensor,lengths=tensor_and_lengths(inputs)<line_sep># Regardless of whether the input is `[B, T, H]` or `[T, B, H]` the shape after # the sum is `[B, H]` so the lengths (of shape `[B]`) should be unsqueezed to # `[B, 1]` in order to broadcast <return>torch.sum(tensor self.reduction_dim keepdim=<false>)/torch.unsqueeze(lengths -1).to(tensor.dtype).to(tensor.device)<block_end><def_stmt>extra_repr self<block_start><return>f"batch_first={self.batch_first}"<block_end><block_end><class_stmt>MaxPool1D(nn.Module)<block_start>"""Do a max-pooling operation with or without a length given """<def_stmt>__init__ self outsz batch_first=<true><block_start>super().__init__()<line_sep>self.batch_first=batch_first<line_sep>self.reduction_dim=1<if>self.batch_first<else>0<line_sep>self.output_dim=outsz<block_end><def_stmt>forward self inputs:Union[torch.Tensor Tuple[torch.Tensor torch.Tensor]]<arrow>torch.Tensor<block_start>"""If we are given a tuple as input, we will use the length, otherwise we will do an operation without masking :param inputs: either a tuple of `(input, lengths)` or a tensor `input` :return: A pooled tensor """<line_sep>tensor,lengths=tensor_and_lengths(inputs)<if_stmt>lengths<is><not><none># If tensor = `[B, T, H]` # mask = `[B, T, 1]` # If tensor = `[T, B, H]` # mask = `[T, B, 1]` # So it will mask all the values in H past the right length <block_start>mask=sequence_mask(lengths).to(tensor.device)<line_sep>mask=mask<if>self.batch_first<else>bth2tbh(mask)<line_sep># Fill masked with very negative so it never gets selected 
class MaxPool1D(nn.Module):
    """Do a max-pooling operation with or without a length given
    """

    def __init__(self, outsz, batch_first=True):
        super().__init__()
        self.batch_first = batch_first
        # Pool over time: dim 1 for `[B, T, H]`, dim 0 for `[T, B, H]`
        self.reduction_dim = 1 if self.batch_first else 0
        self.output_dim = outsz

    def forward(self, inputs: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]) -> torch.Tensor:
        """If we are given a tuple as input, we will use the length, otherwise we will do an operation without masking

        :param inputs: either a tuple of `(input, lengths)` or a tensor `input`
        :return: A pooled tensor
        """
        tensor, lengths = tensor_and_lengths(inputs)
        if lengths is not None:
            # The mask is `[B, T]` (or `[T, B]` time-first); positions past each
            # valid length are pushed very negative so max never selects them
            mask = sequence_mask(lengths).to(tensor.device)
            if not self.batch_first:
                mask = bth2tbh(mask)
            tensor = tensor.masked_fill(mask.unsqueeze(-1) == MASK_FALSE, -1e4)
        pooled, _ = torch.max(tensor, self.reduction_dim, keepdim=False)
        return pooled

    def extra_repr(self) -> str:
        return f"batch_first={self.batch_first}"


# Torch only added a GELU module in 1.4.0, shim it for older versions
class GeLU(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.nn.functional.gelu(x)


# Code taken from: https://github.com/huggingface/transformers/blob/766d4bf7920213bdd8a8afb42a72719190124568/src/transformers/activations.py#L27
class Gpt2GELU(nn.Module):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """

    def forward(self, input):
        # The tanh approximation of the Gaussian error linear unit
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
"""<if_stmt>name<is><none><or>name<eq>"ident"<block_start><return>nn.Identity()<block_end><if_stmt>name<eq>"tanh"<block_start><return>nn.Tanh()<block_end><if_stmt>name<eq>"gelu"<block_start><return>GeLU()<block_end><if_stmt>name<eq>"hardtanh"<block_start><return>nn.Hardtanh()<block_end><if_stmt>name<eq>"leaky_relu"<block_start><return>nn.LeakyReLU()<block_end><if_stmt>name<eq>"prelu"<block_start><return>nn.PReLU()<block_end><if_stmt>name<eq>"sigmoid"<block_start><return>nn.Sigmoid()<block_end><if_stmt>name<eq>"log_sigmoid"<block_start><return>nn.LogSigmoid()<block_end><if_stmt>name<eq>"log_softmax"<block_start><return>nn.LogSoftmax(dim=-1)<block_end><if_stmt>name<eq>"softmax"<block_start><return>nn.Softmax(dim=-1)<block_end><if_stmt>name<eq>"gpt2_gelu"<block_start><return>Gpt2GELU()<block_end><return>nn.ReLU()<block_end><def_stmt>_cat_dir h:torch.Tensor<arrow>torch.Tensor<block_start>"""Concat forward and backword state vectors. The shape of the hidden is `[#layers * #dirs, B, H]`. The docs say you can separate directions with `h.view(#l, #dirs, B, H)` with the forward dir being index 0 and backwards dir being 1. This means that before separating with the view the forward dir are the even indices in the first dim while the backwards dirs are the odd ones. Here we select the even and odd values and concatenate them :param h: The hidden shape as it comes back from PyTorch modules """<line_sep><return>torch.cat([h[0:h.size(0):2] h[1:h.size(0):2]] dim=-1)<block_end><def_stmt>concat_state_dirs state<block_start>"""Convert the bidirectional out of an RNN so the forward and backward values are a single vector."""<if_stmt>isinstance(state tuple)<block_start><return>tuple(_cat_dir(h)<for>h state)<block_end><return>_cat_dir(state)<block_end><class_stmt>Conv1DSame(nn.Module)<block_start>"""Perform a 1D convolution with output size same as input size To make this operation work as expected, we cannot just use `padding=kernel_size//2` inside of the convolution operation. 
class Conv1DSame(nn.Module):
    """Perform a 1D convolution with output size same as input size

    To make this operation work as expected, we cannot just use `padding=kernel_size//2` inside
    of the convolution operation.  Instead, we zeropad the input using the `ConstantPad1d` module
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        bias: bool = True,
        groups: int = 1,
        unif: float = 0.0,
        initializer: Optional[str] = None,
        activation: Optional[str] = None,
    ):
        """Create a 1D conv to produce the same output size as input

        :param in_channels: The number of input feature maps
        :param out_channels: The number of output feature maps
        :param kernel_size: The kernel size
        :param bias: Is bias on?
        :param groups: Number of conv groups
        :param unif: Uniform weight init range (0 disables)
        :param initializer: Optional weight init scheme by name
        :param activation: Optional activation by name
        """
        super().__init__()
        # Even kernels need asymmetric padding to keep the temporal length fixed
        end_pad = kernel_size // 2
        start_pad = end_pad - (1 if kernel_size % 2 == 0 else 0)
        self.conv = nn.Sequential(
            nn.ConstantPad1d((start_pad, end_pad), 0.),
            pytorch_conv1d(in_channels, out_channels, kernel_size, unif=unif, initializer=initializer, bias=bias, groups=groups),
            get_activation(activation),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Do convolution1d on an input tensor, `[B, C, T]`

        :param x: The input tensor of shape `[B, C, T]`
        :return: The output tensor of shape `[B, H, T]`
        """
        return self.conv(x)
"""<def_stmt>__init__ self insz:int outsz:int filtsz:int pdrop:float=0.0 activation:str="relu" bias:bool=<true> groups:int=1 hidden_last=<true><block_start>"""Construct the encoder with optional dropout, given activation, and orientation :param insz: The number of input feature maps :param outsz: The number of output feature maps (or hidden size) :param filtsz: The kernel size :param pdrop: The amount of dropout to apply, this defaults to 0 :param activation: The activation function by name, defaults to `relu` :param bias: Use bias? :param groups: How many conv groups. Defaults to 1 :param hidden_last: PyTorch only! If `True` the orientatiation is `[B, T, H]`, o.w. `[B, H, T]` expected """<line_sep>super().__init__()<line_sep>self.output_dim=outsz<line_sep>conv=Conv1DSame(insz outsz filtsz bias=bias groups=groups)<line_sep>act=get_activation(activation)<line_sep>dropout=nn.Dropout(pdrop)<if_stmt>hidden_last<block_start>self.conv=nn.Sequential(BTH2BHT() conv act dropout BHT2BTH())<block_end><else_stmt><block_start>self.conv=nn.Sequential(conv act dropout)<block_end><block_end><def_stmt>forward self input:torch.Tensor<arrow>torch.Tensor<block_start><return>self.conv(input)<block_end><block_end><class_stmt>ConvEncoderStack(nn.Module)<block_start>"""Create a stack of convolutional encoders with residual connections between, using the `ConvEncoder` underneath This creates an encoder stack of convolutions, finally returning the last temporal output. Each layer uses zero-padding which causes the output of the convolution at each layer to be the same length. 
class ConvEncoderStack(nn.Module):
    """Create a stack of convolutional encoders with residual connections between, using the `ConvEncoder` underneath

    This creates an encoder stack of convolutions, finally returning the last temporal output.  Each layer uses
    zero-padding which causes the output of the convolution at each layer to be the same length.

    As in the `ConvEncoder` we support input tensor shapes of `[B, C, T]` or `[B, T, C]` depending on the constructor
    initialization, and transpose underneath the input and output of the stack if the orientation is defaulted to
    `[B, T, C]`
    """

    def __init__(self, insz: int, outsz: int, filtsz: int, nlayers: int = 1, pdrop: float = 0.0, activation: str = "relu", bias: bool = True, groups: int = 1, hidden_last=True):
        """Construct the encoder stack

        :param insz: The input number of feature maps
        :param outsz: The output number of feature maps
        :param filtsz: The kernel size
        :param nlayers: The number of layers in the stack (defaults to a single layer)
        :param pdrop: The amount of dropout to apply (defaults to `0`)
        :param activation: The activation function to use as a string, defaults to `relu`
        :param bias: Use bias?
        :param groups: How many conv groups. Defaults to 1
        :param hidden_last: PyTorch only! If `True` the orientation is `[B, T, H]`, o.w. `[B, H, T]` expected
        """
        super().__init__()
        # Every inner encoder runs channels-first; only the boundary of the
        # stack transposes when the caller uses `[B, T, H]`
        first = ConvEncoder(insz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False)
        if hidden_last:
            first = nn.Sequential(BTH2BHT(), first)
        template = ResidualBlock(ConvEncoder(outsz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False))
        stack = [first] + [copy.deepcopy(template) for _ in range(nlayers - 1)]
        if hidden_last:
            stack.append(BHT2BTH())
        self.layers = nn.ModuleList(stack)
        self.output_dim = outsz

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply a stack of 1D convolutions with residual connections between them

        :param input: A tensor of shape `[B, T, C]` or `[B, C, T]` depending on value of `hidden_last`
        :return: A tensor of shape `[B, T, H]` or `[B, H, T]` depending on the value of `hidden_last`
        """
        x = input
        for layer in self.layers:
            x = layer(x)
        return x


def bth2bht(t: torch.Tensor) -> torch.Tensor:
    """Transpose the 2nd and 3rd dim of a tensor"""
    return t.transpose(1, 2).contiguous()


class BTH2BHT(nn.Module):
    """Utility layer to convert from `[B, T, H]` to `[B, H, T]`
    """

    def __init__(self):
        super().__init__()

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        return bth2bht(t)


def tbh2bht(t: torch.Tensor) -> torch.Tensor:
    """Permute the dimensions, first goes to third, second goes to first, last moves to second"""
    return t.permute(1, 2, 0).contiguous()


class TBH2BHT(nn.Module):
    """Utility layer to convert from `[T, B, H]` to `[B, H, T]`
    """

    def __init__(self):
        super().__init__()

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        return tbh2bht(t)


def tbh2bth(t: torch.Tensor) -> torch.Tensor:
    """Transpose the first 2 dims"""
    return t.transpose(0, 1).contiguous()


class TBH2BTH(nn.Module):
    """Utility layer to convert from `[T, B, H]` to `[B, T, H]`
    """

    def __init__(self):
        super().__init__()

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        return tbh2bth(t)


def bth2tbh(t: torch.Tensor) -> torch.Tensor:
    """Transpose the first 2 dims"""
    return t.transpose(0, 1).contiguous()


class BTH2TBH(nn.Module):
    """Utility layer to convert from `[B, T, H]` to `[T, B, H]`
    """

    def __init__(self):
        super().__init__()

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        return bth2tbh(t)
def bht2bth(t: torch.Tensor) -> torch.Tensor:
    """Swap the channel and time dims: `[B, H, T]` -> `[B, T, H]`"""
    return t.transpose(2, 1).contiguous()


class BHT2BTH(nn.Module):
    """Utility layer to convert from `[B, H, T]` to `[B, T, H]`
    """

    def __init__(self):
        super().__init__()

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        return bht2bth(t)
class ParallelConv(nn.Module):
    """Layer of parallel convolutions with varying filter sizes followed by max over time pooling

    This module takes an input tensor of any orientation based on its constructor, and pools its output
    to shape `[B, H]`, where `H` is `outsz * len(filtsz)`
    """

    def __init__(self, insz: int, outsz: int, filtsz: List[int], activation: str = "relu", input_fmt: str = "bth"):
        """
        Constructor for a parallel convolution from any orientation tensor input

        :param insz: The number of input feature maps
        :param outsz: The number of output feature maps (a single int shared by all filters, or one per filter)
        :param filtsz: The kernel size as a list of parallel filters to apply, e.g. `[3, 4, 5]`
        :param activation: An activation function by name to apply
        :param input_fmt: A string for the orientation.  Valid values are `bth` or `btc` meaning hidden units last,
        `bht` or `bct` meaning the temporal dim last or `tbh` or `tbc` meaning the hidden units last and the temporal dim
        first
        """
        super().__init__()
        self.requires_length = False
        self.input_fmt = input_fmt.lower()
        # Allow a single output size to be shared by every filter width.
        # BUGFIX(idiom): use isinstance so int subclasses (e.g. numpy ints,
        # IntEnum) are accepted too; `type(x) == int` rejected them.
        if isinstance(outsz, int):
            outsz_filts = len(filtsz) * [outsz]
        else:
            outsz_filts = outsz
        self.output_dim = sum(outsz_filts)
        convs = []
        for i, fsz in enumerate(filtsz):
            if fsz % 2 == 0:
                # Even kernels need asymmetric padding to preserve T
                conv = Conv1DSame(insz, outsz_filts[i], fsz)
            else:
                pad = fsz // 2
                conv = nn.Conv1d(insz, outsz_filts[i], fsz, padding=pad)
            convs.append(nn.Sequential(conv, get_activation(activation)))
        # Register in a ModuleList so the parameters are managed correctly
        self.convs = nn.ModuleList(convs)

    def transform_input(self, t: torch.Tensor) -> torch.Tensor:
        """Reorient the input to `[B, C, T]` based on the constructor's `input_fmt`."""
        if self.input_fmt == "bth" or self.input_fmt == "btc":
            return bth2bht(t)
        elif self.input_fmt == "tbh" or self.input_fmt == "tbc":
            return tbh2bht(t)
        else:
            return t

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Transform the input to `[B, C, T]` from any orientation and perform parallel 1D convs and max over time pool

        :param inputs: An input tensor of any format specified in the constructor
        :return: A `[B, H]` tensor representing the pooled outputs
        """
        mots = []
        input_bct = self.transform_input(inputs)
        for conv in self.convs:
            # In Conv1d, data BxCxT, max over time
            conv_out = conv(input_bct)
            mot, _ = conv_out.max(2)
            mots.append(mot)
        mots = torch.cat(mots, 1)
        return mots
class Highway(nn.Module):
    """Highway layer as defined in https://arxiv.org/abs/1505.00387
    """

    def __init__(self, input_size: int, **kwargs):
        """Highway layer constructor

        :param input_size: The input hidden size
        :param kwargs: Ignored, accepted for interface compatibility
        """
        super().__init__()
        self.proj = nn.Linear(input_size, input_size)
        self.transform = nn.Linear(input_size, input_size)
        # Bias the transform gate negative so the layer starts close to identity
        self.transform.bias.data.fill_(-2.0)
        self.output_dim = input_size

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Take a tensor in and produce the highway layer output

        :param input: Input tensor
        :return: output tensor
        """
        proj_result = torch.relu(self.proj(input))
        proj_gate = torch.sigmoid(self.transform(input))
        gated = (proj_gate * proj_result) + ((1 - proj_gate) * input)
        return gated


def pytorch_linear(in_sz: int, out_sz: int, unif: float = 0, initializer: str = None, bias: bool = True):
    """Utility function that wraps a linear (AKA dense) layer creation, with options for weight init and bias

    :param in_sz: The number of input features
    :param out_sz: The number of output features
    :param unif: If positive, init weights uniformly in `[-unif, unif]` (takes precedence over `initializer`)
    :param initializer: One of `ortho`, `he`/`kaiming`; anything else gets xavier/glorot
    :param bias: Include a bias term (zero-initialized)?
    :return: The initialized `nn.Linear`
    """
    l = nn.Linear(in_sz, out_sz, bias=bias)
    if unif > 0:
        l.weight.data.uniform_(-unif, unif)
    elif initializer == "ortho":
        # BUGFIX: use the in-place `_` variants; the un-suffixed
        # `nn.init.orthogonal`/`kaiming_uniform` names are deprecated
        nn.init.orthogonal_(l.weight)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(l.weight)
    else:
        nn.init.xavier_uniform_(l.weight)
    if bias:
        l.bias.data.zero_()
    return l
class StackedLSTMCell(nn.Module):
    """A stacked LSTM cells applied at a timestep
    """

    def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        for i in range(num_layers):
            self.layers.append(nn.LSTMCell(input_size=input_size, hidden_size=rnn_size, bias=False))
            # Subsequent layers consume the previous layer's hidden state
            input_size = rnn_size

    def forward(self, input: torch.Tensor, hidden: torch.Tensor):
        """Apply a stack of LSTMs

        :param input: The input to the first LSTM `[B, H]`
        :param hidden: The previous `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
        :return: The output and hidden `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
        """
        h_0, c_0 = hidden
        hs, cs = [], []
        for i, layer in enumerate(self.layers):
            h_i, c_i = layer(input, (h_0[i], c_0[i]))
            input = h_i
            # Inter-layer dropout only; the top layer's output is untouched
            if i != self.num_layers - 1:
                input = self.dropout(input)
            hs.append(h_i)
            cs.append(c_i)

        hs = torch.stack(hs)
        cs = torch.stack(cs)
        return input, (hs, cs)


class StackedGRUCell(nn.Module):
    """A stacked GRU cells applied at a timestep
    """

    def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        for i in range(num_layers):
            self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
            # Subsequent layers consume the previous layer's hidden state
            input_size = rnn_size

    def forward(self, input: torch.Tensor, hidden: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply a stack of GRUs

        :param input: The input to the first GRU `[B, H]`
        :param hidden: The previous `h` where `h=(h_0, h_1,..)`
        :return: The output and hidden `h` where `h=(h_0, h_1,..)`
        """
        h_0 = hidden
        hs = []
        for i, layer in enumerate(self.layers):
            h_i = layer(input, h_0[i])
            input = h_i
            # BUGFIX: the loop index runs 0..num_layers-1, so the old test
            # `i != self.num_layers` was always true and dropout was wrongly
            # applied to the top layer's output too (unlike StackedLSTMCell)
            if i != self.num_layers - 1:
                input = self.dropout(input)
            hs.append(h_i)

        hs = torch.stack(hs)
        return input, hs
class Dense(nn.Module):
    """Dense (Linear) layer with optional activation given

    This module is the equivalent of the tf.keras.layer.Dense, module with optional activations applied
    """

    def __init__(
        self,
        insz: int,
        outsz: int,
        activation: Optional[str] = None,
        unif: float = 0,
        initializer: Optional[str] = None,
    ):
        """Constructor for "dense" or "linear" layer, with optional activation applied

        :param insz: The number of hidden units in the input
        :param outsz: The number of hidden units in the output
        :param activation: The activation function by name, defaults to `None`, meaning no activation is applied
        :param unif: An optional initialization value which can set the linear weights.  If given, biases will init to 0
        :param initializer: An initialization scheme by string name: `ortho`, `kaiming` or `he`, `xavier` or `glorot`
        """
        super().__init__()
        self.layer = pytorch_linear(insz, outsz, unif, initializer)
        self.activation = get_activation(activation)
        self.output_dim = outsz

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Run a linear projection over the input, followed by an optional activation given by constructor

        :param input: the input tensor
        :return: the transformed output
        """
        projected = self.layer(input)
        return self.activation(projected)
class WeightTieDense(nn.Module):
    """Do weight tying from the input parameter

    This module never copies the weight pointer, it lazily accesses to allow the tied variable to reset its
    parameters after initialization.  This is helpful for cases where we have LMs and are reloading them after
    they have been initially created
    """

    def __init__(self, tie: nn.Module, bias=False):
        """
        :param tie: The module whose weight to tie against (an embedding holder, or a Linear-like module)
        :param bias: Add a learnable zero-initialized bias?
        """
        super().__init__()
        self.tie = tie
        self.transform = self._get_transform(tie)
        if bias:
            # BUGFIX: the bias needs one entry per *output* feature, i.e. the
            # leading dim of the (possibly transposed) weight.  The old code
            # applied `self.transform` to the raw int `shape[0]`, which crashed
            # in the transposed (non-embedding) case.
            bias = torch.nn.Parameter(torch.zeros(self.transform(self.weight).shape[0]))
        else:
            bias = None
        self.register_parameter("bias", bias)

    def _get_transform(self, tie: nn.Module):
        # Embedding weights are already `[V, H]` (output first); Linear-like
        # weights must be transposed so output features lead
        emb = getattr(tie, "embeddings", None)
        if emb is not None:
            return self._identity
        return self._transpose

    @property
    def weight(self):
        # Resolve lazily so the tied module can re-initialize after construction
        emb = getattr(self.tie, "embeddings", None)
        if emb is not None:
            return getattr(emb, "weight")
        return getattr(self.tie, "weight")

    def _identity(self, x: torch.Tensor) -> torch.Tensor:
        return x

    def _transpose(self, x: torch.Tensor) -> torch.Tensor:
        return x.transpose(0, 1).contiguous()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.linear(input, self.transform(self.weight), self.bias)
"""<line_sep><return>input+self.layer(input)<block_end><block_end><class_stmt>SkipConnection(ResidualBlock)<block_start>"""Subclass of ResidualBlock(Dense) with an activation function given """<def_stmt>__init__ self input_size:int activation:str="relu"<block_start>"""Create a `SkipConnection` :param input_size: The input dimension size :param activation: A string activation name """<line_sep>super().__init__(<none>)<line_sep>self.layer=Dense(input_size input_size activation=activation)<line_sep>self.output_dim=input_size<block_end><block_end><def_stmt>rnn_cell insz:int hsz:int rnntype:str nlayers:int dropout:float<block_start>"""This is a wrapper function around a stacked RNN cell :param insz: The input dimensions :param hsz: The hidden dimensions :param rnntype: An RNN type `gru` or `lstm` :param nlayers: The number of layers to stack :param dropout: The amount of dropout :return: """<if_stmt>rnntype<eq>"gru"<block_start>rnn=StackedGRUCell(nlayers insz hsz dropout)<block_end><else_stmt><block_start>rnn=StackedLSTMCell(nlayers insz hsz dropout)<block_end><return>rnn<block_end><def_stmt>pytorch_lstm insz:int hsz:int rnntype:str nlayers:int dropout:float unif:float=0 batch_first:bool=<false> initializer:str=<none> <arrow>torch.nn.LSTM<block_start>"""Wrapper around `torch.nn.LSTM`, mainly for weight initialization options :param insz: The input dimension :param hsz: The number of hidden units :param rnntype: A string description of the type of LSTM: `bi?lstm` or `lstm` :param nlayers: The number of layers :param dropout: How much dropout to apply :param unif: if uniform initialization, what range? 
def pytorch_lstm(
    insz: int,
    hsz: int,
    rnntype: str,
    nlayers: int,
    dropout: float,
    unif: float = 0,
    batch_first: bool = False,
    initializer: str = None,
) -> torch.nn.LSTM:
    """Wrapper around `torch.nn.LSTM`, mainly for weight initialization options

    :param insz: The input dimension
    :param hsz: The number of hidden units
    :param rnntype: A string description of the type of LSTM: `bi?lstm` or `lstm`
    :param nlayers: The number of layers
    :param dropout: How much dropout to apply
    :param unif: if uniform initialization, what range?
    :param batch_first: Should we do the RNN batch first or time first
    :param initializer: An optional string representing a style of initialization `ortho`, `he`/`kaiming`, `xavier`/`glorot`
    :return: An LSTM
    """
    if nlayers == 1:
        # Inter-layer dropout is meaningless with a single layer (and torch warns)
        dropout = 0.0
    ndir = 2 if rnntype.startswith("b") else 1
    # Split the hidden size across directions so a bi-LSTM still outputs `hsz`
    layer_hsz = hsz // ndir
    rnn = torch.nn.LSTM(insz, layer_hsz, nlayers, dropout=dropout, bidirectional=ndir > 1, batch_first=batch_first)
    if initializer == "ortho":
        # BUGFIX: use the in-place `_` variants; the un-suffixed
        # `nn.init.orthogonal`/`kaiming_uniform` names are deprecated
        nn.init.orthogonal_(rnn.weight_hh_l0)
        nn.init.orthogonal_(rnn.weight_ih_l0)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(rnn.weight_hh_l0)
        nn.init.kaiming_uniform_(rnn.weight_ih_l0)
    elif unif > 0:
        for weight in rnn.parameters():
            weight.data.uniform_(-unif, unif)
    else:
        nn.init.xavier_uniform_(rnn.weight_hh_l0)
        nn.init.xavier_uniform_(rnn.weight_ih_l0)
    return rnn
class LSTMEncoderBase(nn.Module):
    """The LSTM encoder is a base for a set of encoders producing various outputs.

    All LSTM encoders inheriting this class will trim the input to the max length given in the batch.  For example,
    if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
    be length `S` (or more precisely, `[B, S, H]`)

    *PyTorch Note*: In PyTorch, its more common for the input shape to be temporal length first (`[T, B, H]`) and
    this is the PyTorch default.  There is an extra parameter in all of these models called `batch_first` which
    controls this.  Currently, the default is time first (`batch_first=False`), which differs from TensorFlow.
    To match the TF impl, set `batch_first=True`.

    *PyTorch Note*: Most `LSTMEncoder` variants just define the `forward`.  This module cannot provide the same
    utility as the TensorFlow `LSTMEncoder` base right now, because the JIT isnt handling subclassing of forward
    properly.
    """

    def __init__(
        self,
        insz: int,
        hsz: int,
        nlayers: int,
        pdrop: float = 0.0,
        requires_length: bool = True,
        batch_first: bool = False,
        unif: float = 0,
        initializer: str = None,
        **kwargs,
    ):
        """Produce a stack of LSTMs with dropout performed on all but the last layer.

        :param insz: The size of the input
        :param hsz: The number of hidden units per LSTM
        :param nlayers: The number of layers of LSTMs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
        :param batch_first: PyTorch only! Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
        :param unif: PyTorch only! Initialization parameters for RNN
        :param initializer: PyTorch only! A string describing optional initialization type for RNN
        """
        super().__init__()
        self.requires_length = requires_length
        self.batch_first = batch_first
        self.nlayers = nlayers
        if nlayers == 1:
            # Inter-layer dropout is a no-op with a single layer (and torch warns)
            pdrop = 0.0
        self.rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
        if initializer == "ortho":
            # BUGFIX: use the in-place `_` variants; the un-suffixed
            # `nn.init.orthogonal`/`kaiming_uniform` names are deprecated
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
        elif unif > 0:
            for weight in self.rnn.parameters():
                weight.data.uniform_(-unif, unif)
        else:
            nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
            nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
        self.output_dim = hsz

    def extract_top_state(self, state: Tuple[torch.Tensor, torch.Tensor]) -> List[torch.Tensor]:
        """Get a view of the top state of shape `[B, H]`

        :param state: The `(h, c)` state tuple, each `[L, B, H]`
        :return: A list containing the top layer's `h` and `c`, each `[B, H]`
        """
        # Select the topmost layer with -1; the only direction is forward (select with 0)
        top = []
        for s in state:
            top.append(s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0])
        return top
"""<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of LSTMs The value `S` here is defined as `max(lengths)`, `S <= T` :param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]` :return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first` """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output<block_end><block_end><class_stmt>LSTMEncoderWithState(nn.Module)<block_start>"""LSTM encoder producing the hidden state and the output, where the input doesnt require any padding PyTorch note: This type of encoder doesnt inherit the `LSTMEncoderWithState` base """<def_stmt>__init__ self insz:int hsz:int nlayers:int pdrop:float=0.0 batch_first:bool=<false> unif:float=0 initializer:str=<none> **kwargs <block_start>""" :param insz: The size of the input :param hsz: The number of hidden units per LSTM :param nlayers: The number of layers of LSTMs to stack :param pdrop: The probability of dropping a unit value during dropout, defaults to 0 :param batch_first: PyTorch only! do batch first or time-first input? Defaults to `False` (differs from TF!) :param unif: PyTorch only! Initialization parameters for RNN :param initializer: PyTorch only! 
A string describing optional initialization type for RNN """<line_sep>super().__init__()<line_sep>self.requires_length=<false><line_sep>self.requires_state=<true><line_sep>self.batch_first=batch_first<line_sep>self.nlayers=nlayers<if_stmt>nlayers<eq>1<block_start>pdrop=0.0<block_end>self.rnn=torch.nn.LSTM(insz hsz nlayers dropout=pdrop bidirectional=<false> batch_first=batch_first)<if_stmt>initializer<eq>"ortho"<block_start>nn.init.orthogonal(self.rnn.weight_hh_l0)<line_sep>nn.init.orthogonal(self.rnn.weight_ih_l0)<block_end><elif_stmt>initializer<eq>"he"<or>initializer<eq>"kaiming"<block_start>nn.init.kaiming_uniform(self.rnn.weight_hh_l0)<line_sep>nn.init.kaiming_uniform(self.rnn.weight_ih_l0)<block_end><elif_stmt>unif<g>0<block_start><for_stmt>weight self.rnn.parameters()<block_start>weight.data.uniform_(-unif unif)<block_end><block_end><else_stmt><block_start>nn.init.xavier_uniform_(self.rnn.weight_hh_l0)<line_sep>nn.init.xavier_uniform_(self.rnn.weight_ih_l0)<block_end>self.output_dim=hsz<block_end><def_stmt>forward self input_and_prev_h:Tuple[torch.Tensor torch.Tensor]<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>""" :param input_and_prev_h: The input at this timestep and the previous hidden unit or `None` :return: Raw `torch.nn.LSTM` output """<line_sep>inputs,hidden=input_and_prev_h<line_sep>output,hidden=self.rnn(inputs hidden)<line_sep><return>output hidden<block_end><block_end>##concat_state_dirs(hidden) <class_stmt>LSTMEncoderAll(LSTMEncoderBase)<block_start>"""LSTM encoder that passes along the full output and hidden states for each layer Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence, and a tuple of hidden vector `[L, B, H]` and context vector `[L, B, H]`, respectively *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. 
Also note that in PyTorch, this defaults to `True` """<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>""" :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor `[B, S, H]` or `[B, H, S]` , and tuple of hidden `[L, B, H]` and context `[L, B, H]` """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output hidden<block_end><block_end><class_stmt>LSTMEncoderHidden(LSTMEncoderBase)<block_start>"""LSTM encoder that returns the top hidden state Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and returns a hidden unit tensor of shape `[B, H]` *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. Also note that in PyTorch, this defaults to `True` """<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>""" :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor of shape `[B, H]` representing the last RNNs hidden state """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>self.extract_top_state(hidden)[0]<block_end><block_end># TODO: this module only exists in pytorch. Do we eliminate it or put it in both? 
class LSTMEncoderSequenceHiddenContext(LSTMEncoderBase):
    """LSTM encoder returning both the transduced sequence and the top `(h, c)` state views."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param inputs: A tuple of the sequence tensor and its lengths `[B]`
        :return: The padded output sequence and the topmost `(h, c)` state views, each `[B, H]`
        """
        tbc, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
        output, hidden = self.rnn(packed)
        output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
        return output, self.extract_top_state(hidden)


class BiLSTMEncoderBase(nn.Module):
    """BiLSTM encoder base for a set of encoders producing various outputs.

    All BiLSTM encoders inheriting this class will trim the input to the max length given in the batch.  For example,
    if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
    be length `S` (or more precisely, `[B, S, H]`).  Because its bidirectional, half of the hidden units given in
    the constructor will be applied to the forward direction and half to the backward direction, and these will
    get concatenated.

    *PyTorch Note*: In PyTorch, its more common for the input shape to be temporal length first (`[T, B, H]`) and
    this is the PyTorch default.  There is an extra parameter in all of these models called `batch_first` which
    controls this.  Currently, the default is time first (`batch_first=False`), which differs from TensorFlow.
    To match the TF impl, set `batch_first=True`.

    *PyTorch Note*: Most `BiLSTMEncoder` variants just define the `forward`.  This module cannot provide the same
    utility as the TensorFlow `BiLSTMEncoder` base right now, because the JIT isnt handling subclassing of forward
    properly.

    """

    def __init__(
        self,
        insz: int,
        hsz: int,
        nlayers: int,
        pdrop: float = 0.0,
        requires_length: bool = True,
        batch_first: bool = False,
        unif: float = 0,
        initializer: str = None,
        **kwargs,
    ):
        """Produce a stack of LSTMs with dropout performed on all but the last layer.

        :param insz: The size of the input
        :param hsz: The number of hidden units per BiLSTM (`hsz//2` used for each direction and concatenated)
        :param nlayers: The number of layers of BiLSTMs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
        :param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
        :param unif: PyTorch only! Initialization parameters for RNN
        :param initializer: PyTorch only!
            A string describing optional initialization type for RNN
        """
        super().__init__()
        self.requires_length = requires_length
        self.batch_first = batch_first
        self.nlayers = nlayers
        if nlayers == 1:
            pdrop = 0.0
        # Half the hidden units per direction so the concatenated output is `hsz` wide
        self.rnn = torch.nn.LSTM(insz, hsz // 2, nlayers, dropout=pdrop, bidirectional=True, batch_first=batch_first)
        if initializer == "ortho":
            # BUGFIX: use the in-place initializers; the non-underscore `orthogonal`/`kaiming_uniform`
            # are deprecated aliases (consistent with `GRUEncoderBase` below)
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
        elif unif > 0:
            for weight in self.rnn.parameters():
                weight.data.uniform_(-unif, unif)
        else:
            nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
            nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
        self.output_dim = hsz

    def extract_top_state(self, state):
        # Select the topmost state with -1 and the only direction is forward (select with 0)
        return tuple(s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0] for s in state)


# TODO: this module only exists in pytorch. Do we eliminate it or put it in both?
<class_stmt>BiLSTMEncoderSequenceHiddenContext(BiLSTMEncoderBase)<block_start><def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output self.extract_top_state(concat_state_dirs(hidden))<block_end><block_end><class_stmt>BiLSTMEncoderAll(BiLSTMEncoderBase)<block_start>"""BiLSTM encoder that passes along the full output and hidden states for each layer Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence, and a tuple of hidden vector `[L, B, H]` and context vector `[L, B, H]`, respectively *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. Also note that in PyTorch, this defaults to `True` """<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>""" :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor `[B, S, H] or `[B, H, S]` , and tuple of hidden `[L, B, H]` and context `[L, B, H]` """<line_sep>tensor,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tensor lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output concat_state_dirs(hidden)<block_end><block_end><class_stmt>BiLSTMEncoderSequence(BiLSTMEncoderBase)<block_start>"""BiLSTM encoder to produce the transduced output sequence. 
Takes a tuple of tensor, shape `[B, T, C]` and a lengths of shape `[B]` and produce an output sequence of shape `[B, S, H]` where `S = max(lengths)`. The lengths of the output sequence may differ from the input sequence if the `max(lengths)` given is shorter than `T` during execution. *PyTorch Note:* The input shape of is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`, and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape is of the same orientation. """<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of LSTMs The value `S` here is defined as `max(lengths)`, `S <= T` :param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]` :return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first` """<line_sep>tensor,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tensor lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output<block_end><block_end><class_stmt>BiLSTMEncoderHidden(BiLSTMEncoderBase)<block_start>"""BiLSTM encoder that returns the top hidden state Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and returns a hidden unit tensor of shape `[B, H]` *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. 
Also note that in PyTorch, this defaults to `True` """<def_stmt>forward self inputs<block_start>""" :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor of shape `[B, H]` representing the last RNNs hidden state """<line_sep>tensor,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tensor lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>self.extract_top_state(concat_state_dirs(hidden))[0]<block_end><block_end># TODO: Add this to TF or remove <class_stmt>BiLSTMEncoderHiddenContext(BiLSTMEncoderBase)<block_start><def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>self.extract_top_state(concat_state_dirs(hidden))<block_end><block_end><class_stmt>GRUEncoderBase(nn.Module)<block_start>"""The GRU encoder is a base for a set of encoders producing various outputs. All GRU encoders inheriting this class will trim the input to the max length given in the batch. For example, if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will be length `S` (or more precisely, `[B, S, H]`) *PyTorch Note*: In PyTorch, its more common for the input shape to be temporal length first (`[T, B, H]`) and this is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this. Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl, set `batch_first=True`. 
*PyTorch Note*: Most `GRUEncoder` variants just define the `forward`. This module cannot provide the same utility as the TensorFlow `GRUEncoder` base right now, because because the JIT isnt handling subclassing of forward properly. """<def_stmt>__init__ self insz:int hsz:int nlayers:int pdrop:float=0.0 requires_length:bool=<true> batch_first:bool=<false> unif:float=0 initializer:str=<none> **kwargs <block_start>"""Produce a stack of GRUs with dropout performed on all but the last layer. :param insz: The size of the input :param hsz: The number of hidden units per GRU :param nlayers: The number of layers of GRUs to stack :param pdrop: The probability of dropping a unit value during dropout, defaults to 0 :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`) :param batch_first: PyTorch only! Should we do batch first input or time-first input? Defaults to `False` (differs from TF!) :param unif: PyTorch only! Initialization parameters for RNN :param initializer: PyTorch only! 
A string describing optional initialization type for RNN """<line_sep>super().__init__()<line_sep>self.requires_length=requires_length<line_sep>self.batch_first=batch_first<line_sep>self.nlayers=nlayers<if_stmt>nlayers<eq>1<block_start>pdrop=0.0<block_end>self.rnn=torch.nn.GRU(insz hsz nlayers dropout=pdrop bidirectional=<false> batch_first=batch_first)<if_stmt>initializer<eq>"ortho"<block_start>nn.init.orthogonal_(self.rnn.weight_ih_l0)<line_sep>nn.init.orthogonal_(self.rnn.weight_hh_l0)<block_end><elif_stmt>initializer<eq>"he"<or>initializer<eq>"kaiming"<block_start>nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)<line_sep>nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)<block_end><elif_stmt>unif<g>0<block_start><for_stmt>weight self.rnn.parameters()<block_start>weight.data.uniform_(-unif unif)<block_end><block_end><else_stmt><block_start>nn.init.xavier_uniform_(self.rnn.weight_ih_l0)<line_sep>nn.init.xavier_uniform_(self.rnn.weight_hh_l0)<block_end>self.output_dim=hsz<block_end><def_stmt>extract_top_state self state:torch.Tensor<arrow>torch.Tensor<block_start><return>state[-1]<block_end><block_end><class_stmt>GRUEncoderSequence(GRUEncoderBase)<block_start>"""GRU encoder to produce the transduced output sequence. Takes a tuple of tensor, shape `[B, T, C]` and a lengths of shape `[B]` and produce an output sequence of shape `[B, S, H]` where `S = max(lengths)`. The lengths of the output sequence may differ from the input sequence if the `max(lengths)` given is shorter than `T` during execution. *PyTorch Note:* The input shape of is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`, and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape is of the same orientation. 
"""<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>"""Take in a tuple of the sequence tensor `[T, B, H]` or `[B, T, H]` and its length, produce output sequence :param inputs: A tuple of the sequence tensor and its length :return: A sequence tensor of shape `[T, B, H]` or `[B, T, H]` """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output<block_end><block_end><class_stmt>GRUEncoderAll(GRUEncoderBase)<block_start>"""GRU encoder that passes along the full output and hidden states for each layer Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence, and a hidden vector `[L, B, H]` *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. 
Also note that in PyTorch, this defaults to `True` """<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>""" :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor `[B, S, H]` or `[B, H, S]` , and a hidden tensor `[L, B, H]` """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output hidden<block_end><block_end><class_stmt>GRUEncoderHidden(GRUEncoderBase)<block_start>"""GRU encoder that returns the top hidden state Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and returns a hidden unit tensor of shape `[B, H]` *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. Also note that in PyTorch, this defaults to `True` """<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>""" :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor of shape `[B, H]` representing the last RNNs hidden state """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>self.extract_top_state(hidden)<block_end><block_end><class_stmt>BiGRUEncoderBase(nn.Module)<block_start>"""BiGRU encoder base for a set of encoders producing various outputs. All BiGRU encoders inheriting this class will trim the input to the max length given in the batch. 
For example, if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will be length `S` (or more precisely, `[B, S, H]`). Because its bidirectional, half of the hidden units given in the constructor will be applied to the forward direction and half to the backward direction, and these will get concatenated. *PyTorch Note*: In PyTorch, its more common for the input shape to be temporal length first (`[T, B, H]`) and this is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this. Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl, set `batch_first=True`. *PyTorch Note*: Most `BiGRUEncoder` variants just define the `forward`. This module cannot provide the same utility as the TensorFlow `BiGRUEncoder` base right now, because because the JIT isnt handling subclassing of forward properly. """<def_stmt>__init__ self insz:int hsz:int nlayers:int pdrop:float=0.0 requires_length:bool=<true> batch_first:bool=<false> unif:float=0 initializer:str=<none> **kwargs <block_start>"""Produce a stack of GRUs with dropout performed on all but the last layer. :param insz: The size of the input :param hsz: The number of hidden units per BiGRU (`hsz//2` used for each direction and concatenated) :param nlayers: The number of layers of BiGRUs to stack :param pdrop: The probability of dropping a unit value during dropout, defaults to 0 :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`) :param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!) :param unif: PyTorch only! Initialization parameters for RNN :param initializer: PyTorch only! 
A string describing optional initialization type for RNN """<line_sep>super().__init__()<line_sep>self.requires_length=requires_length<line_sep>self.batch_first=batch_first<line_sep>self.nlayers=nlayers<if_stmt>nlayers<eq>1<block_start>pdrop=0.0<block_end>self.rnn=torch.nn.GRU(insz hsz<floordiv>2 nlayers dropout=pdrop bidirectional=<true> batch_first=batch_first)<if_stmt>initializer<eq>"ortho"<block_start>nn.init.orthogonal(self.rnn.weight_hh_l0)<line_sep>nn.init.orthogonal(self.rnn.weight_ih_l0)<block_end><elif_stmt>initializer<eq>"he"<or>initializer<eq>"kaiming"<block_start>nn.init.kaiming_uniform(self.rnn.weight_hh_l0)<line_sep>nn.init.kaiming_uniform(self.rnn.weight_ih_l0)<block_end><elif_stmt>unif<g>0<block_start><for_stmt>weight self.rnn.parameters()<block_start>weight.data.uniform_(-unif unif)<block_end><block_end><else_stmt><block_start>nn.init.xavier_uniform_(self.rnn.weight_hh_l0)<line_sep>nn.init.xavier_uniform_(self.rnn.weight_ih_l0)<block_end>self.output_dim=hsz<block_end><def_stmt>extract_top_state self state:torch.Tensor<arrow>torch.Tensor# Select the topmost state with -1 and the only direction is forward (select with 0) <block_start><return>state[-1]<block_end><block_end># TODO: normalize across backends or remove <class_stmt>BiGRUEncoderSequenceHiddenContext(BiGRUEncoderBase)<block_start><def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output self.extract_top_state(_cat_dir(hidden))<block_end><block_end><class_stmt>BiGRUEncoderAll(BiGRUEncoderBase)<block_start>"""BiGRU encoder that passes along the full output and hidden states for each layer Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths 
of shape `[B]` This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence, and a hidden vector `[L, B, H]` *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. Also note that in PyTorch, this defaults to `True` """<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>""" :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor `[B, S, H] or `[B, H, S]` , and a hidden vector `[L, B, H]` """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output _cat_dir(hidden)<block_end><block_end><class_stmt>BiGRUEncoderSequence(BiGRUEncoderBase)<block_start>"""BiGRU encoder to produce the transduced output sequence. Takes a tuple of tensor, shape `[B, T, C]` and a lengths of shape `[B]` and produce an output sequence of shape `[B, S, H]` where `S = max(lengths)`. The lengths of the output sequence may differ from the input sequence if the `max(lengths)` given is shorter than `T` during execution. *PyTorch Note:* The input shape of is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`, and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape is of the same orientation. 
"""<def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>"""Take in a tuple of `(sequence, lengths)` and produce and output tensor of the last layer of GRUs The value `S` here is defined as `max(lengths)`, `S <= T` :param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]` :return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first` """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>output<block_end><block_end><class_stmt>BiGRUEncoderHidden(BiGRUEncoderBase)<block_start>"""GRU encoder that returns the top hidden state Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and returns a hidden unit tensor of shape `[B, H]` *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. 
Also note that in PyTorch, this defaults to `True` """<def_stmt>forward self inputs<block_start>""" :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor of shape `[B, H]` representing the last RNNs hidden state """<line_sep>tbc,lengths=inputs<line_sep>packed=torch.nn.utils.rnn.pack_padded_sequence(tbc lengths.cpu() batch_first=self.batch_first)<line_sep>output,hidden=self.rnn(packed)<line_sep>output,_=torch.nn.utils.rnn.pad_packed_sequence(output batch_first=self.batch_first)<line_sep><return>self.extract_top_state(_cat_dir(hidden))<block_end><block_end><class_stmt>Reduction(nn.Module)<block_start><def_stmt>__init__ self<block_start>super().__init__()<block_end><def_stmt>forward self inputs:List[torch.Tensor]<arrow>torch.Tensor<block_start><pass><block_end><def_stmt>set_output_dim self output_dims:List[int]<block_start><pass><block_end><block_end><class_stmt>ConcatReduction(Reduction)<block_start><def_stmt>__init__ self output_dims:List[int] axis=-1 **kwargs<block_start>super().__init__()<line_sep>self.axis=axis<line_sep>self.set_output_dim(output_dims)<block_end><def_stmt>set_output_dim self output_dims:List[int]<block_start>self.output_dim=sum(output_dims)<block_end><def_stmt>forward self inputs:List[torch.Tensor]<arrow>torch.Tensor<block_start><return>torch.cat(inputs self.axis)<block_end><block_end><class_stmt>ConcatSubtractReduction(Reduction)<block_start>"""This reduction assumes paired input and subtracts the two to get a distance It is useful for training sentence encoders and is used, for example, in SentenceBERT For this to work we assume that the inputs are paired, and subtract them """<def_stmt>__init__ self output_dims:List[int] axis=-1 **kwargs<block_start>super().__init__()<line_sep>self.axis=axis<line_sep>self.set_output_dim(output_dims)<block_end><def_stmt>set_output_dim self output_dims:List[int]<block_start>self.output_dim=3<times>output_dims[0]<block_end><def_stmt>forward self 
inputs:List[torch.Tensor]<arrow>torch.Tensor<block_start>sub=torch.abs(inputs[0]-inputs[1])<line_sep><return>torch.cat([inputs[0] inputs[1] sub] self.axis)<block_end><block_end><class_stmt>SumReduction(Reduction)<block_start><def_stmt>__init__ self output_dims:List[int] **kwargs<block_start>super().__init__()<line_sep>self.set_output_dim(output_dims)<block_end><def_stmt>set_output_dim self output_dims:List[int]# We could actually project if we needed, or at least should validate <block_start>self.output_dim=output_dims[0]<block_end><def_stmt>forward self inputs:List[torch.Tensor]<arrow>torch.Tensor<block_start><return>sum(inputs)<block_end><block_end><class_stmt>SumLayerNormReduction(Reduction)<block_start><def_stmt>__init__ self output_dims:List[int] layer_norm_eps:float=1.0e-12 **kwargs<block_start>super().__init__()<line_sep>self.set_output_dim(output_dims)<line_sep>self.ln=nn.LayerNorm(self.output_dim eps=layer_norm_eps)<block_end><def_stmt>set_output_dim self output_dims:List[int]<block_start>self.output_dim=output_dims[0]<block_end><def_stmt>forward self inputs:List[torch.Tensor]<arrow>torch.Tensor<block_start>output=sum(inputs)<line_sep><return>self.ln(output)<block_end><block_end><class_stmt>EmbeddingsStack(nn.Module)<block_start><def_stmt>__init__ self embeddings_dict:Dict[str nn.Embedding] dropout_rate:float=0.0 requires_length:bool=<false> reduction:Optional[Union[str nn.Module]]='concat' **kwargs <block_start>"""Takes in a dictionary where the keys are the input tensor names, and the values are the embeddings :param embeddings_dict: dictionary of each feature embedding :param dropout_rate: The dropout rate (0.0 means no dropout, 1.0 means complete) """<line_sep>super().__init__()<line_sep>self._keys:List[str]=[]<line_sep>embeddings_list=[]<line_sep>output_dims=[]<for_stmt>k,embedding 
embeddings_dict.items()<block_start>embeddings_list.append(embedding)<line_sep>self._keys.append(k)<line_sep>output_dims<augadd>[embedding.get_dsz()]<block_end>self.embeddings:nn.ModuleList=nn.ModuleList(embeddings_list)<line_sep># TODO: should we make a registry of options? <if_stmt>isinstance(reduction str)<block_start><if_stmt>reduction<eq>'sum'<block_start>self.reduction=SumReduction(output_dims)<block_end><elif_stmt>reduction<eq>'sum-layer-norm'<block_start>self.reduction=SumLayerNormReduction(output_dims layer_norm_eps=kwargs.get('layer_norm_eps' 1.0e-12))<block_end><elif_stmt>reduction<eq>'concat-subtract'<block_start>self.reduction=ConcatSubtractReduction(output_dims)<block_end><else_stmt><block_start>self.reduction=ConcatReduction(output_dims)<block_end><block_end><else_stmt><block_start>self.reduction=reduction<line_sep>self.reduction.set_output_dim(output_dims)<block_end>self.dsz=self.reduction.output_dim<line_sep>self.dropout=nn.Dropout(dropout_rate)<line_sep>self.requires_length=requires_length<block_end><def_stmt>__getitem__ self item:str<arrow>nn.Module<block_start>idx=self._keys.index(item)<if_stmt>idx<l>0<block_start><raise>Exception(f"Invalid item ({item})")<block_end><return>self.embeddings[idx]<block_end><def_stmt>forward self inputs:Dict[str torch.Tensor]<arrow>torch.Tensor<block_start>"""This method performs "embedding" of the inputs. 
The base method here then concatenates along depth dimension to form word embeddings :return: A 3-d vector where the last dimension is the concatenated dimensions of all embeddings """<line_sep>all_embeddings_out=[]<line_sep>i=0<for_stmt>embedding self.embeddings<block_start>k=self._keys[i]<line_sep>x=inputs[k]<line_sep># Its a hair faster to do this than using isinstance <if_stmt>x.__class__<eq>tuple<block_start>embeddings_out=embedding(*x)<block_end><else_stmt><block_start>embeddings_out=embedding(x)<block_end>all_embeddings_out.append(embeddings_out)<line_sep>i<augadd>1<block_end>word_embeddings=self.reduction(all_embeddings_out)<line_sep><return>self.dropout(word_embeddings)<block_end><def_stmt>keys self<block_start><return>self._keys<block_end>@property<def_stmt>output_dim self<block_start><return>self.dsz<block_end><def_stmt>items self<block_start><for_stmt>k,v zip(self.keys() self.embeddings)<block_start><yield>k v<block_end><block_end><block_end><class_stmt>DenseStack(nn.Module)<block_start>"""A stack of one or more hidden layers """<def_stmt>__init__ self insz:int hsz:Union[int List[int]] activation:Union[str List[str]]="relu" pdrop_value:float=0.5 init=<none> skip_connect=<false> layer_norm=<false> **kwargs <block_start>"""Stack 1 or more hidden layers, optionally (forming an MLP) :param insz: The number of input units :param hsz: The number of hidden units :param activation: The name of the activation function to use :param pdrop_value: The dropout probability :param init: The initializer :param skip_connect: whether use skip connection when insz is equal to outsz for a layer :param layer_norm: whether use layer norm in each layer """<line_sep>super().__init__()<line_sep>hszs=listify(hsz)<line_sep>self.output_dim=hsz[-1]<line_sep>activations=listify(activation)<if_stmt>len(activations)<eq>1<block_start>activations=activations<times>len(hszs)<block_end><if_stmt>len(activations)<ne>len(hszs)<block_start><raise>ValueError("Number of activations must match 
number of hidden sizes in a stack!")<block_end>current=insz<line_sep>layer_stack=[]<if_stmt>layer_norm<block_start>layer_norm_eps=kwargs.get('layer_norm_eps' 1e-6)<block_end><for_stmt>hsz,activation zip(hszs activations)<block_start><if_stmt>skip_connect<and>current<eq>hsz<block_start>layer=SkipConnection(current activation)<block_end><else_stmt><block_start>layer=Dense(current hsz activation)<block_end><if_stmt>layer_norm<block_start>layer=nn.Sequential(layer nn.LayerNorm(hsz eps=layer_norm_eps))<block_end>layer_stack.append(WithDropout(layer pdrop_value))<line_sep>current=hsz<block_end>self.layer_stack=nn.Sequential(*layer_stack)<line_sep>self.requires_length=<false><block_end><def_stmt>forward self inputs:torch.Tensor<arrow>torch.Tensor<block_start>"""Stack 1 or more hidden layers, optionally (forming an MLP) :param inputs: The fixed representation of the model :Keyword Arguments: * *hsz* -- (``int``) The number of hidden units (defaults to `100`) :return: The final layer """<line_sep><return>self.layer_stack(inputs)<block_end><block_end><class_stmt>VectorSequenceAttention(nn.Module)<block_start><def_stmt>__init__ self hsz:int<block_start>super().__init__()<line_sep>self.hsz=hsz<line_sep>self.W_c=nn.Linear(2<times>self.hsz hsz bias=<false>)<block_end><def_stmt>forward self query_t keys_bth values_bth keys_mask=<none># Output(t) = B x H x 1 # Keys = B x T x H # a = B x T x 1 <block_start>a=self._attention(query_t keys_bth keys_mask)<line_sep>attended=self._update(a query_t values_bth)<line_sep><return>attended<block_end><def_stmt>_attention self query_t keys_bth keys_mask<block_start><pass><block_end><def_stmt>_update self a query_t values_bth# a = B x T # Want to apply over context, scaled by a # (B x 1 x T) (B x T x H) = (B x 1 x H) <block_start>a=a.view(a.size(0) 1 a.size(1))<line_sep>c_t=torch.bmm(a values_bth).squeeze(1)<line_sep>attended=torch.cat([c_t query_t] 
-1)<line_sep>attended=torch.tanh(self.W_c(attended))<line_sep><return>attended<block_end><block_end><def_stmt>dot_product_attention_weights query_t:torch.Tensor keys_bth:torch.Tensor keys_mask:torch.Tensor<arrow>torch.Tensor<block_start>a=keys_bth@query_t.unsqueeze(2)<line_sep>a=a.squeeze(2).masked_fill(keys_mask<eq>MASK_FALSE -1e9)<line_sep>a=F.softmax(a dim=-1)<line_sep><return>a<block_end><def_stmt>dot_product_attention_weights_lengths query_t:torch.Tensor keys_bth:torch.Tensor keys_lengths:torch.Tensor<arrow>torch.Tensor<block_start>mask=sequence_mask(keys_lengths keys_bth.shape[1]).to(keys_bth.device)<line_sep><return>dot_product_attention_weights(query_t keys_bth mask)<block_end><class_stmt>LuongDotProductAttention(VectorSequenceAttention)<block_start><def_stmt>__init__ self hsz<block_start>super().__init__(hsz)<block_end><def_stmt>_attention self query_t keys_bth keys_mask<block_start><return>dot_product_attention_weights(query_t keys_bth keys_mask)<block_end><block_end><class_stmt>ScaledDotProductAttention(VectorSequenceAttention)<block_start><def_stmt>__init__ self hsz<block_start>super().__init__(hsz)<block_end><def_stmt>_attention self query_t keys_bth keys_mask<block_start>a=(keys_bth@query_t.unsqueeze(2))/math.sqrt(self.hsz)<line_sep>a=a.squeeze(2).masked_fill(keys_mask<eq>MASK_FALSE -1e9)<line_sep>a=F.softmax(a dim=-1)<line_sep><return>a<block_end><block_end><class_stmt>LuongGeneralAttention(VectorSequenceAttention)<block_start><def_stmt>__init__ self hsz<block_start>super().__init__(hsz)<line_sep>self.W_a=nn.Linear(self.hsz self.hsz bias=<false>)<block_end><def_stmt>_attention self query_t keys_bth keys_mask<block_start>a=keys_bth@self.W_a(query_t).unsqueeze(2)<line_sep>a=a.squeeze(2).masked_fill(keys_mask<eq>MASK_FALSE -1e9)<line_sep>a=F.softmax(a dim=-1)<line_sep><return>a<block_end><block_end><class_stmt>BahdanauAttention(VectorSequenceAttention)<block_start><def_stmt>__init__ self 
hsz<block_start>super().__init__(hsz)<line_sep>self.hsz=hsz<line_sep>self.W_a=nn.Linear(self.hsz self.hsz bias=<false>)<line_sep>self.E_a=nn.Linear(self.hsz self.hsz bias=<false>)<line_sep>self.v=nn.Linear(self.hsz 1 bias=<false>)<block_end><def_stmt>_attention self query_t keys_bth keys_mask<block_start>B,T,H=keys_bth.shape<line_sep>q=self.W_a(query_t.view(-1 self.hsz)).view(B 1 H)<line_sep>u=self.E_a(keys_bth).view(B T H)<line_sep>z=torch.tanh(q+u)<line_sep>a=self.v(z.view(-1 self.hsz)).view(B T)<line_sep>a=a.masked_fill(keys_mask<eq>MASK_FALSE -1e9)<line_sep>a=F.softmax(a dim=-1)<line_sep><return>a<block_end><def_stmt>_update self a query_t values_bth<block_start>query_t=query_t.view(-1 self.hsz)<line_sep># a = B x T # Want to apply over context, scaled by a # (B x 1 x T) (B x T x H) = (B x 1 x H) -> (B x H) a=a.view(a.size(0) 1 a.size(1))<line_sep>c_t=(a@values_bth).squeeze(1)<line_sep># (B x 2H) attended=torch.cat([c_t query_t] -1)<line_sep>attended=self.W_c(attended)<line_sep><return>attended<block_end><block_end><class_stmt>FineTuneModel(nn.Module)<block_start><def_stmt>__init__ self nc embeddings stack_model=<none><block_start>super().__init__()<if_stmt>isinstance(embeddings dict)<block_start>self.finetuned=EmbeddingsStack(embeddings)<block_end><else_stmt><block_start>self.finetuned=embeddings<block_end>self.stack_model=stack_model<line_sep>output_dim=self.finetuned.output_dim<if>stack_model<is><none><else>stack_model.output_dim<line_sep>self.output_layer=Dense(output_dim nc activation="log_softmax")<block_end><def_stmt>forward self inputs<block_start>base_layers=self.finetuned(inputs)<line_sep>stacked=self.stack_model(base_layers)<if>self.stack_model<is><not><none><else>base_layers<line_sep><return>self.output_layer(stacked)<block_end><block_end><class_stmt>CompositePooling(nn.Module)<block_start>"""Composite pooling allows for multiple sub-modules during pooling to be used in parallel """<def_stmt>__init__ self models<block_start>""" Note, this currently 
requires that each submodel is an eight_mile model with an `output_dim` attr """<line_sep>super().__init__()<line_sep>self.models=nn.ModuleList(models)<line_sep>self.output_dim=sum(m.output_dim<for>m self.models)<line_sep>self.requires_length=any(getattr(m "requires_length" <false>)<for>m self.models)<block_end><def_stmt>forward self inputs<block_start>inputs,lengths=tensor_and_lengths(inputs)<line_sep>pooled=[]<for_stmt>sub_model self.models<block_start><if_stmt>getattr(sub_model "requires_length" <false>)<block_start>pooled.append(sub_model((inputs lengths)))<block_end><else_stmt><block_start>pooled.append(sub_model(inputs))<block_end><block_end><return>torch.cat(pooled -1)<block_end><block_end><class_stmt>EmbedPoolStackModel(nn.Module)<block_start>"""This provides an idiom for classification consisting of multiple phases In the first phase, we embed the input tensors, and subsequently pool them to a fixed width representation. Finally, we allow multiple hidden "stacking" layers, ultimately ending in a projection to the output space """<def_stmt>__init__ self nc:int embeddings:nn.Module pool_model:nn.Module stack_model:Optional[nn.Module]=<none> output_model:Optional[nn.Module]=<none> <block_start>super().__init__()<line_sep>self.embed_model=embeddings<line_sep>self.pool_model=pool_model<line_sep>self.stack_model=stack_model<if>stack_model<else>nn.Identity()<line_sep>output_dim=self.pool_model.output_dim<if>stack_model<is><none><else>stack_model.output_dim<line_sep>self.output_layer=Dense(output_dim nc activation="log_softmax")<if>output_model<is><none><else>output_model<block_end><def_stmt>forward self inputs:Dict[str torch.Tensor]<block_start>lengths=inputs["lengths"]<line_sep>embedded=self.embed_model(inputs)<line_sep>embedded=(embedded lengths)<line_sep>pooled=self.pool_model(embedded)<line_sep>stacked=self.stack_model(pooled)<line_sep><return>self.output_layer(stacked)<block_end><block_end><class_stmt>PassThru(nn.Module)<block_start><def_stmt>__init__ self 
input_dim<block_start>super().__init__()<line_sep>self.output_dim=input_dim<block_end><def_stmt>forward self inputs:torch.Tensor<arrow>torch.Tensor<block_start><return>inputs<block_end><block_end><class_stmt>WithoutLength(nn.Module)<block_start>"""Wrapper layer to remove lengths from the input """<def_stmt>__init__ self layer:nn.Module<block_start>super().__init__()<line_sep>self.layer=layer<line_sep>self.output_dim=self.layer.output_dim<if>hasattr(self.layer "output_dim")<else>0<block_end><def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start><return>self.layer(inputs[0])<block_end><block_end><class_stmt>WithDropout(nn.Module)<block_start>"""Wrapper for any layer that surrounds it with dropout"""<def_stmt>__init__ self layer:nn.Module pdrop:float=0.5 variational=<false> batch_first=<false><block_start>"""Create a dropout wrapper around the given layer :param layer: Some sort of layer :param pdrop: A dropout value """<line_sep>super().__init__()<line_sep>self.layer=layer<line_sep>self.dropout=VariationalDropout(pdrop batch_first=batch_first)<if>variational<else>nn.Dropout(pdrop)<line_sep>self.output_dim=self.layer.output_dim<if>hasattr(self.layer "output_dim")<else>0<block_end><def_stmt>forward self inputs:torch.Tensor<arrow>torch.Tensor<block_start>"""Apply the layer followed by dropout :param inputs: input tensor :return: output transformed by the held layer and subsequent dropout """<line_sep><return>self.dropout(self.layer(inputs))<block_end><block_end><class_stmt>WithDropoutOnFirst(nn.Module)<block_start>"""Wrapper for any layer that surrounds it with dropout This exists primarily for the LSTMEncoderWithState to allow dropout on the output while passing back the hidden state """<def_stmt>__init__ self layer:nn.Module pdrop:float=0.5 variational=<false><block_start>"""Create a dropout wrapper around the given layer :param layer: Some sort of layer :param pdrop: A dropout value 
"""<line_sep>super().__init__()<line_sep>self.layer=layer<line_sep>self.dropout=VariationalDropout(pdrop)<if>variational<else>nn.Dropout(pdrop)<line_sep>self.output_dim=self.layer.output_dim<if>hasattr(self.layer "output_dim")<else>0<block_end><def_stmt>forward self inputs:Tuple[torch.Tensor]<arrow>torch.Tensor<block_start>"""Apply the layer followed by dropout :param inputs: input tensor :return: output transformed by the held layer and subsequent dropout """<line_sep>outputs=self.layer(inputs)<line_sep><return>self.dropout(outputs[0]) outputs[1]<block_end><block_end><def_stmt>transition_mask vocab span_type s_idx e_idx pad_idx=<none><block_start>"""Create a mask to enforce span sequence transition constraints. Returns a Tensor with valid transitions as a 0 and invalid as a 1 for easy use with `masked_fill` """<line_sep>np_mask=transition_mask_np(vocab span_type s_idx e_idx pad_idx=pad_idx)<line_sep><return>torch.from_numpy(np_mask)<eq>0<block_end>@torch.jit.script<def_stmt>inplace_assign data:torch.Tensor index:torch.Tensor new_data:torch.Tensor<arrow>torch.Tensor<block_start>new_data=new_data.unsqueeze(0)<line_sep>index=index.expand(1 new_data.size(1))<line_sep>data.scatter_(0 index new_data)<line_sep><return>data<block_end>@torch.jit.script<def_stmt>i2t i:int<arrow>torch.Tensor<block_start><return>torch.tensor(i).unsqueeze(0)<block_end>@torch.jit.script<def_stmt>script_viterbi unary:torch.Tensor trans:torch.Tensor start_idx:int end_idx:int<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>seq_len:int=unary.size(0)<line_sep>num_tags:int=unary.size(1)<line_sep>fill_value:float=-1e4<line_sep># dtype=unary.dtype fails, with prim_dtype error on torch 1.7.1 alphas=torch.full((num_tags ) fill_value dtype=torch.float device=unary.device)<line_sep>broadcast_idx=torch.full((num_tags ) start_idx dtype=torch.long)<line_sep>alphas=alphas.scatter(0 broadcast_idx torch.zeros((num_tags 
)))<line_sep>alphas=alphas.unsqueeze(0)<line_sep># row 0 of backpointers is a sentinel; it is stripped at the end via new_path_vec[1:] backpointers:torch.Tensor=torch.zeros(num_tags dtype=torch.long).unsqueeze(0)<for_stmt>i range(seq_len)<block_start>unary_t=unary[i :]<line_sep>next_tag_var=alphas+trans<line_sep>viterbi,best_tag_ids=torch.max(next_tag_var 1)<line_sep>backpointers=torch.cat([backpointers best_tag_ids.unsqueeze(0)] 0)<line_sep>alphas=(viterbi+unary_t).unsqueeze(0)<block_end>terminal_vars=alphas.squeeze(0)+trans[end_idx :]<line_sep>path_score,best_tag_id=torch.max(terminal_vars 0)<line_sep>best_path=best_tag_id.unsqueeze(0)<line_sep># walk the backpointers from the final step to recover the best tag sequence <for_stmt>i range(unary.size(0))<block_start>t=seq_len-i-1<line_sep>best_tag_id=backpointers[t+1 best_tag_id]<line_sep>best_path=torch.cat([best_path best_tag_id.unsqueeze(0)] -1)<block_end>new_path_vec=best_path.flip(0)<line_sep><return>new_path_vec[1:] path_score<block_end><class_stmt>ViterbiBatchSize1(nn.Module)<block_start><def_stmt>__init__ self start_idx:int end_idx:int<block_start>super().__init__()<line_sep>self.start_idx=start_idx<line_sep>self.end_idx=end_idx<block_end><def_stmt>forward self unary:torch.Tensor trans:torch.Tensor _:torch.Tensor<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>unary=unary.squeeze(1)<line_sep>trans=trans.squeeze(0)<line_sep>path,score=script_viterbi(unary trans self.start_idx self.end_idx)<line_sep><return>path.unsqueeze(1) score<block_end><block_end><class_stmt>Viterbi(nn.Module)<block_start><def_stmt>__init__ self start_idx:int end_idx:int<block_start>super().__init__()<line_sep>self.start_idx=start_idx<line_sep>self.end_idx=end_idx<line_sep># r, start_idx: int, end_idx: int, norm = lambda x, y: x <block_end><def_stmt>forward self unary:torch.Tensor trans:torch.Tensor lengths:torch.Tensor<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>"""Do Viterbi decode on a batch. :param unary: torch.FloatTensor: [T, B, N] :param trans: torch.FloatTensor: [1, N, N] :param norm: Callable: This function should take the initial and a dim to normalize along. 
:return: torch.LongTensor: [T, B] the padded paths :return: torch.FloatTensor: [B] the path scores """<line_sep>seq_len,batch_size,tag_size=unary.size()<line_sep>min_length=torch.min(lengths)<line_sep>backpointers=[]<line_sep># Alphas: [B, 1, N] alphas=torch.full((batch_size 1 tag_size) -1e4 device=unary.device)<line_sep>alphas[: 0 self.start_idx]=0<line_sep># alphas = self.norm(alphas) <for_stmt>i,unary_t enumerate(unary)<block_start>next_tag_var=alphas+trans<line_sep>viterbi,best_tag_ids=torch.max(next_tag_var 2)<line_sep>backpointers.append(best_tag_ids)<line_sep>new_alphas=viterbi+unary_t<line_sep>new_alphas.unsqueeze_(1)<line_sep># This part generates a warning <if_stmt>i<ge>min_length<block_start>mask=(i<l>lengths).view(-1 1 1)<line_sep>alphas=alphas.masked_fill(mask 0)+new_alphas.masked_fill(mask<eq>MASK_FALSE 0)<block_end><else_stmt><block_start>alphas=new_alphas<block_end><block_end># Add end tag terminal_var=alphas.squeeze(1)+trans[: self.end_idx :]<line_sep>path_score,best_tag_id=torch.max(terminal_var 1)<line_sep># Flip lengths rev_len=seq_len-lengths-1<line_sep>best_path=[best_tag_id]<for_stmt>i range(len(backpointers))<block_start>t=len(backpointers)-i-1<line_sep>backpointer_t=backpointers[t]<line_sep># Get new best tag candidate new_best_tag_id=backpointer_t.gather(1 best_tag_id.unsqueeze(1)).squeeze(1)<line_sep># We are going backwards now; until i passes the flipped length, # these positions aren't part of your real results yet mask=i<g>rev_len<line_sep>best_tag_id=best_tag_id.masked_fill(mask 0)+new_best_tag_id.masked_fill(mask<eq>MASK_FALSE 0)<line_sep>best_path.append(best_tag_id)<block_end>_=best_path.pop()<line_sep>best_path.reverse()<line_sep>best_path=torch.stack(best_path)<line_sep># Mask out the extra tags (This might be pointless given that anything that # will use this as a dense tensor downstream will mask it itself?) 
seq_mask=sequence_mask(lengths seq_len).to(best_path.device).transpose(0 1)<line_sep>best_path=best_path.masked_fill(seq_mask<eq>MASK_FALSE 0)<line_sep><return>best_path path_score<block_end><block_end>@torch.jit.script<def_stmt>script_viterbi_log_softmax_norm unary:torch.Tensor trans:torch.Tensor start_idx:int end_idx:int<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>seq_len:int=unary.size(0)<line_sep>num_tags:int=unary.size(1)<line_sep>fill_value:float=-1e4<line_sep># dtype=unary.dtype fails, with prim_dtype error on torch 1.7.1 alphas=torch.full((num_tags ) fill_value dtype=torch.float device=unary.device)<line_sep>broadcast_idx=torch.full((num_tags ) start_idx dtype=torch.long)<line_sep>alphas=alphas.scatter(0 broadcast_idx torch.zeros((num_tags )))<line_sep>alphas=alphas.unsqueeze(0)<line_sep>alphas=torch.log(F.softmax(alphas dim=-1))<line_sep>backpointers:torch.Tensor=torch.zeros(num_tags dtype=torch.long).unsqueeze(0)<for_stmt>i range(seq_len)<block_start>unary_t=unary[i :]<line_sep>next_tag_var=alphas+trans<line_sep>viterbi,best_tag_ids=torch.max(next_tag_var 1)<line_sep>backpointers=torch.cat([backpointers best_tag_ids.unsqueeze(0)] 0)<line_sep>alphas=(viterbi+unary_t).unsqueeze(0)<block_end>terminal_vars=alphas.squeeze(0)+trans[end_idx :]<line_sep>path_score,best_tag_id=torch.max(terminal_vars 0)<line_sep>best_path=best_tag_id.unsqueeze(0)<for_stmt>i range(unary.size(0))<block_start>t=seq_len-i-1<line_sep>best_tag_id=backpointers[t+1 best_tag_id]<line_sep>best_path=torch.cat([best_path best_tag_id.unsqueeze(0)] -1)<block_end>new_path_vec=best_path.flip(0)<line_sep><return>new_path_vec[1:] path_score<block_end><class_stmt>ViterbiLogSoftmaxNormBatchSize1(nn.Module)<block_start><def_stmt>__init__ self start_idx:int end_idx:int<block_start>super().__init__()<line_sep>self.start_idx=start_idx<line_sep>self.end_idx=end_idx<block_end><def_stmt>forward self unary:torch.Tensor trans:torch.Tensor _:torch.Tensor<arrow>Tuple[torch.Tensor 
torch.Tensor]<block_start>unary=unary.squeeze(1)<line_sep>trans=trans.squeeze(0)<line_sep>path,score=script_viterbi_log_softmax_norm(unary trans self.start_idx self.end_idx)<line_sep><return>path.unsqueeze(1) score<block_end><block_end><class_stmt>ViterbiLogSoftmaxNorm(Viterbi)<block_start><def_stmt>forward self unary:torch.Tensor trans:torch.Tensor lengths:torch.Tensor<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>"""Do Viterbi decode on a batch. :param unary: torch.FloatTensor: [T, B, N] :param trans: torch.FloatTensor: [1, N, N] :param norm: Callable: This function should take the initial and a dim to normalize along. :return: torch.LongTensor: [T, B] the padded paths :return: torch.FloatTensor: [B] the path scores """<line_sep>seq_len,batch_size,tag_size=unary.size()<line_sep>min_length=torch.min(lengths)<line_sep>backpointers=[]<line_sep># Alphas: [B, 1, N] alphas=torch.full((batch_size 1 tag_size) -1e4 device=unary.device)<line_sep>alphas[: 0 self.start_idx]=0<line_sep>alphas=F.log_softmax(alphas dim=-1)<for_stmt>i,unary_t enumerate(unary)<block_start>next_tag_var=alphas+trans<line_sep>viterbi,best_tag_ids=torch.max(next_tag_var 2)<line_sep>backpointers.append(best_tag_ids)<line_sep>new_alphas=viterbi+unary_t<line_sep>new_alphas.unsqueeze_(1)<if_stmt>i<ge>min_length<block_start>mask=(i<l>lengths).view(-1 1 1)<line_sep>alphas=alphas.masked_fill(mask 0)+new_alphas.masked_fill(mask<eq>MASK_FALSE 0)<block_end><else_stmt><block_start>alphas=new_alphas<block_end><block_end># Add end tag terminal_var=alphas.squeeze(1)+trans[: self.end_idx :]<line_sep>path_score,best_tag_id=torch.max(terminal_var 1)<line_sep># Flip lengths rev_len=seq_len-lengths-1<line_sep>best_path=[best_tag_id]<for_stmt>i range(len(backpointers))<block_start>t=len(backpointers)-i-1<line_sep>backpointer_t=backpointers[t]<line_sep># Get new best tag candidate new_best_tag_id=backpointer_t.gather(1 best_tag_id.unsqueeze(1)).squeeze(1)<line_sep># We are going backwards now, if flipped length was 
passed # these you aren't in your real results yet mask=i<g>rev_len<line_sep>best_tag_id=best_tag_id.masked_fill(mask 0)+new_best_tag_id.masked_fill(mask<eq>MASK_FALSE 0)<line_sep>best_path.append(best_tag_id)<block_end>_=best_path.pop()<line_sep>best_path.reverse()<line_sep>best_path=torch.stack(best_path)<line_sep># Mask out the extra tags (This might be pointless given that anything that # will use this as a dense tensor downstream will mask it itself?) seq_mask=sequence_mask(lengths seq_len).to(best_path.device).transpose(0 1)<line_sep>best_path=best_path.masked_fill(seq_mask<eq>MASK_FALSE 0)<line_sep><return>best_path path_score<block_end><block_end><def_stmt>ident x<block_start><return>x<block_end><class_stmt>TaggerGreedyDecoder(nn.Module)<block_start><def_stmt>__init__ self num_tags:int constraint_mask:Optional[torch.Tensor]=<none> batch_first:bool=<true> reduction:str="batch" <block_start>"""A Greedy decoder and loss module for taggers. :param num_tags: `int` The number of output classes :param constraint_mask: `Tensor[1, N, N]` A mask with valid transitions as 1 and invalid as 0 :param batch_first: `bool` Should the batch dimensions be first? 
:param reduction: `str` Should the loss be calculated at the token level or batch level """<line_sep>super().__init__()<line_sep>self.num_tags=num_tags<if_stmt>constraint_mask<is><not><none><block_start>constraint_mask=F.log_softmax(torch.zeros(constraint_mask.shape).masked_fill(constraint_mask -1e4) dim=1)<line_sep>self.register_buffer("constraint_mask" constraint_mask)<block_end><else_stmt><block_start>self.constraint_mask=<none><block_end># FIXME: we cant do it like this if using TorchScript self.to_batch_first=ident<if>batch_first<else>tbh2bth<line_sep>self.to_time_first=bth2tbh<if>batch_first<else>ident<line_sep>self.batch_first=batch_first<line_sep>self.loss=SequenceLoss(LossFn=nn.CrossEntropyLoss avg=reduction)<line_sep>self.viterbi=ViterbiLogSoftmaxNorm(Offsets.GO Offsets.EOS)<block_end>@property<def_stmt>transitions self<block_start><return>self.constraint_mask<block_end><def_stmt>neg_log_loss self inputs tags lengths<block_start>unaries=self.to_batch_first(inputs)<line_sep>tags=self.to_batch_first(tags)<line_sep><return>self.loss(unaries tags)<block_end><def_stmt>forward self inputs<arrow>torch.Tensor<block_start>unaries,lengths=tensor_and_lengths(inputs)<line_sep># If there is a constraint mask do a masked viterbi <if_stmt>self.constraint_mask<is><not><none><block_start>probv=self.to_time_first(unaries)<line_sep>probv=F.log_softmax(probv dim=-1)<line_sep>preds,scores=self.viterbi(probv self.constraint_mask lengths)<if_stmt>self.batch_first<block_start><return>tbh2bth(preds)# , scores <block_end><else_stmt><block_start><return>preds<block_end><block_end><else_stmt># Decoding doesn't care about batch/time first <block_start>_,preds=torch.max(unaries -1)<line_sep>mask=sequence_mask(lengths unaries.shape[1]).to(preds.device)<line_sep># The mask gets generated as batch first mask=mask<if>self.batch_first<else>mask.transpose(0 1)<line_sep>preds=preds.masked_fill(mask<eq>MASK_FALSE 0)<block_end><return>preds<block_end># , None <def_stmt>extra_repr 
self<arrow>str<block_start>str_=f"n_tags={self.num_tags}, batch_first={self.batch_first}"<if_stmt>self.constraint_mask<is><not><none><block_start>str_<augadd>", constrained=True"<block_end><return>str_<block_end><block_end><class_stmt>CRF(nn.Module)<block_start><def_stmt>__init__ self num_tags:int constraint_mask:Optional[torch.Tensor]=<none> batch_first:bool=<true> idxs:Tuple[int int]=(Offsets.GO Offsets.EOS) <block_start>"""Initialize the object. :param num_tags: int, The number of tags in your output (emission size) :param constraint: torch.ByteTensor, Constraints on the transitions [1, N, N] :param idxs: Tuple(int. int), The index of the start and stop symbol in emissions. :param batch_first: bool, if the input [B, T, ...] or [T, B, ...] Note: if idxs is none then the CRF adds these symbols to the emission vectors and n_tags is assumed to be the number of output tags. if idxs is not none then the first element is assumed to be the start index and the second idx is assumed to be the end index. In this case n_tags is assumed to include the start and end symbols. 
"""<line_sep>super().__init__()<line_sep>self.start_idx,self.end_idx=idxs<line_sep>self.num_tags=num_tags<if_stmt>constraint_mask<is><not><none><block_start>self.register_buffer("constraint_mask" constraint_mask)<block_end><else_stmt><block_start>self.constraint_mask=<none><block_end>self.transitions_p=nn.Parameter(torch.Tensor(1 self.num_tags self.num_tags).zero_())<line_sep>self.batch_first=batch_first<line_sep>self.viterbi=Viterbi(self.start_idx self.end_idx)<block_end><def_stmt>extra_repr self<arrow>str<block_start>str_="n_tags=%d, batch_first=%s"%(self.num_tags self.batch_first)<if_stmt>self.constraint_mask<is><not><none><block_start>str_<augadd>", constrained=True"<block_end><return>str_<block_end>@property<def_stmt>transitions self<block_start><if_stmt>self.constraint_mask<is><not><none><block_start><return>self.transitions_p.masked_fill(self.constraint_mask -1e4)<block_end><return>self.transitions_p<block_end><def_stmt>neg_log_loss self unary tags lengths<block_start>"""Neg Log Loss with a Batched CRF. :param unary: torch.FloatTensor: [T, B, N] or [B, T, N] :param tags: torch.LongTensor: [T, B] or [B, T] :param lengths: torch.LongTensor: [B] :return: torch.FloatTensor: [B] """<line_sep># Convert from [B, T, N] -> [T, B, N] <if_stmt>self.batch_first<block_start>unary=unary.transpose(0 1)<line_sep>tags=tags.transpose(0 1)<block_end>_,batch_size,_=unary.size()<line_sep>fwd_score=self._forward_alg(unary lengths)<line_sep>gold_score=self.score_sentence(unary tags lengths)<line_sep>loss=fwd_score-gold_score<line_sep>batch_loss=torch.mean(loss)<line_sep><return>batch_loss<block_end><def_stmt>score_sentence self unary:torch.Tensor tags:torch.Tensor lengths:torch.Tensor<arrow>torch.Tensor<block_start>"""Score a batch of sentences. 
:param unary: torch.FloatTensor: [T, B, N] :param tags: torch.LongTensor: [T, B] :param lengths: torch.LongTensor: [B] :param min_length: torch.LongTensor: [] :return: torch.FloatTensor: [B] """<line_sep>batch_size=lengths.shape[0]<assert_stmt>lengths.shape[0]<eq>unary.shape[1]<line_sep>trans=self.transitions.squeeze(0)# [N, N] start=torch.full((1 batch_size) self.start_idx dtype=tags.dtype device=tags.device)# [1, B] tags=torch.cat([start tags] 0)# [T + 1, B] # Unfold gives me all slices of size 2 (this tag next tag) from dimension T tag_pairs=tags.unfold(0 2 1)<line_sep># Move the pair dim to the front and split it into two indices=tag_pairs.permute(2 0 1).chunk(2)<line_sep>trans_score=trans[[indices[1] indices[0]]].squeeze(0)<line_sep># Pull out the values of the tags from the unary scores. unary_score=unary.gather(2 tags[1:].unsqueeze(-1)).squeeze(-1)<line_sep>mask=sequence_mask(lengths).transpose(0 1).to(tags.device)<line_sep>scores=unary_score+trans_score<line_sep>scores=scores.masked_fill(mask<eq>MASK_FALSE 0)<line_sep>scores=scores.sum(0)<line_sep>eos_scores=trans[self.end_idx tags.gather(0 lengths.unsqueeze(0)).squeeze(0)]<line_sep>scores=scores+eos_scores<line_sep><return>scores<block_end><def_stmt>_forward_alg self unary:torch.Tensor lengths:torch.Tensor<arrow>torch.Tensor<block_start>"""For CRF forward on a batch. 
:param unary: torch.FloatTensor: [T, B, N] :param lengths: torch.LongTensor: [B] :return: torch.FloatTensor: [B] """<line_sep># alphas: [B, 1, N] min_length=torch.min(lengths)<line_sep>batch_size=lengths.shape[0]<line_sep>lengths.shape[0]<eq>unary.shape[1]<line_sep>alphas=torch.full((batch_size 1 self.num_tags) -1e4 device=unary.device)<line_sep>alphas[: 0 self.start_idx]=0.0<line_sep># alphas.requires_grad = True trans=self.transitions# [1, N, N] <for_stmt>i,unary_t enumerate(unary)# unary_t: [B, N] <block_start>unary_t=unary_t.unsqueeze(2)# [B, N, 1] # Broadcast alphas along the rows of trans # Broadcast trans along the batch of alphas # [B, 1, N] + [1, N, N] -> [B, N, N] # Broadcast unary_t along the cols of result # [B, N, N] + [B, N, 1] -> [B, N, N] scores=alphas+trans+unary_t<line_sep>new_alphas=vec_log_sum_exp(scores 2).transpose(1 2)<line_sep># If we haven't reached your length zero out old alpha and take new one. # If we are past your length, zero out new_alpha and keep old one. <if_stmt>i<ge>min_length<block_start>mask=(i<l>lengths).view(-1 1 1)<line_sep>alphas=alphas.masked_fill(mask 0)+new_alphas.masked_fill(mask<eq>MASK_FALSE 0)<block_end><else_stmt><block_start>alphas=new_alphas<block_end><block_end>terminal_vars=alphas+trans[: self.end_idx]<line_sep>alphas=vec_log_sum_exp(terminal_vars 2)<line_sep><return>alphas.view(batch_size)<block_end><def_stmt>forward self inputs:Tuple[torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>unary,lengths=inputs<if_stmt>self.training<block_start><if_stmt>self.batch_first<block_start>unary=unary.transpose(0 1)<block_end>forward=self._forward_alg(unary lengths)<line_sep># if self.batch_first: # forward = forward.transpose(0, 1) <return>forward<block_end><with_stmt>torch.no_grad()<block_start><return>self.decode(unary lengths)[0]<block_end><block_end>@jit.export<def_stmt>decode self unary:torch.Tensor lengths:torch.Tensor<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>"""Do Viterbi decode on a batch. 
:param unary: torch.FloatTensor: [T, B, N] or [B, T, N] :param lengths: torch.LongTensor: [B] :return: torch.LongTensor: [B] the paths :return: torch.FloatTensor: [B] the path score """<if_stmt>self.batch_first<block_start>unary=unary.transpose(0 1)<block_end>trans=self.transitions# [1, N, N] path,score=self.viterbi(unary trans lengths)<if_stmt>self.batch_first<block_start>path=path.transpose(0 1)<block_end><return>path score<block_end><block_end><class_stmt>SequenceModel(nn.Module)<block_start><def_stmt>__init__ self nc:int embeddings:nn.Module transducer:nn.Module decoder:Optional[nn.Module]=<none><block_start>super().__init__()<line_sep>self.embed_model=embeddings<line_sep>self.transducer_model=transducer<line_sep># TODO: make this a separate model! <if_stmt>transducer.output_dim<ne>nc<block_start>self.proj_layer=Dense(transducer.output_dim nc)<block_end><else_stmt><block_start>self.proj_layer=nn.Identity()<block_end>self.decoder_model=decoder<block_end><def_stmt>transduce self inputs:Dict[str torch.Tensor]<arrow>torch.Tensor<block_start>lengths=inputs["lengths"]<line_sep>embedded=self.embed_model(inputs)<line_sep>embedded=(embedded lengths)<line_sep># transduced = self.transducer_model(embedded) transduced=self.proj_layer(self.transducer_model(embedded))<line_sep><return>transduced<block_end><def_stmt>decode self transduced:torch.Tensor lengths:torch.Tensor<arrow>torch.Tensor<block_start><return>self.decoder_model((transduced lengths))<block_end><def_stmt>forward self inputs:Dict[str torch.Tensor]<arrow>torch.Tensor<block_start><pass><block_end><block_end><class_stmt>TagSequenceModel(SequenceModel)<block_start><def_stmt>__init__ self nc:int embeddings:nn.Module transducer:nn.Module decoder:Optional[nn.Module]=<none><block_start>decoder_model=CRF(nc batch_first=<true>)<if>decoder<is><none><else>decoder<line_sep>super().__init__(nc embeddings transducer decoder_model)<block_end><def_stmt>neg_log_loss self unary:torch.Tensor tags:torch.Tensor 
lengths:torch.Tensor<arrow>torch.Tensor<block_start><return>self.decoder_model.neg_log_loss(unary tags lengths)<block_end><def_stmt>forward self inputs:Dict[str torch.Tensor]<arrow>torch.Tensor<block_start>transduced=self.transduce(inputs)<line_sep>path=self.decode(transduced inputs["lengths"])<line_sep><return>path<block_end><block_end><class_stmt>LangSequenceModel(nn.Module)<block_start><def_stmt>__init__ self nc:int embeddings:nn.Module transducer:nn.Module decoder:Optional[nn.Module]=<none> name:Optional[str]=<none> <block_start>super().__init__()<line_sep>self.embed_model=embeddings<line_sep>self.transducer_model=transducer<if_stmt>hasattr(transducer "requires_state")<and>transducer.requires_state<block_start>self._call=self._call_with_state<line_sep>self.requires_state=<true><block_end><else_stmt><block_start>self._call=self._call_without_state<line_sep>self.requires_state=<false><block_end>self.output_layer=nn.Linear(self.transducer_model.output_dim nc)<line_sep>self.decoder_model=decoder<block_end><def_stmt>forward self inputs:Dict[str torch.Tensor]<arrow>Tuple[torch.Tensor Optional[torch.Tensor]]<block_start><return>self._call(inputs)<block_end><def_stmt>_call_with_state self inputs:Dict[str torch.Tensor]<arrow>Tuple[torch.Tensor Optional[torch.Tensor]]<block_start>h=inputs["h"]<line_sep>embedded=self.embed_model(inputs)<line_sep>transduced,hidden=self.transducer_model((embedded h))<line_sep>transduced=self.output_layer(transduced)<line_sep><return>transduced hidden<block_end><def_stmt>_call_without_state self inputs:Dict[str torch.Tensor]<arrow>Tuple[torch.Tensor Optional[torch.Tensor]]<block_start>embedded=self.embed_model(inputs)<line_sep>transduced=self.transducer_model((embedded <none>))<line_sep>transduced=self.output_layer(transduced)<line_sep><return>transduced <none><block_end><block_end><def_stmt>pytorch_embedding weights:torch.Tensor finetune:bool=<true><arrow>nn.Embedding<block_start>"""Creation function for making an nn.Embedding with the 
given weights :param weights: The weights to use :param finetune: Should we fine-tune the embeddings or freeze them """<line_sep>lut=nn.Embedding(weights.shape[0] weights.shape[1] padding_idx=Offsets.PAD)<del_stmt>lut.weight<line_sep>lut.weight=nn.Parameter(torch.FloatTensor(weights) requires_grad=finetune)<line_sep><return>lut<block_end><def_stmt>subsequent_mask size:int<block_start>""" Creates a lower triangular mask to mask future :param size: Temporal length :return: A tensor of type `uint8` that is 1s along diagonals and below, zero o.w """<line_sep>attn_shape=(1 1 size size)<line_sep>sub_mask=np.tril(np.ones(attn_shape)).astype("uint8")<line_sep><return>torch.from_numpy(sub_mask)<block_end><class_stmt>SequenceSequenceAttention(nn.Module)<block_start><def_stmt>__init__ self hsz:int=<none> pdrop:float=0.1 **kwargs<block_start>super().__init__()<line_sep>self.hsz=hsz<line_sep>self.dropout=nn.Dropout(pdrop)<line_sep>self.attn=<none><block_end><def_stmt>forward self qkvm:Tuple[torch.Tensor torch.Tensor torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>query,key,value,mask=qkvm<line_sep>a=self._attention(query key mask)<line_sep>self.attn=a<line_sep>a=self.dropout(a)<line_sep><return>self._update(a value)<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start><pass><block_end><def_stmt>_update self a:torch.Tensor value:torch.Tensor<arrow>torch.Tensor<block_start>"""Attention weights are applied for each value, but in a series of efficient matrix operations. In the case of self-attention, the key and query (used to create the attention weights) and values are all low order projections of the same input. 
:param a: The attention weights [B, H, T_q, T_k] :param values: The values [B, H, T_k, D] :returns: A tensor of shape [B, H, T_q, D] """<line_sep><return>torch.matmul(a value)<block_end><block_end><class_stmt>SeqScaledDotProductAttention(SequenceSequenceAttention)<block_start><def_stmt>__init__ self pdrop:float=0.1 **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>"""Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762 We apply the query to the keys to receive our weights via softmax in a series of efficient matrix operations. In the case of self-attention the key and query are all low order projections of the same input. :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D :param key: a set of keys from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :return: A tensor that is (BxHxTxT) """<line_sep># (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k) d_k=query.size(-1)<line_sep>scores=torch.matmul(query key.transpose(-2 -1))/math.sqrt(d_k)<if_stmt>mask<is><not><none><block_start>scores=scores.masked_fill(mask<eq>MASK_FALSE -1e9)# [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k] <block_end><return>F.softmax(scores dim=-1)<block_end><block_end><class_stmt>SeqScaledDotProductAttentionALiBi(SequenceSequenceAttention)<block_start><def_stmt>__init__ self pdrop:float=0.1 num_heads=<none> **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<line_sep>self.num_heads=num_heads<line_sep>slopes=torch.tensor(get_alibi_slopes(self.num_heads))<line_sep>self.register_buffer("slopes" slopes)<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>"""Attention with Linear Biases, defined in https://arxiv.org/pdf/2108.12409.pdf :param query: 
a query for alignment. Can come from self in case of self-attn or decoder in case of E/D :param key: a set of keys from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :return: A tensor that is (BxHxTxT) """<line_sep># (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k) d_k=query.size(-1)<line_sep>scores=torch.matmul(query key.transpose(-2 -1))/math.sqrt(d_k)<line_sep>T_k=scores.shape[-1]<line_sep>T_q=scores.shape[-2]<line_sep>offsets=-torch.abs(torch.arange(T_q).view(-1 1)-torch.arange(T_k).view(1 -1)).to(self.slopes.device)# [T_q, T_k] alibi=self.slopes.unsqueeze(-1).unsqueeze(-1)<times>offsets.unsqueeze(0)# [H, T_q, T_k] alibi=alibi.unsqueeze(0)# [1, H, T_q, T_k] scores<augadd>alibi<if_stmt>mask<is><not><none><block_start>scores=scores.masked_fill(mask<eq>MASK_FALSE -1e9)<block_end># [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k] <return>F.softmax(scores dim=-1)<block_end><block_end><class_stmt>SeqScaledDotProductAttentionT5(SequenceSequenceAttention)<block_start><def_stmt>__init__ self pdrop:float=0.1 num_heads=<none> bidirectional=<true> num_buckets=32 max_distance=128 **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<line_sep>self.num_heads=num_heads<line_sep>self.bidirectional=bidirectional<line_sep>self.num_buckets=num_buckets<line_sep>self.max_distance=max_distance<line_sep>rel_embedding=torch.nn.init.kaiming_normal_(torch.empty((self.num_heads self.num_buckets) dtype=torch.float) nonlinearity='linear')<line_sep>self.rel_embedding=nn.Parameter(rel_embedding requires_grad=<true>)<block_end><def_stmt>_relative_position_bucket self relative_position<block_start>"""Taken from https://github.com/tensorflow/mesh/blob/bbb6ce7917e2a8ef1f3dc6990fcacd4f3b075acd/mesh_tensorflow/transformer/transformer_layers.py#L1014 """<line_sep>ret=0<line_sep>n=-relative_position<line_sep>num_buckets=self.num_buckets<if_stmt>self.bidirectional<block_start>num_buckets<augfloordiv>2<line_sep>ret<augadd>torch.lt(n 
0).to(dtype=torch.long)<times>num_buckets<line_sep>n=torch.abs(n).to(dtype=torch.long)<block_end><else_stmt><block_start>n=torch.maximum(n 0).to(dtype=torch.long)<block_end># now n is in the range [0, inf) max_exact=num_buckets<floordiv>2<line_sep>is_small=torch.lt(n max_exact)<line_sep>val_if_large=max_exact+(torch.log(n.to(dtype=torch.float32)/max_exact)/math.log(self.max_distance/max_exact)<times>(num_buckets-max_exact)).to(dtype=torch.long)<line_sep>val_if_large=torch.minimum(val_if_large torch.tensor(num_buckets-1))<line_sep>ret<augadd>torch.where(is_small n val_if_large)<line_sep><return>ret<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>"""Relative Attention described in https://arxiv.org/abs/1910.10683 :param query: a query for alignment. :param key: a set of keys from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :return: A tensor that is (BxHxTxT) """<line_sep># (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k) d_k=query.size(-1)<line_sep>scores=torch.matmul(query key.transpose(-2 -1))/math.sqrt(d_k)<line_sep>T_k=scores.shape[-1]<line_sep>T_q=scores.shape[-2]<line_sep>memory_position=torch.arange(T_k).view(1 -1)<line_sep>query_position=torch.arange(T_q).view(-1 1)<line_sep>relative_position=memory_position-query_position<line_sep>rp_bucket=self._relative_position_bucket(relative_position)<line_sep>relative_attention_bias=self.rel_embedding[: rp_bucket]<line_sep>scores<augadd>relative_attention_bias<if_stmt>mask<is><not><none><block_start>scores=scores.masked_fill(mask<eq>MASK_FALSE -1e9)<block_end># [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k] <return>F.softmax(scores dim=-1)<block_end><block_end><class_stmt>SeqDotProductAttention(SequenceSequenceAttention)<block_start><def_stmt>__init__ self pdrop:float=0.1 **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<block_end><def_stmt>_attention self query:torch.Tensor 
key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>scores=torch.matmul(query key.transpose(-2 -1))<if_stmt>mask<is><not><none><block_start>scores=scores.masked_fill(mask<eq>MASK_FALSE -1e9)<block_end><return>F.softmax(scores dim=-1)<block_end><block_end><class_stmt>SeqDotProductAttentionALiBi(SequenceSequenceAttention)<block_start><def_stmt>__init__ self pdrop:float=0.1 num_heads=<none> **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<line_sep>self.num_heads=num_heads<line_sep>slopes=torch.tensor(get_alibi_slopes(self.num_heads))<line_sep>self.register_buffer("slopes" slopes)<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>scores=torch.matmul(query key.transpose(-2 -1))<line_sep>T_k=scores.shape[-1]<line_sep>T_q=scores.shape[-2]<line_sep>offsets=-torch.abs(torch.arange(T_q).view(1 -1)-torch.arange(T_k).view(-1 1)).to(self.slopes.device)# [T_q, T_k] alibi=self.slopes.unsqueeze(-1).unsqueeze(-1)<times>offsets.unsqueeze(0)# [H, T_q, T_k] alibi=alibi.unsqueeze(0)# [1, H, T_q, T_k] scores<augadd>alibi<if_stmt>mask<is><not><none><block_start>scores=scores.masked_fill(mask<eq>MASK_FALSE -1e9)<block_end><return>F.softmax(scores dim=-1)<block_end><block_end><class_stmt>SeqDotProductAttentionT5(SequenceSequenceAttention)<block_start><def_stmt>__init__ self pdrop:float=0.1 num_heads=<none> bidirectional=<true> num_buckets=32 max_distance=128 **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<line_sep>self.num_heads=num_heads<line_sep>self.bidirectional=bidirectional<line_sep>self.num_buckets=num_buckets<line_sep>self.max_distance=max_distance<line_sep>rel_embedding=torch.nn.init.kaiming_normal_(torch.empty((self.num_heads self.num_buckets) dtype=torch.float) nonlinearity='linear')<line_sep>self.rel_embedding=nn.Parameter(rel_embedding requires_grad=<true>)<block_end><def_stmt>_relative_position_bucket self 
relative_position<block_start>"""Taken from https://github.com/tensorflow/mesh/blob/bbb6ce7917e2a8ef1f3dc6990fcacd4f3b075acd/mesh_tensorflow/transformer/transformer_layers.py#L1014 """<line_sep>ret=0<line_sep>n=-relative_position<line_sep>num_buckets=self.num_buckets<if_stmt>self.bidirectional<block_start>num_buckets<augfloordiv>2<line_sep>ret<augadd>torch.lt(n 0).to(dtype=torch.long)<times>num_buckets<line_sep>n=torch.abs(n).to(dtype=torch.long)<block_end><else_stmt><block_start>n=torch.maximum(n 0).to(dtype=torch.long)<block_end># now n is in the range [0, inf) max_exact=num_buckets<floordiv>2<line_sep>is_small=torch.lt(n max_exact)<line_sep>val_if_large=max_exact+(torch.log(n.to(dtype=torch.float32)/max_exact)/math.log(self.max_distance/max_exact)<times>(num_buckets-max_exact)).to(dtype=torch.long)<line_sep>val_if_large=torch.minimum(val_if_large torch.tensor(num_buckets-1))<line_sep>ret<augadd>torch.where(is_small n val_if_large)<line_sep><return>ret<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>"""Relative Attention described in https://arxiv.org/abs/1910.10683 :param query: a query for alignment. 
:param key: a set of keys from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :return: A tensor that is (BxHxTxT) """<line_sep># (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k) scores=torch.matmul(query key.transpose(-2 -1))<line_sep>T_k=scores.shape[-1]<line_sep>T_q=scores.shape[-2]<line_sep>memory_position=torch.arange(T_k).view(1 -1)<line_sep>query_position=torch.arange(T_q).view(-1 1)<line_sep>relative_position=memory_position-query_position<line_sep>rp_bucket=self._relative_position_bucket(relative_position)<line_sep>relative_attention_bias=self.rel_embedding[: rp_bucket]<line_sep>scores<augadd>relative_attention_bias<if_stmt>mask<is><not><none><block_start>scores=scores.masked_fill(mask<eq>MASK_FALSE -1e9)<block_end># [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k] <return>F.softmax(scores dim=-1)<block_end><block_end><class_stmt>SequenceSequenceRelativeAttention(nn.Module)<block_start>"""This form of attention is specified in Shaw et al 2018: https://www.aclweb.org/anthology/N18-2074.pdf """<def_stmt>__init__ self hsz:int=<none> pdrop:float=0.1 **kwargs<block_start>super().__init__()<line_sep>self.hsz=hsz<line_sep>self.dropout=nn.Dropout(pdrop)<line_sep>self.attn=<none><block_end><def_stmt>forward self q_k_v_ek_ev_m:Tuple[torch.Tensor torch.Tensor torch.Tensor torch.Tensor torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>"""Take in a tuple of tensors corresponding to the query, key, value, edges_key, edges_value and mask variables :param q_k_v_ek_ev_m: A tuple consisting of query, key, value, `edges_key`, `edges_value` and `mask` respectively :return: An updated value Tensor """<line_sep>query,key,value,edges_key,edges_value,mask=q_k_v_ek_ev_m<line_sep>a=self._attention(query key edges_key mask)<line_sep>self.attn=a<line_sep>a=self.dropout(a)<line_sep><return>self._update(a value edges_value)<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor edges_key:torch.Tensor 
mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start><pass><block_end><def_stmt>_update self a:torch.Tensor value:torch.Tensor edges_value:torch.Tensor<arrow>torch.Tensor<block_start>"""Attention weights are applied for each value, but in a series of efficient matrix operations. In the case of self-attention, the key and query (used to create the attention weights) and values are all low order projections of the same input. :param a: The attention weights [B, H, T_q, T_k] :param value: The values [B, H, T_k, D] :param edge_value: The edge values [T_q, T_k, D] :returns: A tensor of shape [B, H, T, D] """<line_sep>B,H,T_k,D=value.shape<line_sep>updated_values=torch.matmul(a value)# [B, H, T_q, D] <if_stmt>edges_value<is><not><none><block_start>a=a.view(B<times>H -1 T_k).transpose(0 1)# (T_q, BxH, T_k) t=torch.matmul(a edges_value)# (T_q, BxH, D) update_edge_values=t.transpose(0 1).view(B H -1 D)<line_sep><return>updated_values+update_edge_values<block_end><else_stmt><block_start><return>updated_values<block_end><block_end><block_end><class_stmt>SeqScaledDotProductRelativeAttention(SequenceSequenceRelativeAttention)<block_start><def_stmt>__init__ self pdrop:float=0.1 **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor edges_key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>"""Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762 We apply the query to the keys to receive our weights via softmax in a series of efficient matrix operations. In the case of self-attntion the key and query are all low order projections of the same input. :param query: a query for alignment. 
Can come from self in case of self-attn or decoder in case of E/D :param key: a set of keys from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :param edges_key: a matrix of relative embeddings between each word in a sequence [T_q x T_k x D] :return: A tensor that is (B x H x T_q x T_k) """<line_sep>B,H,T_q,d_k=query.shape# (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k) scores_qk=torch.matmul(query key.transpose(-2 -1))<line_sep>tbhd=query.reshape(B<times>H T_q d_k).transpose(0 1)# [T_q, B*H, d_k] scores_qek=torch.matmul(tbhd edges_key.transpose(-2 -1))# [T_q, B*H, T_k] scores_qek=scores_qek.transpose(0 1).view(B H T_q -1)# [B, H, T_q, T_k] scores=(scores_qk+scores_qek)/math.sqrt(d_k)<line_sep># only for cross-attention T_q != T_k. for such case, mask should be src_mask, which is a sequence_mask with # dimension [B, 1, 1, T_k], and will be broadcast to dim of scores: <if_stmt>mask<is><not><none><block_start>scores=scores.masked_fill(mask<eq>MASK_FALSE -1e9)<block_end><return>F.softmax(scores dim=-1)<block_end><block_end><class_stmt>SeqDotProductRelativeAttention(SequenceSequenceRelativeAttention)<block_start><def_stmt>__init__ self pdrop:float=0.1 **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor edges_key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>B,H,T_q,d_k=query.shape<line_sep>scores_qk=torch.matmul(query key.transpose(-2 -1))<line_sep>tbhd=query.reshape(B<times>H T_q d_k).transpose(0 1)<line_sep>scores_qek=torch.matmul(tbhd edges_key.transpose(-2 -1))<line_sep>scores_qek=scores_qek.transpose(0 1).view(B H T_q -1)<line_sep>scores=scores_qk+scores_qek<if_stmt>mask<is><not><none><block_start>scores=scores.masked_fill(mask<eq>MASK_FALSE -1e9)<block_end><return>F.softmax(scores dim=-1)<block_end><block_end><def_stmt>unfold_tensor tensor dim window_sz<block_start>"""Unfold a tensor by applying a sliding 
window on a certain dimension with step 1 and padding of 0's. The window dimension is added as the last dimension :param tensor: the tensor to be unfolded, with shape [d_1, d_2, ..., T, ..., d_n] :param dim: the dimension along which unfolding is applied :param window_sz: sliding window size, need to be an odd number :return: the unfolded tensor with shape [d_1, d_2, ..., T, ..., d_n, window_sz] """<line_sep>half_window=(window_sz-1)<floordiv>2<if_stmt>dim<l>0<block_start>dim=len(tensor.shape)+dim<block_end># torch.nn.functional.pad apply backwardly from the last dimension padding=[0 0]<times>(len(tensor.shape)-dim-1)+[half_window half_window]<line_sep><return>F.pad(tensor padding).unfold(dim window_sz 1)<block_end><class_stmt>SeqScaledWindowedRelativeAttention(SequenceSequenceRelativeAttention)<block_start>"""This class implements windowed relative attention, i.e. preventing attention beyond rpr_k. For efficiency, _attention and _update are implemented in a different way."""<def_stmt>__init__ self pdrop:float=0.1 **kwargs<block_start>super().__init__(pdrop=pdrop **kwargs)<block_end><def_stmt>_unfold_mask self mask batchsz rpr_k<block_start>"""Transform mask into the unfolded format."""<line_sep>window_sz=2<times>rpr_k+1<line_sep>T=mask.shape[3]<if_stmt>mask.shape[2]<g>1# mask is from a subsequent mask, with [1, 1, T, T] or [B, 1, T, T] <block_start>logger.warning("Using subsequent mask with long sequence may cause OOM error.")<line_sep>mask=mask.expand(batchsz 1 T T)# expand sequence/subsequent mask into a uniform dim mask=F.pad(mask [rpr_k rpr_k])# pad both sides with rpr_k, [B, 1, T, T + 2*rpr_k] seq=torch.arange(T+2<times>rpr_k)<line_sep>indices=seq.unfold(0 window_sz 1)# indices of a sliding window, [T, W] indices=indices.unsqueeze(0).unsqueeze(0).expand(batchsz 1 T window_sz).to(mask.device)<line_sep><return>torch.gather(mask -1 indices)# [B, 1, T, W]): <block_end><else_stmt># mask is a sequence mask [B, 1, 1, T] <block_start>unfolded=unfold_tensor(mask 
dim=-1 window_sz=window_sz)# [B, 1, 1, T, W] <return>unfolded.squeeze(1)<block_end><block_end># [B, 1, T, W] <def_stmt>_attention self query:torch.Tensor key:torch.Tensor rpr_key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor<block_start>"""Implementation of attention considering RA masking: using torch.Tensor.unfold to create an extra dimension representing the sliding window. Then when applying matmul, Q, K, V share the same T dimension. :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D :param key: a set of keys from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :param rpr_key: tensor of the rpr_key embeddings [W, d_k] :return: A tensor that is [B, H, T, 1, W] to be matmul with values """<line_sep>B,H,T,d_k=query.shape<line_sep>window_sz=rpr_key.shape[0]<line_sep>rpr_k=(window_sz-1)<floordiv>2<line_sep>query=query.unsqueeze(-2)# [B, H, T, 1, d_k] key=unfold_tensor(key dim=2 window_sz=window_sz)# [B, H, T, d_k, W] rpr_key=rpr_key.transpose(0 1).unsqueeze(0).unsqueeze(0).unsqueeze(0)# [1, 1, 1, d_k, W] scores_qk=torch.matmul(query key)# [B, H, T, 1, W] scores_qrk=torch.matmul(query rpr_key)# [B, H, T, 1, W] scores=(scores_qk+scores_qrk)/math.sqrt(d_k)<if_stmt>mask<is><not><none><block_start>mask=self._unfold_mask(mask B rpr_k).unsqueeze(-2)# [B, 1, T, 1, W] scores=scores.masked_fill(mask<eq><false> -1e9)<block_end><return>F.softmax(scores dim=-1)<block_end><def_stmt>_update self a:torch.Tensor value:torch.Tensor rpr_value:torch.Tensor<arrow>torch.Tensor# a has dim [B, H, T, 1, W] <block_start>window_sz=a.shape[-1]<line_sep>value=unfold_tensor(value dim=2 window_sz=window_sz).transpose(-1 -2)# [B, H, T, W, d_value] updated_values=torch.matmul(a value)# [B, H, T, 1, d_value] <if_stmt>rpr_value<is><not><none><block_start>rpr_value=rpr_value.unsqueeze(0).unsqueeze(0).unsqueeze(0)# [1, 1, 1, W, d_value] update_rpr_values=torch.matmul(a rpr_value)# [B, H, 
T, 1, d_value] <return>(updated_values+update_rpr_values).squeeze(3)# [B, H, T, d_value] <block_end><else_stmt><block_start><return>updated_values.squeeze(3)<block_end><block_end><block_end><class_stmt>SeqBahdanauAttention(SequenceSequenceAttention)<block_start><def_stmt>__init__ self hsz:int pdrop:float=0.1 **kwargs<block_start>super().__init__(hsz pdrop=pdrop **kwargs)<line_sep>self.V=pytorch_linear(self.hsz 1 bias=<false>)<block_end><def_stmt>_attention self query:torch.Tensor key:torch.Tensor mask:Optional[torch.Tensor]=<none><arrow>torch.Tensor# [B, H, T, 1, D] + [B, H, 1, T, D] = [B, H, T, T, D] <block_start>additive=query.unsqueeze(-2)+key.unsqueeze(-3)<line_sep>non_linear=torch.tanh(additive)<line_sep># [B, H, T, T, D] @ [D, 1] = [B, H, T, T, 1] scores=self.V(non_linear)<line_sep># [B, H, T, T] scores=scores.squeeze(-1)<line_sep><return>F.softmax(scores dim=-1)<block_end><block_end><class_stmt>MultiHeadedAttention(nn.Module)<block_start>""" Multi-headed attention from https://arxiv.org/abs/1706.03762 via http://nlp.seas.harvard.edu/2018/04/03/attention.html Multi-headed attention provides multiple looks of low-order projections K, Q and V using an attention function (specifically `scaled_dot_product_attention` in the paper. This allows multiple relationships to be illuminated via attention on different positional and representational information from each head. The number of heads `h` times the low-order projection dim `d_k` is equal to `d_model` (which is asserted upfront). This means that each weight matrix can be simply represented as a linear transformation from `d_model` to `d_model`, and partitioned into heads after the fact. Finally, an output projection is applied which brings the output space back to `d_model`, in preparation for the sub-sequent `FFN` sub-layer. There are 3 uses of multi-head attention in the Transformer. For encoder-decoder layers, the queries come from the previous decoder layer, and the memory keys come from the encoder. 
For encoder layers, the K, Q and V all come from the output of the previous layer of the encoder. And for self-attention in the decoder, K, Q and V all come from the decoder, but here it is masked to prevent using future values """<def_stmt>__init__ self num_heads:int d_model:int dropout:float=0.1 scale:bool=<false> d_k:Optional[int]=<none> ra_type:Optional[str]=<none> <block_start>"""Constructor for multi-headed attention :param h: The number of heads :param d_model: The model hidden size :param dropout (``float``): The amount of dropout to use :param scale: Should we scale the dot product attention :param d_k: The low-order project per head. This is normally `d_model // num_heads` unless set explicitly :param ra_type: If there is an attention bias term, that will be encapsulated in the attention computation """<line_sep>super().__init__()<if_stmt>d_k<is><none><block_start>self.d_k=d_model<floordiv>num_heads<if_stmt>d_model%num_heads<ne>0<block_start><raise>Exception(f"d_model ({d_model}) must be evenly divisible by num_heads ({num_heads})")<block_end><block_end><else_stmt><block_start>self.d_k=d_k<block_end>self.h=num_heads<line_sep># for multi-headed attention, w_V projects to h heads, each head has dim d_k; for single headed attention, w_V # project to 1 head with dim d_model <if_stmt>self.h<g>1<block_start>self.d_value=self.d_k<block_end><else_stmt><block_start>self.d_value=d_model<block_end>self.w_Q=Dense(d_model self.d_k<times>self.h)<line_sep>self.w_K=Dense(d_model self.d_k<times>self.h)<line_sep>self.w_V=Dense(d_model self.d_value<times>self.h)<if_stmt>self.h<g>1# w_O is not needed for single headed attention <block_start>self.w_O=Dense(self.d_k<times>self.h d_model)<block_end><if_stmt>scale<block_start><if_stmt>ra_type<eq>'alibi'<block_start>self.attn_fn=SeqScaledDotProductAttentionALiBi(dropout num_heads=num_heads)<block_end><elif_stmt>ra_type<eq>'t5'# TODO: pass through options <block_start>self.attn_fn=SeqScaledDotProductAttentionT5(dropout 
num_heads=num_heads)<block_end><else_stmt><block_start>self.attn_fn=SeqScaledDotProductAttention(dropout)<block_end><block_end><else_stmt><block_start><if_stmt>ra_type<eq>'alibi'<block_start>self.attn_fn=SeqDotProductAttentionALiBi(dropout num_heads=num_heads)<block_end><elif_stmt>ra_type<eq>'t5'# TODO: pass through options <block_start>self.attn_fn=SeqDotProductAttentionT5(dropout num_heads=num_heads)<block_end><else_stmt><block_start>self.attn_fn=SeqDotProductAttention(dropout)<block_end><block_end>self.attn=<none><block_end><def_stmt>forward self qkvm:Tuple[torch.Tensor torch.Tensor torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>"""Low-order projections of query, key and value into multiple heads, then attention application and dropout :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D :param key: a set of keys from encoder or self :param value: a set of values from encoder or self :param mask: masking (for destination) to prevent seeing what we shouldnt :return: Multi-head attention output, result of attention application to sequence (B, T, d_model) """<line_sep>query,key,value,mask=qkvm<line_sep>batchsz=query.size(0)<line_sep># (B, H, T, D) query=self.w_Q(query).view(batchsz -1 self.h self.d_k).transpose(1 2)<line_sep>key=self.w_K(key).view(batchsz -1 self.h self.d_k).transpose(1 2)<line_sep>value=self.w_V(value).view(batchsz -1 self.h self.d_value).transpose(1 2)<line_sep>x=self.attn_fn((query key value mask))<line_sep>self.attn=self.attn_fn.attn<line_sep>x=x.transpose(1 2).contiguous().view(batchsz -1 self.h<times>self.d_value)<if_stmt>self.h<g>1<block_start><return>self.w_O(x)<block_end><else_stmt><block_start><return>x<block_end><block_end><block_end><class_stmt>MultiHeadedRelativeAttention(nn.Module)<block_start>""" Multi-headed relative attention from Shaw et al 2018 (https://www.aclweb.org/anthology/N18-2074.pdf) This method follows the same approach of MultiHeadedAttention, but it computes 
Relative Position Representations (RPR) which are used as part of the attention computations. To facilitate this, the model has its own internal embeddings lookup table, and it has an updated computation for both the attention weights and the application of those weights to follow them. """<def_stmt>__init__ self num_heads:int d_model:int rpr_k:int dropout:float=0.1 scale:bool=<false> d_k:Optional[int]=<none> windowed_ra:bool=<false> rpr_value_on:bool=<true><block_start>"""Constructor for multi-headed attention :param num_heads: The number of heads :param d_model: The model hidden size :param rpr_k: distance within which relative positional embedding will be considered :param windowed_ra: whether prevent attention beyond rpr_k :param dropout (``float``): The amount of dropout to use :param scale: Should we scale the dot product attention :param d_k: The low-order project per head. This is normally `d_model // num_heads` unless set explicitly """<line_sep>super().__init__()<if_stmt>d_k<is><none><block_start>self.d_k=d_model<floordiv>num_heads<if_stmt>d_model%num_heads<ne>0<block_start><raise>Exception(f"d_model ({d_model}) must be evenly divisible by num_heads ({num_heads})")<block_end><block_end><else_stmt><block_start>self.d_k=d_k<block_end>self.h=num_heads<line_sep># for multi-headed attention, w_V projects to h heads, each head has dim d_k; for single headed attention, w_V # project to 1 head with dim d_model <if_stmt>self.h<g>1<block_start>self.d_value=self.d_k<block_end><else_stmt><block_start>self.d_value=d_model<block_end>self.rpr_k=rpr_k<line_sep>self.rpr_value_on=rpr_value_on<line_sep>self.rpr_key=nn.Embedding(2<times>rpr_k+1 self.d_k)<if_stmt>self.rpr_value_on<block_start>self.rpr_value=nn.Embedding(2<times>rpr_k+1 self.d_value)<block_end>self.windowed_ra=windowed_ra<line_sep>self.w_Q=Dense(d_model self.d_k<times>self.h)<line_sep>self.w_K=Dense(d_model self.d_k<times>self.h)<line_sep>self.w_V=Dense(d_model self.d_value<times>self.h)<if_stmt>self.h<g>1# 
w_O is not needed for sinlge headed attention <block_start>self.w_O=Dense(self.d_k<times>self.h d_model)<block_end><if_stmt>scale<block_start><if_stmt>windowed_ra<block_start>self.attn_fn=SeqScaledWindowedRelativeAttention(dropout)<block_end><else_stmt><block_start>self.attn_fn=SeqScaledDotProductRelativeAttention(dropout)<block_end><block_end><else_stmt><block_start>self.attn_fn=SeqDotProductRelativeAttention(dropout)<block_end>self.attn=<none><block_end><def_stmt>make_rpr self q_len k_len device<arrow>Tuple[torch.Tensor torch.Tensor]<block_start>"""Create a matrix shifted by self.rpr_k and bounded between 0 and 2*self.rpr_k to provide 0-based indexing for embedding """<line_sep>q_seq=torch.arange(q_len).to(device)<line_sep>k_seq=torch.arange(k_len).to(device)<line_sep>window_len=2<times>self.rpr_k<line_sep>edges=k_seq.view(1 -1)-q_seq.view(-1 1)+self.rpr_k# [q_len, k_len] edges=torch.clamp(edges 0 window_len)<if_stmt>self.rpr_value_on<block_start><return>self.rpr_key(edges) self.rpr_value(edges)# [q_len, k_len, d_k] <block_end><else_stmt><block_start><return>self.rpr_key(edges) <none><block_end><block_end><def_stmt>make_windowed_rpr self device<block_start>window_len=2<times>self.rpr_k+1<line_sep>window=torch.arange(window_len).to(device)<if_stmt>self.rpr_value_on<block_start><return>self.rpr_key(window) self.rpr_value(window)<block_end><else_stmt><block_start><return>self.rpr_key(window) <none><block_end><block_end><def_stmt>forward self qkvm:Tuple[torch.Tensor torch.Tensor torch.Tensor torch.Tensor]<arrow>torch.Tensor<block_start>"""Low-order projections of query, key and value into multiple heads, then attention application and dropout :param query: a query for alignment. 
class TransformerEncoderBase(nn.Module):
    """Shared construction for a single Transformer encoder layer.

    Builds the self-attention sublayer (relative-position attention when ``rpr_k``
    is set and non-zero, otherwise standard multi-headed attention), the
    position-wise feed-forward sublayer, two layer norms and a dropout.
    Subclasses decide where norms and residuals are applied in ``forward``.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        activation_type: str = "gelu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norm_eps: float = 1.0e-6,
        windowed_ra: Optional[bool] = False,
        rpr_value_on: bool = True,
        ra_type: Optional[str] = None,
        **kwargs,
    ):
        super().__init__()
        self.d_model = d_model
        # Conventional default: FFN hidden size is 4x the model size
        self.d_ff = 4 * d_model if d_ff is None else d_ff
        use_relative = rpr_k is not None and rpr_k != 0
        if use_relative:
            self.self_attn = MultiHeadedRelativeAttention(
                num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k,
                windowed_ra=windowed_ra, rpr_value_on=rpr_value_on,
            )
        else:
            self.self_attn = MultiHeadedAttention(
                num_heads, d_model, pdrop, scale=scale, d_k=d_k, ra_type=ra_type,
            )
        self.ffn = nn.Sequential(
            Dense(self.d_model, self.d_ff),
            get_activation(activation_type),
            nn.Dropout(ffn_pdrop),
            Dense(self.d_ff, self.d_model),
        )
        self.ln1 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.ln2 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.dropout = nn.Dropout(pdrop)


class PreLNTransformerEncoder(TransformerEncoderBase):
    """Pre-layer-norm encoder layer: normalize, transform, then residual-add."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        x, mask = inputs
        normed = self.ln1(x)
        attended = self.self_attn((normed, normed, normed, mask))
        x = x + self.dropout(attended)
        transduced = self.ffn(self.ln2(x))
        return x + self.dropout(transduced)


class PreLNBeforeResConnTransformerEncoder(TransformerEncoderBase):
    """Variant applying layer norm before each sublayer, with the *normalized*
    tensor also feeding the residual connection."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        x, mask = inputs
        x = self.ln1(x)
        x = x + self.dropout(self.self_attn((x, x, x, mask)))
        x = self.ln2(x)
        return x + self.dropout(self.ffn(x))


class PostLNTransformerEncoder(TransformerEncoderBase):
    """Post-layer-norm (original Transformer) encoder layer: residual-add, then norm.

    Note the norms fire in the order ln2 (after attention) then ln1 (after the FFN),
    mirroring the original implementation.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        x, mask = inputs
        x = x + self.dropout(self.self_attn((x, x, x, mask)))
        x = self.ln2(x)
        x = x + self.dropout(self.ffn(x))
        return self.ln1(x)
class SpatialGatingUnit(nn.Module):
    """Spatial gating unit from gMLP.

    The "spatial" projection mixes information across time steps.  There are two
    equivalent views of this unit, an MLP over the transposed time axis or a
    Conv1d with kernel length 1::

        l = nn.Linear(T, T)
        c = nn.Conv1d(T, T, 1)
        l(x.transpose(1, 2)).transpose(1, 2)
        c(x)
    """

    def __init__(self, d_ffn: int, nctx: int, layer_norm_eps: float = 1.0e-6):
        super().__init__()
        # Only half the FFN channels flow through the gate, so normalize d_ffn // 2
        self.norm = nn.LayerNorm(d_ffn // 2, eps=layer_norm_eps)
        self.proj = pytorch_conv1d(nctx, nctx, 1)
        # Bias init of 1 makes the gate start out close to identity
        nn.init.constant_(self.proj.bias, 1.0)

    def split(self, x):
        """Split the channel dim in half: ``(gate input u, gated value v)``."""
        return x.chunk(2, dim=-1)

    def forward(self, x):
        u, v = self.split(x)
        gated = self.proj(self.norm(v))
        return u * gated


class GatedMLPEncoder(nn.Module):
    """A single gMLP block, following https://arxiv.org/pdf/2105.08050.pdf"""

    def __init__(
        self,
        d_model: int,
        pdrop: float,
        nctx: int = 256,
        activation_type: str = "gelu",
        d_ff: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norm_eps: float = 1.0e-6,
    ):
        super().__init__()
        self.d_model = d_model
        self.d_ff = 4 * d_model if d_ff is None else d_ff
        self.to_ffn = Dense(self.d_model, self.d_ff)
        self.activation = get_activation(activation_type)
        # NOTE(review): ffn_drop is constructed but never used in forward -- kept for
        # interface compatibility
        self.ffn_drop = nn.Dropout(ffn_pdrop)
        self.from_sgu = Dense(self.d_ff // 2, self.d_model)
        self.norm = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.dropout = nn.Dropout(pdrop)
        self.spatial_gating_unit = SpatialGatingUnit(self.d_ff, nctx, layer_norm_eps)

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Run one gMLP block over the input.

        TODO: the mask is currently unused.

        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        shortcut, mask = inputs           # residual branches off before anything else
        x = self.norm(shortcut)           # "channel" norm
        x = self.dropout(self.to_ffn(x))  # "channel" FFN
        x = self.activation(x)            # gelu, per the paper
        x = self.spatial_gating_unit(x)   # "spatial" projection over T
        x = self.from_sgu(x)              # back down to d_model channels
        x = self.dropout(x)
        return x + shortcut
class TransformerDecoderBase(nn.Module):
    """Shared construction for a single Transformer decoder layer.

    Builds masked self-attention, encoder-decoder ("source") attention, the
    position-wise feed-forward sublayer, three layer norms and a dropout.
    Subclasses decide where norms and residuals are applied in ``forward``.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        activation_type: str = "gelu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norm_eps: float = 1.0e-6,
        rpr_value_on: bool = True,
        ra_type: Optional[str] = None,
    ):
        super().__init__()
        self.d_model = d_model
        self.d_ff = d_ff if d_ff is not None else 4 * d_model
        if rpr_k is not None:
            # NOTE(review): unlike the encoder base this does not special-case
            # rpr_k == 0 -- confirm whether 0 should fall through to absolute attention
            self.self_attn = MultiHeadedRelativeAttention(
                num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, rpr_value_on=rpr_value_on)
            self.src_attn = MultiHeadedRelativeAttention(
                num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, rpr_value_on=rpr_value_on)
        else:
            self.self_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, ra_type=ra_type)
            self.src_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, ra_type=ra_type)

        # NOTE(review): dropout is applied *before* the activation here whereas the
        # encoder applies it after -- kept as-is to preserve existing behavior
        self.ffn = nn.Sequential(
            Dense(self.d_model, self.d_ff),
            nn.Dropout(ffn_pdrop),
            get_activation(activation_type),
            Dense(self.d_ff, self.d_model),
        )

        self.ln1 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.ln2 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.ln3 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.dropout = nn.Dropout(pdrop)


class PreLNTransformerDecoder(TransformerDecoderBase):
    """Pre-layer-norm decoder layer: normalize before each sublayer, residual after."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, memory, src_mask, tgt_mask)`
        :return: The output tensor
        """
        x, memory, src_mask, tgt_mask = inputs
        h = self.ln1(x)
        x = x + self.dropout(self.self_attn((h, h, h, tgt_mask)))

        h = self.ln2(x)
        x = x + self.dropout(self.src_attn((h, memory, memory, src_mask)))

        h = self.ln3(x)
        x = x + self.dropout(self.ffn(h))
        return x


class PreLNBeforeResConnTransformerDecoder(TransformerDecoderBase):
    """Decoder variant applying layer norm before each sublayer, with the
    normalized tensor also feeding the residual connection."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, memory, src_mask, tgt_mask)`
        :return: The output tensor
        """
        x, memory, src_mask, tgt_mask = inputs
        x = self.ln1(x)
        x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)))

        x = self.ln2(x)
        x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)))

        x = self.ln3(x)
        x = x + self.dropout(self.ffn(x))
        return x


class PostLNTransformerDecoder(TransformerDecoderBase):
    """Post-layer-norm (original Transformer) decoder layer.

    FIX: previously this subclassed ``nn.Module`` directly, so it had none of the
    sublayers its forward uses (self_attn, src_attn, ffn, ln*, dropout) and could
    not even be constructed by ``TransformerDecoderStack`` (``nn.Module.__init__``
    takes no constructor arguments).  It now inherits ``TransformerDecoderBase``,
    matching ``PostLNTransformerEncoder``.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, memory, src_mask, tgt_mask)`
        :return: The output tensor
        """
        x, memory, src_mask, tgt_mask = inputs
        x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)))

        x = self.ln2(x)
        x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)))

        x = self.ln3(x)
        x = x + self.dropout(self.ffn(x))
        x = self.ln1(x)
        return x
class TransformerEncoderStack(nn.Module):
    """A stack of Transformer encoder layers with optional layer drop.

    ``transformer_type`` selects where layer norm is applied:
    ``"post-layer-norm"``, ``"pre-layer-norm"``, or (default) pre-layer-norm
    applied before the residual connections.  The legacy ``layer_norms_after``
    flag is equivalent to ``"post-layer-norm"``.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        layers: int = 1,
        activation: str = "relu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[Union[int, List[int]]] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norms_after: bool = False,
        layer_norm_eps: float = 1.0e-6,
        windowed_ra: Optional[bool] = False,
        rpr_value_on: bool = True,
        layer_drop: float = 0.0,
        ra_type: Optional[str] = None,
        # FIX: default was `False`, which contradicts the Optional[str] annotation;
        # `None` takes the identical (else) branch below, so behavior is unchanged
        transformer_type: Optional[str] = None,
        **kwargs,
    ):
        super().__init__()
        self.encoders = nn.ModuleList()
        if layer_norms_after or transformer_type == "post-layer-norm":
            logger.info("Using post-layer-norm transformer (encoder)")
            TransformerEncoder = PostLNTransformerEncoder
            self.ln = nn.Identity()
        elif transformer_type == "pre-layer-norm":
            TransformerEncoder = PreLNTransformerEncoder
            self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
        else:  # transformer_type == "pre-layer-norm-before-resconn"
            logger.info("Using layer norm before residual connections (encoder)")
            if layer_norms_after:
                raise Exception(f"Mutually exclusive options ({transformer_type}) and layer_norms_after=True)",)
            TransformerEncoder = PreLNBeforeResConnTransformerEncoder
            self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)

        self.output_dim = d_model
        self.layer_drop = layer_drop
        # Broadcast a scalar (or singleton list) rpr_k across all layers
        if not is_sequence(rpr_k):
            rpr_k = [rpr_k] * layers
        elif len(rpr_k) == 1:
            rpr_k = [rpr_k[0]] * layers
        for i in range(layers):
            self.encoders.append(
                TransformerEncoder(num_heads, d_model, pdrop, scale, activation, d_ff, d_k,
                                   rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
                                   layer_norm_eps=layer_norm_eps, windowed_ra=windowed_ra,
                                   rpr_value_on=rpr_value_on, ra_type=ra_type))

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, mask)`
        :return: The encoded output
        """
        x, mask = inputs
        for layer in self.encoders:
            # Layer drop: randomly skip whole layers at train time
            pdrop = np.random.random()
            if not self.training or (pdrop >= self.layer_drop):
                x = layer((x, mask))
        return self.ln(x)


class TransformerEncoderStackWithLengths(TransformerEncoderStack):
    """An encoder stack that takes `(x, lengths)`, projects the input to
    `d_model` and builds a padding mask from the lengths."""

    def __init__(self, num_heads: int, d_model: int,
                 pdrop: float,  # FIX: was annotated `bool`; it is a dropout probability
                 scale: bool = True,
                 layers: int = 1, activation: str = "relu", d_ff: Optional[int] = None,
                 d_k: Optional[int] = None, rpr_k: Optional[Union[int, List[int]]] = None,
                 input_sz: Optional[int] = None, ffn_pdrop: Optional[float] = 0.0,
                 layer_norms_after: bool = False, layer_norm_eps: float = 1.0e-6,
                 windowed_ra: Optional[bool] = False, rpr_value_on: bool = True,
                 layer_drop: float = 0.0, ra_type: Optional[str] = None,
                 transformer_type: Optional[str] = None, **kwargs):
        super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
                         ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on,
                         layer_drop, ra_type, transformer_type, **kwargs)
        self.proj = WithDropout(pytorch_linear(input_sz, d_model), pdrop)

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        x, lengths = inputs
        x = self.proj(x)
        max_seqlen = x.shape[1]
        # Padding mask derived from the true sequence lengths
        mask = sequence_mask(lengths, max_seqlen).to(x.device)
        return super().forward((x, mask.unsqueeze(1).unsqueeze(1)))


class TransformerEncoderStackWithTimeMask(TransformerEncoderStack):
    """An encoder stack that takes `(x, lengths)` and applies a causal
    (subsequent) time mask instead of a padding mask."""

    def __init__(self, num_heads: int, d_model: int,
                 pdrop: float,  # FIX: was annotated `bool`; it is a dropout probability
                 scale: bool = True,
                 layers: int = 1, activation: str = "relu", d_ff: Optional[int] = None,
                 d_k: Optional[int] = None, rpr_k: Optional[Union[int, List[int]]] = None,
                 input_sz: Optional[int] = None, ffn_pdrop: Optional[float] = 0.0,
                 layer_norms_after: bool = False, layer_norm_eps: float = 1.0e-6,
                 windowed_ra: Optional[bool] = False, rpr_value_on: bool = True,
                 layer_drop: float = 0.0, ra_type: Optional[str] = None,
                 transformer_type: Optional[str] = None, **kwargs):
        super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
                         ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on,
                         layer_drop, ra_type, transformer_type, **kwargs)
        self.proj = WithDropout(pytorch_linear(input_sz, d_model), pdrop)

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        x, lengths = inputs
        x = self.proj(x)
        max_seqlen = x.shape[1]
        mask = subsequent_mask(max_seqlen).to(x.device)
        return super().forward((x, mask.unsqueeze(1).unsqueeze(1)))
class GatedMLPEncoderStack(nn.Module):
    """A stack of gMLP blocks with optional layer drop.
    Following https://arxiv.org/pdf/2105.08050.pdf
    """

    def __init__(
        self,
        d_model: int,
        pdrop: float,
        layers: int = 1,
        nctx: int = 256,
        activation: str = "gelu",
        d_ff: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norm_eps: float = 1.0e-6,
        layer_drop: float = 0.0,
        **kwargs,
    ):
        super().__init__()
        self.encoders = nn.ModuleList()
        self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.output_dim = d_model
        self.layer_drop = layer_drop
        for _ in range(layers):
            self.encoders.append(
                GatedMLPEncoder(
                    d_model, pdrop, nctx, activation, d_ff,
                    ffn_pdrop=ffn_pdrop, layer_norm_eps=layer_norm_eps,
                )
            )

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Run the stack; whole blocks may be randomly skipped at train time (layer drop).

        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        x, mask = inputs
        for block in self.encoders:
            skip_draw = np.random.random()
            if not self.training or (skip_draw >= self.layer_drop):
                x = block((x, mask))
        return self.ln(x)
class TransformerDecoderStack(nn.Module):
    """A stack of Transformer decoder layers with optional layer drop.

    ``transformer_type`` selects the layer-norm placement, mirroring
    ``TransformerEncoderStack``; the legacy ``layer_norms_after`` flag is
    equivalent to ``"post-layer-norm"``.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        layers: int = 1,
        activation_type: str = "relu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[Union[int, List[int]]] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norms_after: bool = False,
        layer_norm_eps: float = 1.0e-6,
        layer_drop: float = 0.0,
        rpr_value_on: bool = True,
        ra_type: Optional[str] = None,
        transformer_type: Optional[str] = None,
        **kwargs,
    ):
        super().__init__()
        self.decoders = nn.ModuleList()
        self.layer_drop = layer_drop
        if layer_norms_after or transformer_type == "post-layer-norm":
            logger.info("Using post-layer-norm transformer (decoder)")
            TransformerDecoder = PostLNTransformerDecoder
            self.ln = nn.Identity()
        elif transformer_type == "pre-layer-norm":
            TransformerDecoder = PreLNTransformerDecoder
            self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
        else:  # transformer_type == "pre-layer-norm-before-resconn"
            logger.info("Using layer norm before residual connections (decoder)")
            if layer_norms_after:
                raise Exception(f"Mutually exclusive options ({transformer_type}) and layer_norms_after=True)",)
            TransformerDecoder = PreLNBeforeResConnTransformerDecoder
            self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)

        # Broadcast a scalar (or singleton list) rpr_k across all layers
        if not is_sequence(rpr_k):
            rpr_k = [rpr_k] * layers
        elif len(rpr_k) == 1:
            rpr_k = [rpr_k[0]] * layers

        for i in range(layers):
            self.decoders.append(
                TransformerDecoder(num_heads, d_model, pdrop, scale, activation_type, d_ff,
                                   d_k=d_k, rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
                                   layer_norm_eps=layer_norm_eps, rpr_value_on=rpr_value_on,
                                   ra_type=ra_type))

    def forward(self, inputs):
        """
        :param inputs: `(x, memory, src_mask, tgt_mask)`
        :return: The output tensor
        """
        x, memory, src_mask, tgt_mask = inputs
        for layer in self.decoders:
            # Layer drop: randomly skip whole layers at train time
            skip_draw = np.random.random()
            if not self.training or (skip_draw >= self.layer_drop):
                x = layer((x, memory, src_mask, tgt_mask))
        return self.ln(x)
updatable_lengths=lengths<eq>0<line_sep># If this length can be updated AND this token is an eos lengths_mask=updatable_lengths&eoses<line_sep><return>lengths.masked_fill(lengths_mask idx)<block_end><def_stmt>gnmt_length_penalty lengths alpha=0.8<block_start>"""Calculate a length penalty from https://arxiv.org/pdf/1609.08144.pdf The paper states the penalty as (5 + |Y|)^a / (5 + 1)^a. This is implemented as ((5 + |Y|) / 6)^a for a (very) tiny performance boost :param lengths: `torch.LongTensor`: [B, K] The lengths of the beams. :param alpha: `float`: A hyperparameter. See Table 2 for a search on this parameter. :returns: `torch.FloatTensor`: [B, K, 1] The penalties. """<line_sep>lengths=lengths.to(torch.float)<line_sep>penalty=torch.pow(((5+lengths)/6) alpha)<line_sep><return>penalty.unsqueeze(-1)<block_end><def_stmt>no_length_penalty lengths<block_start>"""A dummy function that returns a no penalty (1)."""<line_sep><return>torch.ones_like(lengths).to(torch.float).unsqueeze(-1)<block_end><def_stmt>repeat_batch t K dim=0<block_start>"""Repeat a tensor while keeping the concept of a batch. :param t: `torch.Tensor`: The tensor to repeat. :param K: `int`: The number of times to repeat the tensor. :param dim: `int`: The dimension to repeat in. This should be the batch dimension. :returns: `torch.Tensor`: The repeated tensor. The new shape will be batch size * K at dim, the rest of the shapes will be the same. 
def repeat_batch(t, K, dim=0):
    """Repeat a tensor while keeping the concept of a batch.

    :param t: `torch.Tensor`: The tensor to repeat.
    :param K: `int`: The number of times to repeat the tensor.
    :param dim: `int`: The dimension to repeat in. This should be the
        batch dimension.

    :returns: `torch.Tensor`: The repeated tensor. The new shape will be
        batch size * K at dim, the rest of the shapes will be the same.

    Example::

        >>> a = torch.arange(10).view(2, -1)
        >>> a
	tensor([[0, 1, 2, 3, 4],
		[5, 6, 7, 8, 9]])
	>>> a.repeat(2, 1)
	tensor([[0, 1, 2, 3, 4],
		[5, 6, 7, 8, 9],
		[0, 1, 2, 3, 4],
		[5, 6, 7, 8, 9]])
	>>> repeat_batch(a, 2)
	tensor([[0, 1, 2, 3, 4],
		[0, 1, 2, 3, 4],
		[5, 6, 7, 8, 9],
		[5, 6, 7, 8, 9]])
    """
    shape = t.shape
    # Insert a new axis right after `dim`, tile along it, then fold it into `dim`
    # so each batch element's copies stay adjacent (unlike Tensor.repeat).
    tiling = [1] * (len(shape) + 1)
    tiling[dim + 1] = K
    tiled = t.unsqueeze(dim + 1).repeat(tiling)
    old_bsz = shape[dim]
    new_bsz = old_bsz * K
    new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1:])
    return tiled.view(new_shape)


class BeamSearchBase:
    """Batched beam search driver; subclasses supply `init`, `step` and `update`."""

    def __init__(self, beam=1, length_penalty=None, **kwargs):
        # Fall back to the no-op penalty when none was supplied
        self.length_penalty = length_penalty if length_penalty else no_length_penalty
        # K is the beam width
        self.K = beam

    def init(self, encoder_outputs):
        """Hook: called once before the search to build any decoding state."""
        pass

    def step(self, paths, extra):
        """Hook: perform one decoding step; returns (log-prob over vocab, state)."""
        pass

    def update(self, beams, extra):
        """Hook: edit decoding state after the best beams have been selected."""
        pass

    def __call__(self, encoder_outputs, **kwargs):
        """Perform batched Beam Search.

        Note: The paths and lengths generated do not include the <GO> token.

        :param encoder_outputs: `namedtuple` The outputs of the encoder class.
        :param init: `Callable(encoder_outputs: encoder_outputs, K: int)` -> Any: A
            callable that is called once at the start of the search to initialize
            things. This returns a blob that is passed to other callables.
        :param step: `Callable(paths: torch.LongTensor, extra) -> (probs: torch.FloatTensor, extra):
            A callable that does a single decoding step. It returns the log
            probabilities over the vocabulary in the last dimension. It also returns
            any state the decoding process needs.
        :param update: `Callable(beams: torch.LongTensor, extra) -> extra:
            A callable that is called to edit the decoding state based on the selected best beams.
        :param length_penalty: `Callable(lengths: torch.LongTensor) -> torch.floatTensor
            A callable that generates a penalty based on the lengths. Lengths is
            [B, K] and the returned penalty should be [B, K, 1] (or [B, K, V] to
            have token based penalties?)

        :Keyword Arguments:
        * *beam* -- `int`: The number of beams to use.
        * *mxlen* -- `int`: The max number of steps to run the search for.

        :returns:
            tuple(preds: torch.LongTensor, lengths: torch.LongTensor, scores: torch.FloatTensor)
            preds: The predicted values: [B, K, max(lengths)]
            lengths: The length of each prediction [B, K]
            scores: The score of each path [B, K]
        """
        mxlen = kwargs.get("mxlen", 100)
        bsz = encoder_outputs.output.shape[0]
        device = encoder_outputs.output.device
        with torch.no_grad():
            extra = self.init(encoder_outputs)
            # Every beam starts from a single <GO> token
            paths = torch.full((bsz, self.K, 1), Offsets.GO, dtype=torch.long, device=device)
            # This tracks the log prob of each beam. This is distinct from score which
            # is based on the log prob and penalties.
            log_probs = torch.zeros((bsz, self.K), dtype=torch.float, device=device)
            # Tracks the lengths of the beams, unfinished beams have lengths of zero.
            lengths = torch.zeros((bsz, self.K), dtype=torch.long, device=device)

            for i in range(mxlen - 1):
                probs, extra = self.step(paths, extra)
                V = probs.shape[-1]
                probs = probs.view((bsz, self.K, V))  # [B, K, V]
                if i > 0:
                    # This mask is for all beams that are done.
                    done_mask = (lengths != 0).unsqueeze(-1)  # [B, K, 1]
                    # Can creating this mask be moved out of the loop? It never changes but we don't have V
                    # This mask selects the EOS token
                    eos_mask = torch.zeros((1, 1, V), dtype=done_mask.dtype, device=device)
                    eos_mask[:, :, Offsets.EOS] = 1
                    # This mask selects the EOS token of only the beams that are done.
                    mask = done_mask & eos_mask
                    # Put all probability mass on the EOS token for finished beams.
                    # Otherwise as the other beams get longer they will all give
                    # up and eventually select this beam and all outputs become
                    # the same.
                    probs = probs.masked_fill(done_mask, -np.inf)
                    probs = probs.masked_fill(mask, 0)
                    probs = log_probs.unsqueeze(-1) + probs  # [B, K, V]
                    # Calculate the score of the beam based on the current length.
                    # Unfinished beams are treated as if they end at this step.
                    path_scores = probs / self.length_penalty(lengths.masked_fill(lengths == 0, i + 1))
                else:
                    # On the first step we only look at probabilities for the first beam.
                    # If we don't then the probs will be the same for each beam
                    # This means the same token will be selected for each beam
                    # And we won't get any diversity.
                    # Using only the first beam ensures K different starting points.
                    path_scores = probs[:, 0, :]

                flat_scores = path_scores.view(bsz, -1)  # [B, K * V]
                best_scores, best_idx = flat_scores.topk(self.K, 1)
                # Get the log_probs of the best scoring beams
                log_probs = probs.view(bsz, -1).gather(1, best_idx).view(bsz, self.K)

                best_beams = best_idx // V  # Get which beam it came from
                best_idx = best_idx % V  # Get the index of the word regardless of which beam it is.

                # Best Beam index is relative within the batch (only [0, K)).
                # This makes the index global (e.g. best beams for the second
                # batch example is in [K, 2*K)).
                offsets = torch.arange(bsz, dtype=torch.long, device=device) * self.K
                offset_beams = best_beams + offsets.unsqueeze(-1)
                flat_beams = offset_beams.view(bsz * self.K)
                # Select the paths to extend based on the best beams
                flat_paths = paths.view(bsz * self.K, -1)
                new_paths = flat_paths[flat_beams, :].view(bsz, self.K, -1)
                # Add the selected outputs to the paths
                paths = torch.cat([new_paths, best_idx.unsqueeze(-1)], dim=2)

                # Select the lengths to keep tracking based on the valid beams left.
                lengths = lengths.view(-1)[flat_beams].view((bsz, self.K))

                extra = self.update(flat_beams, extra)

                # Updated lengths based on if we hit EOS
                last = paths[:, :, -1]
                eoses = last == Offsets.EOS
                lengths = update_lengths(lengths, eoses, i + 1)
                if (lengths != 0).all():
                    break
            else:
                # This runs if the loop didn't break meaning one beam hit the max len
                # Add an EOS to anything that hasn't hit the end. This makes the scores real.
                probs, extra = self.step(paths, extra)

                V = probs.size(-1)
                probs = probs.view((bsz, self.K, V))
                probs = probs[:, :, Offsets.EOS]  # Select the score of EOS
                # If any of the beams are done mask out the score of this EOS (they already had an EOS)
                probs = probs.masked_fill((lengths != 0), 0)
                log_probs = log_probs + probs
                end_tokens = torch.full((bsz, self.K, 1), Offsets.EOS, device=device, dtype=paths.dtype)
                paths = torch.cat([paths, end_tokens], dim=2)
                lengths = update_lengths(lengths, torch.ones_like(lengths) == 1, mxlen)
                # NOTE(review): the line below is an exact duplicate of the line above;
                # update_lengths is idempotent so it is harmless, but one call suffices
                lengths = update_lengths(lengths, torch.ones_like(lengths) == 1, mxlen)
                best_scores = log_probs / self.length_penalty(lengths).squeeze(-1)

        # Slice off the Offsets.GO token
        paths = paths[:, :, 1:]
        return paths, lengths, best_scores


def checkpoint_for(model_base, epoch, tick_type='epoch'):
    """Build a checkpoint basename: `<model_base>-<tick_type>-<epoch + 1>` (1-based)."""
    return '{}-{}-{}'.format(model_base, tick_type, epoch + 1)


def rm_old_checkpoints(base_path, current_epoch, last_n=10):
    """Delete checkpoints (.pth and .npz) older than the most recent `last_n` epochs.

    :param base_path: checkpoint basename prefix, as passed to `checkpoint_for`
    :param current_epoch: the current (0-based) epoch
    :param last_n: how many of the most recent checkpoints to keep
    """
    for i in range(0, current_epoch - last_n):
        checkpoint_i = checkpoint_for(base_path, i)
        for extension in ('.pth', '.npz'):
            checkpoint_name = checkpoint_i + extension
            if os.path.exists(checkpoint_name):
                os.remove(checkpoint_name)
def find_latest_checkpoint(checkpoint_dir: str, wildcard="checkpoint") -> Tuple[str, int]:
    """Find the checkpoint file with the highest step number in a directory.

    Filenames are expected to look like ``<wildcard>...-<step>[.pth|.npz]``.

    :param checkpoint_dir: the directory to scan
    :param wildcard: a filename prefix to match
    :return: ``(path, step)`` of the newest checkpoint, or ``(None, 0)`` when
        nothing matches.  FIX: previously `checkpoint` was only assigned inside
        the loop, so an empty directory raised UnboundLocalError.
    """
    checkpoint = None
    step_num = 0
    for f in glob.glob(os.path.join(checkpoint_dir, f"{wildcard}*")):
        base = os.path.basename(f)
        if "-" not in base:
            continue
        last = base.split("-")[-1]
        for x in ('.pth', '.npz'):
            last = last.replace(x, '', -1)
        # NOTE(review): a non-numeric suffix (e.g. "checkpoint-best.pth") still
        # raises ValueError here, matching the original behavior
        this_step_num = int(last)
        if this_step_num > step_num:
            checkpoint = f
            step_num = this_step_num
    return checkpoint, step_num


def save_checkpoint(model: torch.nn.Module, model_base: str, count: int, tick_type: str = 'epoch', save_npz: bool = False):
    """Save a model checkpoint as a ``.pth`` state dict (and optionally ``.npz``).

    :param model: the model (or a DataParallel/DDP wrapper around it) to save
    :param model_base: base path used to build checkpoint names
    :param count: the current tick (epoch or step)
    :param tick_type: what `count` counts; old checkpoints are pruned for 'epoch'
    :param save_npz: also export an architecture-specific npz serialization
    """
    from eight_mile.pytorch.serialize import save_tlm_npz, save_tlm_output_npz, save_transformer_seq2seq_npz, save_transformer_de_npz
    checkpoint_name = checkpoint_for(model_base, count, tick_type=tick_type)
    # Its possible due to how its called that we might save the same checkpoint twice if we dont check first
    # FIX: check the actual file written below (`.pth`); the bare basename is
    # never created, so the original guard could not fire
    if os.path.exists(checkpoint_name + '.pth'):
        logger.info("Checkpoint already exists: %s", checkpoint_name)
        return
    logger.info("Creating checkpoint: %s", checkpoint_name)
    # Unwrap DataParallel/DistributedDataParallel before saving
    model_ = model.module if hasattr(model, 'module') else model
    torch.save(model_.state_dict(), checkpoint_name + '.pth')
    if save_npz:
        # Dispatch on architecture by duck-typing distinctive attributes
        if hasattr(model_, 'decoder'):
            save_transformer_seq2seq_npz(model_, checkpoint_name + '.npz')
        elif hasattr(model_, 'reduction_layer'):
            save_transformer_de_npz(model_, checkpoint_name + '.npz')
        elif hasattr(model_, 'output_layer'):
            save_tlm_output_npz(model_, checkpoint_name + '.npz')
        else:
            save_tlm_npz(model_, checkpoint_name + '.npz')
    if tick_type == 'epoch':
        rm_old_checkpoints(model_base, count)


def init_distributed(local_rank):
    """Set up torch.distributed (NCCL backend) for single-node multi-GPU training.

    :param local_rank: this process's rank; -1 means read it from the RANK env var
    :return: ``(device, local_rank)``
    """
    if local_rank == -1:
        # https://github.com/kubeflow/pytorch-operator/issues/128
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py
        logger.info("Setting local rank to RANK env variable")
        local_rank = int(os.environ['RANK'])
    logger.warning("Local rank (%d)", local_rank)
    # In an env like k8s with kubeflow each worker will only see a single gpu
    # with an id of 0. If the gpu count is 1 then we are probably in an env like
    # that so we should just use the first (and only) gpu avaiable
    if torch.cuda.device_count() == 1:
        torch.cuda.set_device(0)
        device = torch.device("cuda", 0)
    # This program assumes multiprocess/multi-device on a single node. Each
    # process gets a rank (via cli or ENV variable) and uses that rank to select
    # which gpu to use. This only makes sense on a single node, if you had 4
    # processes on 2 nodes where each node has 2 GPUs then the ranks would be
    # 0, 1, 2, 3 but the gpus numbers would be node 0: 0, 1 and node 1: 0, 1
    # and this assignment to gpu 3 would fail. On a single node with 4 processes
    # and 4 gpus the rank and gpu ids will align and this will work
    else:
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    return device, local_rank
class AttentionReduction(nn.Module):
    """
    This is a reduction that is given Q, K, V and a mask vector.  Different from
    base reductions, which get an embedding stack.
    """

    def __init__(self):
        super().__init__()

    def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Inputs are the same as for a normal attention function, but the output here is a single tensor, ``[B, H]``

        :param qkvm: `(query, key, value, mask)`; query can come from self in the
            case of self-attention, or from the decoder in the case of E/D
        :return: sentence-level encoding with dim [B, d_model]
        """


class SingleHeadReduction(AttentionReduction):
    """
    Implementation of the "self_attention_head" layer from the conveRT paper
    (https://arxiv.org/pdf/1911.03688.pdf)
    """

    def __init__(
        self,
        d_model: int,
        dropout: float = 0.0,
        scale: bool = False,
        d_k: Optional[int] = None,
        pooling: str = 'sqrt_length',
    ):
        """
        :param d_model: The model hidden size
        :param dropout (``float``): The amount of dropout to use
        :param scale: should we scale the dot product attention
        :param d_k: The low-order project per head. This is normally `d_model // num_heads` unless set explicitly
        :param pooling: how to pool over time: 'max', 'mean', or (default) 'sqrt_length'
        """
        super().__init__()
        self.output_dim = d_model
        self.d_k = d_model if d_k is None else d_k
        self.w_Q = Dense(d_model, self.d_k)
        self.w_K = Dense(d_model, self.d_k)
        self.attn_fn = SeqScaledDotProductAttention(dropout) if scale else SeqDotProductAttention(dropout)
        self.attn = None
        self.fill = 0
        pooling = pooling.lower()
        if pooling == 'max':
            self.pool = self._max_pool
            # Padded positions must not win the max, so fill them very negative
            self.fill = -1e9
        elif pooling == 'mean':
            self.pool = self._mean_pool
        else:
            self.pool = self._sqrt_length_pool

    def _sqrt_length_pool(self, x, seq_lengths):
        """Sum over time, then scale by sqrt(length) (conveRT-style pooling)."""
        summed = x.sum(dim=1)  # [B, D]
        return summed * seq_lengths.float().sqrt().unsqueeze(-1)

    def _mean_pool(self, x, seq_lengths):
        """Average over time using the true (unpadded) sequence lengths."""
        return torch.sum(x, 1, keepdim=False) / torch.unsqueeze(seq_lengths, -1).to(x.dtype).to(x.device)

    def _max_pool(self, x, _):
        """Max over time; lengths unused since padded slots were pre-filled with -1e9."""
        pooled, _ = torch.max(x, 1, keepdim=False)
        return pooled

    def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Single-headed attention pooling to a sentence encoding.

        Per the conveRT graph, tokens are projected to low-dimensional query and
        key in a single head, the [B, T, T] attention scores are summed over the
        query dim to weight each token, scaled, and used to weighted-sum the
        token encodings.  This is implemented equivalently here by multiplying
        with value first, then summing over the query dimension.

        :param qkvm: `(query, key, value, mask)`
        :return: sentence-level encoding with dim [B, d_model]
        """
        query, key, value, mask = qkvm
        B = query.size(0)
        seq_mask = mask.squeeze(1).squeeze(1)  # [B, T]
        seq_lengths = seq_mask.sum(dim=1)

        # (B, H, T, D) with num_heads = 1 so the eight_mile attention fns apply
        q = self.w_Q(query).view(B, -1, 1, self.d_k).transpose(1, 2)
        k = self.w_K(key).view(B, -1, 1, self.d_k).transpose(1, 2)
        v = value.view(B, -1, 1, self.output_dim).transpose(1, 2)
        attended = self.attn_fn((q, k, v, mask))  # [B, 1, T, D]
        self.attn = self.attn_fn.attn

        attended = attended.squeeze(1)  # [B, T, D]
        attended = attended.masked_fill(seq_mask.unsqueeze(-1) == MASK_FALSE, self.fill)
        return self.pool(attended, seq_lengths)
model is based on [ELECTRA: Pre-Training Text Encoders as Discriminators Rather Than Generators, Clark et al. 2019](https://openreview.net/pdf?id=r1xMH1BtvB). """<def_stmt>__init__ self embeddings num_heads:int d_model:int dropout:bool layers:int=1 activation:str="relu" d_ff:Optional[int]=<none> d_k:Optional[int]=<none> rpr_k:Optional[Union[int List[int]]]=<none> layer_norms_after:bool=<false> layer_norm_eps:float=1.0e-6 embeddings_reduction:str='sum' **kwargs <block_start>super().__init__()<line_sep>self.embeddings=EmbeddingsStack(embeddings dropout reduction=embeddings_reduction)<line_sep>self.weight_std=kwargs.get('weight_std' 0.02)<assert_stmt>self.embeddings.dsz<eq>d_model<line_sep>self.transformer=TransformerEncoderStack(num_heads d_model=d_model pdrop=dropout scale=<true> layers=layers activation=activation d_ff=d_ff rpr_k=rpr_k d_k=d_k layer_norms_after=layer_norms_after layer_norm_eps=layer_norm_eps)<line_sep>self.proj_to_output=pytorch_linear(d_model 1)<line_sep>self.apply(self.init_layer_weights)<line_sep>self.lengths_feature=kwargs.get('lengths_feature' list(self.embeddings.keys())[0])<block_end><def_stmt>init_layer_weights self module<block_start><if_stmt>isinstance(module (nn.Linear nn.Embedding nn.LayerNorm))<block_start>module.weight.data.normal_(mean=0.0 std=self.weight_std)<block_end><if_stmt>isinstance(module (nn.Linear nn.LayerNorm))<and>module.bias<is><not><none><block_start>module.bias.data.zero_()<block_end><block_end><def_stmt>forward self features<block_start>embedded=self.embeddings(features)<line_sep>x=features[self.lengths_feature]<line_sep>input_mask=torch.zeros(x.shape device=x.device dtype=torch.long).masked_fill(x<ne>Offsets.PAD 1).unsqueeze(1).unsqueeze(1)<line_sep>transformer_out=self.transformer((embedded input_mask))<line_sep>binary=self.proj_to_output(transformer_out)<line_sep><return>torch.sigmoid(binary)<block_end><def_stmt>create_loss 
class PooledSequenceCriterion(nn.Module):
    """Sequence loss that pools the label sequence into a multi-hot bag-of-vocab
    target and applies a BCE-style criterion against (optionally max-pooled)
    scores.
    """

    def __init__(self, LossFn=nn.BCEWithLogitsLoss, avg='token'):
        """
        :param LossFn: loss class to instantiate (default `nn.BCEWithLogitsLoss`)
        :param avg: 'token' leaves the criterion's reduction alone; anything
            else additionally normalizes by batch size
        """
        super().__init__()
        self.crit = LossFn()
        self._norm = self._no_norm if avg == 'token' else self._batch_norm

    def _batch_norm(self, loss, inputs):
        return loss / inputs.size()[0]

    def _no_norm(self, loss, inputs):
        return loss

    def forward(self, inputs, targets):
        """Evaluate some loss over a sequence.

        :param inputs: torch.FloatTensor, [B, C] The scores from the model. Batch First
        :param targets: torch.LongTensor, The labels.
        :returns: torch.FloatTensor, The loss.
        """
        #inputs = inputs.transpose(0, 1)
        C = inputs.shape[-1]
        flat_targets = torch.nn.functional.one_hot(targets, C)

        # Get the offsets of the non-zero targets; the values of these are all one
        flat_targets = (torch.sum(flat_targets, dim=1) != 0).float()
        # Special symbols never count as targets
        flat_targets[:, Offsets.PAD] = 0
        flat_targets[:, Offsets.EOS] = 0
        flat_targets[:, Offsets.GO] = 0

        if len(inputs.shape) > 2:
            # [T, B, C] scores: take the max over time per vocab item
            max_per_vocab = inputs.max(0)[0]
            loss = self.crit(max_per_vocab, flat_targets)
        else:
            loss = self.crit(inputs, flat_targets)
        return self._norm(loss, inputs)


class SequenceCriterion(nn.Module):
    """Token-level loss over a flattened sequence, ignoring PAD positions."""

    def __init__(self, LossFn=nn.NLLLoss, avg='token'):
        """
        :param LossFn: loss class to instantiate (default `nn.NLLLoss`)
        :param avg: 'token' averages over (non-PAD) tokens; anything else sums
            and then normalizes by batch size
        """
        super().__init__()
        if avg == 'token':
            # Fixed: `size_average=True` is deprecated in PyTorch;
            # `reduction='mean'` is the documented equivalent
            self.crit = LossFn(ignore_index=Offsets.PAD, reduction='mean')
            self._norm = self._no_norm
        else:
            # Fixed: `size_average=False` -> `reduction='sum'`
            self.crit = LossFn(ignore_index=Offsets.PAD, reduction='sum')
            self._norm = self._batch_norm

    def _batch_norm(self, loss, inputs):
        return loss / inputs.size()[0]

    def _no_norm(self, loss, inputs):
        return loss

    def forward(self, inputs, targets):
        """Evaluate some loss over a sequence.

        :param inputs: torch.FloatTensor, [B, .., C] The scores from the model. Batch First
        :param targets: torch.LongTensor, The labels.
        :returns: torch.FloatTensor, The loss.
        """
        total_sz = targets.nelement()
        loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
        return self._norm(loss, inputs)


def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None, stride=1, bias=True, groups=1):
    """Create an `nn.Conv1d` with the requested weight initialization.

    :param in_channels: number of input channels
    :param out_channels: number of output channels (filters)
    :param fsz: filter (kernel) size
    :param unif: if > 0, initialize the weights uniformly in [-unif, unif]
        (takes precedence over `initializer`); also used as the std for the
        "normal" initializer
    :param padding: convolution padding
    :param initializer: one of "ortho", "he"/"kaiming", "normal", or None
        (xavier uniform)
    :param stride: convolution stride
    :param bias: include a bias term (zero-initialized except on the unif path)
    :param groups: number of blocked connections
    :return: the initialized `nn.Conv1d`
    """
    c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding, stride=stride, bias=bias, groups=groups)
    if unif > 0:
        c.weight.data.uniform_(-unif, unif)
    elif initializer == "ortho":
        nn.init.orthogonal_(c.weight)
        if bias:
            nn.init.constant_(c.bias, 0)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(c.weight)
        if bias:
            nn.init.constant_(c.bias, 0)
    elif initializer == "normal":
        # Bug fix: the original called `nn.init.normal(mean=0, std=unif)`
        # without passing the tensor, which raises a TypeError at runtime
        nn.init.normal_(c.weight, mean=0, std=unif)
        if bias:
            nn.init.constant_(c.bias, 0)
    else:
        nn.init.xavier_uniform_(c.weight)
        if bias:
            nn.init.constant_(c.bias, 0)
    return c


def tie_weight(to_layer, from_layer):
    """Assigns a weight object to the layer weights.

    This method exists to duplicate baseline functionality across packages.

    :param to_layer: the pytorch layer to assign weights to
    :param from_layer: pytorch layer to retrieve weights from
    """
    to_layer.weight = from_layer.weight
class BilinearAttention(nn.Module):
    """Biaffine/bilinear pairwise scoring between two sequences.

    Scores every pair of timesteps between `x` and `y` with a learned
    ``[out_hsz, H(+1), H(+1)]`` tensor, optionally appending a constant bias
    column of ones to either input.
    """

    def __init__(self, in_hsz: int, out_hsz: int = 1, bias_x: bool = True, bias_y: bool = True):
        """
        :param in_hsz: input hidden size of both `x` and `y`
        :param out_hsz: number of output scoring channels
        :param bias_x: append a constant 1 feature to `x`
        :param bias_y: append a constant 1 feature to `y`
        """
        super().__init__()

        self.in_hsz = in_hsz
        self.out_hsz = out_hsz
        self.bias_x = bias_x
        self.bias_y = bias_y
        # Removed unused locals a1/a2; the bools add 1 to the matching dim
        self.weight = nn.Parameter(torch.Tensor(out_hsz, in_hsz + bias_x, in_hsz + bias_y))
        self.reset_parameters()

    def reset_parameters(self):
        # Zero init (orthogonal init was tried and deliberately left disabled)
        nn.init.zeros_(self.weight)
        #nn.init.orthogonal_(self.weight)

    def forward(self, x, y, mask):
        r"""
        Args:
            x: ``[B, T, H]``.
            y: ``[B, T, H]``.
            mask: ``[B, T]`` truthy at valid destination positions.

        Returns:
            ~torch.Tensor:
                A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
                If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
        """
        if self.bias_x:
            ones = torch.ones(x.shape[:-1] + (1,), device=x.device)
            x = torch.cat([x, ones], -1)
        if self.bias_y:
            # Bug fix: the ones column must follow y's shape, not x's (x's last
            # dim may already have been extended, and x/y may differ in length)
            ones = torch.ones(y.shape[:-1] + (1,), device=y.device)
            y = torch.cat([y, ones], -1)
        x = x.unsqueeze(1)
        y = y.unsqueeze(1)
        u = x @ self.weight
        s = u @ y.transpose(-2, -1)
        if self.out_hsz == 1:
            s = s.squeeze(1)
        # Push masked destination positions to a large negative score
        s = s.masked_fill((mask.bool() == MASK_FALSE).unsqueeze(1), -1e9)
        return s


class TripletLoss(nn.Module):
    """Provide a Triplet Loss using the reversed batch for negatives"""

    def __init__(self, model):
        """
        :param model: a dual encoder exposing `encode_query`/`encode_response`
        """
        super().__init__()
        self.score = nn.CosineSimilarity(dim=1)
        self.model = model

    def forward(self, inputs, targets):
        """Hinge-style triplet loss: negatives come from the flipped batch.

        :param inputs: query batch
        :param targets: matching response batch
        :return: summed positive part of (neg_score - pos_score)
        """
        # reverse the batch and use as a negative example
        neg = targets.flip(0)
        query = self.model.encode_query(inputs)
        response = self.model.encode_response(targets)
        neg_response = self.model.encode_response(neg)
        pos_score = self.score(query, response)
        neg_score = self.score(query, neg_response)
        score = neg_score - pos_score
        score = score.masked_fill(score < 0.0, 0.0).sum(0)
        return score


class ContrastiveLoss(nn.Module):
    """In-batch softmax contrastive (InfoNCE-style) loss with a learnable
    temperature."""

    def __init__(self, model, t=1.0, train_temperature=True):
        """
        :param model: a dual encoder exposing `encode_query`/`encode_response`
        :param t: initial (log-space) temperature; None falls back to 1.0
        :param train_temperature: learn the temperature as a parameter
        """
        super().__init__()
        self.model = model
        if t is None:
            t = 1.0
        self.t = nn.Parameter(torch.tensor(t).float(), requires_grad=train_temperature)

    def forward(self, inputs, targets):
        query = self.model.encode_query(inputs)  # [B, H]
        response = self.model.encode_response(targets)  # [B, H]
        # L2-normalize so the dot product is a cosine similarity
        query = F.normalize(query, p=2, dim=1)
        response = F.normalize(response, p=2, dim=1)
        labels = torch.arange(query.shape[0], device=query.device)
        # Diagonal entries are the matched pairs; cross-entropy pushes them up
        logits = torch.mm(query, response.T) * self.t.exp()
        loss = F.cross_entropy(logits, labels)
        return loss
class SymmetricContrastiveLoss(nn.Module):
    """In-batch contrastive loss applied in both directions (query->response
    and response->query) and averaged, with a learnable temperature."""

    def __init__(self, model, t=1.0, train_temperature=True):
        super().__init__()
        self.model = model
        initial_t = 1.0 if t is None else t
        self.t = nn.Parameter(torch.tensor(initial_t).float(), requires_grad=train_temperature)

    def forward(self, inputs, targets):
        # Encode both sides and L2-normalize so dot products are cosines
        q = F.normalize(self.model.encode_query(inputs), p=2, dim=1)      # [B, H]
        r = F.normalize(self.model.encode_response(targets), p=2, dim=1)  # [B, H]
        gold = torch.arange(q.shape[0], device=q.device)
        # Scaled similarity matrix; the diagonal holds the matched pairs
        sims = torch.mm(q, r.T) * self.t.exp()
        forward_loss = F.cross_entropy(sims, gold)
        backward_loss = F.cross_entropy(sims.T, gold)
        return (forward_loss + backward_loss) * 0.5


class AllLoss(nn.Module):
    def __init__(self, model, warmup_steps=10000, reduction_type='sum'):
        r"""Negative log-probability loss from https://arxiv.org/pdf/1705.00652.pdf, section 4.

        We want to minimize -log P(y|x).  By the chain rule
        P(y|x) = P(x, y) / P(x), and marginalizing P(x) over the batch gives
        P_approx(y|x) = P(x, y) / \sum_i^k P(x, y_k).  With P(x, y) = e^S(x, y)
        (S being the cosine similarity score from the network) this becomes

            -log P(y|x) = -(S(x, y) - log \sum_i^k e^S(x, y_k))

        i.e. an in-batch softmax over the responses.
        """
        super().__init__()
        self.score = nn.CosineSimilarity(dim=-1)
        self.model = model
        self.max_scale = math.sqrt(self.model.embeddings.output_dim)
        self.steps = 0
        self.warmup_steps = warmup_steps
        self.reduction = torch.mean if reduction_type == 'mean' else torch.sum

    def forward(self, inputs, targets):
        # Cosine-distance annealing from https://arxiv.org/pdf/1911.03688.pdf:
        # ramp the scale from 1 up to sqrt(H) over the warmup period
        fract = min(self.steps / self.warmup_steps, 1)
        scale = (self.max_scale - 1) * fract + 1
        self.steps += 1
        # Broadcast to a [B, B] matrix where (i, j) scores x_i against y_j
        query = self.model.encode_query(inputs).unsqueeze(1)         # [B, 1, H]
        response = self.model.encode_response(targets).unsqueeze(0)  # [1, B, H]
        all_score = scale * self.score(query, response)              # [B, B]
        # The diagonal has the scores of the correct pairs (i, i)
        pos_score = torch.diag(all_score)
        # vec_log_sum_exp is a numerically stable batched logsumexp ([B, 1],
        # squeezed to [B]); minimizing -log turns the division into subtraction
        loss = pos_score - vec_log_sum_exp(all_score, -1).squeeze()
        loss = self.reduction(loss)
        # We accumulated log-probability, so minimize its negation
        return -loss


class CosineSimilarityLoss(nn.Module):
    """Regress the cosine similarity of two concatenated encodings toward a
    target value chosen by the binary label."""

    def __init__(self, neg_value=0.3, pos_value=0.8):
        super().__init__()
        self.pos_value = pos_value
        self.neg_value = neg_value

    def forward(self, embeddings_reduction, labels):
        # The two encodings arrive concatenated along the feature axis
        hsz = int(embeddings_reduction.shape[-1] // 2)
        targets = torch.zeros_like(labels, dtype=torch.float)
        targets[labels == 0] = self.neg_value
        targets[labels == 1] = self.pos_value
        sims = torch.cosine_similarity(embeddings_reduction[:, :hsz], embeddings_reduction[:, hsz:])
        return F.mse_loss(sims, targets.view(-1), reduction='mean')
class OnlineContrastiveLoss(nn.Module):
    """Contrastive loss with online hard-pair mining over cosine distance."""

    def __init__(self):
        super().__init__()

    def forward(self, embeddings_reduction, labels):
        # The two encodings arrive concatenated along the feature axis
        half = int(embeddings_reduction.shape[-1] // 2)
        left = embeddings_reduction[:, :half]
        right = embeddings_reduction[:, half:]
        dists = 1 - F.cosine_similarity(left, right)
        neg_dists = dists[labels == 0]
        pos_dists = dists[labels == 1]
        # Hard-example mining: keep negatives closer than the farthest positive
        # and positives farther than the closest negative
        hard_negatives = neg_dists[neg_dists < (pos_dists.max() if len(pos_dists) > 1 else neg_dists.mean())]
        hard_positives = pos_dists[pos_dists > (neg_dists.min() if len(neg_dists) > 1 else pos_dists.mean())]
        positive_loss = hard_positives.pow(2).sum()
        negative_loss = F.relu(0.5 - hard_negatives).pow(2).sum()
        return positive_loss + negative_loss


class TwoHeadConcat(AttentionReduction):
    """Run two parallel SingleHeadReduction layers and concatenate the outputs.

    It is used in the conveRT paper (https://arxiv.org/pdf/1911.03688.pdf).
    """

    def __init__(self, d_model, dropout, scale=False, d_k=None, pooling='sqrt_length'):
        """Two parallel 1-head self-attention, then concatenate the output

        :param d_model: dim of the self-attention
        :param dropout: dropout of the self-attention
        :param scale: scale of the self-attention
        :param d_k: d_k of the self-attention
        :param pooling: pooling strategy passed to each head
        :return: concatenation of the two 1-head attention
        """
        super().__init__()
        self.output_dim = 2 * d_model
        self.reduction1 = SingleHeadReduction(d_model, dropout, scale=scale, d_k=d_k, pooling=pooling)
        self.reduction2 = SingleHeadReduction(d_model, dropout, scale=scale, d_k=d_k, pooling=pooling)

    def forward(self, inputs: torch.Tensor):
        pooled_1 = self.reduction1(inputs)
        pooled_2 = self.reduction2(inputs)
        return torch.cat([pooled_1, pooled_2], dim=-1)


class ConveRTFFN(nn.Module):
    """Implementation of the FFN layer from the conveRT paper
    (https://arxiv.org/pdf/1911.03688.pdf)."""

    def __init__(self, insz, hszs, outsz, pdrop):
        """
        :param insz: input dim
        :param hszs: list of hidden sizes
        :param outsz: output dim
        :param pdrop: dropout of each hidden layer
        """
        super().__init__()
        self.dense_stack = DenseStack(insz,
                                      hszs,
                                      activation='gelu',
                                      pdrop_value=pdrop,
                                      skip_connect=True,
                                      layer_norm=True)
        self.final = Dense(hszs[-1], outsz)
        # Project the residual path only when the dims differ
        self.proj = Dense(insz, outsz) if insz != outsz else nn.Identity()
        self.ln1 = nn.LayerNorm(insz, eps=1e-6)
        self.ln2 = nn.LayerNorm(outsz, eps=1e-6)

    def forward(self, inputs):
        normed = self.ln1(inputs)
        hidden = self.dense_stack(normed)
        out = self.final(hidden)
        # Residual connection around the whole stack
        return self.ln2(out + self.proj(inputs))


class DualEncoderModel(nn.Module):
    """Abstract base for dual encoders

    We can assume that our dual encoder needs to end up in the same output
    plane between the encoders, and we can define the set of losses here that
    we are likely to need for most.
    """

    def __init__(self,
                 in_sz: int,
                 stacking_layers: Union[int, List[int]] = None,
                 d_out: int = 512,
                 ffn_pdrop=0.1,
                 in_sz_2=None,
                 output_layer=False,
                 output_activation='tanh',
                 output_shared=False):
        super().__init__()
        if not in_sz_2:
            in_sz_2 = in_sz
        if stacking_layers:
            stacking_layers = listify(stacking_layers)

        if stacking_layers:
            # conveRT-style FFN towers map each encoder into the output plane
            self.ff1 = ConveRTFFN(in_sz, stacking_layers, d_out, ffn_pdrop)
            self.ff2 = ConveRTFFN(in_sz_2, stacking_layers, d_out, ffn_pdrop)
        elif output_layer or in_sz != d_out or in_sz != in_sz_2:
            # Single projection (optionally shared when the input dims match)
            activation = output_activation if output_layer else None
            self.ff1 = Dense(in_sz, d_out, activation=activation)
            if in_sz == in_sz_2 and output_shared:
                self.ff2 = self.ff1
            else:
                self.ff2 = Dense(in_sz_2, d_out, activation=activation)
        else:
            # Dims already agree and no output layer requested: pass through
            self.ff1 = nn.Identity()
            self.ff2 = nn.Identity()
        self.output_dim = d_out

    def encode_query_base(self, query: torch.Tensor) -> torch.Tensor:
        # Subclass hook: raw query encoding before the output projection
        pass

    def encode_response_base(self, response: torch.Tensor) -> torch.Tensor:
        # Subclass hook: raw response encoding before the output projection
        pass

    def encode_query(self, query: torch.Tensor) -> torch.Tensor:
        return self.ff1(self.encode_query_base(query))

    def encode_response(self, response: torch.Tensor) -> torch.Tensor:
        return self.ff2(self.encode_response_base(response))

    def forward(self, query, response):
        return self.encode_query(query), self.encode_response(response)

    def create_loss(self, loss_type='symmetric', init_temp=None, learn_temp=False):
        """Factory for the training losses this family of models supports."""
        if loss_type == 'all':
            return AllLoss(self)
        if loss_type == 'all_mean':
            return AllLoss(self, reduction_type='mean')
        if loss_type == 'contrastive':
            return ContrastiveLoss(self, init_temp, learn_temp)
        if loss_type == 'symmetric':
            return SymmetricContrastiveLoss(self, init_temp, learn_temp)
        return TripletLoss(self)
class BasicDualEncoderModel(DualEncoderModel):
    """A simple encoder where the encoders are injected and supply the
    `encode_query_base` and `encode_response_base`
    """

    def __init__(self, encoder_1: nn.Module, encoder_2: nn.Module,
                 stacking_layers: Union[int, List[int]] = None, d_out: int = 512, ffn_pdrop=0.1):
        """
        :param encoder_1: query-side encoder (must expose `output_dim`)
        :param encoder_2: response-side encoder (must expose `output_dim`)
        :param stacking_layers: optional FFN hidden sizes for the output towers
        :param d_out: shared output plane size
        :param ffn_pdrop: dropout inside the FFN towers
        """
        super().__init__(encoder_1.output_dim, stacking_layers, d_out, ffn_pdrop, in_sz_2=encoder_2.output_dim)
        self.encoder_1 = encoder_1
        self.encoder_2 = encoder_2

    def encode_query_base(self, query: torch.Tensor) -> torch.Tensor:
        return self.encoder_1(query)

    def encode_response_base(self, response: torch.Tensor) -> torch.Tensor:
        return self.encoder_2(response)


class PairedModel(DualEncoderModel):
    """Legacy model for transformer-based dual encoder

    This is a dual-encoder transformer model which shares the lower layer
    encoder transformer sub-graph.  The reduction layer is attention based and
    takes the same input as the transformer layers.  It pools the reprs.
    Finally, the feed-forward stacks are applied via subclassing.

    Note that this model predates the more abstract `AbstractDualEncoder`
    which could accomplish the same thing by injecting the same `nn.Module`
    for encoder_1 and encoder_2 consisting of the transformer and reduction.
    """

    def __init__(self,
                 embeddings,
                 d_model: int,
                 d_ff: int,
                 dropout: float,
                 num_heads: int,
                 num_layers: int,
                 stacking_layers: Optional[nn.Module] = None,
                 d_out: Optional[int] = None,
                 d_k: Optional[int] = None,
                 weight_std: float = 0.02,
                 rpr_k: Optional[int] = None,
                 reduction_d_k: int = 64,
                 ffn_pdrop: float = 0.1,
                 windowed_ra: bool = False,
                 rpr_value_on: bool = False,
                 reduction_type: str = "2ha",
                 freeze_encoders: bool = False,
                 layer_norms_after: bool = False,
                 embeddings_reduction: str = 'sum',
                 layer_norm_eps: float = 1e-6,
                 output_layer: bool = False,
                 output_activation: str = 'tanh',
                 output_shared: bool = False,
                 transformer_type: Optional[str] = None,
                 **kwargs):
        # Two-head ("2*") reductions double the reduction output width
        super().__init__(2 * d_model if reduction_type.startswith("2") else d_model,
                         stacking_layers,
                         d_out if d_out is not None else d_model,
                         ffn_pdrop,
                         None,
                         output_layer,
                         output_activation,
                         output_shared)

        reduction_type = reduction_type.lower()
        self.reduce_fn = self._reduce_3
        if reduction_type == "2ha":
            self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k)
        elif reduction_type == "2ha_mean":
            self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
        elif reduction_type == "2ha_max":
            self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
        elif reduction_type == "sha":
            self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k)
        elif reduction_type == "sha_mean":
            self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
        elif reduction_type == "sha_max":
            self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
        elif reduction_type == 'max':
            self.reduce_fn = self._reduce_1
            self.reduction_layer = MaxPool1D(self.output_dim)
        elif reduction_type == 'mean':
            self.reduce_fn = self._reduce_1
            self.reduction_layer = MeanPool1D(self.output_dim)
        elif reduction_type == 'cls' or reduction_type == 'zero':
            self.reduce_fn = self._reduce_0
        else:
            # Bug fix: was `raise Exception("Unknown exception type")`, a
            # misleading message and an overly broad exception class
            raise ValueError(f"Unknown reduction type: {reduction_type}")
        self.weight_std = weight_std
        ra_type = kwargs.get('ra_type')
        self.transformer = TransformerEncoderStack(num_heads=num_heads, d_model=d_model,
                                                   pdrop=dropout, layers=num_layers, activation='gelu', d_ff=d_ff,
                                                   ffn_pdrop=ffn_pdrop,
                                                   d_k=d_k, rpr_k=rpr_k, windowed_ra=windowed_ra,
                                                   rpr_value_on=rpr_value_on,
                                                   layer_norms_after=layer_norms_after,
                                                   layer_norm_eps=layer_norm_eps,
                                                   ra_type=ra_type,
                                                   transformer_type=transformer_type)

        self.embeddings = EmbeddingsStack({'x': embeddings}, 0.0, False, embeddings_reduction)
        self.freeze = freeze_encoders
        self.apply(self.init_layer_weights)

    def init_layer_weights(self, module):
        """Normal init for linear/embedding/LN weights, zero biases."""
        if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
            module.weight.data.normal_(mean=0.0, std=self.weight_std)
        if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
            module.bias.data.zero_()

    def _reduce_3(self, encoded, att_mask):
        """The attention modules originally created for DE have 3 (redundant)
        inputs, so use all 3 here
        """
        return self.reduction_layer((encoded, encoded, encoded, att_mask))

    def _reduce_1(self, encoded, att_mask):
        """The standard reduction modules use an input and a length"""
        lengths = att_mask.squeeze(1).squeeze(1).sum(-1)
        return self.reduction_layer((encoded, lengths))

    def _reduce_0(self, encoded, _):
        """The [CLS] or <s> reduction on the first token just needs the first
        timestep
        """
        return encoded[:, 0]

    def _encode_base(self, tensor):
        """Shared embed -> transform -> reduce path for both sides.

        Gradients are disabled over the encoder when `freeze_encoders` is set.
        """
        att_mask = (tensor != Offsets.PAD).unsqueeze(1).unsqueeze(1)
        with torch.no_grad() if self.freeze else contextlib.ExitStack():
            embedded = self.embeddings({'x': tensor})
            encoded = self.transformer((embedded, att_mask))
        return self.reduce_fn(encoded, att_mask)

    def encode_query_base(self, query):
        return self._encode_base(query)

    def encode_response_base(self, response):
        return self._encode_base(response)
class TransformerBoWPairedModel(DualEncoderModel):
    """2 Encoders (E1, E2).  E1 is a Transformer followed by attention
    reduction.  E2 is just a pooling of embeddings.
    """

    def __init__(self,
                 embeddings,
                 d_model,
                 d_ff,
                 dropout,
                 num_heads,
                 num_layers,
                 stacking_layers=None,
                 d_out=512,
                 d_k=None,
                 weight_std=0.02,
                 rpr_k=None,
                 reduction_d_k=64,
                 ffn_pdrop=0.1,
                 windowed_ra=False,
                 rpr_value_on=False,
                 reduction_type_1="2ha",
                 freeze_encoders=False,
                 layer_norms_after=False,
                 transformer_type: Optional[str] = None,
                 **kwargs):
        super().__init__(d_model, stacking_layers, d_out, ffn_pdrop)

        reduction_type_1 = reduction_type_1.lower()

        if reduction_type_1 == "2ha":
            self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k),
                                                   nn.Linear(2 * d_model, d_model))
        elif reduction_type_1 == "2ha_mean":
            self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k,
                                                                 pooling="mean"),
                                                   nn.Linear(2 * d_model, d_model))
        elif reduction_type_1 == "2ha_max":
            self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k,
                                                                 pooling="max"),
                                                   nn.Linear(2 * d_model, d_model))
        elif reduction_type_1 == "sha":
            self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k)
        elif reduction_type_1 == "sha_mean":
            self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
        elif reduction_type_1 == "sha_max":
            self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
        else:
            # Bug fix: was `raise Exception("Unknown exception type")`, a
            # misleading message and an overly broad exception class
            raise ValueError(f"Unknown reduction type: {reduction_type_1}")
        self.weight_std = weight_std
        ra_type = kwargs.get('ra_type')
        self.transformer = TransformerEncoderStack(num_heads=num_heads, d_model=d_model,
                                                   pdrop=dropout, layers=num_layers, activation='gelu', d_ff=d_ff,
                                                   ffn_pdrop=ffn_pdrop,
                                                   d_k=d_k, rpr_k=rpr_k, windowed_ra=windowed_ra,
                                                   rpr_value_on=rpr_value_on,
                                                   layer_norms_after=layer_norms_after,
                                                   ra_type=ra_type,
                                                   transformer_type=transformer_type)

        self.embeddings = EmbeddingsStack({'x': embeddings})
        self.freeze = freeze_encoders
        # The response side is just pooled embeddings, matched to the query
        # side's pooling flavor (max vs mean)
        self.reduction_layer_2 = MaxPool1D(d_out) if reduction_type_1.endswith('max') else MeanPool1D(d_out)

        self.apply(self.init_layer_weights)

    def init_layer_weights(self, module):
        """Normal init for linear/embedding/LN weights, zero biases."""
        if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
            module.weight.data.normal_(mean=0.0, std=self.weight_std)
        if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
            module.bias.data.zero_()

    def encode_query_base(self, query):
        """Transformer encode + attention-reduce the query (no grad if frozen)."""
        query_mask = (query != Offsets.PAD)
        att_mask = query_mask.unsqueeze(1).unsqueeze(1)
        with torch.no_grad() if self.freeze else contextlib.ExitStack():
            embedded = self.embeddings({'x': query})
            encoded_query = self.transformer((embedded, att_mask))
        encoded_query = self.reduction_layer_1((encoded_query, encoded_query, encoded_query, att_mask))
        return encoded_query

    def encode_response_base(self, response):
        """Pool raw response embeddings over the unpadded length (no grad if frozen)."""
        response_lengths = torch.sum(response != Offsets.PAD, dim=1)
        with torch.no_grad() if self.freeze else contextlib.ExitStack():
            embedded = self.embeddings({'x': response})
        encoded_response = self.reduction_layer_2((embedded, response_lengths))
        return encoded_response


class CudaTimer:
    """A CUDA timer context manager that can be used to track and record events

    The timer is only enabled if `MEAD_PYTORCH_TIMER` is true.  If it is
    enabled, it will cause a large slowdown (similar to `CUDA_LAUNCH_BLOCKING`).
    """

    def __init__(self, name, sync_before=True):
        """
        :param name: label printed alongside the elapsed time
        :param sync_before: synchronize the device before the timed region so
            prior queued work does not pollute the measurement
        """
        self.enabled = str2bool(os.getenv('MEAD_PYTORCH_TIMER', False))
        if self.enabled:
            self._name = name
            self._start = torch.cuda.Event(enable_timing=True)
            self._end = torch.cuda.Event(enable_timing=True)
            if sync_before:
                torch.cuda.synchronize()

    def __enter__(self):
        if self.enabled:
            self._start.record()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if self.enabled:
            self._end.record()
            # Must sync so elapsed_time reads a completed event pair
            torch.cuda.synchronize()
            elapsed = self._start.elapsed_time(self._end)
            print(f"({os.getpid()}) {self._name} {elapsed}")
"""<def_stmt>__init__ self name sync_before=<true><block_start>""" :param name: :param sync_before: """<line_sep>self.enabled=str2bool(os.getenv('MEAD_PYTORCH_TIMER' <false>))<if_stmt>self.enabled<block_start>self._name=name<line_sep>self._start=torch.cuda.Event(enable_timing=<true>)<line_sep>self._end=torch.cuda.Event(enable_timing=<true>)<if_stmt>sync_before<block_start>torch.cuda.synchronize()<block_end><block_end><block_end><def_stmt>__enter__ self<block_start><if_stmt>self.enabled<block_start>self._start.record()<block_end><block_end><def_stmt>__exit__ self exc_type exc_value exc_traceback<block_start><if_stmt>self.enabled<block_start>self._end.record()<line_sep>torch.cuda.synchronize()<line_sep>elapsed=self._start.elapsed_time(self._end)<line_sep>print(f"({os.getpid()}) {self._name} {elapsed}")<block_end><block_end><block_end><class_stmt>WeightedNLLLoss(nn.Module)<block_start>"""Weight individual training examples """<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.loss=nn.NLLLoss(reduction='none')<block_end><def_stmt>forward self pred y weight<block_start>loss=self.loss(pred y)<line_sep>weight=weight.type_as(loss)<line_sep><return>torch.dot(loss weight)/len(weight)<block_end><block_end><class_stmt>WeightedMultiHeadNLLLoss(nn.Module)<block_start>"""Weight individual training examples with multiple heads """<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.loss=nn.NLLLoss(reduction='none')<block_end><def_stmt>forward self preds targets weights<block_start>loss=sum([self.loss(pred targets[: i])<for>i,pred enumerate(preds)])<line_sep>weights=weights.type_as(loss)<line_sep><return>torch.dot(loss weights)/len(weights)<block_end><block_end><class_stmt>WeightedSequenceLoss(nn.Module)<block_start>"""Weight individual training examples """<def_stmt>__init__ self LossFn:nn.Module=nn.NLLLoss avg:str="token"<block_start>super().__init__()<line_sep>self.avg=avg<line_sep>self.crit=LossFn(ignore_index=Offsets.PAD 
reduction="none")<if_stmt>avg<eq>'token'<block_start>self._reduce=self._mean<block_end><else_stmt><block_start>self._reduce=self._sum<block_end><block_end><def_stmt>_mean self loss<block_start><return>loss.mean(axis=1)<block_end><def_stmt>_sum self loss<block_start><return>loss.sum(axis=1)<block_end><def_stmt>forward self inputs:torch.Tensor targets:torch.Tensor weight:torch.Tensor<arrow>torch.Tensor<block_start>"""Evaluate some loss over a sequence. :param inputs: torch.FloatTensor, [B, T, C] The scores from the model. Batch First :param targets: torch.LongTensor, [B, T] The labels. :param weight: sample weights [B, ] :returns: torch.FloatTensor, The loss. """<line_sep>total_sz=targets.nelement()<line_sep>batchsz=weight.shape[0]<line_sep>loss=self.crit(inputs.view(total_sz -1) targets.view(total_sz)).view(batchsz -1)# [B, T] loss=torch.dot(self._reduce(loss) weight.type_as(loss))/batchsz<line_sep><return>loss<block_end><def_stmt>extra_repr self<block_start><return>f"reduction={self.avg}"<block_end><block_end>
# Copyright (C) 2013 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
<import_stmt>unittest<import_from_stmt>blinkpy.common.host Host<import_from_stmt>blinkpy.common.host_mock MockHost<import_from_stmt>blinkpy.web_tests.breakpad.dump_reader_multipart DumpReaderMultipart<class_stmt>TestDumpReaderMultipart(unittest.TestCase)<block_start>_MULTIPART_DUMP=['--boundary' 'Content-Disposition: form-data; name="prod"' '' 'content_shell' '--boundary' 'Content-Disposition: form-data; name="pid"' '' '4711' '--boundary' 'Content-Disposition: form-data; name="upload_file_minidump"; filename="dump"' 'Content-Type: application/octet-stream' '' 'MDMP' '--boundary--' ]<def_stmt>test_check_generate_breakpad_symbols_actually_exists self<block_start>host=Host()<line_sep>dump_reader=DumpReaderMultipart(host build_dir=<none>)<line_sep>self.assertTrue(host.filesystem.exists(dump_reader._path_to_generate_breakpad_symbols()))<block_end><def_stmt>test_check_is_functional_breakpad_tools_not_found self<block_start>host=MockHost()<line_sep>build_dir="/mock-checkout/out/Debug"<line_sep>host.filesystem.maybe_make_directory(build_dir)<line_sep>dump_reader=DumpReaderMultipart(host build_dir)<line_sep>dump_reader._file_extension=<lambda>:'dmp'<line_sep>dump_reader._binaries_to_symbolize=<lambda>:['content_shell']<line_sep>self.assertFalse(dump_reader.check_is_functional())<block_end><def_stmt>test_get_pid_from_dump self<block_start>host=MockHost()<line_sep>dump_file='/crash-dumps/dump.dmp'<line_sep>expected_pid='4711'<line_sep>host.filesystem.write_text_file(dump_file "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))<line_sep>build_dir="/mock-checkout/out/Debug"<line_sep>host.filesystem.maybe_make_directory(build_dir)<line_sep>host.filesystem.exists=<lambda>x:<true><line_sep># The mock file object returned by open_binary_file_for_reading doesn't # have readline(), however, the real File object does. 
host.filesystem.open_binary_file_for_reading=host.filesystem.open_text_file_for_reading<line_sep>dump_reader=DumpReaderMultipart(host build_dir)<line_sep>dump_reader._file_extension=<lambda>:'dmp'<line_sep>dump_reader._binaries_to_symbolize=<lambda>:['content_shell']<line_sep>self.assertTrue(dump_reader.check_is_functional())<line_sep>self.assertEqual(expected_pid dump_reader._get_pid_from_dump(dump_file))<block_end><def_stmt>test_get_stack_from_dump self<block_start>host=MockHost()<line_sep>dump_file='/crash-dumps/dump.dmp'<line_sep>host.filesystem.write_text_file(dump_file "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))<line_sep>build_dir="/mock-checkout/out/Debug"<line_sep>host.filesystem.maybe_make_directory(build_dir)<line_sep>host.filesystem.exists=<lambda>x:<true><line_sep># The mock file object returned by open_binary_file_for_reading doesn't # have readline(), however, the real File object does. host.filesystem.open_binary_file_for_reading=host.filesystem.open_text_file_for_reading<line_sep>dump_reader=DumpReaderMultipart(host build_dir)<line_sep>dump_reader._file_extension=<lambda>:'dmp'<line_sep>dump_reader._binaries_to_symbolize=<lambda>:['content_shell']<line_sep>self.assertTrue(dump_reader.check_is_functional())<line_sep>self.assertEqual("MOCK output of child process" dump_reader._get_stack_from_dump(dump_file))<line_sep>self.assertEqual(2 len(host.executive.calls))<line_sep>cmd_line=" ".join(host.executive.calls[0])<line_sep>self.assertIn('generate_breakpad_symbols.py' cmd_line)<line_sep>cmd_line=" ".join(host.executive.calls[1])<line_sep>self.assertIn('minidump_stackwalk' cmd_line)<block_end><block_end>
<import_stmt>os<import_stmt>sys<import_from_stmt>pyspark.sql.types *<line_sep>PATH="/home/ubuntu/work/ml-resources/spark-ml/data"<line_sep>SPARK_HOME="/home/ubuntu/work/spark-2.0.0-bin-hadoop2.7/"<line_sep>os.environ['SPARK_HOME']=SPARK_HOME<line_sep>sys.path.append(SPARK_HOME+"/python")<import_from_stmt>pyspark SparkContext<import_from_stmt>pyspark SparkConf<import_from_stmt>pyspark.sql SparkSession<line_sep>conf=SparkConf().setAppName("First Spark App").setMaster("local")<line_sep>sc=SparkContext(conf=conf)<line_sep>spark=SparkSession(sc)<def_stmt>get_user_data <block_start>custom_schema=StructType([StructField("no" StringType() <true>) StructField("age" IntegerType() <true>) StructField("gender" StringType() <true>) StructField("occupation" StringType() <true>) StructField("zipCode" StringType() <true>)])<import_from_stmt>pyspark.sql SQLContext<import_from_stmt>pyspark.sql.types *<line_sep>sql_context=SQLContext(sc)<line_sep>user_df=sql_context.read.format('com.databricks.spark.csv').options(header='false' delimiter='|').load("%s/ml-100k/u.user"%PATH schema=custom_schema)<line_sep><return>user_df<block_end><def_stmt>get_movie_data_df <block_start>custom_schema=StructType([StructField("no" StringType() <true>) StructField("moviename" StringType() <true>) StructField("date" StringType() <true>) StructField("f1" StringType() <true>) StructField("url" StringType() <true>) StructField("f2" IntegerType() <true>) StructField("f3" IntegerType() <true>) StructField("f4" IntegerType() <true>) StructField("f5" IntegerType() <true>) StructField("f6" IntegerType() <true>) StructField("f7" IntegerType() <true>) StructField("f8" IntegerType() <true>) StructField("f9" IntegerType() <true>) StructField("f10" IntegerType() <true>) StructField("f11" IntegerType() <true>) StructField("f12" IntegerType() <true>) StructField("f13" IntegerType() <true>) StructField("f14" IntegerType() <true>) StructField("f15" IntegerType() <true>) StructField("f16" IntegerType() <true>) 
StructField("f17" IntegerType() <true>) StructField("f18" IntegerType() <true>) StructField("f19" IntegerType() <true>)])<import_from_stmt>pyspark.sql SQLContext<import_from_stmt>pyspark.sql.types *<line_sep>sql_context=SQLContext(sc)<line_sep>movie_df=sql_context.read.format('com.databricks.spark.csv').options(header='false' delimiter='|').load("%s/ml-100k/u.item"%PATH schema=custom_schema)<line_sep><return>movie_df<block_end><def_stmt>get_movie_data <block_start><return>sc.textFile("%s/ml-100k/u.item"%PATH)<block_end><def_stmt>get_rating_data <block_start><return>sc.textFile("%s/ml-100k/u.data"%PATH)<block_end>
<import_from_stmt>rest_framework.test APITestCase APIClient<import_from_stmt>django.urls reverse<import_from_stmt>rest_framework.authtoken.models Token<class_stmt>UserTest(APITestCase)<block_start>""" Test the User APIv2 endpoint. """<line_sep>fixtures=['dojo_testdata.json']<def_stmt>setUp self<block_start>token=Token.objects.get(user__username='admin')<line_sep>self.client=APIClient()<line_sep>self.client.credentials(HTTP_AUTHORIZATION='Token '+token.key)<block_end><def_stmt>test_user_list self<block_start>r=self.client.get(reverse('user-list'))<line_sep>self.assertEqual(r.status_code 200 r.content[:1000])<line_sep>user_list=r.json()['results']<line_sep>self.assertTrue(len(user_list)<ge>1 r.content[:1000])<for_stmt>user user_list<block_start><for_stmt>item ['username' 'first_name' 'last_name' 'email']<block_start>self.assertIn(item user r.content[:1000])<block_end><for_stmt>item ['password']<block_start>self.assertNotIn(item user r.content[:1000])<block_end><block_end><block_end><def_stmt>test_user_add self# simple user without password <block_start>r=self.client.post(reverse('user-list') {"username":"api-user-1"} format='json')<line_sep>self.assertEqual(r.status_code 201 r.content[:1000])<line_sep># user with good password password='<PASSWORD>!@#$'<line_sep>r=self.client.post(reverse('user-list') {"username":"api-user-2" "password":password} format='json')<line_sep>self.assertEqual(r.status_code 201 r.content[:1000])<line_sep># test password by fetching API key r=self.client.post(reverse('api-token-auth') {"username":"api-user-2" "password":password} format='json')<line_sep>self.assertEqual(r.status_code 200 r.content[:1000])<line_sep># user with weak password r=self.client.post(reverse('user-list') {"username":"api-user-3" "password":"<PASSWORD>"} format='json')<line_sep>self.assertEqual(r.status_code 400 r.content[:1000])<line_sep>self.assertIn('The password must contain at least 1 digit, 0-9.' 
r.content.decode("utf-8"))<block_end><def_stmt>test_user_change_password self# some user <block_start>r=self.client.post(reverse('user-list') {"username":"api-user-4"} format='json')<line_sep>self.assertEqual(r.status_code 201 r.content[:1000])<line_sep>user_id=r.json()['id']<line_sep>r=self.client.put("{}{}/".format(reverse('user-list') user_id) {"username":"api-user-4" "first_name":"first"} format='json' )<line_sep>self.assertEqual(r.status_code 200 r.content[:1000])<line_sep>r=self.client.patch("{}{}/".format(reverse('user-list') user_id) {"last_name":"last"} format='json')<line_sep>self.assertEqual(r.status_code 200 r.content[:1000])<line_sep>r=self.client.put("{}{}/".format(reverse('user-list') user_id) {"username":"api-user-4" "password":"<PASSWORD>!@#$"} format='json')<line_sep>self.assertEqual(r.status_code 400 r.content[:1000])<line_sep>self.assertIn("Update of password though API is not allowed" r.content.decode("utf-8"))<line_sep>r=self.client.patch("{}{}/".format(reverse('user-list') user_id) {"password":"<PASSWORD>!@#$"} format='json')<line_sep>self.assertEqual(r.status_code 400 r.content[:1000])<line_sep>self.assertIn("Update of password though API is not allowed" r.content.decode("utf-8"))<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>pytest<import_from_stmt>mock Mock<import_from_stmt>bravado_core.exception SwaggerMappingError<import_from_stmt>bravado_core.operation Operation<import_from_stmt>bravado_core.param get_param_type_spec<import_from_stmt>bravado_core.param Param<import_from_stmt>bravado_core.spec Spec<line_sep>@pytest.fixture<def_stmt>body_param_spec <block_start><return>{'name':'body' 'in':'body' 'description':'pet id' 'required':<true> 'schema':{'type':'string' } }<block_end><def_stmt>test_location_is_body empty_swagger_spec body_param_spec<block_start>param=Param(empty_swagger_spec Mock(spec=Operation) body_param_spec)<assert_stmt>body_param_spec['schema']<eq>get_param_type_spec(param)<block_end><def_stmt>test_location_is_not_body empty_swagger_spec<block_start><for_stmt>location ('path' 'query' 'header' 'formData' )<block_start>param_spec={'name':'petId' 'in':location 'description':'ID of pet that needs to be updated' 'required':<true> 'type':'string' }<line_sep>param=Param(empty_swagger_spec Mock(spec=Operation) param_spec)<assert_stmt>param_spec<eq>get_param_type_spec(param)<block_end><block_end><def_stmt>test_location_invalid empty_swagger_spec body_param_spec<block_start>body_param_spec['in']='foo'<line_sep>param=Param(empty_swagger_spec Mock(spec=Operation) body_param_spec)<with_stmt>pytest.raises(SwaggerMappingError)<as>excinfo<block_start>get_param_type_spec(param)<block_end><assert_stmt>'location foo'<in>str(excinfo.value)<block_end><def_stmt>test_ref minimal_swagger_dict body_param_spec<block_start>minimal_swagger_dict['parameters']={'PetIdParam':body_param_spec }<line_sep>param_ref_spec={'$ref':'#/parameters/PetIdParam'}<line_sep>swagger_spec=Spec(minimal_swagger_dict)<line_sep>param=Param(swagger_spec Mock(spec=Operation) param_ref_spec)<assert_stmt>{'type':'string'}<eq>get_param_type_spec(param)<block_end>
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- <import_from_stmt>msrest Serializer Deserializer<import_from_stmt>...client Client<import_from_stmt>...v5_1.build models<class_stmt>BuildClient(Client)<block_start>"""Build :param str base_url: Service URL :param Authentication creds: Authenticated credentials. """<def_stmt>__init__ self base_url=<none> creds=<none><block_start>super(BuildClient self).__init__(base_url creds)<line_sep>client_models={k:v<for>k,v models.__dict__.items()<if>isinstance(v type)}<line_sep>self._serialize=Serializer(client_models)<line_sep>self._deserialize=Deserializer(client_models)<block_end>resource_area_identifier='965220d5-5bb9-42cf-8d67-9b146df2a5a4'<def_stmt>create_artifact self artifact project build_id<block_start>"""CreateArtifact. Associates an artifact with a build. :param :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>` artifact: The artifact. :param str project: Project ID or project name :param int build_id: The ID of the build. 
:rtype: :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>content=self._serialize.body(artifact 'BuildArtifact')<line_sep>response=self._send(http_method='POST' location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984' version='5.1' route_values=route_values content=content)<line_sep><return>self._deserialize('BuildArtifact' response)<block_end><def_stmt>get_artifact self project build_id artifact_name<block_start>"""GetArtifact. Gets a specific artifact for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str artifact_name: The name of the artifact. :rtype: :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>query_parameters={}<if_stmt>artifact_name<is><not><none><block_start>query_parameters['artifactName']=self._serialize.query('artifact_name' artifact_name 'str')<block_end>response=self._send(http_method='GET' location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep><return>self._deserialize('BuildArtifact' response)<block_end><def_stmt>get_artifact_content_zip self project build_id artifact_name **kwargs<block_start>"""GetArtifactContentZip. Gets a specific artifact for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str artifact_name: The name of the artifact. 
:rtype: object """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>query_parameters={}<if_stmt>artifact_name<is><not><none><block_start>query_parameters['artifactName']=self._serialize.query('artifact_name' artifact_name 'str')<block_end>response=self._send(http_method='GET' location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984' version='5.1' route_values=route_values query_parameters=query_parameters accept_media_type='application/zip')<if_stmt>"callback"<in>kwargs<block_start>callback=kwargs["callback"]<block_end><else_stmt><block_start>callback=<none><block_end><return>self._client.stream_download(response callback=callback)<block_end><def_stmt>get_artifacts self project build_id<block_start>"""GetArtifacts. Gets all artifacts for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :rtype: [BuildArtifact] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>response=self._send(http_method='GET' location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[BuildArtifact]' self._unwrap_collection(response))<block_end><def_stmt>get_file self project build_id artifact_name file_id file_name **kwargs<block_start>"""GetFile. Gets a file from the build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str artifact_name: The name of the artifact. :param str file_id: The primary key for the file. :param str file_name: The name that the file will be set to. 
:rtype: object """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>query_parameters={}<if_stmt>artifact_name<is><not><none><block_start>query_parameters['artifactName']=self._serialize.query('artifact_name' artifact_name 'str')<block_end><if_stmt>file_id<is><not><none><block_start>query_parameters['fileId']=self._serialize.query('file_id' file_id 'str')<block_end><if_stmt>file_name<is><not><none><block_start>query_parameters['fileName']=self._serialize.query('file_name' file_name 'str')<block_end>response=self._send(http_method='GET' location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984' version='5.1' route_values=route_values query_parameters=query_parameters accept_media_type='application/octet-stream')<if_stmt>"callback"<in>kwargs<block_start>callback=kwargs["callback"]<block_end><else_stmt><block_start>callback=<none><block_end><return>self._client.stream_download(response callback=callback)<block_end><def_stmt>delete_build self project build_id<block_start>"""DeleteBuild. Deletes a build. :param str project: Project ID or project name :param int build_id: The ID of the build. """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>self._send(http_method='DELETE' location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf' version='5.1' route_values=route_values)<block_end><def_stmt>get_build self project build_id property_filters=<none><block_start>"""GetBuild. 
Gets a build :param str project: Project ID or project name :param int build_id: :param str property_filters: :rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>query_parameters={}<if_stmt>property_filters<is><not><none><block_start>query_parameters['propertyFilters']=self._serialize.query('property_filters' property_filters 'str')<block_end>response=self._send(http_method='GET' location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep><return>self._deserialize('Build' response)<block_end><def_stmt>get_builds self project definitions=<none> queues=<none> build_number=<none> min_time=<none> max_time=<none> requested_for=<none> reason_filter=<none> status_filter=<none> result_filter=<none> tag_filters=<none> properties=<none> top=<none> continuation_token=<none> max_builds_per_definition=<none> deleted_filter=<none> query_order=<none> branch_name=<none> build_ids=<none> repository_id=<none> repository_type=<none><block_start>"""GetBuilds. Gets a list of builds. :param str project: Project ID or project name :param [int] definitions: A comma-delimited list of definition IDs. If specified, filters to builds for these definitions. :param [int] queues: A comma-delimited list of queue IDs. If specified, filters to builds that ran against these queues. :param str build_number: If specified, filters to builds that match this build number. Append * to do a prefix search. :param datetime min_time: If specified, filters to builds that finished/started/queued after this date based on the queryOrder specified. 
:param datetime max_time: If specified, filters to builds that finished/started/queued before this date based on the queryOrder specified. :param str requested_for: If specified, filters to builds requested for the specified user. :param str reason_filter: If specified, filters to builds that match this reason. :param str status_filter: If specified, filters to builds that match this status. :param str result_filter: If specified, filters to builds that match this result. :param [str] tag_filters: A comma-delimited list of tags. If specified, filters to builds that have the specified tags. :param [str] properties: A comma-delimited list of properties to retrieve. :param int top: The maximum number of builds to return. :param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of builds. :param int max_builds_per_definition: The maximum number of builds to return per definition. :param str deleted_filter: Indicates whether to exclude, include, or only return deleted builds. :param str query_order: The order in which builds should be returned. :param str branch_name: If specified, filters to builds that built branches that built this branch. :param [int] build_ids: A comma-delimited list that specifies the IDs of builds to retrieve. :param str repository_id: If specified, filters to builds that built from this repository. :param str repository_type: If specified, filters to builds that built from repositories of this type. 
:rtype: :class:`<GetBuildsResponseValue>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>query_parameters={}<if_stmt>definitions<is><not><none><block_start>definitions=",".join(map(str definitions))<line_sep>query_parameters['definitions']=self._serialize.query('definitions' definitions 'str')<block_end><if_stmt>queues<is><not><none><block_start>queues=",".join(map(str queues))<line_sep>query_parameters['queues']=self._serialize.query('queues' queues 'str')<block_end><if_stmt>build_number<is><not><none><block_start>query_parameters['buildNumber']=self._serialize.query('build_number' build_number 'str')<block_end><if_stmt>min_time<is><not><none><block_start>query_parameters['minTime']=self._serialize.query('min_time' min_time 'iso-8601')<block_end><if_stmt>max_time<is><not><none><block_start>query_parameters['maxTime']=self._serialize.query('max_time' max_time 'iso-8601')<block_end><if_stmt>requested_for<is><not><none><block_start>query_parameters['requestedFor']=self._serialize.query('requested_for' requested_for 'str')<block_end><if_stmt>reason_filter<is><not><none><block_start>query_parameters['reasonFilter']=self._serialize.query('reason_filter' reason_filter 'str')<block_end><if_stmt>status_filter<is><not><none><block_start>query_parameters['statusFilter']=self._serialize.query('status_filter' status_filter 'str')<block_end><if_stmt>result_filter<is><not><none><block_start>query_parameters['resultFilter']=self._serialize.query('result_filter' result_filter 'str')<block_end><if_stmt>tag_filters<is><not><none><block_start>tag_filters=",".join(tag_filters)<line_sep>query_parameters['tagFilters']=self._serialize.query('tag_filters' tag_filters 'str')<block_end><if_stmt>properties<is><not><none><block_start>properties=",".join(properties)<line_sep>query_parameters['properties']=self._serialize.query('properties' properties 
'str')<block_end><if_stmt>top<is><not><none><block_start>query_parameters['$top']=self._serialize.query('top' top 'int')<block_end><if_stmt>continuation_token<is><not><none><block_start>query_parameters['continuationToken']=self._serialize.query('continuation_token' continuation_token 'str')<block_end><if_stmt>max_builds_per_definition<is><not><none><block_start>query_parameters['maxBuildsPerDefinition']=self._serialize.query('max_builds_per_definition' max_builds_per_definition 'int')<block_end><if_stmt>deleted_filter<is><not><none><block_start>query_parameters['deletedFilter']=self._serialize.query('deleted_filter' deleted_filter 'str')<block_end><if_stmt>query_order<is><not><none><block_start>query_parameters['queryOrder']=self._serialize.query('query_order' query_order 'str')<block_end><if_stmt>branch_name<is><not><none><block_start>query_parameters['branchName']=self._serialize.query('branch_name' branch_name 'str')<block_end><if_stmt>build_ids<is><not><none><block_start>build_ids=",".join(map(str build_ids))<line_sep>query_parameters['buildIds']=self._serialize.query('build_ids' build_ids 'str')<block_end><if_stmt>repository_id<is><not><none><block_start>query_parameters['repositoryId']=self._serialize.query('repository_id' repository_id 'str')<block_end><if_stmt>repository_type<is><not><none><block_start>query_parameters['repositoryType']=self._serialize.query('repository_type' repository_type 'str')<block_end>response=self._send(http_method='GET' location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep>response_value=self._deserialize('[Build]' self._unwrap_collection(response))<line_sep>continuation_token=self._get_continuation_token(response)<line_sep><return>self.GetBuildsResponseValue(response_value continuation_token)<block_end><class_stmt>GetBuildsResponseValue(object)<block_start><def_stmt>__init__ self value continuation_token<block_start>""" Response for the get_builds 
method :param value: :type value: :class:`<[Build]> <azure.devops.v5_1.build.models.[Build]>` :param continuation_token: The continuation token to be used to get the next page of results. :type continuation_token: str """<line_sep>self.value=value<line_sep>self.continuation_token=continuation_token<block_end><block_end><def_stmt>queue_build self build project ignore_warnings=<none> check_in_ticket=<none> source_build_id=<none><block_start>"""QueueBuild. Queues a build :param :class:`<Build> <azure.devops.v5_1.build.models.Build>` build: :param str project: Project ID or project name :param bool ignore_warnings: :param str check_in_ticket: :param int source_build_id: :rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>query_parameters={}<if_stmt>ignore_warnings<is><not><none><block_start>query_parameters['ignoreWarnings']=self._serialize.query('ignore_warnings' ignore_warnings 'bool')<block_end><if_stmt>check_in_ticket<is><not><none><block_start>query_parameters['checkInTicket']=self._serialize.query('check_in_ticket' check_in_ticket 'str')<block_end><if_stmt>source_build_id<is><not><none><block_start>query_parameters['sourceBuildId']=self._serialize.query('source_build_id' source_build_id 'int')<block_end>content=self._serialize.body(build 'Build')<line_sep>response=self._send(http_method='POST' location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf' version='5.1' route_values=route_values query_parameters=query_parameters content=content)<line_sep><return>self._deserialize('Build' response)<block_end><def_stmt>update_build self build project build_id retry=<none><block_start>"""UpdateBuild. Updates a build. :param :class:`<Build> <azure.devops.v5_1.build.models.Build>` build: The build. :param str project: Project ID or project name :param int build_id: The ID of the build. 
:param bool retry: :rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>query_parameters={}<if_stmt>retry<is><not><none><block_start>query_parameters['retry']=self._serialize.query('retry' retry 'bool')<block_end>content=self._serialize.body(build 'Build')<line_sep>response=self._send(http_method='PATCH' location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf' version='5.1' route_values=route_values query_parameters=query_parameters content=content)<line_sep><return>self._deserialize('Build' response)<block_end><def_stmt>update_builds self builds project<block_start>"""UpdateBuilds. Updates multiple builds. :param [Build] builds: The builds to update. :param str project: Project ID or project name :rtype: [Build] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>content=self._serialize.body(builds '[Build]')<line_sep>response=self._send(http_method='PATCH' location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf' version='5.1' route_values=route_values content=content)<line_sep><return>self._deserialize('[Build]' self._unwrap_collection(response))<block_end><def_stmt>get_build_changes self project build_id continuation_token=<none> top=<none> include_source_change=<none><block_start>"""GetBuildChanges. 
Gets the changes associated with a build :param str project: Project ID or project name :param int build_id: :param str continuation_token: :param int top: The maximum number of changes to return :param bool include_source_change: :rtype: :class:`<GetBuildChangesResponseValue>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>query_parameters={}<if_stmt>continuation_token<is><not><none><block_start>query_parameters['continuationToken']=self._serialize.query('continuation_token' continuation_token 'str')<block_end><if_stmt>top<is><not><none><block_start>query_parameters['$top']=self._serialize.query('top' top 'int')<block_end><if_stmt>include_source_change<is><not><none><block_start>query_parameters['includeSourceChange']=self._serialize.query('include_source_change' include_source_change 'bool')<block_end>response=self._send(http_method='GET' location_id='54572c7b-bbd3-45d4-80dc-28be08941620' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep>response_value=self._deserialize('[Change]' self._unwrap_collection(response))<line_sep>continuation_token=self._get_continuation_token(response)<line_sep><return>self.GetBuildChangesResponseValue(response_value continuation_token)<block_end><class_stmt>GetBuildChangesResponseValue(object)<block_start><def_stmt>__init__ self value continuation_token<block_start>""" Response for the get_build_changes method :param value: :type value: :class:`<[Change]> <azure.devops.v5_1.build.models.[Change]>` :param continuation_token: The continuation token to be used to get the next page of results. 
:type continuation_token: str """<line_sep>self.value=value<line_sep>self.continuation_token=continuation_token<block_end><block_end><def_stmt>get_build_controller self controller_id<block_start>"""GetBuildController. Gets a controller :param int controller_id: :rtype: :class:`<BuildController> <azure.devops.v5_1.build.models.BuildController>` """<line_sep>route_values={}<if_stmt>controller_id<is><not><none><block_start>route_values['controllerId']=self._serialize.url('controller_id' controller_id 'int')<block_end>response=self._send(http_method='GET' location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('BuildController' response)<block_end><def_stmt>get_build_controllers self name=<none><block_start>"""GetBuildControllers. Gets controller, optionally filtered by name :param str name: :rtype: [BuildController] """<line_sep>query_parameters={}<if_stmt>name<is><not><none><block_start>query_parameters['name']=self._serialize.query('name' name 'str')<block_end>response=self._send(http_method='GET' location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6' version='5.1' query_parameters=query_parameters)<line_sep><return>self._deserialize('[BuildController]' self._unwrap_collection(response))<block_end><def_stmt>create_definition self definition project definition_to_clone_id=<none> definition_to_clone_revision=<none><block_start>"""CreateDefinition. Creates a new definition. :param :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` definition: The definition. 
:param str project: Project ID or project name :param int definition_to_clone_id: :param int definition_to_clone_revision: :rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>query_parameters={}<if_stmt>definition_to_clone_id<is><not><none><block_start>query_parameters['definitionToCloneId']=self._serialize.query('definition_to_clone_id' definition_to_clone_id 'int')<block_end><if_stmt>definition_to_clone_revision<is><not><none><block_start>query_parameters['definitionToCloneRevision']=self._serialize.query('definition_to_clone_revision' definition_to_clone_revision 'int')<block_end>content=self._serialize.body(definition 'BuildDefinition')<line_sep>response=self._send(http_method='POST' location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6' version='5.1' route_values=route_values query_parameters=query_parameters content=content)<line_sep><return>self._deserialize('BuildDefinition' response)<block_end><def_stmt>delete_definition self project definition_id<block_start>"""DeleteDefinition. Deletes a definition and all associated builds. :param str project: Project ID or project name :param int definition_id: The ID of the definition. """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>definition_id<is><not><none><block_start>route_values['definitionId']=self._serialize.url('definition_id' definition_id 'int')<block_end>self._send(http_method='DELETE' location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6' version='5.1' route_values=route_values)<block_end><def_stmt>get_definition self project definition_id revision=<none> min_metrics_time=<none> property_filters=<none> include_latest_builds=<none><block_start>"""GetDefinition. Gets a definition, optionally at a specific revision. 
:param str project: Project ID or project name :param int definition_id: The ID of the definition. :param int revision: The revision number to retrieve. If this is not specified, the latest version will be returned. :param datetime min_metrics_time: If specified, indicates the date from which metrics should be included. :param [str] property_filters: A comma-delimited list of properties to include in the results. :param bool include_latest_builds: :rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>definition_id<is><not><none><block_start>route_values['definitionId']=self._serialize.url('definition_id' definition_id 'int')<block_end>query_parameters={}<if_stmt>revision<is><not><none><block_start>query_parameters['revision']=self._serialize.query('revision' revision 'int')<block_end><if_stmt>min_metrics_time<is><not><none><block_start>query_parameters['minMetricsTime']=self._serialize.query('min_metrics_time' min_metrics_time 'iso-8601')<block_end><if_stmt>property_filters<is><not><none><block_start>property_filters=",".join(property_filters)<line_sep>query_parameters['propertyFilters']=self._serialize.query('property_filters' property_filters 'str')<block_end><if_stmt>include_latest_builds<is><not><none><block_start>query_parameters['includeLatestBuilds']=self._serialize.query('include_latest_builds' include_latest_builds 'bool')<block_end>response=self._send(http_method='GET' location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep><return>self._deserialize('BuildDefinition' response)<block_end><def_stmt>get_definitions self project name=<none> repository_id=<none> repository_type=<none> query_order=<none> top=<none> continuation_token=<none> min_metrics_time=<none> definition_ids=<none> path=<none> 
built_after=<none> not_built_after=<none> include_all_properties=<none> include_latest_builds=<none> task_id_filter=<none> process_type=<none> yaml_filename=<none><block_start>"""GetDefinitions. Gets a list of definitions. :param str project: Project ID or project name :param str name: If specified, filters to definitions whose names match this pattern. :param str repository_id: A repository ID. If specified, filters to definitions that use this repository. :param str repository_type: If specified, filters to definitions that have a repository of this type. :param str query_order: Indicates the order in which definitions should be returned. :param int top: The maximum number of definitions to return. :param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of definitions. :param datetime min_metrics_time: If specified, indicates the date from which metrics should be included. :param [int] definition_ids: A comma-delimited list that specifies the IDs of definitions to retrieve. :param str path: If specified, filters to definitions under this folder. :param datetime built_after: If specified, filters to definitions that have builds after this date. :param datetime not_built_after: If specified, filters to definitions that do not have builds after this date. :param bool include_all_properties: Indicates whether the full definitions should be returned. By default, shallow representations of the definitions are returned. :param bool include_latest_builds: Indicates whether to return the latest and latest completed builds for this definition. :param str task_id_filter: If specified, filters to definitions that use the specified task. :param int process_type: If specified, filters to definitions with the given process type. :param str yaml_filename: If specified, filters to YAML definitions that match the given filename. 
:rtype: :class:`<GetDefinitionsResponseValue>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>query_parameters={}<if_stmt>name<is><not><none><block_start>query_parameters['name']=self._serialize.query('name' name 'str')<block_end><if_stmt>repository_id<is><not><none><block_start>query_parameters['repositoryId']=self._serialize.query('repository_id' repository_id 'str')<block_end><if_stmt>repository_type<is><not><none><block_start>query_parameters['repositoryType']=self._serialize.query('repository_type' repository_type 'str')<block_end><if_stmt>query_order<is><not><none><block_start>query_parameters['queryOrder']=self._serialize.query('query_order' query_order 'str')<block_end><if_stmt>top<is><not><none><block_start>query_parameters['$top']=self._serialize.query('top' top 'int')<block_end><if_stmt>continuation_token<is><not><none><block_start>query_parameters['continuationToken']=self._serialize.query('continuation_token' continuation_token 'str')<block_end><if_stmt>min_metrics_time<is><not><none><block_start>query_parameters['minMetricsTime']=self._serialize.query('min_metrics_time' min_metrics_time 'iso-8601')<block_end><if_stmt>definition_ids<is><not><none><block_start>definition_ids=",".join(map(str definition_ids))<line_sep>query_parameters['definitionIds']=self._serialize.query('definition_ids' definition_ids 'str')<block_end><if_stmt>path<is><not><none><block_start>query_parameters['path']=self._serialize.query('path' path 'str')<block_end><if_stmt>built_after<is><not><none><block_start>query_parameters['builtAfter']=self._serialize.query('built_after' built_after 'iso-8601')<block_end><if_stmt>not_built_after<is><not><none><block_start>query_parameters['notBuiltAfter']=self._serialize.query('not_built_after' not_built_after 
'iso-8601')<block_end><if_stmt>include_all_properties<is><not><none><block_start>query_parameters['includeAllProperties']=self._serialize.query('include_all_properties' include_all_properties 'bool')<block_end><if_stmt>include_latest_builds<is><not><none><block_start>query_parameters['includeLatestBuilds']=self._serialize.query('include_latest_builds' include_latest_builds 'bool')<block_end><if_stmt>task_id_filter<is><not><none><block_start>query_parameters['taskIdFilter']=self._serialize.query('task_id_filter' task_id_filter 'str')<block_end><if_stmt>process_type<is><not><none><block_start>query_parameters['processType']=self._serialize.query('process_type' process_type 'int')<block_end><if_stmt>yaml_filename<is><not><none><block_start>query_parameters['yamlFilename']=self._serialize.query('yaml_filename' yaml_filename 'str')<block_end>response=self._send(http_method='GET' location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep>response_value=self._deserialize('[BuildDefinitionReference]' self._unwrap_collection(response))<line_sep>continuation_token=self._get_continuation_token(response)<line_sep><return>self.GetDefinitionsResponseValue(response_value continuation_token)<block_end><class_stmt>GetDefinitionsResponseValue(object)<block_start><def_stmt>__init__ self value continuation_token<block_start>""" Response for the get_definitions method :param value: :type value: :class:`<[BuildDefinitionReference]> <azure.devops.v5_1.build.models.[BuildDefinitionReference]>` :param continuation_token: The continuation token to be used to get the next page of results. :type continuation_token: str """<line_sep>self.value=value<line_sep>self.continuation_token=continuation_token<block_end><block_end><def_stmt>restore_definition self project definition_id deleted<block_start>"""RestoreDefinition. 
Restores a deleted definition :param str project: Project ID or project name :param int definition_id: The identifier of the definition to restore. :param bool deleted: When false, restores a deleted definition. :rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>definition_id<is><not><none><block_start>route_values['definitionId']=self._serialize.url('definition_id' definition_id 'int')<block_end>query_parameters={}<if_stmt>deleted<is><not><none><block_start>query_parameters['deleted']=self._serialize.query('deleted' deleted 'bool')<block_end>response=self._send(http_method='PATCH' location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep><return>self._deserialize('BuildDefinition' response)<block_end><def_stmt>update_definition self definition project definition_id secrets_source_definition_id=<none> secrets_source_definition_revision=<none><block_start>"""UpdateDefinition. Updates an existing definition. :param :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` definition: The new version of the definition. :param str project: Project ID or project name :param int definition_id: The ID of the definition. 
:param int secrets_source_definition_id: :param int secrets_source_definition_revision: :rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>definition_id<is><not><none><block_start>route_values['definitionId']=self._serialize.url('definition_id' definition_id 'int')<block_end>query_parameters={}<if_stmt>secrets_source_definition_id<is><not><none><block_start>query_parameters['secretsSourceDefinitionId']=self._serialize.query('secrets_source_definition_id' secrets_source_definition_id 'int')<block_end><if_stmt>secrets_source_definition_revision<is><not><none><block_start>query_parameters['secretsSourceDefinitionRevision']=self._serialize.query('secrets_source_definition_revision' secrets_source_definition_revision 'int')<block_end>content=self._serialize.body(definition 'BuildDefinition')<line_sep>response=self._send(http_method='PUT' location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6' version='5.1' route_values=route_values query_parameters=query_parameters content=content)<line_sep><return>self._deserialize('BuildDefinition' response)<block_end><def_stmt>get_build_log self project build_id log_id start_line=<none> end_line=<none> **kwargs<block_start>"""GetBuildLog. Gets an individual log file for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param int log_id: The ID of the log file. :param long start_line: The start line. :param long end_line: The end line. 
:rtype: object """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end><if_stmt>log_id<is><not><none><block_start>route_values['logId']=self._serialize.url('log_id' log_id 'int')<block_end>query_parameters={}<if_stmt>start_line<is><not><none><block_start>query_parameters['startLine']=self._serialize.query('start_line' start_line 'long')<block_end><if_stmt>end_line<is><not><none><block_start>query_parameters['endLine']=self._serialize.query('end_line' end_line 'long')<block_end>response=self._send(http_method='GET' location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df' version='5.1' route_values=route_values query_parameters=query_parameters accept_media_type='text/plain')<if_stmt>"callback"<in>kwargs<block_start>callback=kwargs["callback"]<block_end><else_stmt><block_start>callback=<none><block_end><return>self._client.stream_download(response callback=callback)<block_end><def_stmt>get_build_log_lines self project build_id log_id start_line=<none> end_line=<none><block_start>"""GetBuildLogLines. Gets an individual log file for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param int log_id: The ID of the log file. :param long start_line: The start line. :param long end_line: The end line. 
:rtype: [str] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end><if_stmt>log_id<is><not><none><block_start>route_values['logId']=self._serialize.url('log_id' log_id 'int')<block_end>query_parameters={}<if_stmt>start_line<is><not><none><block_start>query_parameters['startLine']=self._serialize.query('start_line' start_line 'long')<block_end><if_stmt>end_line<is><not><none><block_start>query_parameters['endLine']=self._serialize.query('end_line' end_line 'long')<block_end>response=self._send(http_method='GET' location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep><return>self._deserialize('[str]' self._unwrap_collection(response))<block_end><def_stmt>get_build_logs self project build_id<block_start>"""GetBuildLogs. Gets the logs for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :rtype: [BuildLog] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>response=self._send(http_method='GET' location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[BuildLog]' self._unwrap_collection(response))<block_end><def_stmt>get_build_logs_zip self project build_id **kwargs<block_start>"""GetBuildLogsZip. Gets the logs for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. 
:rtype: object """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>response=self._send(http_method='GET' location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df' version='5.1' route_values=route_values accept_media_type='application/zip')<if_stmt>"callback"<in>kwargs<block_start>callback=kwargs["callback"]<block_end><else_stmt><block_start>callback=<none><block_end><return>self._client.stream_download(response callback=callback)<block_end><def_stmt>get_build_log_zip self project build_id log_id start_line=<none> end_line=<none> **kwargs<block_start>"""GetBuildLogZip. Gets an individual log file for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param int log_id: The ID of the log file. :param long start_line: The start line. :param long end_line: The end line. 
:rtype: object """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end><if_stmt>log_id<is><not><none><block_start>route_values['logId']=self._serialize.url('log_id' log_id 'int')<block_end>query_parameters={}<if_stmt>start_line<is><not><none><block_start>query_parameters['startLine']=self._serialize.query('start_line' start_line 'long')<block_end><if_stmt>end_line<is><not><none><block_start>query_parameters['endLine']=self._serialize.query('end_line' end_line 'long')<block_end>response=self._send(http_method='GET' location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df' version='5.1' route_values=route_values query_parameters=query_parameters accept_media_type='application/zip')<if_stmt>"callback"<in>kwargs<block_start>callback=kwargs["callback"]<block_end><else_stmt><block_start>callback=<none><block_end><return>self._client.stream_download(response callback=callback)<block_end><def_stmt>get_build_option_definitions self project=<none><block_start>"""GetBuildOptionDefinitions. Gets all build definition options supported by the system. :param str project: Project ID or project name :rtype: [BuildOptionDefinition] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>response=self._send(http_method='GET' location_id='591cb5a4-2d46-4f3a-a697-5cd42b6bd332' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[BuildOptionDefinition]' self._unwrap_collection(response))<block_end><def_stmt>get_definition_revisions self project definition_id<block_start>"""GetDefinitionRevisions. Gets all revisions of a definition. :param str project: Project ID or project name :param int definition_id: The ID of the definition. 
:rtype: [BuildDefinitionRevision] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>definition_id<is><not><none><block_start>route_values['definitionId']=self._serialize.url('definition_id' definition_id 'int')<block_end>response=self._send(http_method='GET' location_id='7c116775-52e5-453e-8c5d-914d9762d8c4' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[BuildDefinitionRevision]' self._unwrap_collection(response))<block_end><def_stmt>get_build_settings self project=<none><block_start>"""GetBuildSettings. Gets the build settings. :param str project: Project ID or project name :rtype: :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>response=self._send(http_method='GET' location_id='aa8c1c9c-ef8b-474a-b8c4-785c7b191d0d' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('BuildSettings' response)<block_end><def_stmt>update_build_settings self settings project=<none><block_start>"""UpdateBuildSettings. Updates the build settings. :param :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>` settings: The new settings. 
:param str project: Project ID or project name :rtype: :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>content=self._serialize.body(settings 'BuildSettings')<line_sep>response=self._send(http_method='PATCH' location_id='aa8c1c9c-ef8b-474a-b8c4-785c7b191d0d' version='5.1' route_values=route_values content=content)<line_sep><return>self._deserialize('BuildSettings' response)<block_end><def_stmt>add_build_tag self project build_id tag<block_start>"""AddBuildTag. Adds a tag to a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str tag: The tag to add. :rtype: [str] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end><if_stmt>tag<is><not><none><block_start>route_values['tag']=self._serialize.url('tag' tag 'str')<block_end>response=self._send(http_method='PUT' location_id='6e6114b2-8161-44c8-8f6c-c5505782427f' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[str]' self._unwrap_collection(response))<block_end><def_stmt>add_build_tags self tags project build_id<block_start>"""AddBuildTags. Adds tags to a build. :param [str] tags: The tags to add. :param str project: Project ID or project name :param int build_id: The ID of the build. 
:rtype: [str] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>content=self._serialize.body(tags '[str]')<line_sep>response=self._send(http_method='POST' location_id='6e6114b2-8161-44c8-8f6c-c5505782427f' version='5.1' route_values=route_values content=content)<line_sep><return>self._deserialize('[str]' self._unwrap_collection(response))<block_end><def_stmt>delete_build_tag self project build_id tag<block_start>"""DeleteBuildTag. Removes a tag from a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str tag: The tag to remove. :rtype: [str] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end><if_stmt>tag<is><not><none><block_start>route_values['tag']=self._serialize.url('tag' tag 'str')<block_end>response=self._send(http_method='DELETE' location_id='6e6114b2-8161-44c8-8f6c-c5505782427f' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[str]' self._unwrap_collection(response))<block_end><def_stmt>get_build_tags self project build_id<block_start>"""GetBuildTags. Gets the tags for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. 
:rtype: [str] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>response=self._send(http_method='GET' location_id='6e6114b2-8161-44c8-8f6c-c5505782427f' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[str]' self._unwrap_collection(response))<block_end><def_stmt>get_tags self project<block_start>"""GetTags. Gets a list of all build and definition tags in the project. :param str project: Project ID or project name :rtype: [str] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>response=self._send(http_method='GET' location_id='d84ac5c6-edc7-43d5-adc9-1b34be5dea09' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[str]' self._unwrap_collection(response))<block_end><def_stmt>delete_template self project template_id<block_start>"""DeleteTemplate. Deletes a build definition template. :param str project: Project ID or project name :param str template_id: The ID of the template. """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>template_id<is><not><none><block_start>route_values['templateId']=self._serialize.url('template_id' template_id 'str')<block_end>self._send(http_method='DELETE' location_id='e884571e-7f92-4d6a-9274-3f5649900835' version='5.1' route_values=route_values)<block_end><def_stmt>get_template self project template_id<block_start>"""GetTemplate. Gets a specific build definition template. :param str project: Project ID or project name :param str template_id: The ID of the requested template. 
:rtype: :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>template_id<is><not><none><block_start>route_values['templateId']=self._serialize.url('template_id' template_id 'str')<block_end>response=self._send(http_method='GET' location_id='e884571e-7f92-4d6a-9274-3f5649900835' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('BuildDefinitionTemplate' response)<block_end><def_stmt>get_templates self project<block_start>"""GetTemplates. Gets all definition templates. :param str project: Project ID or project name :rtype: [BuildDefinitionTemplate] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end>response=self._send(http_method='GET' location_id='e884571e-7f92-4d6a-9274-3f5649900835' version='5.1' route_values=route_values)<line_sep><return>self._deserialize('[BuildDefinitionTemplate]' self._unwrap_collection(response))<block_end><def_stmt>save_template self template project template_id<block_start>"""SaveTemplate. Updates an existing build definition template. :param :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>` template: The new version of the template. :param str project: Project ID or project name :param str template_id: The ID of the template. 
:rtype: :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>template_id<is><not><none><block_start>route_values['templateId']=self._serialize.url('template_id' template_id 'str')<block_end>content=self._serialize.body(template 'BuildDefinitionTemplate')<line_sep>response=self._send(http_method='PUT' location_id='e884571e-7f92-4d6a-9274-3f5649900835' version='5.1' route_values=route_values content=content)<line_sep><return>self._deserialize('BuildDefinitionTemplate' response)<block_end><def_stmt>get_build_timeline self project build_id timeline_id=<none> change_id=<none> plan_id=<none><block_start>"""GetBuildTimeline. Gets details for a build :param str project: Project ID or project name :param int build_id: :param str timeline_id: :param int change_id: :param str plan_id: :rtype: :class:`<Timeline> <azure.devops.v5_1.build.models.Timeline>` """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end><if_stmt>timeline_id<is><not><none><block_start>route_values['timelineId']=self._serialize.url('timeline_id' timeline_id 'str')<block_end>query_parameters={}<if_stmt>change_id<is><not><none><block_start>query_parameters['changeId']=self._serialize.query('change_id' change_id 'int')<block_end><if_stmt>plan_id<is><not><none><block_start>query_parameters['planId']=self._serialize.query('plan_id' plan_id 'str')<block_end>response=self._send(http_method='GET' location_id='8baac422-4c6e-4de5-8532-db96d92acffa' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep><return>self._deserialize('Timeline' 
response)<block_end><def_stmt>get_build_work_items_refs self project build_id top=<none><block_start>"""GetBuildWorkItemsRefs. Gets the work items associated with a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param int top: The maximum number of work items to return. :rtype: [ResourceRef] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>query_parameters={}<if_stmt>top<is><not><none><block_start>query_parameters['$top']=self._serialize.query('top' top 'int')<block_end>response=self._send(http_method='GET' location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee' version='5.1' route_values=route_values query_parameters=query_parameters)<line_sep><return>self._deserialize('[ResourceRef]' self._unwrap_collection(response))<block_end><def_stmt>get_build_work_items_refs_from_commits self commit_ids project build_id top=<none><block_start>"""GetBuildWorkItemsRefsFromCommits. Gets the work items associated with a build, filtered to specific commits. :param [str] commit_ids: A comma-delimited list of commit IDs. :param str project: Project ID or project name :param int build_id: The ID of the build. :param int top: The maximum number of work items to return, or the number of commits to consider if no commit IDs are specified. 
:rtype: [ResourceRef] """<line_sep>route_values={}<if_stmt>project<is><not><none><block_start>route_values['project']=self._serialize.url('project' project 'str')<block_end><if_stmt>build_id<is><not><none><block_start>route_values['buildId']=self._serialize.url('build_id' build_id 'int')<block_end>query_parameters={}<if_stmt>top<is><not><none><block_start>query_parameters['$top']=self._serialize.query('top' top 'int')<block_end>content=self._serialize.body(commit_ids '[str]')<line_sep>response=self._send(http_method='POST' location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee' version='5.1' route_values=route_values query_parameters=query_parameters content=content)<line_sep><return>self._deserialize('[ResourceRef]' self._unwrap_collection(response))<block_end><block_end>
from django.db import migrations


def rename_sslyze_parser(apps, schema_editor):
    """Rename the test type 'SSLyze 3 Scan (JSON)' to 'SSLyze Scan (JSON)'.

    Uses the historical model via ``apps.get_model`` (required in data
    migrations) rather than importing the current model directly.

    :param apps: Historical app registry provided by the migration framework.
    :param schema_editor: Database schema editor (unused; data-only migration).
    """
    Test_Type_model = apps.get_model('dojo', 'Test_Type')
    try:
        test_type_sslyze = Test_Type_model.objects.get(name='SSLyze 3 Scan (JSON)')
        test_type_sslyze.name = 'SSLyze Scan (JSON)'
        test_type_sslyze.save()
    except Test_Type_model.DoesNotExist:
        # This happens when a new instance of DD is initialized: the test
        # type has never been created, so there is nothing to rename.
        pass


class Migration(migrations.Migration):

    dependencies = [
        ('dojo', '0146_lead_optional'),
    ]

    operations = [
        # reverse_code=noop makes the migration reversible (unapplying is a
        # harmless no-op instead of raising IrreversibleError); forward
        # behavior is unchanged.
        migrations.RunPython(rename_sslyze_parser, migrations.RunPython.noop),
    ]
<import_from_stmt>collections defaultdict<import_stmt>graphene<import_stmt>pytest<import_from_stmt>django.core.exceptions ValidationError<import_from_stmt>....shipping.error_codes ShippingErrorCode<import_from_stmt>..mutations BaseChannelListingMutation<def_stmt>test_validate_duplicated_channel_ids channel_PLN channel_USD# given <block_start>channel_id=graphene.Node.to_global_id("Channel" channel_USD.id)<line_sep>second_channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>errors=defaultdict(list)<line_sep># when result=BaseChannelListingMutation.validate_duplicated_channel_ids([channel_id] [second_channel_id] errors ShippingErrorCode.DUPLICATED_INPUT_ITEM.value )<line_sep># then <assert_stmt>result<is><none><assert_stmt>errors["input"]<eq>[]<block_end><def_stmt>test_validate_duplicated_channel_ids_with_duplicates channel_PLN# given <block_start>channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>second_channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>error_code=ShippingErrorCode.DUPLICATED_INPUT_ITEM.value<line_sep>errors=defaultdict(list)<line_sep># when result=BaseChannelListingMutation.validate_duplicated_channel_ids([channel_id] [second_channel_id] errors error_code)<line_sep># then <assert_stmt>result<is><none><assert_stmt>errors["input"][0].code<eq>error_code<block_end><def_stmt>test_validate_duplicated_channel_values channel_PLN channel_USD# given <block_start>channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>second_channel_id=graphene.Node.to_global_id("Channel" channel_USD.id)<line_sep>error_code=ShippingErrorCode.DUPLICATED_INPUT_ITEM.value<line_sep>errors=defaultdict(list)<line_sep>field="add_channels"<line_sep># when result=BaseChannelListingMutation.validate_duplicated_channel_values([channel_id second_channel_id] field errors error_code)<line_sep># then 
<assert_stmt>result<is><none><assert_stmt>errors[field]<eq>[]<block_end><def_stmt>test_validate_duplicated_channel_values_with_duplicates channel_PLN# given <block_start>channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>second_channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>error_code=ShippingErrorCode.DUPLICATED_INPUT_ITEM.value<line_sep>errors=defaultdict(list)<line_sep>field="add_channels"<line_sep># when result=BaseChannelListingMutation.validate_duplicated_channel_values([channel_id second_channel_id] field errors error_code)<line_sep># then <assert_stmt>result<is><none><assert_stmt>errors[field][0].code<eq>error_code<block_end><def_stmt>test_clean_channels_add_channels channel_PLN# given <block_start>channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>error_code=ShippingErrorCode.DUPLICATED_INPUT_ITEM.value<line_sep>errors=defaultdict(list)<line_sep># when result=BaseChannelListingMutation.clean_channels(<none> {"add_channels":[{"channel_id":channel_id}]} errors error_code)<line_sep># then <assert_stmt>result<eq>{"add_channels":[{"channel_id":channel_id "channel":channel_PLN}] "remove_channels":[] }<assert_stmt>errors["input"]<eq>[]<block_end><def_stmt>test_clean_channels_remove_channels channel_PLN# given <block_start>channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>error_code=ShippingErrorCode.DUPLICATED_INPUT_ITEM.value<line_sep>errors=defaultdict(list)<line_sep># when result=BaseChannelListingMutation.clean_channels(<none> {"remove_channels":[channel_id]} errors error_code)<line_sep># then <assert_stmt>result<eq>{"add_channels":[] "remove_channels":[str(channel_PLN.id)]}<assert_stmt>errors["input"]<eq>[]<block_end><def_stmt>test_test_clean_channels_with_errors channel_PLN# given <block_start>channel_id=graphene.Node.to_global_id("Channel" channel_PLN.id)<line_sep>error_code=ShippingErrorCode.DUPLICATED_INPUT_ITEM.value<line_sep>errors=defaultdict(list)<line_sep># 
when result=BaseChannelListingMutation.clean_channels(<none> {"remove_channels":[channel_id channel_id]} errors error_code)<line_sep># then <assert_stmt>result<eq>{}<assert_stmt>errors["remove_channels"][0].code<eq>error_code<block_end><def_stmt>test_test_clean_channels_invalid_object_type channel_PLN# given <block_start>channel_id=graphene.Node.to_global_id("Product" channel_PLN.id)<line_sep>error_code=ShippingErrorCode.GRAPHQL_ERROR.value<line_sep>errors=defaultdict(list)<line_sep># when <with_stmt>pytest.raises(ValidationError)<as>error<block_start>BaseChannelListingMutation.clean_channels(<none> {"remove_channels":[channel_id]} errors error_code)<block_end># then <assert_stmt>(error.value.error_dict["remove_channels"][0].message<eq>f"Must receive Channel id: {channel_id}.")<block_end>
#! /usr/bin/python2 # -*- coding: utf-8 -*- """ Clock function to take running time following Segmatch. """<line_sep># BSD 3-Clause License # # Copyright (c) 2019, FPAI # Copyright (c) 2019, SeriouslyHAO # Copyright (c) 2019, xcj2019 # Copyright (c) 2019, Leonfirst # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
<import_stmt>datetime<class_stmt>Clock(object)<block_start><def_stmt>__init__ self<block_start>self.kSecondsToMiliseconds=1000.0<line_sep>self.kMicrosecondsToMiliseconds=0.001<line_sep>self.start()<block_end><def_stmt>start self<block_start>self.real_time_start_=datetime.datetime.now()<block_end><def_stmt>takeTime self<block_start>seconds=(datetime.datetime.now()-self.real_time_start_).seconds<line_sep>useconds=(datetime.datetime.now()-self.real_time_start_).microseconds<line_sep>self.real_time_ms_=(seconds<times>self.kSecondsToMiliseconds+useconds<times>self.kMicrosecondsToMiliseconds)+0.5<block_end><def_stmt>getRealTime self<block_start><return>self.real_time_ms_<block_end><def_stmt>takeRealTime self<block_start>self.takeTime()<line_sep><return>self.getRealTime()<block_end><block_end>
<import_stmt>os<import_stmt>glob<line_sep>subdirs=glob.glob("tests/periodicities/*")<line_sep>subdirs=['tests/periodicities/Month' 'tests/periodicities/Minute' 'tests/periodicities/Week' 'tests/periodicities/Business_Hour' 'tests/periodicities/Business_Day' 'tests/periodicities/Second' 'tests/periodicities/Semi_Month' 'tests/periodicities/Hour' 'tests/periodicities/Day']<line_sep>#print(subdirs) print("PYTHON=python3\n\n")<line_sep>lAllTarget=""<for_stmt>subdir1 sorted(subdirs)<block_start>lBase=os.path.basename(subdir1)<line_sep>test_target=""<for_stmt>filename sorted(glob.glob(subdir1+"/*.py"))<block_start>bn=os.path.basename(filename)<line_sep>logfile=bn.replace("/" "_")<line_sep>logfile="logs/periodicities_"+logfile.replace(".py" ".log")<line_sep>print("#PROCESSING FILE : " filename bn logfile)<line_sep>print(bn " : " "\n\t" "-$(PYTHON) " filename " > " logfile " 2>&1")<line_sep>test_target=bn+" "+test_target<line_sep><block_end>lAllTarget=lAllTarget+" "+lBase<line_sep>print("\n\n" lBase ": " test_target "\n" "\n")<line_sep><block_end>print("\n# ********************************************** \n")<line_sep>print("all: " lAllTarget "\n\t\n")<line_sep>
<import_from_stmt>forest_fire.server server<line_sep>server.launch()<line_sep>
<import_from_future_stmt> unicode_literals<import_from_stmt>django.contrib.auth.models User<import_from_stmt>djblets.webapi.errors PERMISSION_DENIED<import_from_stmt>reviewboard.reviews.models ScreenshotComment<import_from_stmt>reviewboard.webapi.resources resources<import_from_stmt>reviewboard.webapi.tests.base BaseWebAPITestCase<import_from_stmt>reviewboard.webapi.tests.mimetypes screenshot_comment_item_mimetype screenshot_comment_list_mimetype <import_from_stmt>reviewboard.webapi.tests.mixins BasicTestsMetaclass ReviewRequestChildItemMixin ReviewRequestChildListMixin <import_from_stmt>reviewboard.webapi.tests.mixins_comment CommentItemMixin CommentListMixin <import_from_stmt>reviewboard.webapi.tests.urls get_review_screenshot_comment_item_url get_review_screenshot_comment_list_url <class_stmt>BaseTestCase(BaseWebAPITestCase)<block_start>fixtures=['test_users']<def_stmt>_create_screenshot_review_with_issue self publish=<false> comment_text=<none><block_start>"""Sets up a review for a screenshot that includes an open issue. If `publish` is True, the review is published. The review request is always published. Returns the response from posting the comment, the review object, and the review request object. 
"""<if_stmt><not>comment_text<block_start>comment_text='Test screenshot comment with an opened issue'<block_end>review_request=self.create_review_request(publish=<true> submitter=self.user)<line_sep>screenshot=self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=self.user publish=publish)<line_sep>comment=self.create_screenshot_comment(review screenshot comment_text issue_opened=<true>)<line_sep><return>comment review review_request<block_end><block_end><class_stmt>ResourceListTests(CommentListMixin ReviewRequestChildListMixin BaseTestCase metaclass=BasicTestsMetaclass)<block_start>"""Testing the ReviewScreenshotCommentResource list APIs."""<line_sep>sample_api_url='review-requests/<id>/reviews/<id>/screenshot-comments/'<line_sep>resource=resources.review_screenshot_comment<def_stmt>setup_review_request_child_test self review_request<block_start>self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=self.user)<line_sep><return>(get_review_screenshot_comment_list_url(review) screenshot_comment_list_mimetype)<block_end><def_stmt>compare_item self item_rsp comment<block_start>self.assertEqual(item_rsp['id'] comment.pk)<line_sep>self.assertEqual(item_rsp['text'] comment.text)<line_sep>self.assertEqual(item_rsp['x'] comment.x)<line_sep>self.assertEqual(item_rsp['y'] comment.y)<line_sep>self.assertEqual(item_rsp['w'] comment.w)<line_sep>self.assertEqual(item_rsp['h'] comment.h)<line_sep>self.assertEqual(item_rsp['extra_data'] comment.extra_data)<if_stmt>comment.rich_text<block_start>self.assertEqual(item_rsp['text_type'] 'markdown')<block_end><else_stmt><block_start>self.assertEqual(item_rsp['text_type'] 'plain')<block_end><block_end># # HTTP GET tests # <def_stmt>setup_basic_get_test self user with_local_site local_site_name populate_items<block_start>review_request=self.create_review_request(with_local_site=with_local_site submitter=user 
publish=<true>)<line_sep>screenshot=self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=user)<if_stmt>populate_items<block_start>items=[self.create_screenshot_comment(review screenshot)]<block_end><else_stmt><block_start>items=[]<block_end><return>(get_review_screenshot_comment_list_url(review local_site_name) screenshot_comment_list_mimetype items)<block_end># # HTTP POST tests # <def_stmt>setup_basic_post_test self user with_local_site local_site_name post_valid_data<block_start>review_request=self.create_review_request(with_local_site=with_local_site submitter=user publish=<true>)<line_sep>screenshot=self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=user)<line_sep><return>(get_review_screenshot_comment_list_url(review local_site_name) screenshot_comment_item_mimetype {'screenshot_id':screenshot.pk 'text':'Test comment' 'x':2 'y':2 'w':10 'h':10 } [review screenshot])<block_end><def_stmt>check_post_result self user rsp review screenshot<block_start>comment=ScreenshotComment.objects.get(pk=rsp['screenshot_comment']['id'])<line_sep>self.compare_item(rsp['screenshot_comment'] comment)<block_end><def_stmt>test_post_with_issue self<block_start>"""Testing the POST review-requests/<id>/reviews/<id>/screenshot-comments/ API with an issue """<line_sep>comment_text="Test screenshot comment with an opened issue"<line_sep>comment,review,review_request=self._create_screenshot_review_with_issue(publish=<false> comment_text=comment_text)<line_sep>rsp=self.api_get(get_review_screenshot_comment_list_url(review) expected_mimetype=screenshot_comment_list_mimetype)<line_sep>self.assertEqual(rsp['stat'] 'ok')<line_sep>self.assertIn('screenshot_comments' rsp)<line_sep>self.assertEqual(len(rsp['screenshot_comments']) 1)<line_sep>self.assertEqual(rsp['screenshot_comments'][0]['text'] 
comment_text)<line_sep>self.assertTrue(rsp['screenshot_comments'][0]['issue_opened'])<block_end><block_end><class_stmt>ResourceItemTests(CommentItemMixin ReviewRequestChildItemMixin BaseTestCase metaclass=BasicTestsMetaclass)<block_start>"""Testing the ReviewScreenshotCommentResource item APIs."""<line_sep>fixtures=['test_users']<line_sep>sample_api_url='review-requests/<id>/reviews/<id>/screenshot-comments/<id>/'<line_sep>resource=resources.review_screenshot_comment<def_stmt>compare_item self item_rsp comment<block_start>self.assertEqual(item_rsp['id'] comment.pk)<line_sep>self.assertEqual(item_rsp['text'] comment.text)<line_sep>self.assertEqual(item_rsp['x'] comment.x)<line_sep>self.assertEqual(item_rsp['y'] comment.y)<line_sep>self.assertEqual(item_rsp['w'] comment.w)<line_sep>self.assertEqual(item_rsp['h'] comment.h)<line_sep>self.assertEqual(item_rsp['extra_data'] comment.extra_data)<if_stmt>comment.rich_text<block_start>self.assertEqual(item_rsp['text_type'] 'markdown')<block_end><else_stmt><block_start>self.assertEqual(item_rsp['text_type'] 'plain')<block_end><block_end><def_stmt>setup_review_request_child_test self review_request<block_start>screenshot=self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=self.user)<line_sep>comment=self.create_screenshot_comment(review screenshot)<line_sep><return>(get_review_screenshot_comment_item_url(review comment.pk) screenshot_comment_item_mimetype)<block_end># # HTTP DELETE tests # <def_stmt>setup_basic_delete_test self user with_local_site local_site_name<block_start>review_request=self.create_review_request(with_local_site=with_local_site submitter=user publish=<true>)<line_sep>screenshot=self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=user)<line_sep>comment=self.create_screenshot_comment(review screenshot)<line_sep><return>(get_review_screenshot_comment_item_url(review comment.pk local_site_name) [comment 
review])<block_end><def_stmt>check_delete_result self user comment review<block_start>self.assertNotIn(comment review.screenshot_comments.all())<block_end><def_stmt>test_delete_with_does_not_exist_error self<block_start>"""Testing the DELETE review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API with Does Not Exist error """<line_sep>review_request=self.create_review_request(publish=<true>)<line_sep>self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=self.user)<line_sep>self.api_delete(get_review_screenshot_comment_item_url(review 123) expected_status=404)<block_end># # HTTP GET tests # <def_stmt>setup_basic_get_test self user with_local_site local_site_name<block_start>review_request=self.create_review_request(with_local_site=with_local_site submitter=user publish=<true>)<line_sep>screenshot=self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=user)<line_sep>comment=self.create_screenshot_comment(review screenshot)<line_sep><return>(get_review_screenshot_comment_item_url(review comment.pk local_site_name) screenshot_comment_item_mimetype comment)<block_end># # HTTP PUT tests # <def_stmt>setup_basic_put_test self user with_local_site local_site_name put_valid_data<block_start>review_request=self.create_review_request(with_local_site=with_local_site submitter=user publish=<true>)<line_sep>screenshot=self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=user)<line_sep>comment=self.create_screenshot_comment(review screenshot)<line_sep><return>(get_review_screenshot_comment_item_url(review comment.pk local_site_name) screenshot_comment_item_mimetype {'text':'Test comment'} comment [])<block_end><def_stmt>check_put_result self user item_rsp comment *args<block_start>comment=ScreenshotComment.objects.get(pk=comment.pk)<line_sep>self.assertEqual(item_rsp['text_type'] 'plain')<line_sep>self.assertEqual(item_rsp['text'] 'Test 
comment')<line_sep>self.compare_item(item_rsp comment)<block_end><def_stmt>test_put_with_issue self<block_start>"""Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API with an issue, removing issue_opened """<line_sep>comment,review,review_request=self._create_screenshot_review_with_issue()<line_sep>rsp=self.api_put(get_review_screenshot_comment_item_url(review comment.pk) {'issue_opened':<false>} expected_mimetype=screenshot_comment_item_mimetype)<line_sep>self.assertEqual(rsp['stat'] 'ok')<line_sep>self.assertFalse(rsp['screenshot_comment']['issue_opened'])<block_end><def_stmt>test_put_issue_status_before_publish self<block_start>"""Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API with an issue, before review is published """<line_sep>comment,review,review_request=self._create_screenshot_review_with_issue()<line_sep># The issue_status should not be able to be changed while the review is # unpublished. rsp=self.api_put(get_review_screenshot_comment_item_url(review comment.pk) {'issue_status':'resolved'} expected_mimetype=screenshot_comment_item_mimetype)<line_sep>self.assertEqual(rsp['stat'] 'ok')<line_sep># The issue_status should still be "open" self.assertEqual(rsp['screenshot_comment']['issue_status'] 'open')<block_end><def_stmt>test_put_issue_status_after_publish self<block_start>"""Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API with an issue, after review is published """<line_sep>comment,review,review_request=self._create_screenshot_review_with_issue(publish=<true>)<line_sep>rsp=self.api_put(get_review_screenshot_comment_item_url(review comment.pk) {'issue_status':'resolved'} expected_mimetype=screenshot_comment_item_mimetype)<line_sep>self.assertEqual(rsp['stat'] 'ok')<line_sep>self.assertEqual(rsp['screenshot_comment']['issue_status'] 'resolved')<block_end><def_stmt>test_put_issue_status_by_issue_creator self<block_start>"""Testing the PUT 
review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API permissions for issue creator """<line_sep>comment,review,review_request=self._create_screenshot_review_with_issue(publish=<true>)<line_sep># Change the owner of the review request so that it's not owned by # self.user review_request.submitter=User.objects.get(username='doc')<line_sep>review_request.save()<line_sep># The review/comment (and therefore issue) is still owned by self.user, # so we should be able to change the issue status. rsp=self.api_put(get_review_screenshot_comment_item_url(review comment.pk) {'issue_status':'dropped'} expected_mimetype=screenshot_comment_item_mimetype)<line_sep>self.assertEqual(rsp['stat'] 'ok')<line_sep>self.assertEqual(rsp['screenshot_comment']['issue_status'] 'dropped')<block_end><def_stmt>test_put_issue_status_by_uninvolved_user self<block_start>"""Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API permissions for an uninvolved user """<line_sep>comment,review,review_request=self._create_screenshot_review_with_issue(publish=<true>)<line_sep># Change the owner of the review request and review so that they're not # owned by self.user. 
new_owner=User.objects.get(username='doc')<line_sep>review_request.submitter=new_owner<line_sep>review_request.save()<line_sep>review.user=new_owner<line_sep>review.save()<line_sep>rsp=self.api_put(get_review_screenshot_comment_item_url(review comment.pk) {'issue_status':'dropped'} expected_status=403)<line_sep>self.assertEqual(rsp['stat'] 'fail')<line_sep>self.assertEqual(rsp['err']['code'] PERMISSION_DENIED.code)<block_end><def_stmt>test_put_deleted_screenshot_comment_issue_status self<block_start>"""Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API with an issue and a deleted screenshot """<line_sep>comment_text="Test screenshot comment with an opened issue"<line_sep>x,y,w,h=(2 2 10 10)<line_sep>review_request=self.create_review_request(publish=<true> submitter=self.user target_people=[self.user])<line_sep>screenshot=self.create_screenshot(review_request)<line_sep>review=self.create_review(review_request user=self.user)<line_sep>comment=self.create_screenshot_comment(review screenshot comment_text x y w h issue_opened=<true>)<line_sep># First, let's ensure that the user that has created the comment # cannot alter the issue_status while the review is unpublished. rsp=self.api_put(get_review_screenshot_comment_item_url(review comment.pk) {'issue_status':'resolved'} expected_mimetype=screenshot_comment_item_mimetype)<line_sep>self.assertEqual(rsp['stat'] 'ok')<line_sep># The issue_status should still be "open" self.assertEqual(rsp['screenshot_comment']['issue_status'] 'open')<line_sep># Next, let's publish the review, and try altering the issue_status. # This should be allowed, since the review request was made by the # current user. 
review.public=<true><line_sep>review.save()<line_sep>rsp=self.api_put(rsp['screenshot_comment']['links']['self']['href'] {'issue_status':'resolved'} expected_mimetype=screenshot_comment_item_mimetype)<line_sep>self.assertEqual(rsp['stat'] 'ok')<line_sep>self.assertEqual(rsp['screenshot_comment']['issue_status'] 'resolved')<line_sep># Delete the screenshot. self._delete_screenshot(review_request screenshot)<line_sep>review_request.publish(review_request.submitter)<line_sep># Try altering the issue_status. This should be allowed. rsp=self.api_put(rsp['screenshot_comment']['links']['self']['href'] {'issue_status':'open'} expected_mimetype=screenshot_comment_item_mimetype)<line_sep>self.assertEqual(rsp['stat'] 'ok')<line_sep>self.assertEqual(rsp['screenshot_comment']['issue_status'] 'open')<block_end><block_end>
# -------------------------------------------------------------------------------------- # Copyright (c) 2013-2021, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # -------------------------------------------------------------------------------------- """ Demonstrate all the ways to initialize a value 1. Pass the value directly 2. Assign the default value explicitly 3. Provide the value during initialization of the object 4. Provide factory callable that returns a value 5. Use a _default_* static method """<import_stmt>sys<import_from_stmt>atom.api Atom Int Str<def_stmt>get_mother <block_start><return>"Maude "+get_last_name()<block_end><def_stmt>get_last_name <block_start>"""Return a last name based on the system byteorder."""<line_sep><return>sys.byteorder.capitalize()<block_end><class_stmt>Person(Atom)<block_start>"""A simple class representing a person object."""<line_sep>first_name=Str("Bob")<line_sep>age=Int(default=40)<line_sep>address=Str()<line_sep>mother=Str(factory=get_mother)<line_sep>last_name=Str()<def_stmt>_default_last_name self<block_start><return>get_last_name()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>bob=Person(address="101 Main")<line_sep>print((bob.first_name bob.last_name bob.age))<line_sep>print(bob.mother)<block_end>
""" Check for Office file types ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft VBA macros (Visual Basic for Applications), mainly for malware analysis. Author: <NAME> - http://www.decalage.info License: BSD, see source code or documentation Project Repository: https://github.com/decalage2/ViperMonkey """<line_sep># === LICENSE ================================================================== # ViperMonkey is copyright (c) 2015-2016 <NAME> (http://www.decalage.info) # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Office magic numbers. magic_nums={"office97":"D0 CF 11 E0 A1 B1 1A E1" # Office 97 "office2007":"50 4B 3 4" # Office 2007+ (PKZip) }<line_sep># PE magic number. 
pe_magic_num="4D 5A"<def_stmt>get_1st_8_bytes fname is_data<block_start>info=<none><line_sep>is_data=(is_data<or>(len(fname)<g>200))<if_stmt>(<not>is_data)<block_start><try_stmt><block_start>tmp=open(fname 'rb')<line_sep>tmp.close()<block_end><except_stmt><block_start>is_data=<true><block_end><block_end><if_stmt>(<not>is_data)<block_start><with_stmt>open(fname 'rb')<as>f<block_start>info=f.read(8)<block_end><block_end><else_stmt><block_start>info=fname[:9]<block_end>curr_magic=""<for_stmt>b info<block_start>curr_magic<augadd>hex(ord(b)).replace("0x" "").upper()+" "<block_end><return>curr_magic<block_end><def_stmt>is_pe_file fname is_data<block_start>""" Check to see if the given file is a PE executable. return - True if it is a PE file, False if not. """<line_sep># Read the 1st 8 bytes of the file. curr_magic=get_1st_8_bytes(fname is_data)<line_sep># See if we the known magic #. <return>(curr_magic.startswith(pe_magic_num))<block_end><def_stmt>is_office_file fname is_data<block_start>""" Check to see if the given file is a MS Office file format. return - True if it is an Office file, False if not. """<line_sep># Read the 1st 8 bytes of the file. curr_magic=get_1st_8_bytes(fname is_data)<line_sep># See if we have 1 of the known magic #s. <for_stmt>typ magic_nums.keys()<block_start>magic=magic_nums[typ]<if_stmt>(curr_magic.startswith(magic))<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>is_office97_file fname is_data# Read the 1st 8 bytes of the file. <block_start>curr_magic=get_1st_8_bytes(fname is_data)<line_sep># See if we have the Office97 magic #. <return>(curr_magic.startswith(magic_nums["office97"]))<block_end><def_stmt>is_office2007_file fname is_data# Read the 1st 8 bytes of the file. <block_start>curr_magic=get_1st_8_bytes(fname is_data)<line_sep># See if we have the Office 2007 magic #. <return>(curr_magic.startswith(magic_nums["office2007"]))<block_end>
<import_from_stmt>st2tests.base BaseSensorTestCase<import_from_stmt>third_party_resource ThirdPartyResource<class_stmt>ThirdPartyResourceTestCase(BaseSensorTestCase)<block_start>sensor_cls=ThirdPartyResource<def_stmt>test_k8s_object_to_st2_trigger_bad_object self<block_start>k8s_obj={'type':'kanye' 'object':{'kind':'president' 'metadata':{'name':'west' 'namespace':'westashians'# uid missing # label missing }}}<line_sep>sensor=self.get_sensor_instance()<line_sep>self.assertRaises(KeyError sensor._k8s_object_to_st2_trigger k8s_obj)<block_end><def_stmt>test_k8s_object_to_st2_trigger self<block_start>k8s_obj={'type':'kanye' 'object':{'kind':'president' 'metadata':{'name':'west' 'namespace':'westashians' 'uid':'coinye' 'labels':['rapper' 'train wrecker']}}}<line_sep>sensor=self.get_sensor_instance()<line_sep>payload=sensor._k8s_object_to_st2_trigger(k8s_obj)<line_sep>self.assertTrue('resource'<in>payload)<line_sep>self.assertEqual(payload['resource'] k8s_obj['type'])<line_sep>self.assertTrue('object_kind'<in>payload)<line_sep>self.assertEqual(payload['object_kind'] k8s_obj['object']['kind'])<line_sep>self.assertTrue('name'<in>payload)<line_sep>self.assertEqual(payload['name'] k8s_obj['object']['metadata']['name'])<line_sep>self.assertTrue('labels'<in>payload)<line_sep>self.assertListEqual(payload['labels'] k8s_obj['object']['metadata']['labels'])<line_sep>self.assertTrue('namespace'<in>payload)<line_sep>self.assertEqual(payload['namespace'] k8s_obj['object']['metadata']['namespace'])<line_sep>self.assertTrue('uid'<in>payload)<line_sep>self.assertEqual(payload['uid'] k8s_obj['object']['metadata']['uid'])<block_end><def_stmt>test_get_trigger_payload_from_line self<block_start>line='{"object": {"kind": "president", '+'"metadata": {"labels": ["rapper", "train wrecker"], '+'"namespace": "westashians", '+'"name": "west", "uid": "coinye"}}, "type": 
"kanye"}'<line_sep>sensor=self.get_sensor_instance()<line_sep>payload=sensor._get_trigger_payload_from_line(line)<line_sep>self.assertTrue(payload<is><not><none>)<line_sep>self.assertTrue('resource'<in>payload)<line_sep>self.assertTrue('object_kind'<in>payload)<line_sep>self.assertTrue('name'<in>payload)<line_sep>self.assertTrue('labels'<in>payload)<line_sep>self.assertTrue('namespace'<in>payload)<line_sep>self.assertTrue('uid'<in>payload)<block_end><block_end>
<import_from_stmt>cx_core integration<as>integration_module<import_from_stmt>cx_core.controller Controller<def_stmt>test_get_integrations fake_controller:Controller<block_start>integrations=integration_module.get_integrations(fake_controller {})<line_sep>inteagration_names={i.name<for>i integrations}<assert_stmt>inteagration_names<eq>{"z2m" "zha" "deconz" "state" "mqtt" "lutron_caseta" }<block_end>
<import_stmt>datetime<import_stmt>os<import_from_stmt>io BytesIO<import_stmt>logging<import_from_stmt>functools wraps<import_from_stmt>copy deepcopy<import_from_stmt>collections Counter<import_stmt>slugify<import_stmt>yaml<import_stmt>mistune<import_stmt>requests<import_from_stmt>flask Blueprint Flask render_template abort send_file make_response<import_from_stmt>flask_cors CORS<import_from_stmt>flask_jsonpify jsonify<import_from_stmt>flask_basicauth BasicAuth<import_from_stmt>datapackage_pipelines.status status_mgr<import_from_stmt>datapackage_pipelines.utilities.stat_utils user_facing_stats<line_sep>YAML_DUMPER=yaml.CDumper<if>'CDumper'<in>yaml.__dict__<else>yaml.Dumper<def_stmt>datestr x<block_start><if_stmt>x<is><none><block_start><return>''<block_end><return>str(datetime.datetime.fromtimestamp(x))<block_end><def_stmt>yamlize x<block_start>ret=yaml.dump(x default_flow_style=<false> Dumper=YAML_DUMPER)<line_sep><return>ret<block_end>markdown=mistune.Markdown(hard_wrap=<true>)<line_sep>status=status_mgr()<def_stmt>make_hierarchies statuses<block_start><def_stmt>group lvl<block_start>pipelines=list(filter(<lambda>x:len(x['id'])<eq>1 lvl))<line_sep>children_=list(filter(<lambda>x:len(x['id'])<g>1 lvl))<line_sep>groups_={}<for_stmt>child children_<block_start>child_key=child['id'].pop(0)<line_sep>groups_.setdefault(child_key []).append(child)<block_end>children_=dict((k group(v))<for>k,v groups_.items())<for_stmt>p pipelines<block_start>p['id']=p['id'][0]<block_end><return>{'pipelines':pipelines 'children':children_}<block_end><def_stmt>flatten children_<block_start><for_stmt>k,v children_.items()<block_start>v['children']=flatten(v['children'])<line_sep>child_keys=list(v['children'].keys())<if_stmt>len(child_keys)<eq>1<and>len(v['pipelines'])<eq>0<block_start>child_key=child_keys[0]<line_sep>children_['/'.join([k child_key])]=v['children'][child_key]<del_stmt>children_[k]<block_end><block_end><return>children_<block_end>statuses=[{'id':st['id'].split('/') 
'title':st.get('title') 'stats':st.get('stats') 'slug':st.get('slug')}<for>st statuses]<line_sep>groups=group(statuses)<line_sep>children=groups.get('children' {})<line_sep>groups['children']=flatten(children)<line_sep><return>groups<block_end><def_stmt>basic_auth_required view_func<block_start>""" A decorator that can be used to protect specific views with HTTP basic access authentication. Conditional on having BASIC_AUTH_USERNAME and BASIC_AUTH_PASSWORD set as env vars. """<line_sep>@wraps(view_func)<def_stmt>wrapper *args **kwargs<block_start><if_stmt>app.config.get('BASIC_AUTH_ACTIVE' <false>)<block_start><if_stmt>basic_auth.authenticate()<block_start><return>view_func(*args **kwargs)<block_end><else_stmt><block_start><return>basic_auth.challenge()<block_end><block_end><else_stmt><block_start><return>view_func(*args **kwargs)<block_end><block_end><return>wrapper<block_end>blueprint=Blueprint('dpp' 'dpp')<line_sep>@blueprint.route("")@blueprint.route("<path:pipeline_path>")@basic_auth_required<def_stmt>main pipeline_path=<none><block_start>pipeline_ids=sorted(status.all_pipeline_ids())<line_sep># If we have a pipeline_path, filter the pipeline ids. 
<if_stmt>pipeline_path<is><not><none><block_start><if_stmt><not>pipeline_path.startswith('./')<block_start>pipeline_path='./'+pipeline_path<block_end>pipeline_ids=[p<for>p pipeline_ids<if>p.startswith(pipeline_path)]<block_end>statuses=[]<for_stmt>pipeline_id pipeline_ids<block_start>pipeline_status=status.get(pipeline_id)<line_sep>ex=pipeline_status.get_last_execution()<line_sep>success_ex=pipeline_status.get_last_successful_execution()<line_sep>pipeline_obj={'id':pipeline_id.lstrip('./') 'title':pipeline_status.pipeline_details.get('title') 'stats':user_facing_stats(ex.stats)<if>ex<else><none> 'slug':slugify.slugify(pipeline_id) 'trigger':ex.trigger<if>ex<else><none> 'error_log':pipeline_status.errors() 'state':pipeline_status.state() 'pipeline':pipeline_status.pipeline_details 'message':pipeline_status.state().capitalize() 'dirty':pipeline_status.dirty() 'runnable':pipeline_status.runnable() 'class':{'INIT':'primary' 'QUEUED':'primary' 'INVALID':'danger' 'RUNNING':'warning' 'SUCCEEDED':'success' 'FAILED':'danger'}[pipeline_status.state()] 'ended':datestr(ex.finish_time)<if>ex<else><none> 'started':datestr(ex.start_time)<if>ex<else><none> 'last_success':datestr(success_ex.finish_time)<if>success_ex<else><none> }<line_sep>statuses.append(pipeline_obj)<block_end><def_stmt>state_and_not_dirty state p<block_start><return>p.get('state')<eq>state<and><not>p.get('dirty')<block_end><def_stmt>state_or_dirty state p<block_start><return>p.get('state')<eq>state<or>p.get('dirty')<block_end>categories=[['ALL' 'All Pipelines' <lambda>_ __:<true>] ['INVALID' "Can't start" <lambda>_ p:<not>p['runnable']] ['QUEUED' 'Waiting to run' <lambda>state p:p['state']<eq>state] ['RUNNING' 'Running' state_and_not_dirty] ['FAILED' 'Failed Execution' state_and_not_dirty] ['SUCCEEDED' 'Successful Execution' state_and_not_dirty] ]<for_stmt>item categories<block_start>item.append([p<for>p deepcopy(statuses)<if>item[2](item[0] 
p)])<line_sep>item.append(len(item[-1]))<line_sep>item.append(make_hierarchies(item[-2]))<block_end><return>render_template('dashboard.html' categories=categories yamlize=yamlize markdown=markdown)<block_end>@blueprint.route("api/raw/status")@basic_auth_required<def_stmt>pipeline_raw_api_status <block_start>pipelines=sorted(status.all_statuses() key=<lambda>x:x.get('id'))<for_stmt>pipeline pipelines# can get the full details from api/raw/<path:pipeline_id> <block_start><for_stmt>attr ["pipeline" "reason" "error_log"]<block_start><if_stmt>attr<in>pipeline<block_start><del_stmt>pipeline[attr]<block_end><block_end><block_end><return>jsonify(pipelines)<block_end>@blueprint.route("api/raw/<path:pipeline_id>")@basic_auth_required<def_stmt>pipeline_raw_api pipeline_id<block_start><if_stmt><not>pipeline_id.startswith('./')<block_start>pipeline_id='./'+pipeline_id<block_end>pipeline_status=status.get(pipeline_id)<if_stmt><not>pipeline_status.pipeline_details<block_start>abort(404)<block_end>last_execution=pipeline_status.get_last_execution()<line_sep>last_successful_execution=pipeline_status.get_last_successful_execution()<line_sep>ret={"id":pipeline_id "cache_hash":pipeline_status.cache_hash "dirty":pipeline_status.dirty() "queued":last_execution.queue_time<if>last_execution<else><none> "started":last_execution.start_time<if>last_execution<else><none> "ended":last_execution.finish_time<if>last_execution<else><none> "reason":last_execution.log<if>last_execution<else><none> "error_log":pipeline_status.errors() "stats":last_execution.stats<if>last_execution<else><none> "success":last_execution.success<if>last_execution<else><none> "last_success":last_successful_execution.finish_time<if>last_successful_execution<else><none> "trigger":last_execution.trigger<if>last_execution<else><none> "pipeline":pipeline_status.pipeline_details "source":pipeline_status.source_spec "message":pipeline_status.state().capitalize() "state":pipeline_status.state() 
}<line_sep><return>jsonify(ret)<block_end>@blueprint.route("api/<field>/<path:pipeline_id>")@basic_auth_required<def_stmt>pipeline_api field pipeline_id<block_start><if_stmt><not>pipeline_id.startswith('./')<block_start>pipeline_id='./'+pipeline_id<block_end>pipeline_status=status.get(pipeline_id)<if_stmt><not>pipeline_status.pipeline_details<block_start>abort(404)<block_end>ret=<none><if_stmt>field<eq>'pipeline'<block_start>ret=pipeline_status.pipeline_details<line_sep>ret=yamlize(ret)<block_end><elif_stmt>field<eq>'source'<block_start>ret=pipeline_status.source_spec<line_sep>ret=yamlize(ret)<block_end><elif_stmt>field<eq>'log'<block_start>ex=pipeline_status.get_last_execution()<line_sep>ret=ex.log<if>ex<else>''<block_end><else_stmt><block_start>abort(400)<block_end>ret=ret.split('\n')<line_sep>ret={'text':ret}<line_sep><return>jsonify(ret)<block_end><def_stmt>_make_badge_response subject text colour<block_start>image_url='https://img.shields.io/badge/{}-{}-{}.svg'.format(subject text colour)<line_sep>r=requests.get(image_url)<line_sep>buffer_image=BytesIO(r.content)<line_sep>buffer_image.seek(0)<line_sep>res=make_response(send_file(buffer_image mimetype='image/svg+xml'))<line_sep>res.headers['Cache-Control']='max-age=0, no-cache, no-store, must-revalidate'<line_sep>res.headers['Expires']='0'<line_sep><return>res<block_end>@blueprint.route("badge/<path:pipeline_id>")<def_stmt>badge pipeline_id<block_start>'''An individual pipeline 
status'''<if_stmt><not>pipeline_id.startswith('./')<block_start>pipeline_id='./'+pipeline_id<block_end>pipeline_status=status.get(pipeline_id)<line_sep>status_color='lightgray'<if_stmt>pipeline_status.pipeline_details<block_start>status_text=pipeline_status.state().lower()<line_sep>last_execution=pipeline_status.get_last_execution()<line_sep>success=last_execution.success<if>last_execution<else><none><if_stmt>success<is><true><block_start>stats=last_execution.stats<if>last_execution<else><none><line_sep>record_count=stats.get('count_of_rows')<if_stmt>record_count<is><not><none><block_start>status_text<augadd>' (%d records)'%record_count<block_end>status_color='brightgreen'<block_end><elif_stmt>success<is><false><block_start>status_color='red'<block_end><block_end><else_stmt><block_start>status_text="not found"<block_end><return>_make_badge_response('pipeline' status_text status_color)<block_end>@blueprint.route("badge/collection/<path:pipeline_path>")<def_stmt>badge_collection pipeline_path<block_start>'''Status badge for a collection of pipelines.'''<line_sep>all_pipeline_ids=sorted(status.all_pipeline_ids())<if_stmt><not>pipeline_path.startswith('./')<block_start>pipeline_path='./'+pipeline_path<block_end># Filter pipeline ids to only include those that start with pipeline_path. 
path_pipeline_ids=[p<for>p all_pipeline_ids<if>p.startswith(pipeline_path)]<line_sep>statuses=[]<for_stmt>pipeline_id path_pipeline_ids<block_start>pipeline_status=status.get(pipeline_id)<if_stmt>pipeline_status<is><none><block_start>abort(404)<block_end>status_text=pipeline_status.state().lower()<line_sep>statuses.append(status_text)<block_end>status_color='lightgray'<line_sep>status_counter=Counter(statuses)<if_stmt>status_counter<block_start><if_stmt>len(status_counter)<eq>1<and>status_counter['succeeded']<g>0<block_start>status_color='brightgreen'<block_end><elif_stmt>status_counter['failed']<g>0<block_start>status_color='red'<block_end><elif_stmt>status_counter['failed']<eq>0<block_start>status_color='yellow'<block_end>status_text=', '.join(['{} {}'.format(v k)<for>k,v status_counter.items()])<block_end><else_stmt><block_start>status_text="not found"<block_end><return>_make_badge_response('pipelines' status_text status_color)<block_end>app=Flask(__name__)<line_sep>app.config['JSONIFY_PRETTYPRINT_REGULAR']=<true><if_stmt>os.environ.get('DPP_BASIC_AUTH_USERNAME' <false>)<and>os.environ.get('DPP_BASIC_AUTH_PASSWORD' <false>)<block_start>app.config['BASIC_AUTH_USERNAME']=os.environ['DPP_BASIC_AUTH_USERNAME']<line_sep>app.config['BASIC_AUTH_PASSWORD']=os.environ['DPP_BASIC_AUTH_PASSWORD']<line_sep>app.config['BASIC_AUTH_ACTIVE']=<true><block_end>basic_auth=BasicAuth(app)<line_sep>CORS(app)<line_sep>url_prefix=os.environ.get('DPP_BASE_PATH' '/')<if_stmt><not>url_prefix.endswith('/')<block_start>url_prefix<augadd>'/'<block_end>logging.info('Serving on path %s' url_prefix)<line_sep>app.register_blueprint(blueprint url_prefix=url_prefix)<line_sep>
# flake8: noqa """This is the main public API of Morepath. Additional public APIs can be imported from the :mod:`morepath.error` and :mod:`morepath.pdbsupport` modules. For custom directive implementations that interact with core directives for grouping or subclassing purposes, or that need to use one of the Morepath registries, you may need to import from :mod:`morepath.directive`. The other submodules are considered private. If you find yourself needing to import from them in application or extension code, please report an issue about it on the Morepath issue tracker. """<import_from_stmt>dectate commit<import_from_stmt>.app App dispatch_method<import_from_stmt>.core excview_tween_factory<as>EXCVIEW poisoned_host_header_protection_tween_factory<as>HOST_HEADER_PROTECTION model_predicate name_predicate request_method_predicate <import_from_stmt>.core request_method_predicate<as>LAST_VIEW_PREDICATE<import_from_stmt>.view render_json render_html redirect<import_from_stmt>.request Request Response<import_from_stmt>.autosetup scan autoscan<import_from_stmt>.authentication Identity IdentityPolicy NO_IDENTITY<import_from_stmt>.converter Converter<import_from_stmt>.reify reify<import_from_stmt>.run run<line_sep>
<import_from_stmt>locust HttpUser TaskSet task constant<import_from_stmt>locust LoadTestShape<class_stmt>UserTasks(TaskSet)<block_start>@task<def_stmt>get_root self<block_start>self.client.get("/")<block_end><block_end><class_stmt>WebsiteUser(HttpUser)<block_start>wait_time=constant(0.5)<line_sep>tasks=[UserTasks]<block_end><class_stmt>StagesShape(LoadTestShape)<block_start>""" A simply load test shape class that has different user and spawn_rate at different stages. Keyword arguments: stages -- A list of dicts, each representing a stage with the following keys: duration -- When this many seconds pass the test is advanced to the next stage users -- Total user count spawn_rate -- Number of users to start/stop per second stop -- A boolean that can stop that test at a specific stage stop_at_end -- Can be set to stop once all stages have run. """<line_sep>stages=[{"duration":60 "users":10 "spawn_rate":10} {"duration":100 "users":50 "spawn_rate":10} {"duration":180 "users":100 "spawn_rate":10} {"duration":220 "users":30 "spawn_rate":10} {"duration":230 "users":10 "spawn_rate":10} {"duration":240 "users":1 "spawn_rate":1} ]<def_stmt>tick self<block_start>run_time=self.get_run_time()<for_stmt>stage self.stages<block_start><if_stmt>run_time<l>stage["duration"]<block_start>tick_data=(stage["users"] stage["spawn_rate"])<line_sep><return>tick_data<block_end><block_end><return><none><block_end><block_end>
<import_stmt>typer<def_stmt>name_callback value:str<block_start><if_stmt>value<ne>"Camila"<block_start><raise>typer.BadParameter("Only Camila is allowed")<block_end><return>value<block_end><def_stmt>main name:str=typer.Option(<ellipsis> callback=name_callback)<block_start>typer.echo(f"Hello {name}")<block_end><if_stmt>__name__<eq>"__main__"<block_start>typer.run(main)<block_end>
"""Randomize the minitaur_gym_alternating_leg_env when reset() is called. The randomization include swing_offset, extension_offset of all legs that mimics bent legs, desired_pitch from user input, battery voltage and motor damping. """<import_stmt>os inspect<line_sep>currentdir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))<line_sep>parentdir=os.path.dirname(os.path.dirname(currentdir))<line_sep>parentdir=os.path.dirname(os.path.dirname(parentdir))<line_sep>os.sys.path.insert(0 parentdir)<import_stmt>numpy<as>np<import_stmt>tf.compat.v1<as>tf<import_from_stmt>pybullet_envs.minitaur.envs env_randomizer_base<line_sep># Absolute range. NUM_LEGS=4<line_sep>BATTERY_VOLTAGE_RANGE=(14.8 16.8)<line_sep>MOTOR_VISCOUS_DAMPING_RANGE=(0 0.01)<class_stmt>MinitaurAlternatingLegsEnvRandomizer(env_randomizer_base.EnvRandomizerBase)<block_start>"""A randomizer that changes the minitaur_gym_alternating_leg_env."""<def_stmt>__init__ self perturb_swing_bound=0.1 perturb_extension_bound=0.1 perturb_desired_pitch_bound=0.01<block_start>super(MinitaurAlternatingLegsEnvRandomizer self).__init__()<line_sep>self.perturb_swing_bound=perturb_swing_bound<line_sep>self.perturb_extension_bound=perturb_extension_bound<line_sep>self.perturb_desired_pitch_bound=perturb_desired_pitch_bound<block_end><def_stmt>randomize_env self env<block_start>perturb_magnitude=np.random.uniform(low=-self.perturb_swing_bound high=self.perturb_swing_bound size=NUM_LEGS)<line_sep>env.set_swing_offset(perturb_magnitude)<line_sep>tf.logging.info("swing_offset: {}".format(perturb_magnitude))<line_sep>perturb_magnitude=np.random.uniform(low=-self.perturb_extension_bound high=self.perturb_extension_bound size=NUM_LEGS)<line_sep>env.set_extension_offset(perturb_magnitude)<line_sep>tf.logging.info("extension_offset: {}".format(perturb_magnitude))<line_sep>perturb_magnitude=np.random.uniform(low=-self.perturb_desired_pitch_bound 
high=self.perturb_desired_pitch_bound)<line_sep>env.set_desired_pitch(perturb_magnitude)<line_sep>tf.logging.info("desired_pitch: {}".format(perturb_magnitude))<line_sep>randomized_battery_voltage=np.random.uniform(BATTERY_VOLTAGE_RANGE[0] BATTERY_VOLTAGE_RANGE[1])<line_sep>env.minitaur.SetBatteryVoltage(randomized_battery_voltage)<line_sep>tf.logging.info("battery_voltage: {}".format(randomized_battery_voltage))<line_sep>randomized_motor_damping=np.random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0] MOTOR_VISCOUS_DAMPING_RANGE[1])<line_sep>env.minitaur.SetMotorViscousDamping(randomized_motor_damping)<line_sep>tf.logging.info("motor_damping: {}".format(randomized_motor_damping))<block_end><block_end>
<import_from_stmt>typing Dict Tuple Optional<import_from_stmt>pathlib Path<import_stmt>asyncio<import_from_stmt>._mask Mask<import_from_stmt>._event Event<import_from_stmt>._base InotifyBase<line_sep>__all__=('Minotaur' )<class_stmt>Notification<block_start>__slots__=('_path' '_type' '_isdir' '_unmount' '_qoverflow' )<def_stmt>__init__ self path:Path type:Mask isdir:bool unmount:bool qoverflow:bool=<false><block_start>self._path=path<line_sep>self._type=type<line_sep>self._isdir=bool(isdir)<line_sep>self._unmount=bool(unmount)<line_sep>self._qoverflow=bool(qoverflow)<block_end>@property<def_stmt>isdir self<arrow>bool<block_start><return>self._isdir<block_end>@property<def_stmt>unmount self<arrow>bool<block_start><return>self._unmount<block_end>@property<def_stmt>qoverflow self<arrow>bool<block_start><return>self._qoverflow<block_end>@property<def_stmt>path self<arrow>Path<block_start><return>self._path<block_end><def_stmt>__repr__ self<arrow>str<block_start>t=self._isdir<and>'dir'<or>'file'<line_sep><return>f'{type(self).__name__}({self._type.name} {t} {self._path})'<block_end>@classmethod<def_stmt>create cls path:Path mask:Mask<arrow>'Notification'<block_start><return>cls(path mask&Mask.EVENT_TYPE bool(mask&Mask.ISDIR) bool(mask&Mask.UNMOUNT) bool(mask&Mask.Q_OVERFLOW))<block_end><block_end><class_stmt>Minotaur(InotifyBase)<block_start>""" Fancy interface for Inotify which does questionable things like: 1. Resolve watch-descriptors back to paths (which races with renames of original paths and can't be used safely, but other inotify packages provide this feature, so here it is for your delectation). 2. Link rename_from/rename_to events together. This feature would be useful but isn't yet actually implemented. Working on it... 
"""<line_sep>__slots__=('_wdmap' '_cmap' )<line_sep>_wdmap:Dict[int Path]<line_sep>_cmap:Dict[Tuple[int int] Event]<def_stmt>__init__ self blocking:bool=<true> cloexec:bool=<true> loop:Optional[asyncio.AbstractEventLoop]=<none> <arrow><none><block_start>super().__init__(blocking cloexec loop)<line_sep>self._wdmap={}<line_sep>self._cmap={}<block_end><def_stmt>add_watch self p:Path mask:Mask<arrow>int<block_start><try_stmt><block_start>wd=super().add_watch(p mask)<block_end><except_stmt>Exception<block_start><raise><block_end><else_stmt><block_start>self._wdmap[wd]=p.resolve()<block_end><return>wd<block_end><def_stmt>rm_watch self wd:int<arrow>int<block_start><try_stmt><block_start><return>super().rm_watch(wd)<block_end><except_stmt>Exception<block_start><raise><block_end><else_stmt><block_start><del_stmt>self._wdmap[wd]<block_end><block_end><def_stmt>_resolve_path self wd:int name:Path<arrow>Path<block_start><try_stmt><block_start>base_dir=self._wdmap[wd]<block_end><except_stmt>KeyError<block_start>path=name<block_end><else_stmt><block_start>path=base_dir/name<block_end><return>path<block_end><def_stmt>__next__ self<arrow>Notification<block_start>evt=super()._next_event()<if_stmt>evt<is><none><block_start><raise>StopIteration<block_end># TODO: Link rename_from/rename_to together if we have them path=self._resolve_path(evt.wd evt.name)<line_sep><return>Notification.create(path evt.mask)<block_end><async_keyword><def_stmt>__anext__ self<arrow>Notification<block_start>evt=<await>super()._next_event_async()<if_stmt>evt<is><none><block_start><raise>StopIteration<block_end>path=self._resolve_path(evt.wd evt.name)<line_sep><return>Notification.create(path evt.mask)<block_end><block_end>
"""! @brief Collection of examples devoted to containers. @authors <NAME> (<EMAIL>) @date 2014-2020 @copyright BSD-3-Clause """<line_sep>
<import_stmt>pytest<import_stmt>torch<line_sep>@pytest.fixture(autouse=<true>)<def_stmt>manual_seed_zero <block_start>torch.manual_seed(0)<block_end>@pytest.fixture(scope='session')<def_stmt>cuda_sleep # Warm-up CUDA. <block_start>torch.empty(1 device='cuda')<line_sep># From test/test_cuda.py in PyTorch. start=torch.cuda.Event(enable_timing=<true>)<line_sep>end=torch.cuda.Event(enable_timing=<true>)<line_sep>start.record()<line_sep>torch.cuda._sleep(1000000)<line_sep>end.record()<line_sep>end.synchronize()<line_sep>cycles_per_ms=1000000/start.elapsed_time(end)<def_stmt>cuda_sleep seconds<block_start>torch.cuda._sleep(int(seconds<times>cycles_per_ms<times>1000))<block_end><return>cuda_sleep<block_end><def_stmt>pytest_report_header <block_start><return>f'torch: {torch.__version__}'<block_end>
# -*- coding: utf-8 -*- <import_from_stmt>..Qt QtCore QtGui<class_stmt>DockDrop(object)<block_start>"""Provides dock-dropping methods"""<def_stmt>__init__ self allowedAreas=<none><block_start>object.__init__(self)<if_stmt>allowedAreas<is><none><block_start>allowedAreas=['center' 'right' 'left' 'top' 'bottom']<block_end>self.allowedAreas=set(allowedAreas)<line_sep>self.setAcceptDrops(<true>)<line_sep>self.dropArea=<none><line_sep>self.overlay=DropAreaOverlay(self)<line_sep>self.overlay.raise_()<block_end><def_stmt>resizeOverlay self size<block_start>self.overlay.resize(size)<block_end><def_stmt>raiseOverlay self<block_start>self.overlay.raise_()<block_end><def_stmt>dragEnterEvent self ev<block_start>src=ev.source()<if_stmt>hasattr(src 'implements')<and>src.implements('dock')#print "drag enter accept" <block_start>ev.accept()<block_end><else_stmt>#print "drag enter ignore" <block_start>ev.ignore()<block_end><block_end><def_stmt>dragMoveEvent self ev#print "drag move" # QDragMoveEvent inherits QDropEvent which provides posF() # PyQt6 provides only position() <block_start>posF=ev.posF()<if>hasattr(ev 'posF')<else>ev.position()<line_sep>ld=posF.x()<line_sep>rd=self.width()-ld<line_sep>td=posF.y()<line_sep>bd=self.height()-td<line_sep>mn=min(ld rd td bd)<if_stmt>mn<g>30<block_start>self.dropArea="center"<block_end><elif_stmt>(ld<eq>mn<or>td<eq>mn)<and>mn<g>self.height()/3.<block_start>self.dropArea="center"<block_end><elif_stmt>(rd<eq>mn<or>ld<eq>mn)<and>mn<g>self.width()/3.<block_start>self.dropArea="center"<block_end><elif_stmt>rd<eq>mn<block_start>self.dropArea="right"<block_end><elif_stmt>ld<eq>mn<block_start>self.dropArea="left"<block_end><elif_stmt>td<eq>mn<block_start>self.dropArea="top"<block_end><elif_stmt>bd<eq>mn<block_start>self.dropArea="bottom"<block_end><if_stmt>ev.source()<is>self<and>self.dropArea<eq>'center'#print " no self-center" <block_start>self.dropArea=<none><line_sep>ev.ignore()<block_end><elif_stmt>self.dropArea<not><in>self.allowedAreas#print " 
not allowed" <block_start>self.dropArea=<none><line_sep>ev.ignore()<block_end><else_stmt>#print " ok" <block_start>ev.accept()<block_end>self.overlay.setDropArea(self.dropArea)<block_end><def_stmt>dragLeaveEvent self ev<block_start>self.dropArea=<none><line_sep>self.overlay.setDropArea(self.dropArea)<block_end><def_stmt>dropEvent self ev<block_start>area=self.dropArea<if_stmt>area<is><none><block_start><return><block_end><if_stmt>area<eq>'center'<block_start>area='above'<block_end>self.area.moveDock(ev.source() area self)<line_sep>self.dropArea=<none><line_sep>self.overlay.setDropArea(self.dropArea)<block_end><block_end><class_stmt>DropAreaOverlay(QtGui.QWidget)<block_start>"""Overlay widget that draws drop areas during a drag-drop operation"""<def_stmt>__init__ self parent<block_start>QtGui.QWidget.__init__(self parent)<line_sep>self.dropArea=<none><line_sep>self.hide()<line_sep>self.setAttribute(QtCore.Qt.WidgetAttribute.WA_TransparentForMouseEvents)<block_end><def_stmt>setDropArea self area<block_start>self.dropArea=area<if_stmt>area<is><none><block_start>self.hide()<block_end><else_stmt>## Resize overlay to just the region where drop area should be displayed. 
## This works around a Qt bug--can't display transparent widgets over QGLWidget <block_start>prgn=self.parent().rect()<line_sep>rgn=QtCore.QRect(prgn)<line_sep>w=min(30 prgn.width()/3.)<line_sep>h=min(30 prgn.height()/3.)<if_stmt>self.dropArea<eq>'left'<block_start>rgn.setWidth(w)<block_end><elif_stmt>self.dropArea<eq>'right'<block_start>rgn.setLeft(rgn.left()+prgn.width()-w)<block_end><elif_stmt>self.dropArea<eq>'top'<block_start>rgn.setHeight(h)<block_end><elif_stmt>self.dropArea<eq>'bottom'<block_start>rgn.setTop(rgn.top()+prgn.height()-h)<block_end><elif_stmt>self.dropArea<eq>'center'<block_start>rgn.adjust(w h -w -h)<block_end>self.setGeometry(rgn)<line_sep>self.show()<block_end>self.update()<block_end><def_stmt>paintEvent self ev<block_start><if_stmt>self.dropArea<is><none><block_start><return><block_end>p=QtGui.QPainter(self)<line_sep>rgn=self.rect()<line_sep>p.setBrush(QtGui.QBrush(QtGui.QColor(100 100 255 50)))<line_sep>p.setPen(QtGui.QPen(QtGui.QColor(50 50 150) 3))<line_sep>p.drawRect(rgn)<block_end><block_end>
"""Consts for Kaiterra integration."""<import_from_stmt>datetime timedelta<import_from_stmt>homeassistant.const CONCENTRATION_MICROGRAMS_PER_CUBIC_METER CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER CONCENTRATION_PARTS_PER_BILLION CONCENTRATION_PARTS_PER_MILLION PERCENTAGE Platform <line_sep>DOMAIN="kaiterra"<line_sep>DISPATCHER_KAITERRA="kaiterra_update"<line_sep>AQI_SCALE={"cn":[0 50 100 150 200 300 400 500] "in":[0 50 100 200 300 400 500] "us":[0 50 100 150 200 300 500] }<line_sep>AQI_LEVEL={"cn":["Good" "Satisfactory" "Moderate" "Unhealthy for sensitive groups" "Unhealthy" "Very unhealthy" "Hazardous" ] "in":["Good" "Satisfactory" "Moderately polluted" "Poor" "Very poor" "Severe" ] "us":["Good" "Moderate" "Unhealthy for sensitive groups" "Unhealthy" "Very unhealthy" "Hazardous" ] }<line_sep>ATTR_VOC="volatile_organic_compounds"<line_sep>ATTR_AQI_LEVEL="air_quality_index_level"<line_sep>ATTR_AQI_POLLUTANT="air_quality_index_pollutant"<line_sep>AVAILABLE_AQI_STANDARDS=["us" "cn" "in"]<line_sep>AVAILABLE_UNITS=["x" PERCENTAGE "C" "F" CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER CONCENTRATION_MICROGRAMS_PER_CUBIC_METER CONCENTRATION_PARTS_PER_MILLION CONCENTRATION_PARTS_PER_BILLION ]<line_sep>AVAILABLE_DEVICE_TYPES=["laseregg" "sensedge"]<line_sep>CONF_AQI_STANDARD="aqi_standard"<line_sep>CONF_PREFERRED_UNITS="preferred_units"<line_sep>DEFAULT_AQI_STANDARD="us"<line_sep>DEFAULT_PREFERRED_UNIT:list[str]=[]<line_sep>DEFAULT_SCAN_INTERVAL=timedelta(seconds=30)<line_sep>PLATFORMS=[Platform.SENSOR Platform.AIR_QUALITY]<line_sep>
<import_from_stmt>flask request<import_from_stmt>apps.auth.auth_require required<import_from_stmt>apps.project.business.issue IssueBusiness IssueRecordBusiness IssueDashBoardBusiness<import_from_stmt>apps.project.extentions parse_json_form validation parse_list_args2<import_from_stmt>library.api.render json_detail_render json_list_render2<import_from_stmt>library.api.tBlueprint tblueprint<line_sep>bpname='issue'<line_sep>view_permission=f'{bpname}_view'<line_sep>modify_permission=f'{bpname}_modify'<line_sep>issue=tblueprint(bpname __name__)<line_sep># 新增issue @issue.route('/' methods=['POST'])@required(modify_permission)@validation('POST:issue_create')<def_stmt>issue_add_handler <block_start>""" @api {post} /v1/issue 新增 缺陷 @apiName CreateIssue @apiGroup 项目 @apiDescription 新增 缺陷 @apiParam {int} module_id 模块 ID @apiParam {int} handler 处理人 ID @apiParam {int} issue_type 类型 @apiParam {int} chance 出现几率 @apiParam {int} level 级别 @apiParam {int} priority 优先级 @apiParam {int} system 系统 @apiParam {string} title 标题 @apiParam {string} attach 福建 @apiParam {string} description 描述 @apiParam {int} detection_chance 用户识别度 @apiParam {int} project_id 项目 ID @apiParam {int} version 版本 @apiParam {int} creator 创建人 ID @apiParam {int} modifier 修改人 ID @apiParam {int} [requirement_id] 关联的 需求 ID @apiParam {string} [tag] 标签 @apiParamExample {json} Request-Example: { "module_id": 340, "handler": 93, "issue_type": 0, "chance": 0, "level": 0, "priority": 0, "system": 4, "title": "123", "attach": "{\"images\":[],\"files\":[],\"videos\":[]}", "description": "<p>test</p>", "detection_chance": 0, "project_id": 4, "version": 168, "creator": 93, "modifier": 93, "requirement_id": 123, "tag": 13,14 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """<line_sep>(system version project_id module_id creator modifier handler issue_type chance level priority stage title attach handle_status description comment detection_chance requirement_id case_covered 
tag)=parse_json_form('issue_create')<line_sep>ret=IssueBusiness.create(system version project_id module_id creator modifier handler issue_type chance level priority stage title attach handle_status description comment detection_chance requirement_id case_covered tag)<line_sep><return>json_detail_render(ret)<block_end># 根据id修改,删除issue @issue.route('/<int:issue_id>' methods=['POST'])@required(modify_permission)@validation('POST:issue_modify')<def_stmt>issue_modify_handler issue_id<block_start>""" @api {post} /v1/issue/{int:id} 修改 缺陷 @apiName ModifyIssue @apiGroup 项目 @apiDescription 修改 缺陷 @apiParam {int} module_id 模块 ID @apiParam {int} handler 处理人 ID @apiParam {int} issue_type 类型 @apiParam {int} chance 出现几率 @apiParam {int} level 级别 @apiParam {int} priority 优先级 @apiParam {int} system 系统 @apiParam {string} title 标题 @apiParam {string} attach 福建 @apiParam {string} description 描述 @apiParam {int} detection_chance 用户识别度 @apiParam {int} project_id 项目 ID @apiParam {int} version 版本 @apiParam {int} creator 创建人 ID @apiParam {int} modifier 修改人 ID @apiParam {int} [requirement_id] 关联的 需求 ID @apiParam {string} [tag] 标签 @apiParamExample {json} Request-Example: { "module_id": 340, "handler": 93, "issue_type": 0, "chance": 0, "level": 0, "priority": 0, "system": 4, "title": "123", "attach": "{\"images\":[],\"files\":[],\"videos\":[]}", "description": "<p>test</p>", "detection_chance": 0, "project_id": 4, "version": 168, "creator": 93, "modifier": 93, "requirement_id": 1, "tag": 13,14 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """<line_sep>(system version project_id module_id modifier handler issue_type chance level priority stage title attach handle_status description comment detection_chance requirement_id case_covered tag)=parse_json_form('issue_modify')<line_sep>ret=IssueBusiness.modify(issue_id system version project_id module_id modifier handler issue_type chance level priority stage title attach handle_status 
description comment detection_chance requirement_id case_covered tag)<line_sep><return>json_detail_render(ret)<block_end># 根据id修改,删除issue @issue.route('/<int:issue_id>' methods=['DELETE'])<def_stmt>issue_delete_handler issue_id<block_start>""" @api {delete} /v1/issue/{int:id} 删除 缺陷 @apiName DeleteIssue @apiGroup 项目 @apiDescription 删除 缺陷 @apiParamExample {json} Request-Example: - @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """<line_sep>ret=IssueBusiness.delete(issue_id)<line_sep><return>json_detail_render(ret)<block_end># 切换issue状态 @issue.route('/handlestatus/<int:issue_id>' methods=['POST'])@required(modify_permission)@validation('POST:handle_status')<def_stmt>issue_board_status_handler issue_id<block_start>""" @api {post} /v1/issue/handlestatus/{int:id} 切换 缺陷状态 @apiName ModifyIssueStatus @apiGroup 项目 @apiDescription 切换 缺陷状态 @apiParamExample {json} Request-Example: { "handle_status": 1 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """<line_sep>handle_status=parse_json_form('handle_status')[0]<line_sep>ret=IssueBusiness.status_switch(issue_id handle_status)<line_sep><return>json_detail_render(ret)<block_end># 切换issue处理人 @issue.route('/handler/<int:issue_id>' methods=['POST'])@validation('POST:handler_switch')@required(modify_permission)<def_stmt>issue_handler_switch_handler issue_id<block_start>""" @api {post} /v1/issue/handler/{int:id} 切换 缺陷处理人 @apiName ModifyIssueSwitch @apiGroup 项目 @apiDescription 切换 缺陷处理人 @apiParamExample {json} Request-Example: { "handler": 11 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """<line_sep>handler=parse_json_form('handler_switch')<line_sep>ret=IssueBusiness.handler_switch(issue_id handler)<line_sep><return>json_detail_render(ret)<block_end># 切换issue等级 @issue.route('/level/<int:issue_id>' 
methods=['POST'])@required(modify_permission)@validation('POST:level_switch')<def_stmt>issue_level_switch_handler issue_id<block_start>""" @api {post} /v1/issue/level/{int:id} 切换 缺陷等级 @apiName ModifyIssueLevel @apiGroup 项目 @apiDescription 切换 缺陷等级 @apiParamExample {json} Request-Example: { "level": 3 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """<line_sep>level=parse_json_form('level_switch')<line_sep>ret=IssueBusiness.level_switch(issue_id level)<line_sep><return>json_detail_render(ret)<block_end># 切换issue优先级 @issue.route('/priority/<int:issue_id>' methods=['POST'])@required(modify_permission)@validation('POST:priority_switch')<def_stmt>issue_priority_switch_handler issue_id<block_start>""" @api {post} /v1/issue/priority/{int:id} 切换 缺陷优先级 @apiName ModifyIssuePriority @apiGroup 项目 @apiDescription 切换 缺陷优先级 @apiParamExample {json} Request-Example: { "priority": 3 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """<line_sep>priority=parse_json_form('priority_switch')<line_sep>ret=IssueBusiness.priority_switch(issue_id priority)<line_sep><return>json_detail_render(ret)<block_end># 修改issue的comment @issue.route('/comment/<int:issue_id>' methods=['POST'])@validation('POST:add_comment')@required(modify_permission)<def_stmt>issue_add_comment_handler issue_id<block_start>""" @api {post} /v1/issue/comment/{int:id} 切换 缺陷备注 @apiName ModifyIssueComment @apiGroup 项目 @apiDescription 切换 缺陷备注 @apiParamExample {json} Request-Example: { "comment": 3 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """<line_sep>comment=parse_json_form('add_comment')<line_sep>ret=IssueBusiness.add_comment(issue_id comment)<line_sep><return>json_detail_render(ret)<block_end># 查询issue-projectid,versionid @issue.route('/' methods=['GET'])<def_stmt>issue_query_all_handler <block_start>""" @api {get} /v1/issue/ 查询 issue 列表 @apiName 
SearchIssue @apiGroup 项目 @apiDescription 查询 issue 列表 @apiParam {int} [projectid] 项目 ID @apiParam {int} [versionid] 版本 ID @apiParam {string} [creator_id] 创建人 ID,使用 ',' 分割 @apiParam {string} [handler_id] 处理人 ID,使用 ',' 分割 @apiParam {int} [title] 标题 @apiParam {string} [handle_status] 处理状态 ID,使用 ',' 分割 @apiParam {string} [module_id] 模块 ID,使用 ',' 分割 @apiParam {string} [priority] 优先级 ID,使用 ',' 分割 @apiParam {int} [page_size] 分页 页面大小 @apiparam {int} [page_index] 分页 页数 @apiParamExample {json} Request-Example: { "projectid": 4, "versionid": 173, "creator_id": "1,2,3,4", "page_size": 1 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [ { "attach": "{"images":[],"files":[],"videos":[]}", "chance": 2, "comment": "", "creation_time": "2019-08-08 20:58:49", "creator": [ { "id": 96, "name": "张宇" } ], "description": "", "detection_chance": "", "handle_status": 2, "handler": [ { "id": 96, "name": "张宇" } ], "issue_number": "T398", "issue_type": 1, "issueid": 398, "level": 1, "modified_time": "2019-08-08 20:58:49", "modifier": [], "module": [ { "id": 329, "name": "用例二级2222" } ], "priority": 1, "project_id": 4, "rank": 12, "reopen": 0, "repair_time": "", "requirement_id": "", "requirement_title": "", "stage": "", "status": 0, "system": "", "test_time": "", "title": "1.2.7issuse55555", "version": [ { "id": 173, "name": "1.2.7" } ], "weight": "" } ], "message": "ok", "page_index": 1, "page_size": 1, "total": 8 } """<line_sep>requirement_id=request.args.get('requirement_id')<if_stmt>requirement_id<block_start>page_size,page_index=parse_list_args2()<line_sep>data,count=IssueBusiness.paginate_data_by_rid(page_size page_index requirement_id)<line_sep><return>json_list_render2(0 data page_size page_index count)<block_end><else_stmt><block_start>page_size,page_index=parse_list_args2()<line_sep>data,count=IssueBusiness.paginate_data(page_size page_index)<line_sep><return>json_list_render2(0 data page_size page_index count)<block_end><block_end># 查询issue历史记录 
@issue.route('/record', methods=['GET'])
def issue_record_query_all_handler():
    """List the change-history records of issues.

    GET /v1/issue/record; filtering (e.g. by projectid / versionid) is
    handled inside IssueRecordBusiness.
    """
    data = IssueRecordBusiness.query_all_json()
    return json_detail_render(0, data)


# query the change-history detail of a single issue
@issue.route('/record/detail/<int:issue_id>', methods=['GET'])
def issue_record_detail_handler(issue_id):
    """Return the modification-history entries of one issue.

    GET /v1/issue/record/detail/{issue_id}
    """
    data = IssueRecordBusiness.query_record_detail(issue_id)
    return json_detail_render(0, data)


# fetch a single issue by its id
@issue.route('/<int:issue_id>', methods=['GET'])
def issue_query_handler(issue_id):
    """Return the full detail of one issue.

    GET /v1/issue/{issue_id}
    """
    data = IssueBusiness.query_by_id(issue_id)
    return json_detail_render(0, data)


# dashboard of issues opened/closed in a date range
@issue.route('/dashboard', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_dashboard_work_handler():
    """Aggregate issue open/close statistics between two dates.

    POST /v1/issue/dashboard with {"start_date": ..., "end_date": ...}.
    """
    start_date, end_date = parse_json_form('issue_dashboard')
    data = IssueDashBoardBusiness.issue_dashboard(start_date, end_date)
    return json_detail_render(0, data)


# per-tester daily created-issue counts
@issue.route('/dashboard/tester', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def tester_issue_work_handler():
    """Count the issues each tester created per day in a date range.

    POST /v1/issue/dashboard/tester with {"start_date": ..., "end_date": ...}.
    """
    start_date, end_date = parse_json_form('issue_dashboard')
    data = IssueDashBoardBusiness.issue_all_tester_dashboard(start_date, end_date)
    return json_detail_render(0, data)


# status / priority distribution dashboard
@issue.route('/dashboard/project', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_project_dashboard_handler():
    """Return the issue status and priority distribution for a date range.

    POST /v1/issue/dashboard/project with {"start_date": ..., "end_date": ...}.
    """
    start_date, end_date = parse_json_form('issue_dashboard')
    data = IssueDashBoardBusiness.issue_project_dashboard(start_date, end_date)
    return json_detail_render(0, data)


# board view: per-version issue counts by handle status for one project
@issue.route('/dashboard/project/<int:pro_id>', methods=['GET'])
def issue_query_pro_handler(pro_id):
    """Return per-version issue counts grouped by handle status.

    GET /v1/issue/dashboard/project/{project_id}
    """
    data = IssueDashBoardBusiness.issue_project_id_dashboard(pro_id)
    return json_detail_render(0, data)


# bind an issue to a requirement
@issue.route('/bind/requirement', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_bind_requirement')
def issue_bind_requirement():
    """Bind an issue to a requirement.

    POST /v1/issue/bind/requirement with
    {"issue_id": ..., "requirement_id": ...}.
    """
    requirement_id, issue_id = parse_json_form('issue_bind_requirement')
    ret, msg = IssueBusiness.issue_bind_requirement(issue_id, requirement_id)
    return json_detail_render(ret, [], msg)


# export the filtered issue list to an .xls file
@issue.route('/export', methods=['GET'])
def issue_export():
    """Export the issue list to an xls file and return its download URL.

    GET /v1/issue/export (accepts the same filter query parameters as the
    list endpoint).
    """
    issue_url = IssueBusiness.export()
    return json_detail_render(code=0, data=issue_url)
#encoding:utf-8 <import_from_stmt>utils weighted_random_subreddit<line_sep>t_channel='@news756'<line_sep>subreddit=weighted_random_subreddit({'politics':0.5 'news':0.5})<def_stmt>send_post submission r2t<block_start><return>r2t.send_simple(submission text='{title}\n\n{self_text}\n\n/r/{subreddit_name}\n{short_link}' gif='{title}\n\n/r/{subreddit_name}\n{short_link}' img='{title}\n\n/r/{subreddit_name}\n{short_link}' album='{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}' other='{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}')<block_end>
""" DocumentNGramSymWinGraph.py Created on May 23, 2017, 4:56 PM """<import_stmt>networkx<as>nx<import_stmt>pygraphviz<as>pgv<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>networkx.drawing.nx_agraph graphviz_layout<import_from_stmt>DocumentNGramGraph DocumentNGramGraph<class_stmt>DocumentNGramSymWinGraph(DocumentNGramGraph)# an extension of DocumentNGramGraph # for symmetric windowing <block_start><def_stmt>buildGraph self verbose=<false> d=[]# set Data @class_variable <block_start>self.setData(d)<line_sep>Data=self._Data<line_sep># build ngram ng=self.build_ngram()<line_sep>s=len(ng)<line_sep># calculate window win=self._Dwin<floordiv>2<line_sep># initialize graph self._Graph=nx.Graph()<if_stmt>(s<ge>2<and>win<ge>1)# max possible window size (bounded by win) <block_start>o=min(win s)+1<line_sep>window=ng[1:o]<line_sep>i=o<line_sep># first build the full window <for_stmt>gram ng[0:s-1]<block_start><for_stmt>w window<block_start>self.addEdgeInc(gram w)<block_end>window.pop(0)<line_sep># if window's edge has reached # it's the limit of ng stop # appending <if_stmt>i<l>s<block_start>window.append(ng[i][:])<line_sep>i<augadd>1<block_end><block_end># print Graph (optional) <if_stmt>verbose<block_start>self.GraphDraw(self._GPrintVerbose)<block_end><block_end><return>self._Graph<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>pandas DataFrame Series concat <import_stmt>pandas._testing<as>tm<line_sep>@pytest.mark.parametrize("func" ["cov" "corr"])<def_stmt>test_ewm_pairwise_cov_corr func frame<block_start>result=getattr(frame.ewm(span=10 min_periods=5) func)()<line_sep>result=result.loc[(slice(<none>) 1) 5]<line_sep>result.index=result.index.droplevel(1)<line_sep>expected=getattr(frame[1].ewm(span=10 min_periods=5) func)(frame[5])<line_sep>tm.assert_series_equal(result expected check_names=<false>)<block_end>@pytest.mark.parametrize("name" ["cov" "corr"])<def_stmt>test_ewm_corr_cov name<block_start>A=Series(np.random.randn(50) index=np.arange(50))<line_sep>B=A[2:]+np.random.randn(48)<line_sep>A[:10]=np.NaN<line_sep>B[-10:]=np.NaN<line_sep>result=getattr(A.ewm(com=20 min_periods=5) name)(B)<assert_stmt>np.isnan(result.values[:14]).all()<assert_stmt><not>np.isnan(result.values[14:]).any()<block_end>@pytest.mark.parametrize("min_periods" [0 1 2])@pytest.mark.parametrize("name" ["cov" "corr"])<def_stmt>test_ewm_corr_cov_min_periods name min_periods# GH 7898 <block_start>A=Series(np.random.randn(50) index=np.arange(50))<line_sep>B=A[2:]+np.random.randn(48)<line_sep>A[:10]=np.NaN<line_sep>B[-10:]=np.NaN<line_sep>result=getattr(A.ewm(com=20 min_periods=min_periods) name)(B)<line_sep># binary functions (ewmcov, ewmcorr) with bias=False require at # least two values <assert_stmt>np.isnan(result.values[:11]).all()<assert_stmt><not>np.isnan(result.values[11:]).any()<line_sep># check series of length 0 empty=Series([] dtype=np.float64)<line_sep>result=getattr(empty.ewm(com=50 min_periods=min_periods) name)(empty)<line_sep>tm.assert_series_equal(result empty)<line_sep># check series of length 1 result=getattr(Series([1.0]).ewm(com=50 min_periods=min_periods) name)(Series([1.0]))<line_sep>tm.assert_series_equal(result Series([np.NaN]))<block_end>@pytest.mark.parametrize("name" ["cov" 
"corr"])<def_stmt>test_different_input_array_raise_exception name<block_start>A=Series(np.random.randn(50) index=np.arange(50))<line_sep>A[:10]=np.NaN<line_sep>msg="other must be a DataFrame or Series"<line_sep># exception raised is Exception <with_stmt>pytest.raises(ValueError match=msg)<block_start>getattr(A.ewm(com=20 min_periods=5) name)(np.random.randn(50))<block_end><block_end><def_stmt>create_mock_weights obj com adjust ignore_na<block_start><if_stmt>isinstance(obj DataFrame)<block_start><if_stmt><not>len(obj.columns)<block_start><return>DataFrame(index=obj.index columns=obj.columns)<block_end>w=concat([create_mock_series_weights(obj.iloc[: i] com=com adjust=adjust ignore_na=ignore_na)<for>i,_ enumerate(obj.columns)] axis=1 )<line_sep>w.index=obj.index<line_sep>w.columns=obj.columns<line_sep><return>w<block_end><else_stmt><block_start><return>create_mock_series_weights(obj com adjust ignore_na)<block_end><block_end><def_stmt>create_mock_series_weights s com adjust ignore_na<block_start>w=Series(np.nan index=s.index)<line_sep>alpha=1.0/(1.0+com)<if_stmt>adjust<block_start>count=0<for_stmt>i range(len(s))<block_start><if_stmt>s.iat[i]<eq>s.iat[i]<block_start>w.iat[i]=pow(1.0/(1.0-alpha) count)<line_sep>count<augadd>1<block_end><elif_stmt><not>ignore_na<block_start>count<augadd>1<block_end><block_end><block_end><else_stmt><block_start>sum_wts=0.0<line_sep>prev_i=-1<line_sep>count=0<for_stmt>i range(len(s))<block_start><if_stmt>s.iat[i]<eq>s.iat[i]<block_start><if_stmt>prev_i<eq>-1<block_start>w.iat[i]=1.0<block_end><else_stmt><block_start>w.iat[i]=alpha<times>sum_wts/pow(1.0-alpha count-prev_i)<block_end>sum_wts<augadd>w.iat[i]<line_sep>prev_i=count<line_sep>count<augadd>1<block_end><elif_stmt><not>ignore_na<block_start>count<augadd>1<block_end><block_end><block_end><return>w<block_end>@pytest.mark.parametrize("min_periods" [0 1 2 3 4])<def_stmt>test_ewm_consistency_mean consistency_data adjust ignore_na 
min_periods<block_start>x,is_constant,no_nans=consistency_data<line_sep>com=3.0<line_sep>result=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).mean()<line_sep>weights=create_mock_weights(x com=com adjust=adjust ignore_na=ignore_na)<line_sep>expected=(x.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method="ffill"))<line_sep>expected[x.expanding().count()<l>(max(min_periods 1)<if>min_periods<else>1)]=np.nan<line_sep>tm.assert_equal(result expected.astype("float64"))<block_end>@pytest.mark.parametrize("min_periods" [0 1 2 3 4])<def_stmt>test_ewm_consistency_consistent consistency_data adjust ignore_na min_periods<block_start>x,is_constant,no_nans=consistency_data<line_sep>com=3.0<if_stmt>is_constant<block_start>count_x=x.expanding().count()<line_sep>mean_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).mean()<line_sep># check that correlation of a series with itself is either 1 or NaN corr_x_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).corr(x)<line_sep>exp=x.max()<if>isinstance(x Series)<else>x.max().max()<line_sep># check mean of constant series expected=x<times>np.nan<line_sep>expected[count_x<ge>max(min_periods 1)]=exp<line_sep>tm.assert_equal(mean_x expected)<line_sep># check correlation of constant series with itself is NaN expected[:]=np.nan<line_sep>tm.assert_equal(corr_x_x expected)<block_end><block_end>@pytest.mark.parametrize("min_periods" [0 1 2 3 4])<def_stmt>test_ewm_consistency_var_debiasing_factors consistency_data adjust ignore_na min_periods<block_start>x,is_constant,no_nans=consistency_data<line_sep>com=3.0<line_sep># check variance debiasing factors var_unbiased_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=<false>)<line_sep>var_biased_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=<true>)<line_sep>weights=create_mock_weights(x com=com adjust=adjust 
ignore_na=ignore_na)<line_sep>cum_sum=weights.cumsum().fillna(method="ffill")<line_sep>cum_sum_sq=(weights<times>weights).cumsum().fillna(method="ffill")<line_sep>numerator=cum_sum<times>cum_sum<line_sep>denominator=numerator-cum_sum_sq<line_sep>denominator[denominator<le>0.0]=np.nan<line_sep>var_debiasing_factors_x=numerator/denominator<line_sep>tm.assert_equal(var_unbiased_x var_biased_x<times>var_debiasing_factors_x)<block_end>@pytest.mark.parametrize("min_periods" [0 1 2 3 4])@pytest.mark.parametrize("bias" [<true> <false>])<def_stmt>test_moments_consistency_var consistency_data adjust ignore_na min_periods bias<block_start>x,is_constant,no_nans=consistency_data<line_sep>com=3.0<line_sep>mean_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).mean()<line_sep>var_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=bias)<assert_stmt><not>(var_x<l>0).any().any()<if_stmt>bias# check that biased var(x) == mean(x^2) - mean(x)^2 <block_start>mean_x2=((x<times>x).ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).mean())<line_sep>tm.assert_equal(var_x mean_x2-(mean_x<times>mean_x))<block_end><block_end>@pytest.mark.parametrize("min_periods" [0 1 2 3 4])@pytest.mark.parametrize("bias" [<true> <false>])<def_stmt>test_moments_consistency_var_constant consistency_data adjust ignore_na min_periods bias<block_start>x,is_constant,no_nans=consistency_data<line_sep>com=3.0<if_stmt>is_constant<block_start>count_x=x.expanding(min_periods=min_periods).count()<line_sep>var_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=bias)<line_sep># check that variance of constant series is identically 0 <assert_stmt><not>(var_x<g>0).any().any()<line_sep>expected=x<times>np.nan<line_sep>expected[count_x<ge>max(min_periods 1)]=0.0<if_stmt><not>bias<block_start>expected[count_x<l>2]=np.nan<block_end>tm.assert_equal(var_x 
expected)<block_end><block_end>@pytest.mark.parametrize("min_periods" [0 1 2 3 4])@pytest.mark.parametrize("bias" [<true> <false>])<def_stmt>test_ewm_consistency_std consistency_data adjust ignore_na min_periods bias<block_start>x,is_constant,no_nans=consistency_data<line_sep>com=3.0<line_sep>var_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=bias)<line_sep>std_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).std(bias=bias)<assert_stmt><not>(var_x<l>0).any().any()<assert_stmt><not>(std_x<l>0).any().any()<line_sep># check that var(x) == std(x)^2 tm.assert_equal(var_x std_x<times>std_x)<block_end>@pytest.mark.parametrize("min_periods" [0 1 2 3 4])@pytest.mark.parametrize("bias" [<true> <false>])<def_stmt>test_ewm_consistency_cov consistency_data adjust ignore_na min_periods bias<block_start>x,is_constant,no_nans=consistency_data<line_sep>com=3.0<line_sep>var_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=bias)<assert_stmt><not>(var_x<l>0).any().any()<line_sep>cov_x_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).cov(x bias=bias)<assert_stmt><not>(cov_x_x<l>0).any().any()<line_sep># check that var(x) == cov(x, x) tm.assert_equal(var_x cov_x_x)<block_end>@pytest.mark.parametrize("min_periods" [0 1 2 3 4])@pytest.mark.parametrize("bias" [<true> <false>])<def_stmt>test_ewm_consistency_series_cov_corr consistency_data adjust ignore_na min_periods bias<block_start>x,is_constant,no_nans=consistency_data<line_sep>com=3.0<if_stmt>isinstance(x Series)<block_start>var_x_plus_y=((x+x).ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=bias))<line_sep>var_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=bias)<line_sep>var_y=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).var(bias=bias)<line_sep>cov_x_y=x.ewm(com=com min_periods=min_periods adjust=adjust 
ignore_na=ignore_na).cov(x bias=bias)<line_sep># check that cov(x, y) == (var(x+y) - var(x) - # var(y)) / 2 tm.assert_equal(cov_x_y 0.5<times>(var_x_plus_y-var_x-var_y))<line_sep># check that corr(x, y) == cov(x, y) / (std(x) * # std(y)) corr_x_y=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).corr(x bias=bias)<line_sep>std_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).std(bias=bias)<line_sep>std_y=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).std(bias=bias)<line_sep>tm.assert_equal(corr_x_y cov_x_y/(std_x<times>std_y))<if_stmt>bias# check that biased cov(x, y) == mean(x*y) - # mean(x)*mean(y) <block_start>mean_x=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).mean()<line_sep>mean_y=x.ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).mean()<line_sep>mean_x_times_y=((x<times>x).ewm(com=com min_periods=min_periods adjust=adjust ignore_na=ignore_na).mean())<line_sep>tm.assert_equal(cov_x_y mean_x_times_y-(mean_x<times>mean_y))<block_end><block_end><block_end>
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from typing import Callable, Dict

import torch
import torch.nn as nn

from nni.retiarii import model_wrapper
from nni.retiarii.nn.pytorch import NasBench201Cell

__all__ = ['NasBench201']

# Factories for the candidate operations; each takes (C_in, C_out, stride).
OPS_WITH_STRIDE = {
    'none': lambda C_in, C_out, stride: Zero(C_in, C_out, stride),
    'avg_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'avg'),
    'max_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'max'),
    'conv_3x3': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (3, 3), (stride, stride), (1, 1), (1, 1)),
    'conv_1x1': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (1, 1), (stride, stride), (0, 0), (1, 1)),
    'skip_connect': lambda C_in, C_out, stride: nn.Identity() if stride == 1 and C_in == C_out else FactorizedReduce(C_in, C_out, stride),
}

# The five candidate ops of the NAS-Bench-201 cell search space.
PRIMITIVES = ['none', 'skip_connect', 'conv_1x1', 'conv_3x3', 'avg_pool_3x3']


class ReLUConvBN(nn.Module):
    """ReLU -> Conv2d -> BatchNorm2d block."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation):
        super(ReLUConvBN, self).__init__()
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding,
                      dilation=dilation, bias=False),
            nn.BatchNorm2d(C_out)
        )

    def forward(self, x):
        return self.op(x)


class SepConv(nn.Module):
    """Depthwise-separable convolution: ReLU -> depthwise conv -> 1x1 conv -> BN."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation):
        super(SepConv, self).__init__()
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, groups=C_in, bias=False),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out),
        )

    def forward(self, x):
        return self.op(x)


class Pooling(nn.Module):
    """3x3 avg/max pooling, preceded by a 1x1 ReLUConvBN when channels differ."""

    def __init__(self, C_in, C_out, stride, mode):
        super(Pooling, self).__init__()
        if C_in == C_out:
            self.preprocess = None
        else:
            self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1)
        if mode == 'avg':
            self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
        elif mode == 'max':
            self.op = nn.MaxPool2d(3, stride=stride, padding=1)
        else:
            raise ValueError('Invalid mode={:} in Pooling'.format(mode))

    def forward(self, x):
        if self.preprocess:
            x = self.preprocess(x)
        return self.op(x)


class Zero(nn.Module):
    """The 'none' op: emits zeros with the requested channels/stride."""

    def __init__(self, C_in, C_out, stride):
        super(Zero, self).__init__()
        self.C_in = C_in
        self.C_out = C_out
        self.stride = stride
        self.is_zero = True

    def forward(self, x):
        if self.C_in == self.C_out:
            if self.stride == 1:
                return x.mul(0.)
            else:
                # spatial subsampling, then zero out
                return x[:, :, ::self.stride, ::self.stride].mul(0.)
        else:
            shape = list(x.shape)
            shape[1] = self.C_out
            zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device)
            return zeros


class FactorizedReduce(nn.Module):
    """Stride-2 reduction via two parallel offset 1x1 convs, concatenated."""

    def __init__(self, C_in, C_out, stride):
        super(FactorizedReduce, self).__init__()
        self.stride = stride
        self.C_in = C_in
        self.C_out = C_out
        self.relu = nn.ReLU(inplace=False)
        if stride == 2:
            # split the output channels over the two branches
            C_outs = [C_out // 2, C_out - C_out // 2]
            self.convs = nn.ModuleList()
            for i in range(2):
                self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False))
            self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
        else:
            raise ValueError('Invalid stride : {:}'.format(stride))
        self.bn = nn.BatchNorm2d(C_out)

    def forward(self, x):
        x = self.relu(x)
        y = self.pad(x)
        # second branch shifted by one pixel so the two branches sample
        # complementary positions
        out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1)
        out = self.bn(out)
        return out


class ResNetBasicblock(nn.Module):
    """The fixed reduction block used between stages (two 3x3 convs + shortcut)."""

    def __init__(self, inplanes, planes, stride):
        super(ResNetBasicblock, self).__init__()
        assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride)
        self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1)
        self.conv_b = ReLUConvBN(planes, planes, 3, 1, 1, 1)
        if stride == 2:
            self.downsample = nn.Sequential(
                nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
                nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False))
        elif inplanes != planes:
            self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1)
        else:
            self.downsample = None
        self.in_dim = inplanes
        self.out_dim = planes
        self.stride = stride
        self.num_conv = 2

    def forward(self, inputs):
        basicblock = self.conv_a(inputs)
        basicblock = self.conv_b(basicblock)
        if self.downsample is not None:
            inputs = self.downsample(inputs)  # residual
        return inputs + basicblock


@model_wrapper
class NasBench201(nn.Module):
    """The full search space proposed by `NAS-Bench-201 <https://arxiv.org/abs/2001.00326>`__.

    It's a stack of :class:`NasBench201Cell`, with a fixed
    :class:`ResNetBasicblock` reduction between the three stages.
    """

    def __init__(self,
                 stem_out_channels: int = 16,
                 num_modules_per_stack: int = 5,
                 num_labels: int = 10):
        super().__init__()
        self.channels = C = stem_out_channels
        self.num_modules = N = num_modules_per_stack
        self.num_labels = num_labels

        self.stem = nn.Sequential(
            nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(C)
        )

        # three stages of N searchable cells, channels doubling at each
        # fixed reduction block
        layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N
        layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N

        C_prev = C
        self.cells = nn.ModuleList()
        for C_curr, reduction in zip(layer_channels, layer_reductions):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2)
            else:
                # BUG FIX: the factories used to be plain
                # ``lambda C_in, C_out: OPS_WITH_STRIDE[prim](C_in, C_out, 1)``.
                # Python closures bind late, so after the comprehension every
                # lambda saw the *final* value of ``prim`` and all five
                # candidates built the same operation.  Binding ``prim`` as a
                # default argument freezes each factory's primitive.
                ops: Dict[str, Callable[[int, int], nn.Module]] = {
                    prim: (lambda C_in, C_out, prim=prim: OPS_WITH_STRIDE[prim](C_in, C_out, 1))
                    for prim in PRIMITIVES
                }
                cell = NasBench201Cell(ops, C_prev, C_curr, label='cell')
            self.cells.append(cell)
            C_prev = C_curr

        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, self.num_labels)

    def forward(self, inputs):
        feature = self.stem(inputs)
        for cell in self.cells:
            feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), -1)
        logits = self.classifier(out)
        return logits
import os
import sys

import numpy as np
import matplotlib.pyplot as plt
import flopy


def run():
    """Build, run and post-process the steady-state 'lake' MODFLOW example.

    Creates a square model with specified head h1 along all boundaries and
    h2 fixed at the centre cell of the top layer, runs mf2005 inside the
    ./lake workspace and writes three contour figures.  Returns 0.
    """
    workspace = os.path.join("lake")
    # make sure workspace directory exists
    if not os.path.exists(workspace):
        os.makedirs(workspace)

    # figure format: png by default, '--pdf' anywhere on the command line
    # switches to pdf
    fext = "pdf" if any(arg.lower() == "--pdf" for arg in sys.argv[1:]) else "png"

    # save the starting path, then move into the working directory
    cwdpth = os.getcwd()
    os.chdir(workspace)

    # Model parameters: name shared by all MODFLOW files, number of layers
    # Nlay, rows/columns N, side length L, aquifer thickness H and
    # hydraulic conductivity Kh.
    name = "lake_example"
    h1 = 100
    h2 = 90
    Nlay = 10
    N = 101
    L = 400.0
    H = 50.0
    Kh = 1.0

    # MODFLOW model object; exe_name must resolve to the mf2005 executable
    ml = flopy.modflow.Modflow(modelname=name, exe_name="mf2005", version="mf2005")

    # Discretization: equal layer thicknesses; delr/delc derived from L and N.
    bot = np.linspace(-H / Nlay, -H, Nlay)
    delrow = delcol = L / (N - 1)
    flopy.modflow.ModflowDis(
        ml,
        nlay=Nlay,
        nrow=N,
        ncol=N,
        delr=delrow,
        delc=delcol,
        top=0.0,
        botm=bot,
        laycbd=0,
    )

    # Boundary conditions: ibound is 1 everywhere except -1 (fixed head)
    # along the outer edge of every layer and at the centre cell of the top
    # layer.  Starting heads are h1 everywhere except h2 at that centre cell
    # (steady simulation, so only the fixed-head values matter).
    Nhalf = int((N - 1) / 2)
    ibound = np.ones((Nlay, N, N), dtype=int)
    ibound[:, 0, :] = -1
    ibound[:, -1, :] = -1
    ibound[:, :, 0] = -1
    ibound[:, :, -1] = -1
    ibound[0, Nhalf, Nhalf] = -1
    start = h1 * np.ones((N, N))
    start[Nhalf, Nhalf] = h2

    # write external ibound arrays (one per layer) and the starting-head file
    files = []
    hfile = f"{name}_strt.ref"
    np.savetxt(hfile, start)
    hfiles = []
    for kdx in range(Nlay):
        ib_name = f"{name}_ib{kdx + 1:02d}.ref"
        files.append(ib_name)
        hfiles.append(hfile)  # every layer reuses the same starting-head file
        np.savetxt(ib_name, ibound[kdx, :, :], fmt="%5d")
    flopy.modflow.ModflowBas(ml, ibound=files, strt=hfiles)

    # aquifer properties: only the hydraulic conductivity
    flopy.modflow.ModflowLpf(ml, hk=Kh)

    # PCG solver and output control with default settings (the constructors
    # register themselves on the model), then write the input and run
    flopy.modflow.ModflowPcg(ml)
    flopy.modflow.ModflowOc(ml)
    ml.write_input()
    ml.run_model()

    # change back to the starting directory
    os.chdir(cwdpth)

    # Post-processing: read the heads for step/period (0, 0) -- an array of
    # shape (nlay, nrow, ncol) -- and contour the top layer, the bottom
    # layer and a vertical cross-section through the middle row.
    hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
    h = hds.get_data(kstpkper=(0, 0))
    x = y = np.linspace(0, L, N)

    # top layer
    c = plt.contour(x, y, h[0], np.arange(90, 100.1, 0.2))
    plt.clabel(c, fmt="%2.1f")
    plt.axis("scaled")
    outfig = os.path.join(workspace, f"lake1.{fext}")
    fig = plt.gcf()
    fig.savefig(outfig, dpi=300)
    print("created...", outfig)

    # bottom layer
    x = y = np.linspace(0, L, N)
    c = plt.contour(x, y, h[-1], np.arange(90, 100.1, 0.2))
    plt.clabel(c, fmt="%1.1f")
    plt.axis("scaled")
    outfig = os.path.join(workspace, f"lake2.{fext}")
    fig = plt.gcf()
    fig.savefig(outfig, dpi=300)
    print("created...", outfig)

    # vertical cross-section through row 50
    z = np.linspace(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)
    c = plt.contour(x, z, h[:, 50, :], np.arange(90, 100.1, 0.2))
    plt.axis("scaled")
    outfig = os.path.join(workspace, f"lake3.{fext}")
    fig = plt.gcf()
    fig.savefig(outfig, dpi=300)
    print("created...", outfig)

    return 0


if __name__ == "__main__":
    success = run()
<import_from_stmt>changes.api.base APIView<import_from_stmt>changes.lib.coverage get_coverage_by_build_id merged_coverage_data<import_from_stmt>changes.models.build Build<class_stmt>BuildTestCoverageAPIView(APIView)<block_start><def_stmt>get self build_id<block_start>build=Build.query.get(build_id)<if_stmt>build<is><none><block_start><return>'' 404<block_end>coverage=merged_coverage_data(get_coverage_by_build_id(build.id))<line_sep><return>self.respond(coverage)<block_end><block_end>
#Autogenerated schema <import_from_stmt>openpyxl.descriptors.serialisable Serialisable<import_from_stmt>openpyxl.descriptors Typed String Bool Sequence <import_from_stmt>openpyxl.descriptors.excel CellRange<class_stmt>Extension(Serialisable)<block_start>tagname="extension"<line_sep>uri=String(allow_none=<true>)<def_stmt>__init__ self uri=<none> <block_start>self.uri=uri<block_end><block_end><class_stmt>ExtensionList(Serialisable)<block_start>tagname="extensionList"<line_sep># uses element group EG_ExtensionList ext=Sequence(expected_type=Extension)<line_sep>__elements__=('ext' )<def_stmt>__init__ self ext=() <block_start>self.ext=ext<block_end><block_end><class_stmt>IgnoredError(Serialisable)<block_start>tagname="ignoredError"<line_sep>sqref=CellRange<line_sep>evalError=Bool(allow_none=<true>)<line_sep>twoDigitTextYear=Bool(allow_none=<true>)<line_sep>numberStoredAsText=Bool(allow_none=<true>)<line_sep>formula=Bool(allow_none=<true>)<line_sep>formulaRange=Bool(allow_none=<true>)<line_sep>unlockedFormula=Bool(allow_none=<true>)<line_sep>emptyCellReference=Bool(allow_none=<true>)<line_sep>listDataValidation=Bool(allow_none=<true>)<line_sep>calculatedColumn=Bool(allow_none=<true>)<def_stmt>__init__ self sqref=<none> evalError=<false> twoDigitTextYear=<false> numberStoredAsText=<false> formula=<false> formulaRange=<false> unlockedFormula=<false> emptyCellReference=<false> listDataValidation=<false> calculatedColumn=<false> 
<block_start>self.sqref=sqref<line_sep>self.evalError=evalError<line_sep>self.twoDigitTextYear=twoDigitTextYear<line_sep>self.numberStoredAsText=numberStoredAsText<line_sep>self.formula=formula<line_sep>self.formulaRange=formulaRange<line_sep>self.unlockedFormula=unlockedFormula<line_sep>self.emptyCellReference=emptyCellReference<line_sep>self.listDataValidation=listDataValidation<line_sep>self.calculatedColumn=calculatedColumn<block_end><block_end><class_stmt>IgnoredErrors(Serialisable)<block_start>tagname="ignoredErrors"<line_sep>ignoredError=Sequence(expected_type=IgnoredError)<line_sep>extLst=Typed(expected_type=ExtensionList allow_none=<true>)<line_sep>__elements__=('ignoredError' 'extLst')<def_stmt>__init__ self ignoredError=() extLst=<none> <block_start>self.ignoredError=ignoredError<line_sep>self.extLst=extLst<block_end><block_end>
################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# <import_from_stmt>copy deepcopy<import_from_stmt>math sqrt<import_stmt>numpy<as>np<import_from_stmt>.unit_cell_lattice UnitCell UnitCellLattice<import_from_stmt>..geometry Cube<import_from_stmt>..tiling CubicTiling<import_from_stmt>..transform_func ScaleFunc RotateFunc<import_from_stmt>...util.util ListHasPoint<class_stmt>DiamondLattice(UnitCellLattice)<block_start>RefIAD=sqrt(3)/4<line_sep># === STANDARD CONSTRUCTOR <def_stmt>__init__ self IAD<block_start>RefUnitCellShape=Cube(1 BotBackLeftCorner=np.array([0 0 0] dtype=float))<line_sep>RefUnitCellTiling=CubicTiling(RefUnitCellShape)<line_sep>RefFracPositions=[np.array([0.0 0.0 0.0]) np.array([0.5 0.5 0.0]) np.array([0.0 0.5 0.5]) np.array([0.5 0.0 0.5]) np.array([0.25 0.25 0.25]) np.array([0.25 0.75 0.75]) np.array([0.75 0.25 0.75]) np.array([0.75 0.75 0.25])]<line_sep>RefUnitCell=UnitCell(RefUnitCellTiling RefFracPositions)<line_sep>UnitCellLattice.__init__(self RefUnitCell)<line_sep>self._IAD=DiamondLattice.RefIAD# IAD is set correctly after calling applyTransF self.applyTransF(ScaleFunc(IAD/DiamondLattice.RefIAD))<line_sep>self._NthNeighbors=[[[np.array([0.25 0.25 0.25]) np.array([-0.25 -0.25 0.25]) np.array([-0.25 0.25 -0.25]) np.array([0.25 -0.25 
-0.25])] [np.array([-0.25 -0.25 -0.25]) np.array([0.25 0.25 -0.25]) np.array([0.25 -0.25 0.25]) np.array([-0.25 0.25 0.25])]] [[np.array([0.0 0.5 0.5]) np.array([0.0 0.5 -0.5]) np.array([0.0 -0.5 0.5]) np.array([0.0 -0.5 -0.5]) np.array([0.5 0.5 0.0]) np.array([0.5 0.0 0.5]) np.array([0.5 -0.5 0.0]) np.array([0.5 0.0 -0.5]) np.array([-0.5 0.5 0.0]) np.array([-0.5 0.0 0.5]) np.array([-0.5 -0.5 0.0]) np.array([-0.5 0.0 -0.5])] [np.array([0.0 0.5 0.5]) np.array([0.0 0.5 -0.5]) np.array([0.0 -0.5 0.5]) np.array([0.0 -0.5 -0.5]) np.array([0.5 0.5 0.0]) np.array([0.5 0.0 0.5]) np.array([0.5 -0.5 0.0]) np.array([0.5 0.0 -0.5]) np.array([-0.5 0.5 0.0]) np.array([-0.5 0.0 0.5]) np.array([-0.5 -0.5 0.0]) np.array([-0.5 0.0 -0.5])]]]<line_sep>self._typeDict={0:0 3:1}<line_sep>self._relativePositions={0:np.array([0.0 0.0 0.0]) 3:np.array([0.25 0.25 0.25])}<block_end># === CONSTRUCTOR - Aligned with {100} @classmethod<def_stmt>alignedWith100 cls IAD<block_start><return>cls(IAD)<block_end># Default implementation # === CONSTRUCTOR - Aligned with {110} @classmethod<def_stmt>aligndWith110 cls IAD<block_start>result=cls(IAD)<line_sep>thetaX=0<line_sep>thetaY=np.pi<times>0.25<line_sep>thetaZ=0<line_sep>result.applyTransF(RotateFunc.fromXYZAngles(thetaX thetaY thetaZ))<line_sep><return>result<block_end># === CONSTRUCTOR - Aligned with {111} @classmethod<def_stmt>alignedWith111 cls IAD blnTrianglesAlignedWithX=<true><block_start>result=cls(IAD)<line_sep>thetaX=-np.pi<times>0.25<line_sep>thetaY=-np.arctan2(-sqrt(2) 2)<line_sep>thetaZ=(np.pi<times>0.5<if>blnTrianglesAlignedWithX<else>0)<line_sep>result.applyTransF(RotateFunc.fromXYZAngles(thetaX thetaY thetaZ))<line_sep><return>result<block_end># === CONSTRUCTOR - Aligned with {xyz} @classmethod<def_stmt>alignedWith cls IAD MI<block_start><if_stmt>(type(MI)<is>str)<and>(len(MI)<eq>3)<and>all(x.isdigit()<for>x MI)<block_start><if_stmt>MI<in>['100' '010' '001']<block_start><return>cls(IAD)<block_end><elif_stmt>MI<in>['110' '101' 
'011']<block_start><return>cls.aligndWith110(IAD)<block_end><elif_stmt>MI<eq>'111'<block_start><return>cls.alignedWith111(IAD)<block_end><else_stmt><block_start>result=cls(IAD)<line_sep>a=np.array([0.0 0.0 1.0])<line_sep>b=np.array([float(MI[0]) float(MI[1]) float(MI[2])])<line_sep>axis=np.cross(a b)<line_sep>angle=np.arccos(np.dot(a b)/(np.linalg.norm(a)<times>np.linalg.norm(b)))<line_sep>result.applyTransF(RotateFunc.fromAxisAngle(axis angle))<line_sep><return>result<block_end><block_end><return>ValueError('DiamondLattice.alignedWith: Input direction is not correct.')<block_end># === MANIPULATION METHODS <def_stmt>applyTransF self TransF<block_start><if_stmt>isinstance(TransF ScaleFunc)<block_start><if_stmt>TransF.isIsometric<block_start>self._IAD<augmul>TransF.Scale[0]<block_end><else_stmt><block_start><raise>ValueError('DiamondLattice.applyTransF: Can only scale isometrically')<block_end><block_end>UnitCellLattice.applyTransF(self TransF)<block_end># === AUXILIARY METHODS <def_stmt>_getPointType self P<block_start><return>(int(round(P[0]<times>4))+int(round(P[1]<times>4))+int(round(P[2]<times>4)))%4<block_end># === PROPERTY EVALUATION METHODS # NOTE: inherited from UnitCellLattice # def isOnLattice(self,P): <def_stmt>areNeighbors self P1 P2<block_start><return>np.linalg.norm(P2-P1)<le>self.IAD<block_end><def_stmt>getNeighbors self P layer=1<block_start>RefP=self._getConvertToReference(P)<line_sep>PType=self._getPointType(RefP)<if_stmt>PType<not><in>self._typeDict.keys()<block_start><raise>ValueError('DiamondLattice.getNeighbors Should never reach here!')<block_end><if_stmt>layer<g>len(self._NthNeighbors)<block_start>self._calculateNeighbors(layer)<block_end>NBs=deepcopy(self._NthNeighbors[layer-1][self._typeDict[PType]])<for_stmt>NeighP NBs<block_start>NeighP<augadd>RefP<line_sep>self._convertFromReference(NeighP)<block_end><return>NBs<block_end><def_stmt>_calculateNeighbors self layer<block_start>NList=[]<for_stmt>k,v 
self._typeDict.items()<block_start>tmp=[np.array([0 0 0] dtype=float)]<for_stmt>nb self._NthNeighbors<block_start>tmp.extend(nb[v])<block_end>NList.append(tmp)<block_end><for_stmt>_ range(layer-len(self._NthNeighbors))<block_start>tmp=[[]<for>_ self._typeDict.keys()]<for_stmt>k,v self._typeDict.items()<block_start><for_stmt>P self._NthNeighbors[len(self._NthNeighbors)-1][v]<block_start>PType=self._getPointType(P+self._relativePositions[k])<for_stmt>Q self._NthNeighbors[0][self._typeDict[PType]]<block_start>N=P+Q<if_stmt><not>ListHasPoint(NList[v] N 0.001<times>DiamondLattice.RefIAD)<block_start>tmp[v].append(N)<line_sep>NList[v].append(N)<block_end><block_end><block_end><block_end>self._NthNeighbors.append(tmp)<block_end><block_end><def_stmt>isASite self P<block_start>RefP=self._getConvertToReference(P)<line_sep>PType=self._getPointType(RefP)<line_sep><return>PType<eq>0<block_end><def_stmt>isBSite self P<block_start>RefP=self._getConvertToReference(P)<line_sep>PType=self._getPointType(RefP)<line_sep><return>PType<eq>3<block_end><def_stmt>setDesign self D AType BType<block_start><for_stmt>i,P enumerate(D.Canvas.Points)<block_start><if_stmt>self.isASite(P)<block_start>D.setContent(i AType)<block_end><elif_stmt>self.isBSite(P)<block_start>D.setContent(i BType)<block_end><else_stmt><block_start><raise>ValueError('setDesign can not set site not on lattice')<block_end><block_end><block_end># === BASIC QUERY METHODS @property<def_stmt>IAD self<block_start><return>self._IAD<block_end>@property<def_stmt>Diamond100LayerSpacing self<block_start><return>self.IAD/sqrt(3)<block_end>@property<def_stmt>Diamond110LayerSpacing self<block_start><return>self.IAD<times>sqrt(2)/sqrt(3)<block_end>@property<def_stmt>Diamond111LayerSpacing self<block_start><return>self.IAD<times>4/3<block_end>@property<def_stmt>Diamond112LayerSpacing self<block_start><return>self.IAD<times>sqrt(2)/3<block_end><def_stmt>getLayerSpacing self 
MI<block_start><if_stmt>(type(MI)<is>str)<and>(len(MI)<eq>3)<and>all(x.isdigit()<for>x MI)<block_start><if_stmt>MI<in>['100' '010' '001']<block_start><return>self.Diamond100LayerSpacing<block_end><elif_stmt>MI<in>['110' '101' '011']<block_start><return>self.Diamond110LayerSpacing<block_end><elif_stmt>MI<eq>'111'<block_start><return>self.Diamond111LayerSpacing<block_end><elif_stmt>MI<in>['112' '121' '211']<block_start><return>self.Diamond112LayerSpacing<block_end><else_stmt><block_start><raise>NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')<block_end><block_end><return>ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')<block_end><def_stmt>getShellSpacing self MI<block_start><if_stmt>(type(MI)<is>str)<and>(len(MI)<eq>3)<and>all(x.isdigit()<for>x MI)<block_start><if_stmt>MI<in>['100' '010' '001' '110' '101' '011' '111']<block_start><return>self.IAD<times>sqrt(8)/sqrt(3)<block_end><elif_stmt>MI<in>['112' '121' '211']<block_start><return>self.IAD<times>sqrt(2)/sqrt(3)<block_end><else_stmt><block_start><raise>NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')<block_end><block_end><return>ValueError('The input direction is not correct.')<block_end><def_stmt>getUniqueLayerCount self MI<block_start><if_stmt>(type(MI)<is>str)<and>(len(MI)<eq>3)<and>all(x.isdigit()<for>x MI)<block_start><if_stmt>MI<in>['100' '010' '001']<block_start><return>4<block_end><elif_stmt>MI<in>['110' '101' '011']<block_start><return>2<block_end><elif_stmt>MI<eq>'111'<block_start><return>3<block_end><elif_stmt>MI<in>['112' '121' '211']<block_start><return>6<block_end><else_stmt><block_start><raise>NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')<block_end><block_end><return>ValueError('The input direction is not correct.')<block_end><block_end>
# !/usr/bin/python # -*- coding: utf-8 -*- # @time : 2021/7/25 19:30 # @author : Mo # @function: predict model, 预测模块-多类分类 # 适配linux <import_stmt>platform<import_stmt>json<import_stmt>sys<import_stmt>os<line_sep>path_root=os.path.abspath(os.path.join(os.path.dirname(__file__) "../.."))<line_sep>path_sys=os.path.join(path_root "pytorch_nlu" "pytorch_textclassification")<line_sep>print(path_root)<line_sep># os.environ["CUDA_VISIBLE_DEVICES"] = "-1" <import_from_stmt>tcPredict TextClassificationPredict<if_stmt>__name__<eq>"__main__"<block_start>path_config="../output/text_classification/model_ERNIE/tc.config"<line_sep>tcp=TextClassificationPredict(path_config)<line_sep>texts=[{"text":"平乐县,古称昭州,隶属于广西壮族自治区桂林市,位于广西东北部,桂林市东南部,东临钟山县,南接昭平,西北毗邻阳朔,北连恭城,总面积1919.34平方公里。"} {"text":"平乐县主要旅游景点有榕津千年古榕、冷水石景苑、仙家温泉、桂江风景区、漓江风景区等,平乐县为漓江分界点,平乐以北称漓江,以南称桂江,是著名的大桂林旅游区之一。"} {"text":"印岭玲珑,昭水晶莹,环绕我平中。青年的乐园,多士受陶熔。生活自觉自治,学习自发自动。五育并重,手脑并用。迎接新潮流,建设新平中"} {"text":"桂林山水甲天下, 阳朔山水甲桂林"} ]<line_sep>res=tcp.predict(texts logits_type="sigmoid")<line_sep>print(res)<while_stmt><true><block_start>print("请输入:")<line_sep>question=input()<line_sep>res=tcp.predict([{"text":question}] logits_type="sigmoid")<line_sep>print(res)<block_end><block_end>
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests and benchmarks for creating RPC clusters on localhost."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>time<import_stmt>numpy<as>np<import_stmt>portpicker<import_stmt>tensorflow<as>tf<def_stmt>create_local_cluster num_workers num_ps protocol="grpc"<block_start>"""Create local GRPC servers and return their servers."""<line_sep>worker_ports=[portpicker.pick_unused_port()<for>_ range(num_workers)]<line_sep>ps_ports=[portpicker.pick_unused_port()<for>_ range(num_ps)]<line_sep>cluster_dict={"worker":["localhost:%s"%port<for>port worker_ports] "ps":["localhost:%s"%port<for>port ps_ports]}<line_sep>cs=tf.train.ClusterSpec(cluster_dict)<line_sep>workers=[tf.train.Server(cs job_name="worker" protocol=protocol task_index=ix start=<true>)<for>ix range(num_workers)]<line_sep>ps_servers=[tf.train.Server(cs job_name="ps" protocol=protocol task_index=ix start=<true>)<for>ix range(num_ps)]<line_sep><return>workers ps_servers<block_end><class_stmt>CreateLocalClusterTest(tf.test.TestCase)<block_start><def_stmt>testCreateLocalCluster self<block_start>workers,_=create_local_cluster(num_workers=2 num_ps=2)<line_sep>worker_sessions=[tf.Session(w.target)<for>w 
workers]<with_stmt>tf.device("/job:ps/task:0")<block_start>var0=tf.Variable(0.0)<block_end><with_stmt>tf.device("/job:ps/task:1")<block_start>var1=tf.Variable(1.0)<block_end>worker_sessions[0].run([var0.initializer var1.initializer])<with_stmt>tf.device("/job:ps/task:0")<block_start>var2=tf.Variable(2.0)<block_end><with_stmt>tf.device("/job:ps/task:1")<block_start>var3=tf.Variable(3.0)<block_end>worker_sessions[1].run([var2.initializer var3.initializer])<line_sep># Read values back in the opposite session self.assertAllEqual(0.0 var0.eval(session=worker_sessions[1]))<line_sep>self.assertAllEqual(1.0 var1.eval(session=worker_sessions[1]))<line_sep>self.assertAllEqual(2.0 var2.eval(session=worker_sessions[0]))<line_sep>self.assertAllEqual(3.0 var3.eval(session=worker_sessions[0]))<block_end><block_end><class_stmt>CreateLocalClusterBenchmark(tf.test.Benchmark)<block_start><def_stmt>benchmarkCreateLocalCluster self<block_start>deltas=[]<line_sep>iters=5<for_stmt>_ range(iters)<block_start>start_time=time.time()<line_sep>create_local_cluster(num_workers=1 num_ps=10)<line_sep>end_time=time.time()<line_sep>deltas.append(end_time-start_time)<block_end>median_deltas=np.median(deltas)<line_sep>print("\n\nbenchmark_create_local_cluster_1_worker_10_ps. 
"<concat>"iterations: %d, median wall time: %g\n\n"%(iters median_deltas))<line_sep>self.report_benchmark(iters=iters wall_time=median_deltas name="benchmark_create_local_cluster_1_worker_10_ps")<block_end><block_end><class_stmt>PartitionedVariablesBenchmark(tf.test.Benchmark)<block_start><def_stmt>benchmark_create_1000_partitions_with_100_parameter_servers self<block_start>workers,_=create_local_cluster(num_workers=1 num_ps=100)<line_sep>worker_sessions=[tf.Session(w.target)<for>w workers]<line_sep>worker=worker_sessions[0]<line_sep>partition_sizes=(1 512 1024<times>32 1024<times>128)<line_sep>partitioned=[]<for_stmt>partition_size partition_sizes# max_shard_bytes is 4, shape is 1000*partition_size float32s which should # partition into 1000 shards, each containing partition_size float32s. <block_start>print("Building partitioned variable with %d floats per partition"%partition_size)<with_stmt>tf.device(tf.train.replica_device_setter(ps_tasks=100))<block_start>partitioned_ix=tf.get_variable("partitioned_%d"%partition_size shape=[1000<times>partition_size] dtype=tf.float32 # Each partition to have exactly N float32s partitioner=tf.variable_axis_size_partitioner(max_shard_bytes=4<times>partition_size))<line_sep># Concatenates along axis 0 partitioned.append(tf.convert_to_tensor(partitioned_ix))<block_end><block_end>tf.global_variables_initializer().run(session=worker)<for_stmt>ix,partition_size enumerate(partition_sizes)<block_start>print("Running benchmark having partitions with %d floats"%partition_size)<line_sep>self.run_op_benchmark(worker partitioned[ix] name=("read_concat_1000_partitions_from_"<concat>"100_parameter_servers_partsize_%d_floats"%partition_size))<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
<import_stmt>numpy<as>np<class_stmt>ProbAbsoluteShading(object)<block_start><def_stmt>__init__ self params<block_start>self.params=params<block_end><def_stmt>cost self s_nz<block_start><if_stmt>self.params.abs_shading_weight<block_start><if_stmt>self.params.abs_shading_log<block_start><return>self.params.abs_shading_weight<times>np.abs(np.log(s_nz)-np.log(self.params.abs_shading_gray_point))<block_end><else_stmt><block_start><return>self.params.abs_shading_weight<times>np.abs(s_nz-self.params.abs_shading_gray_point)<block_end><block_end><else_stmt><block_start><return>0<block_end><block_end><block_end>
<def_stmt>find_accounts search_text# perform search... <block_start><if_stmt><not>db_is_available<block_start><return><none><block_end># returns a list of account IDs <return>db_search(search_text)<block_end>accounts=find_accounts('python')<if_stmt>accounts<is><none><block_start>print("Error: DB not available")<block_end><else_stmt><block_start>print("Accounts found: Would list them here...")<block_end><def_stmt>db_search search_text<block_start><return>[1 11]<block_end>db_is_availble=<true><line_sep>
""" PyTorch Profiler With TensorBoard ==================================== This tutorial demonstrates how to use TensorBoard plugin with PyTorch Profiler to detect performance bottlenecks of the model. Introduction ------------ PyTorch 1.8 includes an updated profiler API capable of recording the CPU side operations as well as the CUDA kernel launches on the GPU side. The profiler can visualize this information in TensorBoard Plugin and provide analysis of the performance bottlenecks. In this tutorial, we will use a simple Resnet model to demonstrate how to use TensorBoard plugin to analyze model performance. Setup ----- To install ``torch`` and ``torchvision`` use the following command: :: pip install torch torchvision """<line_sep>###################################################################### # Steps # ----- # # 1. Prepare the data and model # 2. Use profiler to record execution events # 3. Run the profiler # 4. Use TensorBoard to view results and analyze model performance # 5. Improve performance with the help of profiler # 6. Analyze performance with other advanced features # # 1. Prepare the data and model # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # First, import all necessary libraries: # <import_stmt>torch<import_stmt>torch.nn<import_stmt>torch.optim<import_stmt>torch.profiler<import_stmt>torch.utils.data<import_stmt>torchvision.datasets<import_stmt>torchvision.models<import_stmt>torchvision.transforms<as>T<line_sep>###################################################################### # Then prepare the input data. For this tutorial, we use the CIFAR10 dataset. # Transform it to the desired format and use DataLoader to load each batch. 
transform=T.Compose([T.Resize(224) T.ToTensor() T.Normalize((0.5 0.5 0.5) (0.5 0.5 0.5))])<line_sep>train_set=torchvision.datasets.CIFAR10(root='./data' train=<true> download=<true> transform=transform)<line_sep>train_loader=torch.utils.data.DataLoader(train_set batch_size=32 shuffle=<true>)<line_sep>###################################################################### # Next, create Resnet model, loss function, and optimizer objects. # To run on GPU, move model and loss to GPU device. device=torch.device("cuda:0")<line_sep>model=torchvision.models.resnet18(pretrained=<true>).cuda(device)<line_sep>criterion=torch.nn.CrossEntropyLoss().cuda(device)<line_sep>optimizer=torch.optim.SGD(model.parameters() lr=0.001 momentum=0.9)<line_sep>model.train()<line_sep>###################################################################### # Define the training step for each batch of input data. <def_stmt>train data<block_start>inputs,labels=data[0].to(device=device) data[1].to(device=device)<line_sep>outputs=model(inputs)<line_sep>loss=criterion(outputs labels)<line_sep>optimizer.zero_grad()<line_sep>loss.backward()<line_sep>optimizer.step()<block_end>###################################################################### # 2. Use profiler to record execution events # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # The profiler is enabled through the context manager and accepts several parameters, # some of the most useful are: # # - ``schedule`` - callable that takes step (int) as a single parameter # and returns the profiler action to perform at each step. # # In this example with ``wait=1, warmup=1, active=3, repeat=2``, # profiler will skip the first step/iteration, # start warming up on the second, # record the following three iterations, # after which the trace will become available and on_trace_ready (when set) is called. # In total, the cycle repeats twice. Each cycle is called a "span" in TensorBoard plugin. # # During ``wait`` steps, the profiler is disabled. 
# During ``warmup`` steps, the profiler starts tracing but the results are discarded. # This is for reducing the profiling overhead. # The overhead at the beginning of profiling is high and easy to bring skew to the profiling result. # During ``active`` steps, the profiler works and records events. # - ``on_trace_ready`` - callable that is called at the end of each cycle; # In this example we use ``torch.profiler.tensorboard_trace_handler`` to generate result files for TensorBoard. # After profiling, result files will be saved into the ``./log/resnet18`` directory. # Specify this directory as a ``logdir`` parameter to analyze profile in TensorBoard. # - ``record_shapes`` - whether to record shapes of the operator inputs. # - ``profile_memory`` - Track tensor memory allocation/deallocation. # - ``with_stack`` - Record source information (file and line number) for the ops. # If the TensorBoard is launched in VSCode (`reference <https://code.visualstudio.com/docs/datascience/pytorch-support#_tensorboard-integration>`_), # clicking a stack frame will navigate to the specific code line. <with_stmt>torch.profiler.profile(schedule=torch.profiler.schedule(wait=1 warmup=1 active=3 repeat=2) on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18') record_shapes=<true> with_stack=<true>)<as>prof<block_start><for_stmt>step,batch_data enumerate(train_loader)<block_start><if_stmt>step<ge>(1+1+3)<times>2<block_start><break><block_end>train(batch_data)<line_sep>prof.step()<block_end><block_end># Need to call this at the end of each step to notify profiler of steps' boundary. ###################################################################### # 3. Run the profiler # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Run the above code. The profiling result will be saved under ``./log/resnet18`` directory. ###################################################################### # 4. 
Use TensorBoard to view results and analyze model performance # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Install PyTorch Profiler TensorBoard Plugin. # # :: # # pip install torch_tb_profiler # ###################################################################### # Launch the TensorBoard. # # :: # # tensorboard --logdir=./log # ###################################################################### # Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser. # # :: # # http://localhost:6006/#pytorch_profiler # ###################################################################### # You could see Profiler plugin page as shown below. # # - Overview # .. image:: ../../_static/img/profiler_overview1.png # :scale: 25 % # # The overview shows a high-level summary of model performance. # # The "GPU Summary" panel shows the GPU configuration and the GPU usage. # In this example, the GPU Utilization is low. # The details of these metrics are `here <https://github.com/guyang3532/kineto/blob/readme/tb_plugin/docs/gpu_utilization.md>`_. # # The "Step Time Breakdown" shows distribution of time spent in each step over different categories of execution. # In this example, you can see the ``DataLoader`` overhead is significant. # # The bottom "Performance Recommendation" uses the profiling data # to automatically highlight likely bottlenecks, # and gives you actionable optimization suggestions. # # You can change the view page in left "Views" dropdown list. # # .. image:: ../../_static/img/profiler_views_list.png # :alt: # # # - Operator view # The operator view displays the performance of every PyTorch operator # that is executed either on the host or device. # # .. image:: ../../_static/img/profiler_operator_view.png # :scale: 25 % # The "Self" duration does not include its child operators’ time. # The "Total" duration includes its child operators’ time. 
# # - View call stack # Click the "View Callstack" of an operator, the operators with same name but different call stacks will be shown. # Then click a "View Callstack" in this sub-table, the call stack frames will be shown. # # .. image:: ../../_static/img/profiler_callstack.png # :scale: 25 % # # If the TensorBoard is launched inside VSCode # (`Launch Guide <https://devblogs.microsoft.com/python/python-in-visual-studio-code-february-2021-release/#tensorboard-integration>`_), # clicking a call stack frame will navigate to the specific code line. # # .. image:: ../../_static/img/profiler_vscode.png # :scale: 25 % # # # - Kernel view # The GPU kernel view shows all kernels’ time spent on GPU. # # .. image:: ../../_static/img/profiler_kernel_view.png # :scale: 25 % # Mean Blocks per SM: # Blocks per SM = Blocks of this kernel / SM number of this GPU. # If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized. # "Mean Blocks per SM" is weighted average of all runs of this kernel name, using each run’s duration as weight. # # Mean Est. Achieved Occupancy: # Est. Achieved Occupancy is defined in this column’s tooltip. # For most cases such as memory bandwidth bounded kernels, the higher the better. # "Mean Est. Achieved Occupancy" is weighted average of all runs of this kernel name, # using each run’s duration as weight. # # - Trace view # The trace view shows timeline of profiled operators and GPU kernels. # You can select it to see details as below. # # .. image:: ../../_static/img/profiler_trace_view1.png # :scale: 25 % # # You can move the graph and zoom in/out with the help of right side toolbar. # And keyboard can also be used to zoom and move around inside the timeline. # The ‘w’ and ‘s’ keys zoom in centered around the mouse, # and the ‘a’ and ‘d’ keys move the timeline left and right. # You can hit these keys multiple times until you see a readable representation. 
# # In this example, we can see the event prefixed with ``enumerate(DataLoader)`` costs a lot of time. # And during most of this period, the GPU is idle. # Because this function is loading data and transforming data on host side, # during which the GPU resource is wasted. ###################################################################### # 5. Improve performance with the help of profiler # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # At the bottom of "Overview" page, the suggestion in "Performance Recommendation" hints the bottleneck is DataLoader. # The PyTorch DataLoader uses single process by default. # User could enable multi-process data loading by setting the parameter ``num_workers``. # `Here <https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading>`_ is more details. # # In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below, # pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again. # # :: # # train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4) # ###################################################################### # Then let’s choose the recently profiled run in left "Runs" dropdown list. # # .. image:: ../../_static/img/profiler_overview2.png # :scale: 25 % # # From the above view, we can find the step time is reduced to about 58ms comparing with previous run's 121ms, # and the time reduction of ``DataLoader`` mainly contributes. # # .. image:: ../../_static/img/profiler_trace_view2.png # :scale: 25 % # # From the above view, we can see that the runtime of ``enumerate(DataLoader)`` is reduced, # and the GPU utilization is increased. ###################################################################### # 6. 
Analyze performance with other advanced features # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # - Memory view # To profile memory, please add ``profile_memory=True`` in arguments of ``torch.profiler.profile``. # # Note: Because of the current non-optimized implementation of PyTorch profiler, # enabling ``profile_memory=True`` will take about several minutes to finish. # To save time, you can try our existing examples first by running: # # :: # # tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo # # The profiler records all memory allocation/release events during profiling. # For every specific operator, the plugin aggregates all these memory events inside its life span. # # .. image:: ../../_static/img/profiler_memory_view.png # :scale: 25 % # # The memory type could be selected in "Device" selection box. # For example, "GPU0" means the following table only shows each operator’s memory usage on GPU 0, not including CPU or other GPUs. # # The "Size Increase" sums up all allocation bytes and minus all the memory release bytes. # # The "Allocation Size" sums up all allocation bytes without considering the memory release. # # - Distributed view # The plugin now supports distributed view on profiling DDP with NCCL as backend. # # You can try it by using existing example on Azure: # # :: # # tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert # # .. image:: ../../_static/img/profiler_distributed_view.png # :scale: 25 % # # The "Computation/Communication Overview" shows computation/communication ratio and their overlapping degree. # From this view, User can figure out load balance issue among workers. # For example, if the computation + overlapping time of one worker is much larger than others, # there may be a problem of load balance or this worker may be a straggler. 
# # The "Synchronizing/Communication Overview" shows the efficiency of communication. # "Data Transfer Time" is the time for actual data exchanging. # "Synchronizing Time" is the time for waiting and synchronizing with other workers. # # If one worker’s "Synchronizing Time" is much shorter than that of other workers’, # this worker may be a straggler which may have more computation workload than other workers’. # # The "Communication Operations Stats" summarizes the detailed statistics of all communication ops in each worker. ###################################################################### # Learn More # ---------- # # Take a look at the following documents to continue your learning, # and feel free to open an issue `here <https://github.com/pytorch/kineto/issues>`_. # # - `Pytorch TensorBoard Profiler github <https://github.com/pytorch/kineto/tree/master/tb_plugin>`_ # - `torch.profiler API <https://pytorch.org/docs/master/profiler.html>`_
# python 3.7
"""Utility functions for image editing from latent space."""

import os.path

import numpy as np

__all__ = [
    'parse_indices', 'interpolate', 'mix_style',
    'get_layerwise_manipulation_strength', 'manipulate', 'parse_boundary_list'
]


def parse_indices(obj, min_val=None, max_val=None):
    """Parses indices.

    If the input is a list or tuple, this function has no effect.

    The input can also be a string, which is either a comma separated list of
    numbers 'a, b, c', or a dash separated range 'a - c'. Space in the string
    will be ignored.

    Args:
      obj: The input object to parse indices from.
      min_val: If not `None`, this function will check that all indices are
        equal to or larger than this value. (default: None)
      max_val: If not `None`, this function will check that all indices are
        equal to or smaller than this value. (default: None)

    Returns:
      A sorted list of unique integers.

    Raises:
      ValueError: If the input is invalid, i.e., neither a list or tuple, nor
        a string.
    """
    if obj is None or obj == '':
        indices = []
    elif isinstance(obj, int):
        indices = [obj]
    elif isinstance(obj, (list, tuple, np.ndarray)):
        indices = list(obj)
    elif isinstance(obj, str):
        indices = []
        splits = obj.replace(' ', '').split(',')
        for split in splits:
            numbers = list(map(int, split.split('-')))
            if len(numbers) == 1:
                indices.append(numbers[0])
            elif len(numbers) == 2:
                # 'a-b' expands to the inclusive range [a, b].
                indices.extend(list(range(numbers[0], numbers[1] + 1)))
            # NOTE(review): segments with more than one dash are silently
            # ignored, matching the original behavior.
    else:
        raise ValueError(f'Invalid type of input: {type(obj)}!')

    assert isinstance(indices, list)
    indices = sorted(list(set(indices)))
    for idx in indices:
        assert isinstance(idx, int)
        if min_val is not None:
            assert idx >= min_val, f'{idx} is smaller than min val `{min_val}`!'
        if max_val is not None:
            assert idx <= max_val, f'{idx} is larger than max val `{max_val}`!'

    return indices


def interpolate(src_codes, dst_codes, step=5):
    """Interpolates two sets of latent codes linearly.

    Args:
      src_codes: Source codes, with shape [num, *code_shape].
      dst_codes: Target codes, with shape [num, *code_shape].
      step: Number of interpolation steps, with source and target included.
        For example, if `step = 5`, three more samples will be inserted.
        (default: 5)

    Returns:
      Interpolated codes, with shape [num, step, *code_shape].

    Raises:
      ValueError: If the input two sets of latent codes are with different
        shapes.
    """
    if not (src_codes.ndim >= 2 and src_codes.shape == dst_codes.shape):
        raise ValueError(f'Shapes of source codes and target codes should both be '
                         f'[num, *code_shape], but {src_codes.shape} and '
                         f'{dst_codes.shape} are received!')
    num = src_codes.shape[0]
    code_shape = src_codes.shape[1:]

    a = src_codes[:, np.newaxis]
    b = dst_codes[:, np.newaxis]
    # Interpolation factors in [0, 1], broadcast along the new `step` axis.
    l = np.linspace(0.0, 1.0, step).reshape(
        [step if axis == 1 else 1 for axis in range(a.ndim)])
    results = a + l * (b - a)
    assert results.shape == (num, step, *code_shape)

    return results


def mix_style(style_codes,
              content_codes,
              num_layers=1,
              mix_layers=None,
              is_style_layerwise=True,
              is_content_layerwise=True):
    """Mixes styles from style codes to those of content codes.

    Each style code or content code consists of `num_layers` codes, each of
    which is typically fed into a particular layer of the generator. This
    function mixes styles by partially replacing the codes of `content_codes`
    from some certain layers with those of `style_codes`.

    For example, if both style code and content code are with shape [10, 512],
    meaning to have 10 layers and each employs a 512-dimensional latent code.
    And the 1st, 2nd, and 3rd layers are the target layers to perform style
    mixing. Then the top half of the content code (with shape [3, 512]) will
    be replaced by the top half of the style code (also with shape [3, 512]).

    NOTE: This function also supports taking single-layer latent codes as
    inputs, i.e., setting `is_style_layerwise` or `is_content_layerwise` as
    False. In this case, the corresponding code will be first repeated for
    `num_layers` before performing style mixing.

    Args:
      style_codes: Style codes, with shape [num_styles, *code_shape] or
        [num_styles, num_layers, *code_shape].
      content_codes: Content codes, with shape [num_contents, *code_shape] or
        [num_contents, num_layers, *code_shape].
      num_layers: Total number of layers in the generative model. (default: 1)
      mix_layers: Indices of the layers to perform style mixing. `None` means
        to replace all layers, in which case the content code will be
        completely replaced by style code. (default: None)
      is_style_layerwise: Indicating whether the input `style_codes` are
        layer-wise codes. (default: True)
      is_content_layerwise: Indicating whether the input `content_codes` are
        layer-wise codes. (default: True)

    Returns:
      Codes after style mixing, with shape
        [num_styles, num_contents, num_layers, *code_shape].

    Raises:
      ValueError: If input `content_codes` or `style_codes` is with invalid
        shape.
    """
    if not is_style_layerwise:
        style_codes = style_codes[:, np.newaxis]
        style_codes = np.tile(
            style_codes,
            [num_layers if axis == 1 else 1
             for axis in range(style_codes.ndim)])
    if not is_content_layerwise:
        content_codes = content_codes[:, np.newaxis]
        content_codes = np.tile(
            content_codes,
            [num_layers if axis == 1 else 1
             for axis in range(content_codes.ndim)])

    if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
            style_codes.shape[1:] == content_codes.shape[1:]):
        raise ValueError(f'Shapes of style codes and content codes should be '
                         f'[num_styles, num_layers, *code_shape] and '
                         f'[num_contents, num_layers, *code_shape] respectively, '
                         f'but {style_codes.shape} and {content_codes.shape} are '
                         f'received!')

    layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
    if not layer_indices:
        layer_indices = list(range(num_layers))

    num_styles = style_codes.shape[0]
    num_contents = content_codes.shape[0]
    code_shape = content_codes.shape[2:]

    # Broadcast styles and contents to a common
    # [num_styles, num_contents, num_layers, *code_shape] grid.
    s = style_codes[:, np.newaxis]
    s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
    c = content_codes[np.newaxis]
    c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])

    from_style = np.zeros(s.shape, dtype=bool)
    from_style[:, :, layer_indices] = True
    results = np.where(from_style, s, c)
    assert results.shape == (num_styles, num_contents, num_layers, *code_shape)

    return results


def get_layerwise_manipulation_strength(num_layers,
                                        truncation_psi,
                                        truncation_layers):
    """Gets layer-wise strength for manipulation.

    Recall the truncation trick played on layer [0, truncation_layers):

    w = truncation_psi * w + (1 - truncation_psi) * w_avg

    So, when using the same boundary to manipulate different layers, layer
    [0, truncation_layers) and layer [truncation_layers, num_layers) should use
    different strength to eliminate the effect from the truncation trick. More
    concretely, the strength for layer [0, truncation_layers) is set as
    `truncation_psi`, while that for other layers are set as 1.
    """
    # Clamp so an out-of-range `truncation_layers` cannot index past the list.
    n_truncated = max(0, min(truncation_layers, num_layers))
    return [truncation_psi] * n_truncated + [1.0] * (num_layers - n_truncated)


def manipulate(latent_codes,
               boundary,
               start_distance=-5.0,
               end_distance=5.0,
               step=21,
               layerwise_manipulation=False,
               num_layers=1,
               manipulate_layers=None,
               is_code_layerwise=False,
               is_boundary_layerwise=False,
               layerwise_manipulation_strength=1.0):
    """Manipulates the given latent codes with respect to a particular boundary.

    Basically, this function takes a set of latent codes and a boundary as
    inputs, and outputs a collection of manipulated latent codes. For example,
    let `step` to be 10, `latent_codes` to be with shape [num, *code_shape],
    and `boundary` to be with shape [1, *code_shape] and unit norm. Then the
    output will be with shape [num, 10, *code_shape]. For each 10-element
    manipulated codes, the first code is `start_distance` away from the
    original code (i.e., the input) along the `boundary` direction, while the
    last code is `end_distance` away. Remaining codes are linearly
    interpolated. Here, `distance` is sign sensitive.

    NOTE: This function also supports layer-wise manipulation, in which case
    the generator should be able to take layer-wise latent codes as inputs.
    For example, if the generator has 18 convolutional layers in total, and
    each of which takes an independent latent code as input. It is possible,
    sometimes with even better performance, to only partially manipulate these
    latent codes corresponding to some certain layers yet keeping others
    untouched.

    NOTE: Boundary is assumed to be normalized to unit norm already.

    Args:
      latent_codes: The input latent codes for manipulation, with shape
        [num, *code_shape] or [num, num_layers, *code_shape].
      boundary: The semantic boundary as reference, with shape
        [1, *code_shape] or [1, num_layers, *code_shape].
      start_distance: Start point for manipulation. (default: -5.0)
      end_distance: End point for manipulation. (default: 5.0)
      step: Number of manipulation steps. (default: 21)
      layerwise_manipulation: Whether to perform layer-wise manipulation.
        (default: False)
      num_layers: Number of layers. Only active when `layerwise_manipulation`
        is set as `True`. Should be a positive integer. (default: 1)
      manipulate_layers: Indices of the layers to perform manipulation. `None`
        means to manipulate latent codes from all layers. (default: None)
      is_code_layerwise: Whether the input latent codes are layer-wise. If set
        as `False`, the function will first repeat the input codes for
        `num_layers` times before perform manipulation. (default: False)
      is_boundary_layerwise: Whether the input boundary is layer-wise. If set
        as `False`, the function will first repeat boundary for `num_layers`
        times before perform manipulation. (default: False)
      layerwise_manipulation_strength: Manipulation strength for each layer.
        Only active when `layerwise_manipulation` is set as `True`. This field
        can be used to resolve the strength discrepancy across layers when
        truncation trick is on. See function
        `get_layerwise_manipulation_strength()` for details. A tuple, list, or
        `numpy.ndarray` is expected. If set as a single number, this strength
        will be used for all layers. (default: 1.0)

    Returns:
      Manipulated codes, with shape [num, step, *code_shape] if
        `layerwise_manipulation` is set as `False`, or shape
        [num, step, num_layers, *code_shape] if `layerwise_manipulation` is
        set as `True`.

    Raises:
      ValueError: If the input latent codes, boundary, or strength are with
        invalid shape.
    """
    if not (boundary.ndim >= 2 and boundary.shape[0] == 1):
        raise ValueError(f'Boundary should be with shape [1, *code_shape] or '
                         f'[1, num_layers, *code_shape], but '
                         f'{boundary.shape} is received!')

    if not layerwise_manipulation:
        assert not is_code_layerwise
        assert not is_boundary_layerwise
        num_layers = 1
        manipulate_layers = None
        layerwise_manipulation_strength = 1.0

    # Preprocessing for layer-wise manipulation.
    # Parse indices of manipulation layers.
    layer_indices = parse_indices(
        manipulate_layers, min_val=0, max_val=num_layers - 1)
    if not layer_indices:
        layer_indices = list(range(num_layers))
    # Make latent codes layer-wise if needed.
    assert num_layers > 0
    if not is_code_layerwise:
        x = latent_codes[:, np.newaxis]
        x = np.tile(x, [num_layers if axis == 1 else 1 for axis in range(x.ndim)])
    else:
        x = latent_codes
        if x.shape[1] != num_layers:
            raise ValueError(f'Latent codes should be with shape [num, num_layers, '
                             f'*code_shape], where `num_layers` equals to '
                             f'{num_layers}, but {x.shape} is received!')
    # Make boundary layer-wise if needed.
    if not is_boundary_layerwise:
        b = boundary
        b = np.tile(b, [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
    else:
        b = boundary[0]
        if b.shape[0] != num_layers:
            raise ValueError(f'Boundary should be with shape [num_layers, '
                             f'*code_shape], where `num_layers` equals to '
                             f'{num_layers}, but {b.shape} is received!')
    # Get layer-wise manipulation strength.
    if isinstance(layerwise_manipulation_strength, (int, float)):
        s = [float(layerwise_manipulation_strength) for _ in range(num_layers)]
    elif isinstance(layerwise_manipulation_strength, (list, tuple)):
        s = layerwise_manipulation_strength
        if len(s) != num_layers:
            raise ValueError(f'Shape of layer-wise manipulation strength `{len(s)}` '
                             f'mismatches number of layers `{num_layers}`!')
    elif isinstance(layerwise_manipulation_strength, np.ndarray):
        s = layerwise_manipulation_strength
        if s.size != num_layers:
            raise ValueError(f'Shape of layer-wise manipulation strength `{s.size}` '
                             f'mismatches number of layers `{num_layers}`!')
    else:
        raise ValueError('Unsupported type of `layerwise_manipulation_strength`!')
    # Fold the per-layer strength into the boundary once, up front.
    s = np.array(s).reshape(
        [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
    b = b * s

    if x.shape[1:] != b.shape:
        raise ValueError(f'Latent code shape {x.shape} and boundary shape '
                         f'{b.shape} mismatch!')
    num = x.shape[0]
    code_shape = x.shape[2:]

    x = x[:, np.newaxis]
    b = b[np.newaxis, np.newaxis, :]
    # Manipulation distances, broadcast along the `step` axis.
    l = np.linspace(start_distance, end_distance, step).reshape(
        [step if axis == 1 else 1 for axis in range(x.ndim)])
    results = np.tile(x, [step if axis == 1 else 1 for axis in range(x.ndim)])
    # Only layers selected in `layer_indices` receive the shift; all other
    # layers keep the original code.
    is_manipulatable = np.zeros(results.shape, dtype=bool)
    is_manipulatable[:, :, layer_indices] = True
    results = np.where(is_manipulatable, x + l * b, results)
    assert results.shape == (num, step, num_layers, *code_shape)

    return results if layerwise_manipulation else results[:, :, 0]


def parse_boundary_list(boundary_list_path):
    """Parses boundary list.

    Sometimes, a text file containing a list of boundaries will significantly
    simplify image manipulation with a large amount of boundaries. This
    function is used to parse boundary information from such list file.

    Basically, each item in the list should be with format
    `($NAME, $SPACE_TYPE): $PATH`. `DISABLE` at the beginning of the line can
    disable a particular boundary.

    Sample:

    (age, z): $AGE_BOUNDARY_PATH
    (gender, w): $GENDER_BOUNDARY_PATH
    DISABLE(pose, wp): $POSE_BOUNDARY_PATH

    Args:
      boundary_list_path: Path to the boundary list.

    Returns:
      A dictionary, whose key is a two-element tuple
        (boundary_name, space_type) and value is the corresponding boundary
        path.

    Raises:
      ValueError: If the given boundary list does not exist.
    """
    if not os.path.isfile(boundary_list_path):
        # BUG FIX: the original f-string lacked braces, so the offending path
        # was never interpolated into the error message.
        raise ValueError(f'Boundary list `{boundary_list_path}` does not exist!')

    boundaries = {}
    with open(boundary_list_path, 'r') as f:
        for line in f:
            if line.startswith('DISABLE'):
                continue
            boundary_info, boundary_path = line.strip().split(':')
            boundary_name, space_type = boundary_info.strip()[1:-1].split(',')
            boundary_name = boundary_name.strip()
            space_type = space_type.strip().lower()
            boundary_path = boundary_path.strip()
            boundaries[(boundary_name, space_type)] = boundary_path
    return boundaries
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# DEPTHWISE_CONV_2D where filter and bias are model inputs (not constants),
# executed with relaxed (FP16-accumulation) precision.
model = Model()
ifm = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")   # input feature map
filt = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")  # depthwise filter
bias = Input("op3", "TENSOR_FLOAT32", "{4}")
pad_zero = Int32Scalar("pad0", 0)                      # explicit zero padding
act_none = Int32Scalar("act", 0)                       # no fused activation
stride_one = Int32Scalar("stride", 1)
channel_mult = Int32Scalar("channelMultiplier", 2)
ofm = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("DEPTHWISE_CONV_2D", ifm, filt, bias,
                        pad_zero, pad_zero, pad_zero, pad_zero,
                        stride_one, stride_one, channel_mult,
                        act_none).To(ofm)
model = model.RelaxedExecution(True)

# Example 1. Input in operand 0: (ifm (conv) filt) + bias
# filter usage:
#   in_ch1 * f_1 --> output_d1
#   in_ch1 * f_2 --> output_d2
#   in_ch2 * f_3 --> output_d3
#   in_ch3 * f_4 --> output_d4
input0 = {
    ifm: [10, 21, 10, 22, 10, 23,
          10, 24, 10, 25, 10, 26,
          10, 27, 10, 28, 10, 29],
    filt: [.25, 0, .2, 0,
           .25, 0, 0, .3,
           .25, 0, 0, 0,
           .25, .1, 0, 0],
    bias: [1, 2, 3, 4],
}
output0 = {
    ofm: [11, 3, 7.2, 10.6,
          11, 3, 7.4, 10.9,
          11, 3, 7.8, 11.5,
          11, 3, 8.0, 11.8],
}

# Instantiate an example
Example((input0, output0))
import numpy as np
import os

from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import read_image

# Per-object diameters of the LINEMOD objects. NOTE(review): presumably in
# meters and used for the ADD pose-accuracy threshold -- confirm against the
# evaluation code.
linemod_object_diameters = {
    'ape': 0.103,
    'benchvise': 0.286908,
    'cam': 0.173,
    'can': 0.202,
    'cat': 0.155,
    'driller': 0.262,
    'duck': 0.109,
    'eggbox': 0.176364,
    'glue': 0.176,
    'holepuncher': 0.162,
    'iron': 0.303153,
    'lamp': 0.285155,
    'phone': 0.213,
}


class LinemodDataset(GetterDataset):
    """LINEMOD dataset yielding images, 2D keypoints and class labels.

    Args:
        base_dir (str): Directory that contains the ``LINEMOD`` folder.
        obj_name (str): Target object name, e.g. ``'ape'``.
        split (str): Name of the split file (without extension), e.g.
            ``'train'`` or ``'test'``.
        return_msk (bool): If ``True``, additionally expose a boolean
            foreground mask under the ``'msk'`` key.
    """

    def __init__(self, base_dir, obj_name='ape', split='train',
                 return_msk=False):
        super(LinemodDataset, self).__init__()
        split_path = os.path.join(
            base_dir, 'LINEMOD', obj_name, '{}.txt'.format(split))
        self.base_dir = base_dir
        with open(split_path, 'r') as f:
            self.img_paths = f.readlines()

        self.add_getter(('img', 'point', 'label'), self._get_example)
        if return_msk:
            self.add_getter('msk', self._get_msk)

    def __len__(self):
        return len(self.img_paths)

    def _get_example(self, i):
        """Return ``(img, point, label)`` for sample ``i``.

        ``point`` has shape ``(R, 9, 2)`` in pixel coordinates; ``label``
        has shape ``(R,)``.
        """
        img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
        img = read_image(img_path)
        # BUG FIX: H and W are used below even when the annotation file is
        # empty; previously they were unpacked only inside the branch, which
        # raised NameError for images without annotations.
        _, H, W = img.shape
        anno_path = img_path.replace('images', 'labels').replace(
            'JPEGImages', 'labels').replace(
            '.jpg', '.txt').replace('.png', '.txt')
        # Fixed-size buffer: at most 50 annotation rows of 21 values each.
        anno = np.zeros(50 * 21)
        if os.path.getsize(anno_path):
            tmp = read_truths_args(anno_path, 8.0 / W)
            size = tmp.size
            # NOTE(review): `tmp` is 2-D with 19 columns while `anno` is a
            # flat 50*21 buffer; verify that these shapes line up with the
            # label-file format before relying on this assignment.
            if size > 50 * 21:
                anno = tmp[0:50 * 21]
            elif size > 0:
                anno[0:size] = tmp
        anno = anno.reshape(-1, 21)
        anno = anno[:truths_length(anno)]
        point = anno[:, 1:19].reshape(-1, 9, 2).astype(np.float32)
        # Annotations store normalized coordinates; scale to pixels.
        point[:, :, 0] *= W
        point[:, :, 1] *= H
        label = anno[:, 0].astype(np.int32)
        return img, point, label

    def _get_msk(self, i):
        """Return a boolean foreground mask for sample ``i``."""
        img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
        mskpath = img_path.replace('JPEGImages', 'mask').replace(
            '/00', '/').replace('.jpg', '.png')
        msk = read_image(mskpath, color=False)[0]
        return msk > 0


def truths_length(truths):
    """Number of valid leading rows before the first zero-padded row.

    A row whose second element is zero marks the end of real annotations in
    the padded 50-row buffer.
    """
    for i in range(50):
        if truths[i][1] == 0:
            return i
    # BUG FIX: previously fell through and returned None implicitly when all
    # 50 rows were populated; `anno[:None]` happened to keep all rows, but an
    # explicit full length is clearer and safer for other callers.
    return 50


def read_truths(lab_path):
    """Load a label file as a float array of shape ``(n, 21)``.

    Returns an empty 1-D array when the file has zero size.
    """
    if os.path.getsize(lab_path):
        truths = np.loadtxt(lab_path)
        # to avoid single truth problem: force 2-D even for one row
        truths = truths.reshape(truths.size // 21, 21)
        return truths
    else:
        return np.array([])


def read_truths_args(lab_path, min_box_scale):
    """Return the first 19 columns (label + 9 keypoints) of each truth row.

    NOTE(review): ``min_box_scale`` is accepted but currently unused --
    confirm whether small-box filtering was intended here.
    """
    truths = read_truths(lab_path)
    if truths.size == 0:
        return truths
    # Vectorized equivalent of copying columns 0..18 row by row.
    return np.array(truths[:, :19])
# -*- encoding: utf-8 -*- """Utility functions for computing combinations of dimensions and hierarchy levels"""<import_from_future_stmt> absolute_import<import_stmt>re<import_stmt>os.path<import_stmt>json<import_from_stmt>collections OrderedDict<import_from_stmt>.errors ModelInconsistencyError ArgumentError ConfigurationError<import_from_stmt>. compat<line_sep>__all__=["IgnoringDictionary" "MissingPackage" "localize_common" "localize_attributes" "get_localizable_attributes" "decamelize" "to_identifier" "assert_instance" "assert_all_instances" "read_json_file" "sorted_dependencies" ]<class_stmt>IgnoringDictionary(OrderedDict)<block_start>"""Simple dictionary extension that will ignore any keys of which values are empty (None/False)"""<def_stmt>__setitem__ self key value<block_start><if_stmt>value<is><not><none><block_start>super(IgnoringDictionary self).__setitem__(key value)<block_end><block_end><def_stmt>set self key value<block_start>"""Sets `value` for `key` even if value is null."""<line_sep>super(IgnoringDictionary self).__setitem__(key value)<block_end><def_stmt>__repr__ self<block_start>items=[]<for_stmt>key,value self.items()<block_start>item='%s: %s'%(repr(key) repr(value))<line_sep>items.append(item)<block_end><return>"{%s}"%", ".join(items)<block_end><block_end><def_stmt>assert_instance obj class_ label<block_start>"""Raises ArgumentError when `obj` is not instance of `cls`"""<if_stmt><not>isinstance(obj class_)<block_start><raise>ModelInconsistencyError("%s should be sublcass of %s, "<concat>"provided: %s"%(label class_.__name__ type(obj).__name__))<block_end><block_end><def_stmt>assert_all_instances list_ class_ label="object"<block_start>"""Raises ArgumentError when objects in `list_` are not instances of `cls`"""<for_stmt>obj list_<or>[]<block_start>assert_instance(obj class_ label="object")<block_end><block_end><class_stmt>MissingPackageError(Exception)<block_start>"""Exception raised when encountered a missing 
package."""<line_sep><pass><block_end><class_stmt>MissingPackage(object)<block_start>"""Bogus class to handle missing optional packages - packages that are not necessarily required for Cubes, but are needed for certain features."""<def_stmt>__init__ self package feature=<none> source=<none> comment=<none><block_start>self.package=package<line_sep>self.feature=feature<line_sep>self.source=source<line_sep>self.comment=comment<block_end><def_stmt>__call__ self *args **kwargs<block_start>self._fail()<block_end><def_stmt>__getattr__ self name<block_start>self._fail()<block_end><def_stmt>_fail self<block_start><if_stmt>self.feature<block_start>use=" to be able to use: %s"%self.feature<block_end><else_stmt><block_start>use=""<block_end><if_stmt>self.source<block_start>source=" from %s"%self.source<block_end><else_stmt><block_start>source=""<block_end><if_stmt>self.comment<block_start>comment=". %s"%self.comment<block_end><else_stmt><block_start>comment=""<block_end><raise>MissingPackageError("Optional package '%s' is not installed. "<concat>"Please install the package%s%s%s"%(self.package source use comment))<block_end><block_end><def_stmt>optional_import name feature=<none> source=<none> comment=<none><block_start>"""Optionally import package `name`. 
If package does not exist, import a placeholder object, that raises an exception with more detailed description about the missing package."""<try_stmt><block_start><return>__import__(name)<block_end><except_stmt>ImportError<block_start><return>MissingPackage(name feature source comment)<block_end><block_end><def_stmt>expand_dictionary record separator='.'<block_start>"""Return expanded dictionary: treat keys are paths separated by `separator`, create sub-dictionaries as necessary"""<line_sep>result={}<for_stmt>key,value record.items()<block_start>current=result<line_sep>path=key.split(separator)<for_stmt>part path[:-1]<block_start><if_stmt>part<not><in>current<block_start>current[part]={}<block_end>current=current[part]<block_end>current[path[-1]]=value<block_end><return>result<block_end><def_stmt>localize_common obj trans<block_start>"""Localize common attributes: label and description"""<if_stmt>"label"<in>trans<block_start>obj.label=trans["label"]<block_end><if_stmt>"description"<in>trans<block_start>obj.description=trans["description"]<block_end><block_end><def_stmt>localize_attributes attribs translations<block_start>"""Localize list of attributes. 
`translations` should be a dictionary with keys as attribute names, values are dictionaries with localizable attribute metadata, such as ``label`` or ``description``."""<for_stmt>(name atrans) translations.items()<block_start>attrib=attribs[name]<line_sep>localize_common(attrib atrans)<block_end><block_end><def_stmt>get_localizable_attributes obj<block_start>"""Returns a dictionary with localizable attributes of `obj`."""<line_sep># FIXME: use some kind of class attribute to get list of localizable attributes locale={}<try_stmt><block_start><if_stmt>obj.label<block_start>locale["label"]=obj.label<block_end><block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start><if_stmt>obj.description<block_start>locale["description"]=obj.description<block_end><block_end><except_stmt><block_start><pass><block_end><return>locale<block_end><def_stmt>decamelize name<block_start>s1=re.sub('(.)([A-Z][a-z]+)' r'\1 \2' name)<line_sep><return>re.sub('([a-z0-9])([A-Z])' r'\1 \2' s1)<block_end><def_stmt>to_identifier name<block_start><return>re.sub(r' ' r'_' name).lower()<block_end><def_stmt>to_label name capitalize=<true><block_start>"""Converts `name` into label by replacing underscores by spaces. If `capitalize` is ``True`` (default) then the first letter of the label is capitalized."""<line_sep>label=name.replace("_" " ")<if_stmt>capitalize<block_start>label=label.capitalize()<block_end><return>label<block_end><def_stmt>coalesce_option_value value value_type label=<none><block_start>"""Convert string into an object value of `value_type`. The type might be: `string` (no conversion), `integer`, `float`, `list` – comma separated list of strings. 
"""<line_sep>value_type=value_type.lower()<try_stmt><block_start><if_stmt>value_type<in>('string' 'str')<block_start>return_value=str(value)<block_end><elif_stmt>value_type<eq>'list'<block_start><if_stmt>isinstance(value compat.string_type)<block_start>return_value=value.split(",")<block_end><else_stmt><block_start>return_value=list(value)<block_end><block_end><elif_stmt>value_type<eq>"float"<block_start>return_value=float(value)<block_end><elif_stmt>value_type<in>["integer" "int"]<block_start>return_value=int(value)<block_end><elif_stmt>value_type<in>["bool" "boolean"]<block_start><if_stmt><not>value<block_start>return_value=<false><block_end><elif_stmt>isinstance(value compat.string_type)<block_start>return_value=value.lower()<in>["1" "true" "yes" "on"]<block_end><else_stmt><block_start>return_value=bool(value)<block_end><block_end><else_stmt><block_start><raise>ArgumentError("Unknown option value type %s"%value_type)<block_end><block_end><except_stmt>ValueError<block_start><if_stmt>label<block_start>label="parameter %s "%label<block_end><else_stmt><block_start>label=""<block_end><raise>ArgumentError("Unable to convert %svalue '%s' into type %s"%(label astring value_type))<block_end><return>return_value<block_end><def_stmt>coalesce_options options types<block_start>"""Coalesce `options` dictionary according to types dictionary. Keys in `types` refer to keys in `options`, values of `types` are value types: string, list, float, integer or bool."""<line_sep>out={}<for_stmt>key,value options.items()<block_start><if_stmt>key<in>types<block_start>out[key]=coalesce_option_value(value types[key] key)<block_end><else_stmt><block_start>out[key]=value<block_end><block_end><return>out<block_end><def_stmt>read_json_file path kind=<none><block_start>"""Read a JSON from `path`. 
This is convenience function that provides more descriptive exception handling."""<line_sep>kind="%s "%str(kind)<if>kind<else>""<if_stmt><not>os.path.exists(path)<block_start><raise>ConfigurationError("Can not find %sfile '%s'"%(kind path))<block_end><try_stmt><block_start>f=compat.open_unicode(path)<block_end><except_stmt>IOError<block_start><raise>ConfigurationError("Can not open %sfile '%s'"%(kind path))<block_end><try_stmt><block_start>content=json.load(f)<block_end><except_stmt>ValueError<as>e<block_start><raise>SyntaxError("Syntax error in %sfile %s: %s"%(kind path str(e)))<block_end><finally_stmt><block_start>f.close()<block_end><return>content<block_end><def_stmt>sorted_dependencies graph<block_start>"""Return keys from `deps` ordered by dependency (topological sort). `deps` is a dictionary where keys are strings and values are list of strings where keys is assumed to be dependant on values. Example:: A ---> B -+--> C | +--> D --> E Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}`` """<line_sep>graph=dict((key set(value))<for>key,value graph.items())<line_sep># L ← Empty list that will contain the sorted elements L=[]<line_sep># S ← Set of all nodes with no dependencies (incoming edges) S=set(parent<for>parent,req graph.items()<if><not>req)<while_stmt>S# remove a node n from S <block_start>n=S.pop()<line_sep># insert n into L L.append(n)<line_sep># for each node m with an edge e from n to m do # (n that depends on m) parents=[parent<for>parent,req graph.items()<if>n<in>req]<for_stmt>parent parents<block_start>graph[parent].remove(n)<line_sep># remove edge e from the graph # if m has no other incoming edges then insert m into S <if_stmt><not>graph[parent]<block_start>S.add(parent)<block_end><block_end><block_end># if graph has edges then -> error nonempty=[k<for>k,v graph.items()<if>v]<if_stmt>nonempty<block_start><raise>ArgumentError("Cyclic dependency of: %s"%", ".join(nonempty))<block_end><return>L<block_end>
import torch
import torch.nn as nn
import torch.nn.functional as F

from modules import Conv, ResBlock


class Wavenet_Student(nn.Module):
    """Parallel-WaveNet style student: a stack of inverse autoregressive
    flows (IAFs), each realized as a small non-causal/causal WaveNet
    (`Wavenet_Flow`) that predicts a per-timestep affine transform."""

    # NOTE(review): mutable default argument `num_blocks_student` is shared
    # across calls; harmless here because it is only read (len/index).
    def __init__(self, num_blocks_student=[1, 1, 1, 1, 1, 1], num_layers=10,
                 front_channels=32, residual_channels=64, gate_channels=128,
                 skip_channels=64, kernel_size=3, cin_channels=80, causal=True):
        super(Wavenet_Student, self).__init__()
        self.num_blocks = num_blocks_student
        self.num_flow = len(self.num_blocks)          # one flow per entry
        self.num_layers = num_layers

        # Each flow outputs 2 channels: (mu, log-scale) for the affine step.
        self.iafs = nn.ModuleList()
        for i in range(self.num_flow):
            self.iafs.append(Wavenet_Flow(out_channels=2,
                                          num_blocks=self.num_blocks[i],
                                          num_layers=self.num_layers,
                                          front_channels=front_channels,
                                          residual_channels=residual_channels,
                                          gate_channels=gate_channels,
                                          skip_channels=skip_channels,
                                          kernel_size=kernel_size,
                                          cin_channels=cin_channels,
                                          causal=causal))

    def forward(self, z, c):
        """Run the full IAF stack on noise `z` conditioned on `c`."""
        return self.iaf(z, c)

    def iaf(self, z, c_up):
        """Apply each flow in turn; returns the transformed signal plus the
        composed affine parameters (mu_tot, logs_tot) of the whole stack."""
        mu_tot, logs_tot = 0., 0.
        for i, iaf in enumerate(self.iafs):
            mu_logs = iaf(z, c_up)
            # Channel 0 is mu, channel 1 is log-scale; the last timestep is
            # dropped and the sequence re-padded below — presumably to keep
            # the transform causal (output t depends on z_<t) — TODO confirm.
            mu = mu_logs[:, 0:1, :-1]
            logs = mu_logs[:, 1:, :-1]
            # Compose this flow's affine transform with the running total.
            mu_tot = mu_tot * torch.exp(logs) + mu
            logs_tot = logs_tot + logs
            z = z[:, :, 1:] * torch.exp(logs) + mu
            # Left-pad one zero sample to restore the original length.
            z = F.pad(z, pad=(1, 0), mode='constant', value=0)
        return z, mu_tot, logs_tot

    def receptive_field(self):
        """Total receptive field of the stacked flows (fields compose, the
        shared first sample is counted once)."""
        receptive_field = 1
        for iaf in self.iafs:
            receptive_field += iaf.receptive_field_size() - 1
        return receptive_field

    def generate(self, z, c_up):
        """Synthesize audio from noise `z`; discards the affine parameters."""
        x, _, _ = self.iaf(z, c_up)
        return x

    def remove_weight_norm(self):
        """Strip weight normalization from every flow (inference-time)."""
        for iaf in self.iafs:
            iaf.remove_weight_norm()


class Wavenet_Flow(nn.Module):
    """A single WaveNet used as one IAF step: front conv → stack of dilated
    residual blocks with local conditioning → 1x1 output convs."""

    def __init__(self, out_channels=1, num_blocks=1, num_layers=10,
                 front_channels=32, residual_channels=64, gate_channels=32,
                 skip_channels=None, kernel_size=3, cin_channels=80, causal=True):
        super(Wavenet_Flow, self).__init__()

        self.causal = causal
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        self.front_channels = front_channels
        self.out_channels = out_channels
        self.gate_channels = gate_channels
        self.residual_channels = residual_channels
        self.skip_channels = skip_channels
        self.cin_channels = cin_channels
        self.kernel_size = kernel_size

        # Wide front convolution lifting the 1-channel waveform into the
        # residual channel space.
        self.front_conv = nn.Sequential(
            Conv(1, self.residual_channels, self.front_channels, causal=self.causal),
            nn.ReLU())
        self.res_blocks = nn.ModuleList()
        # NOTE(review): res_blocks_fast is created but never populated or
        # used in this file — possibly reserved for a fast-inference path.
        self.res_blocks_fast = nn.ModuleList()
        # num_blocks repetitions of a dilation cycle 2^0 .. 2^(num_layers-1).
        for b in range(self.num_blocks):
            for n in range(self.num_layers):
                self.res_blocks.append(ResBlock(self.residual_channels, self.gate_channels, self.skip_channels,
                                                self.kernel_size, dilation=2 ** n,
                                                cin_channels=self.cin_channels, local_conditioning=True,
                                                causal=self.causal, mode='SAME'))
        # Two 1x1 convolutions turn the summed skip connections into the
        # (mu, log-scale) output channels.
        self.final_conv = nn.Sequential(nn.ReLU(),
                                        Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal),
                                        nn.ReLU(),
                                        Conv(self.skip_channels, self.out_channels, 1, causal=self.causal))

    def forward(self, x, c):
        return self.wavenet(x, c)

    def wavenet(self, tensor, c=None):
        """Standard WaveNet pass: accumulate skip outputs over all residual
        blocks, then project with the final convs."""
        h = self.front_conv(tensor)
        skip = 0
        for i, f in enumerate(self.res_blocks):
            h, s = f(h, c)
            skip += s
        out = self.final_conv(skip)
        return out

    def receptive_field_size(self):
        """Receptive field in samples: dilated-conv contributions (doubled
        when non-causal) plus the front convolution's width."""
        num_dir = 1 if self.causal else 2
        dilations = [2 ** (i % self.num_layers) for i in range(self.num_layers * self.num_blocks)]
        return num_dir * (self.kernel_size - 1) * sum(dilations) + 1 + (self.front_channels - 1)

    def remove_weight_norm(self):
        for f in self.res_blocks:
            f.remove_weight_norm()
#! /usr/bin/env python <import_from_future_stmt> print_function<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>argparse<def_stmt>generate_csv start_index fname<block_start>cols=[str('A'+str(i))<for>i range(start_index NUM_COLS+start_index)]<line_sep>data=[]<for_stmt>i range(NUM_ROWS)<block_start>vals=(np.random.choice(NUM_DISTINCT_VALS)<for>j range(NUM_COLS))<line_sep>data.append(vals)<block_end>df=pd.DataFrame(data=data columns=cols)<line_sep>df.to_csv(fname index=<false> header=<true>)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Generate sample tables to test joins.')<line_sep>parser.add_argument('--num-rows' '-r' type=int default=100)<line_sep>parser.add_argument('--num-cols' '-c' type=int required=<true>)<line_sep>parser.add_argument('--num-distinct-vals' '-d' type=int required=<true>)<line_sep>parser.add_argument('--num-cols-overlap' '-o' type=int default=1)<line_sep>args=parser.parse_args()<line_sep>NUM_ROWS=args.num_rows<line_sep>NUM_COLS=args.num_cols<line_sep>NUM_DISTINCT_VALS=args.num_distinct_vals<line_sep>num_overlap=args.num_cols_overlap<if_stmt>num_overlap<g>NUM_COLS<block_start>print('--num-cols-overlap cannot be greater than --num-cols')<import_stmt>sys<line_sep>sys.exit(1)<block_end>generate_csv(0 'table_a.csv')<line_sep>generate_csv(NUM_COLS-num_overlap 'table_b.csv')<block_end>
"""Test suite for the {{ cookiecutter.project_slug_no_hyphen }} package."""<line_sep>
# SPDX-License-Identifier: MIT # Copyright (c) 2018-2020 The Pybricks Authors """Pybricks robotics module."""<import_from_stmt>_pybricks.robotics DriveBase<line_sep>
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Policy evaluation."""

import typing

import tensorflow.compat.v2 as tf


def evaluate(env,
             policy,
             num_episodes=10,
             ctx_length=None,
             embed_training_window=None,
             state_mask_fn=None):  # pylint: disable=g-bare-generic
    """Evaluates the policy.

    Args:
      env: Environment to evaluate the policy on.
      policy: Policy to evaluate.
      num_episodes: A number of episodes to average the policy on.
      ctx_length: number of previous steps to compute context from.
      embed_training_window: window size used during embed training.
      state_mask_fn: state masking function for partially obs envs.

    Returns:
      Averaged reward and a total number of steps.
    """
    total_timesteps = 0
    total_returns = 0.0

    def apply_mask(observation):
        # Optionally mask the raw observation (partially observable envs);
        # otherwise pass it through unchanged.
        if state_mask_fn:
            return tf.convert_to_tensor(state_mask_fn(observation.numpy()))
        return observation

    for _ in range(num_episodes):
        timestep = env.reset()
        if ctx_length:
            # Seed the context window with copies of the first observation,
            # zero actions and zero rewards.
            states = [apply_mask(timestep.observation) for _ in range(ctx_length)]
            actions = [tf.zeros(policy.action_spec.shape)[None, :] for _ in range(ctx_length)]
            rewards = [[0.] for _ in range(ctx_length)]
        latent_action = None
        i = 0
        while not timestep.is_last():
            # Reset the latent action at the start of every embedding window;
            # window sizes <= 2 reset on every step.
            if embed_training_window and (i % embed_training_window == 0 or
                                          embed_training_window <= 2):
                latent_action = None
            if ctx_length:
                # Slide the fixed-size context window (oldest entries drop
                # off the front) and act on the stacked history.
                states.append(apply_mask(timestep.observation))
                if len(states) > ctx_length:
                    states.pop(0)
                    actions.pop(0)
                    rewards.pop(0)
                action = policy.act(tf.stack(states, axis=1),
                                    actions=tf.stack(actions, axis=1),
                                    rewards=tf.stack(rewards, axis=1))
                actions.append(action)
            else:
                if embed_training_window:
                    # Latent-conditioned policy: carry the latent action
                    # forward within the window.
                    action, latent_action = policy.act(
                        apply_mask(timestep.observation),
                        latent_action=latent_action)
                else:
                    action = policy.act(apply_mask(timestep.observation))
            timestep = env.step(action)
            if ctx_length:
                rewards.append(timestep.reward)
            # reward is batched; index 0 is this (single) environment.
            total_returns += timestep.reward[0]
            total_timesteps += 1
            i += 1
    # Per-episode averages of return and episode length.
    return total_returns / num_episodes, total_timesteps / num_episodes
# Copyright (c) Uber Technologies, Inc. and its affiliates.
# Copyright (c) 2021 Kuaishou AI Platform & DS3 Lab.
#
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from distutils.version import LooseVersion

import torch
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm

import bagua.torch_api as bagua
from bagua.torch_api.communication import allgather, allreduce

# Backward compat for old PyTorch: make @torch.jit.unused a no-op decorator
# when the attribute does not exist.
if not hasattr(torch.jit, "unused"):
    torch.jit.unused = lambda x: x

# Version gates for the (non-public) torch batch-norm kernels whose
# signatures changed across releases; see backward() below.
_SYNC_BN_V2 = (
    LooseVersion(torch.__version__) >= LooseVersion("1.5.0")
    and LooseVersion(torch.__version__) <= LooseVersion("1.6.0")
)
_SYNC_BN_V3 = LooseVersion(torch.__version__) >= LooseVersion("1.6.0")
_SYNC_BN_V4 = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")


class SyncBatchNorm(_BatchNorm):
    r"""Applies synchronous BatchNorm for distributed module with N-dimensional BatchNorm layer(s).
    See `BatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html?highlight=batchnorm#torch.nn.BatchNorm2d>`_ for more details.

    Arguments:
        num_features: Number of channels :math:`C` from the shape :math:`(N, C, ...)`.
        eps: A value added to the denominator for numerical stability. Default: 1e-5.
        momentum: The value used for the running_mean and running_var computation. Can be set to ``None`` for
            cumulative moving average (i.e. simple average). Default: 0.1.
        affine: A boolean value that when set to ``True``, this module has learnable affine parameters.
            Default: ``True``.
        track_running_stats: A boolean value that when set to ``True``, this module tracks the running mean and
            variance, and when set to ``False``, this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``True``.

    .. note:: Only GPU input tensors are supported in the training mode.
    """

    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.1,
        affine=True,
        track_running_stats=True,
    ):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)

    def _check_input_dim(self, input):
        # BatchNorm needs at least (N, C); anything less cannot carry a
        # channel dimension.
        if input.dim() < 2:
            raise ValueError(
                "expected at least 2D input (got {}D input)".format(input.dim())
            )

    def _run_bn(self, input):
        # Plain single-process batch norm using this module's buffers and
        # affine parameters.
        return F.batch_norm(
            input,
            self.running_mean,
            self.running_var,
            self.weight,
            self.bias,
            self.training or not self.track_running_stats,
            self.momentum,
            self.eps,
        )

    @torch.jit.unused
    def _maybe_run_sync_bn(self, input):
        # With a single worker there is nothing to synchronize — fall back
        # to ordinary batch norm.
        if bagua.get_world_size() == 1:
            return self._run_bn(input)
        return _SyncBatchNorm.apply(
            input,
            self.weight,
            self.bias,
            self.running_mean,
            self.running_var,
            self.eps,
            self.momentum,
        )

    def forward(self, input):
        # currently only GPU input is supported by underlying kernel from PyTorch
        if not input.is_cuda:
            raise ValueError("SyncBatchNorm expected input tensor to be on GPU")

        self._check_input_dim(input)

        if self.training and self.track_running_stats:
            assert self.num_batches_tracked is not None
            self.num_batches_tracked = self.num_batches_tracked + 1

        # Eval with tracked stats uses the running buffers; otherwise the
        # (possibly cross-worker) batch statistics are used.
        if not self.training and self.track_running_stats:
            return self._run_bn(input)
        else:
            return self._maybe_run_sync_bn(input)

    @classmethod
    def convert_sync_batchnorm(cls, module):
        r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
        `torch.nn.SyncBatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html?highlight=syncbatchnorm#torch.nn.SyncBatchNorm>`_ layers.

        Arguments:
            module (nn.Module): Module containing one or more :attr:`BatchNorm*D` layers

        Returns:
            The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm` layers.
            If the original :attr:`module` is a :attr:`BatchNorm*D` layer, a new
            :class:`torch.nn.SyncBatchNorm` layer object will be returned instead.

        .. note:: This function must be called before :meth:`~bagua.torch_api.distributed.BaguaModule.with_bagua` method.

        Example::
            >>> # Network with nn.BatchNorm layer
            >>> model = torch.nn.Sequential(
            ...     torch.nn.Linear(D_in, H),
            ...     torch.nn.ReLU(),
            ...     torch.nn.Linear(H, D_out),
            ... )
            >>> optimizer = torch.optim.SGD(
            ...     model.parameters(),
            ...     lr=0.01,
            ...     momentum=0.9
            ... )
            >>> sync_bn_model = bagua.torch_api.contrib.sync_batchnorm.SyncBatchNorm.convert_sync_batchnorm(model)
            >>> bagua_model = sync_bn_model.with_bagua([optimizer], GradientAllReduce())
        """
        module_output = module

        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            # Rebuild the layer as a SyncBatchNorm and copy parameters and
            # buffers over so training state is preserved.
            module_output = SyncBatchNorm(
                module.num_features,
                module.eps,
                module.momentum,
                module.affine,
                module.track_running_stats,
            )
            if module.affine:
                with torch.no_grad():
                    module_output.weight = module.weight
                    module_output.bias = module.bias
            module_output.running_mean = module.running_mean
            module_output.running_var = module.running_var
            module_output.num_batches_tracked = module.num_batches_tracked
            # Preserve quantization config when present.
            if hasattr(module, "qconfig"):
                module_output.qconfig = module.qconfig
        # Recurse into children so nested BatchNorm layers are converted too.
        for name, child in module.named_children():
            module_output.add_module(name, cls.convert_sync_batchnorm(child))
        del module
        return module_output


class _SyncBatchNorm(Function):
    # Custom autograd Function implementing cross-worker batch norm.
    # NOTE: the first positional argument named `self` in forward/backward is
    # the autograd *context* object (save_for_backward/needs_input_grad are
    # called on it), not an instance.

    @staticmethod
    def forward(self, input, weight, bias, running_mean, running_var, eps, momentum):
        input = input.contiguous()

        # Elements per channel on this worker.
        size = input.numel() // input.size(1)
        count = torch.tensor([size])

        # calculate mean/invstd for input.
        mean, invstd = torch.batch_norm_stats(input, eps)
        count, mean, invstd = count.cuda(), mean.cuda(), invstd.cuda()

        # Allocate per-rank receive buffers and gather every worker's local
        # count/mean/invstd.
        nums_ranks = bagua.get_world_size()
        count_all = torch.tensor(
            [torch.empty_like(count).cpu().detach().numpy() for _ in range(nums_ranks)]
        ).cuda()
        mean_all = torch.tensor(
            [torch.empty_like(mean).cpu().detach().numpy() for _ in range(nums_ranks)]
        ).cuda()
        invstd_all = torch.tensor(
            [torch.empty_like(invstd).cpu().detach().numpy() for _ in range(nums_ranks)]
        ).cuda()

        allgather(count.unsqueeze(0), count_all)
        allgather(mean.unsqueeze(0), mean_all)
        allgather(invstd.unsqueeze(0), invstd_all)

        if _SYNC_BN_V3:
            counts_for_bngswc = count_all.view(-1).float().to(input.device)
        else:
            # backwards compatibility: older kernel expects a Python list
            counts_for_bngswc = count_all.view(-1).tolist()

        # calculate global mean & invstd (also updates the running buffers)
        mean, invstd = torch.batch_norm_gather_stats_with_counts(
            input,
            mean_all,
            invstd_all,
            running_mean,
            running_var,
            momentum,
            eps,
            counts_for_bngswc,
        )

        self.save_for_backward(input, weight, mean, invstd, count_all)

        # apply element-wise normalization
        return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)

    @staticmethod
    def backward(self, grad_output):
        grad_output = grad_output.contiguous()
        saved_input, weight, mean, invstd, count_all = self.saved_tensors
        need_input_grad, need_weight_grad, need_bias_grad = self.needs_input_grad[0:3]

        # calculate local stats as well as grad_weight / grad_bias
        sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
            grad_output,
            saved_input,
            mean,
            invstd,
            weight,
            need_input_grad,
            need_weight_grad,
            need_bias_grad,
        )

        if need_input_grad:
            # synchronizing stats used to calculate input gradient.
            allreduce(sum_dy, sum_dy)
            allreduce(sum_dy_xmu, sum_dy_xmu)

            if _SYNC_BN_V4:
                # from 1.9.0 on we need a count tensor on all devices
                # count_all is calculated as total count across all ranks in forward function
                count_all = count_all.to(dtype=torch.int, device=grad_output.device)
            elif _SYNC_BN_V2 or _SYNC_BN_V3:
                # before 1.9.0 we need the count as an integer to compute means values
                count = count_all.sum()
            else:
                # before 1.5.0, sum_dy was sum of means from every worker, so we just
                # need to divide it by number of workers
                count = bagua.get_world_size()

            # backward pass for gradient calculation
            # we are calling into a non-public undocumented function which broke moving to 1.9.0
            # https://github.com/pytorch/pytorch/issues/57900
            if _SYNC_BN_V4:
                # from 1.9.0 on, sums and count parameters expected
                grad_input = torch.batch_norm_backward_elemt(
                    grad_output,
                    saved_input,
                    mean,
                    invstd,
                    weight,
                    sum_dy,
                    sum_dy_xmu,
                    count_all,
                )
            else:
                # before 1.9.0, mean parameters expected, not sums and count
                grad_input = torch.batch_norm_backward_elemt(
                    grad_output,
                    saved_input,
                    mean,
                    invstd,
                    weight,
                    sum_dy / count,
                    sum_dy_xmu / count,
                )
        else:
            grad_input = None

        # synchronizing of grad_weight / grad_bias is not needed as distributed
        # training would handle all reduce.
        if weight is None or not need_weight_grad:
            grad_weight = None
        if weight is None or not need_bias_grad:
            grad_bias = None

        return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
""" Created on July 20, 2020 Updated on May 19, 2021 model: Product-based Neural Networks for User Response Prediction @author: <NAME>(<EMAIL>) """<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.keras Model<import_from_stmt>tensorflow.keras.regularizers l2<import_from_stmt>tensorflow.keras.layers Embedding Dense Layer Dropout Input<import_from_stmt>modules DNN<class_stmt>PNN(Model)<block_start><def_stmt>__init__ self feature_columns hidden_units mode='in' dnn_dropout=0. activation='relu' embed_reg=1e-6 w_z_reg=1e-6 w_p_reg=1e-6 l_b_reg=1e-6<block_start>""" Product-based Neural Networks :param feature_columns: A list. sparse column feature information. :param hidden_units: A list. Neural network hidden units. :param mode: A string. 'in' IPNN or 'out'OPNN. :param activation: A string. Activation function of dnn. :param dnn_dropout: A scalar. Dropout of dnn. :param embed_reg: A scalar. The regularizer of embedding. :param w_z_reg: A scalar. The regularizer of w_z_ in product layer :param w_p_reg: A scalar. The regularizer of w_p in product layer :param l_b_reg: A scalar. 
The regularizer of l_b in product layer """<line_sep>super(PNN self).__init__()<line_sep># inner product or outer product self.mode=mode<line_sep>self.sparse_feature_columns=feature_columns<line_sep># the number of feature fields self.field_num=len(self.sparse_feature_columns)<line_sep>self.embed_dim=self.sparse_feature_columns[0]['embed_dim']<line_sep># The embedding dimension of each feature field must be the same self.embed_layers={'embed_'+str(i):Embedding(input_dim=feat['feat_num'] input_length=1 output_dim=feat['embed_dim'] embeddings_initializer='random_uniform' embeddings_regularizer=l2(embed_reg))<for>i,feat enumerate(self.sparse_feature_columns)}<line_sep># parameters self.w_z=self.add_weight(name='w_z' shape=(self.field_num self.embed_dim hidden_units[0]) initializer='random_uniform' regularizer=l2(w_z_reg) trainable=<true>)<if_stmt>mode<eq>'in'<block_start>self.w_p=self.add_weight(name='w_p' shape=(self.field_num<times>(self.field_num-1)<floordiv>2 self.embed_dim hidden_units[0]) initializer='random_uniform' reguarizer=l2(w_p_reg) trainable=<true>)<block_end># out <else_stmt><block_start>self.w_p=self.add_weight(name='w_p' shape=(self.field_num<times>(self.field_num-1)<floordiv>2 self.embed_dim self.embed_dim hidden_units[0]) initializer='random_uniform' regularizer=l2(w_p_reg) trainable=<true>)<block_end>self.l_b=self.add_weight(name='l_b' shape=(hidden_units[0] ) initializer='random_uniform' regularizer=l2(l_b_reg) trainable=<true>)<line_sep># dnn self.dnn_network=DNN(hidden_units[1:] activation dnn_dropout)<line_sep>self.dense_final=Dense(1)<block_end><def_stmt>call self inputs<block_start>sparse_inputs=inputs<line_sep>sparse_embed=[self.embed_layers['embed_{}'.format(i)](sparse_inputs[: i])<for>i range(sparse_inputs.shape[1])]<line_sep>sparse_embed=tf.transpose(tf.convert_to_tensor(sparse_embed) [1 0 2])# (None, field_num, embed_dim) # product layer row=[]<line_sep>col=[]<for_stmt>i range(len(self.sparse_feature_columns)-1)<block_start><for_stmt>j 
range(i+1 len(self.sparse_feature_columns))<block_start>row.append(i)<line_sep>col.append(j)<block_end><block_end>p=tf.gather(sparse_embed row axis=1)<line_sep>q=tf.gather(sparse_embed col axis=1)<if_stmt>self.mode<eq>'in'<block_start>l_p=tf.tensordot(p<times>q self.w_p axes=2)# (None, hidden[0]) <block_end><else_stmt># out <block_start>u=tf.expand_dims(q 2)# (None, field_num(field_num-1)/2, 1, emb_dim) v=tf.expand_dims(p 2)# (None, field_num(field_num-1)/2, 1, emb_dim) l_p=tf.tensordot(tf.matmul(tf.transpose(u [0 1 3 2]) v) self.w_p axes=3)<block_end># (None, hidden[0]) l_z=tf.tensordot(sparse_embed self.w_z axes=2)# (None, hidden[0]) l_1=tf.nn.relu(tf.concat([l_z+l_p+self.l_b] axis=-1))<line_sep># dnn layer dnn_x=self.dnn_network(l_1)<line_sep>outputs=tf.nn.sigmoid(self.dense_final(dnn_x))<line_sep><return>outputs<block_end><def_stmt>summary self<block_start>sparse_inputs=Input(shape=(len(self.sparse_feature_columns) ) dtype=tf.int32)<line_sep>Model(inputs=sparse_inputs outputs=self.call(sparse_inputs)).summary()<block_end><block_end>
import time
import sys
import os
import numpy as np

import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence

from model.base_torch import BaseModel
from model.utils.general import init_dir, get_logger
from model.utils.general import Progbar
from model.utils.general import Config
from model.utils.general import minibatches
from model.components.SimpleCNN import SimpleCNN
from model.components.ResNet import ResNet9
from model.components.DenseNet import DenseNet169
from model.components.seq2seq_torch import EncoderCNN, DecoderWithAttention, Img2Seq
from model.evaluation.text import score_files, truncate_end, write_answers
from model.utils.image import pad_batch_images_2
from model.utils.text import pad_batch_formulas

from torch.utils.data import Dataset
import h5py
import json

from model.utils.data_generator import DataGenerator


class ImgFormulaDataset(Dataset):
    """
    A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.
    """

    def __init__(self, data_generator: DataGenerator, transform=None):
        """
        :param data_folder: folder where data files are stored
        :param data_name: base name of processed datasets
        :param split: split, one of 'TRAIN', 'VAL', or 'TEST'
        :param transform: image transform pipeline
        """
        self.data_generator = data_generator

        # PyTorch transformation pipeline for the image (normalizing, etc.)
        # NOTE(review): `transform` is stored but never applied in
        # __getitem__ (the application is commented out below).
        self.transform = transform

    def __getitem__(self, i):
        # Remember, the Nth caption corresponds to the (N // captions_per_image)th image
        (img, formula) = self.data_generator.__getitem__(i)
        # Pad the single image to a fixed canvas — presumably 800x800x1
        # grayscale; confirm against pad_batch_images_2.
        img = pad_batch_images_2([img], [800, 800, 1])
        # img = torch.tensor(img, dtype=torch.int8)  # (N, W, H, C)
        # img = img.squeeze(0)
        # img = img.permute(2, 0, 1)  # (C, W, H)

        # if self.transform is not None:
        #     img = self.transform(img)

        # formula = torch.tensor(formula, dtype=torch.int)
        # (C, W, H), (TOKEN)
        return img, formula

    def __len__(self):
        return len(self.data_generator)


class Img2SeqModel(BaseModel):
    """Image-to-LaTeX sequence model: CNN encoder + attention decoder,
    trained/evaluated via the BaseModel epoch hooks."""

    def __init__(self, config, dir_output, vocab):
        super(Img2SeqModel, self).__init__(config, dir_output)
        self._vocab = vocab

    def getModel(self, model_name="CNN"):
        """Instantiate the requested backbone; for "Img2Seq" this also
        creates self.encoder/self.decoder as side effects."""
        if model_name == "CNN":
            return SimpleCNN()
        elif model_name == "ResNet9":
            return ResNet9()
        elif model_name == "DenseNet169":
            return DenseNet169(pretrained=True)
        elif model_name == "Img2Seq":
            self.encoder = EncoderCNN(self._config)
            self.decoder = DecoderWithAttention(attention_dim=512,
                                                embed_dim=512,
                                                decoder_dim=512,
                                                vocab_size=self._vocab.n_tok,
                                                dropout=0.5)
            return Img2Seq(self._config, self._vocab)
        # NOTE(review): falls through returning None for unknown names.

    def getOptimizer(self, lr_method='adam', lr=0.001):
        # Separate Adam optimizers for encoder and decoder; the base-class
        # optimizer is still created for self.model.
        self.encoder_optimizer = torch.optim.Adam(params=self.encoder.parameters(), lr=lr)
        self.decoder_optimizer = torch.optim.Adam(params=self.decoder.parameters(), lr=lr)
        return super().getOptimizer(lr_method=lr_method, lr=lr)

    def _run_train_epoch(self, config, train_set, val_set, epoch, lr_schedule):
        """Performs an epoch of training

        Args:
            config: Config instance
            train_set: Dataset instance
            val_set: Dataset instance
            epoch: (int) id of the epoch, starting at 0
            lr_schedule: LRSchedule instance that takes care of learning proc

        Returns:
            score: (float) model will select weights that achieve the highest
                score
        """
        # logging
        batch_size = config.batch_size
        nbatches = (len(train_set) + batch_size - 1) // batch_size
        prog = Progbar(nbatches)
        self.model.train()
        self.encoder.train()
        self.decoder.train()

        # NOTE(review): this DataLoader is constructed but never iterated —
        # batching actually happens through minibatches() below.
        train_loader = torch.utils.data.DataLoader(ImgFormulaDataset(train_set),
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=3,
                                                   pin_memory=True)

        # for i, (img, formula) in enumerate(train_loader):
        for i, (img, formula) in enumerate(minibatches(train_set, batch_size)):
            img = pad_batch_images_2(img)
            img = torch.FloatTensor(img)  # (N, W, H, C)
            formula, formula_length = pad_batch_formulas(formula,
                                                         self._vocab.id_pad,
                                                         self._vocab.id_end)
            img = img.permute(0, 3, 1, 2)  # (N, C, W, H)
            formula = torch.LongTensor(formula)  # (N,)

            loss_eval = self.getLoss(img, formula=formula, lr=lr_schedule.lr,
                                     dropout=config.dropout, training=True)
            prog.update(i + 1, [("loss", loss_eval), ("lr", lr_schedule.lr)])

            # update learning rate
            lr_schedule.update(batch_no=epoch * nbatches + i)

        self.logger.info("- Training: {}".format(prog.info))

        # evaluation
        config_eval = Config({"dir_answers": self._dir_output + "formulas_val/",
                              "batch_size": config.batch_size})
        scores = self.evaluate(config_eval, val_set)
        score = scores["perplexity"]
        lr_schedule.update(score=score)

        return score

    def getLoss(self, img, formula, lr, dropout, training=True):
        """One optimization step: forward, attention-regularized CE loss,
        backward, optimizer step.  Returns the negated loss value."""
        # Move to GPU, if available
        img = img.to(self.device)
        formula = formula.to(self.device)

        # Forward prop.
        imgs = self.encoder(img)
        scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(
            imgs, formula, torch.LongTensor([[len(i)] for i in formula]))

        # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
        targets = caps_sorted[:, 1:]

        # Remove timesteps that we didn't decode at, or are pads
        # pack_padded_sequence is an easy trick to do this
        scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
        targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)

        # Calculate loss
        loss = self.criterion(scores, targets)

        alpha_c = 1.
        # Add doubly stochastic attention regularization
        loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()

        # Back prop.
        self.decoder_optimizer.zero_grad()
        if self.encoder_optimizer is not None:
            self.encoder_optimizer.zero_grad()
        loss.backward()

        # Update weights
        self.decoder_optimizer.step()
        if self.encoder_optimizer is not None:
            self.encoder_optimizer.step()

        # NOTE(review): the sign flip makes "higher is better" for the
        # progress bar/scheduler — confirm against BaseModel's conventions.
        return -loss.item()

    def _run_evaluate_epoch(self, config, test_set):
        """Performs an epoch of evaluation

        Args:
            test_set: Dataset instance
            params: (dict) with extra params in it
                - "dir_name": (string)

        Returns:
            scores: (dict) scores["acc"] = 0.85 for instance
        """
        self.model.eval()
        self.encoder.eval()
        self.decoder.eval()

        # initialize containers of references and predictions
        if self._config.decoding == "greedy":
            refs, hyps = [], [[]]
        elif self._config.decoding == "beam_search":
            refs, hyps = [], [[] for i in range(self._config.beam_size)]

        references = list()  # references (true captions) for calculating BLEU-4 score
        hypotheses = list()  # hypotheses (predictions)

        with torch.no_grad():
            nbatches = len(test_set)
            prog = Progbar(nbatches)
            # NOTE(review): unused, like the train loader above.
            test_loader = torch.utils.data.DataLoader(ImgFormulaDataset(test_set),
                                                      batch_size=nbatches,
                                                      shuffle=True,
                                                      num_workers=3,
                                                      pin_memory=True)

            for i, (img, formula) in enumerate(minibatches(test_set, nbatches)):
                # print(type(img), len(img), img[0].shape)
                # print(type(formula), formula)
                # Move to GPU, if available
                img = pad_batch_images_2(img)
                img = torch.FloatTensor(img)  # (N, W, H, C)
                formula, formula_length = pad_batch_formulas(formula,
                                                             self._vocab.id_pad,
                                                             self._vocab.id_end)
                img = img.permute(0, 3, 1, 2)  # (N, C, W, H)
                formula = torch.LongTensor(formula)  # (N,)
                img = img.to(self.device)
                formula = formula.to(self.device)

                # Forward prop.
                imgs = self.encoder(img)
                scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(
                    imgs, formula, torch.LongTensor([[len(i)] for i in formula]))

                # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
                targets = caps_sorted[:, 1:]

                # Remove timesteps that we didn't decode at, or are pads
                # pack_padded_sequence is an easy trick to do this
                scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
                targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)

                # Calculate loss
                loss = self.criterion(scores, targets)
                print(scores.shape, targets.shape)
                print(loss)

                alpha_c = 1.
                # Add doubly stochastic attention regularization
                loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
                loss_eval = loss.item()
                prog.update(i + 1, [("loss", loss_eval), ("perplexity", np.exp(loss_eval))])

                # Store references (true captions), and hypothesis (prediction) for each image
                # If for n images, we have n hypotheses, and references a, b, c... for each image, we need -
                # references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], hypotheses = [hyp1, hyp2, ...]
                # print("---------------------------------------------------------------formula and prediction :")
                # NOTE(review): the inner `i` shadows the batch index `i`
                # used by prog.update above — works only because prog.update
                # runs first each iteration.
                for form, preds in zip(formula, scores):
                    refs.append(form)
                    # print(form, " ---------- ", preds[0])
                    for i, pred in enumerate(preds):
                        hyps[i].append(pred)

            files = write_answers(refs, hyps, self._vocab.id_to_tok,
                                  config.dir_answers, self._vocab.id_end)
            scores = score_files(files[0], files[1])
            # perp = - np.exp(ce_words / float(n_words))
            # scores["perplexity"] = perp

        self.logger.info("- Evaluating: {}".format(prog.info))
        # NOTE(review): returns the *last batch's* loss as "perplexity",
        # and the score_files result is discarded — verify intent.
        return {"perplexity": loss.item()}

    def predict_batch(self, images):
        preds = []
        images = images.to(self.device)
        outputs = self.model(images)
        _, predicted = torch.max(outputs.data, 1)
        pr = outputs[:, 1].detach().cpu().numpy()
        for i in pr:
            preds.append(i)
        return preds

    def predict(self, img):
        # NOTE(review): passes a plain list, but predict_batch calls
        # `.to(self.device)` on it — this would fail for a Python list;
        # a stacked tensor is presumably expected.
        return self.predict_batch([img])
# encoding: utf-8 # # Copyright (c) 2019 <NAME> <<EMAIL>> # # MIT Licence. See http://opensource.org/licenses/MIT # # Created on 2019-09-06 # """Overlay check mark on icons."""<import_from_future_stmt> print_function absolute_import<import_from_stmt>Cocoa NSBitmapImageRep NSPNGFileType NSImage NSMakeSize NSCompositeCopy NSSizeToCGSize NSZeroPoint <import_from_stmt>CoreGraphics CGRectZero<def_stmt>overlay src overlay dest<block_start>"""Create image ``dest`` by putting ``overlay`` on top of ``src``. Args: src (str): Path to source image. overlay (str): Path to overlay image. dest (str): Path to save combined image to. """<line_sep>src=NSImage.alloc().initWithContentsOfFile_(src)<line_sep>overlay=NSImage.alloc().initWithContentsOfFile_(overlay)<line_sep>img=NSImage.alloc().initWithSize_(src.size())<line_sep>img.lockFocus()<line_sep>rect=(0 0) src.size()<line_sep>src.drawInRect_(rect)<line_sep>overlay.drawInRect_(rect)<line_sep>img.unlockFocus()<line_sep>rep=NSBitmapImageRep.imageRepWithData_(img.TIFFRepresentation())<line_sep>data=rep.representationUsingType_properties_(NSPNGFileType {})<line_sep>data.writeToFile_atomically_(dest <false>)<block_end>
""" ========================================================== Fitting model on imbalanced datasets and how to fight bias ========================================================== This example illustrates the problem induced by learning on datasets having imbalanced classes. Subsequently, we compare different approaches alleviating these negative effects. """<line_sep># Authors: <NAME> <<EMAIL>> # License: MIT # %% print(__doc__)<line_sep># %% [markdown] # Problem definition # ------------------ # # We are dropping the following features: # # - "fnlwgt": this feature was created while studying the "adult" dataset. # Thus, we will not use this feature which is not acquired during the survey. # - "education-num": it is encoding the same information than "education". # Thus, we are removing one of these 2 features. # %% <import_from_stmt>sklearn.datasets fetch_openml<line_sep>df,y=fetch_openml("adult" version=2 as_frame=<true> return_X_y=<true>)<line_sep>df=df.drop(columns=["fnlwgt" "education-num"])<line_sep># %% [markdown] # The "adult" dataset as a class ratio of about 3:1 # %% classes_count=y.value_counts()<line_sep>classes_count<line_sep># %% [markdown] # This dataset is only slightly imbalanced. To better highlight the effect of # learning from an imbalanced dataset, we will increase its ratio to 30:1 # %% <import_from_stmt>imblearn.datasets make_imbalance<line_sep>ratio=30<line_sep>df_res,y_res=make_imbalance(df y sampling_strategy={classes_count.idxmin():classes_count.max()<floordiv>ratio} )<line_sep>y_res.value_counts()<line_sep># %% [markdown] # We will perform a cross-validation evaluation to get an estimate of the test # score. # # As a baseline, we could use a classifier which will always predict the # majority class independently of the features provided. 
# %% <import_from_stmt>sklearn.model_selection cross_validate<import_from_stmt>sklearn.dummy DummyClassifier<line_sep>dummy_clf=DummyClassifier(strategy="most_frequent")<line_sep>scoring=["accuracy" "balanced_accuracy"]<line_sep>cv_result=cross_validate(dummy_clf df_res y_res scoring=scoring)<line_sep>print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}")<line_sep># %% [markdown] # Instead of using the accuracy, we can use the balanced accuracy which will # take into account the balancing issue. # %% print(f"Balanced accuracy score of a dummy classifier: "<concat>f"{cv_result['test_balanced_accuracy'].mean():.3f}")<line_sep># %% [markdown] # Strategies to learn from an imbalanced dataset # ---------------------------------------------- # We will use a dictionary and a list to continuously store the results of # our experiments and show them as a pandas dataframe. # %% index=[]<line_sep>scores={"Accuracy":[] "Balanced accuracy":[]}<line_sep># %% [markdown] # Dummy baseline # .............. # # Before to train a real machine learning model, we can store the results # obtained with our :class:`~sklearn.dummy.DummyClassifier`. # %% <import_stmt>pandas<as>pd<line_sep>index<augadd>["Dummy classifier"]<line_sep>cv_result=cross_validate(dummy_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% [markdown] # Linear classifier baseline # .......................... # # We will create a machine learning pipeline using a # :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard, # we will need to one-hot encode the categorical columns and standardized the # numerical columns before to inject the data into the # :class:`~sklearn.linear_model.LogisticRegression` classifier. 
# # First, we define our numerical and categorical pipelines. # %% <import_from_stmt>sklearn.impute SimpleImputer<import_from_stmt>sklearn.preprocessing StandardScaler<import_from_stmt>sklearn.preprocessing OneHotEncoder<import_from_stmt>sklearn.pipeline make_pipeline<line_sep>num_pipe=make_pipeline(StandardScaler() SimpleImputer(strategy="mean" add_indicator=<true>))<line_sep>cat_pipe=make_pipeline(SimpleImputer(strategy="constant" fill_value="missing") OneHotEncoder(handle_unknown="ignore") )<line_sep># %% [markdown] # Then, we can create a preprocessor which will dispatch the categorical # columns to the categorical pipeline and the numerical columns to the # numerical pipeline # %% <import_from_stmt>sklearn.compose make_column_transformer<import_from_stmt>sklearn.compose make_column_selector<as>selector<line_sep>preprocessor_linear=make_column_transformer((num_pipe selector(dtype_include="number")) (cat_pipe selector(dtype_include="category")) n_jobs=2 )<line_sep># %% [markdown] # Finally, we connect our preprocessor with our # :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our # model. # %% <import_from_stmt>sklearn.linear_model LogisticRegression<line_sep>lr_clf=make_pipeline(preprocessor_linear LogisticRegression(max_iter=1000))<line_sep># %% index<augadd>["Logistic regression"]<line_sep>cv_result=cross_validate(lr_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% [markdown] # We can see that our linear model is learning slightly better than our dummy # baseline. However, it is impacted by the class imbalance. # # We can verify that something similar is happening with a tree-based model # such as :class:`~sklearn.ensemble.RandomForestClassifier`. 
With this type of # classifier, we will not need to scale the numerical data, and we will only # need to ordinal encode the categorical data. # %% <import_from_stmt>sklearn.preprocessing OrdinalEncoder<import_from_stmt>sklearn.ensemble RandomForestClassifier<line_sep>num_pipe=SimpleImputer(strategy="mean" add_indicator=<true>)<line_sep>cat_pipe=make_pipeline(SimpleImputer(strategy="constant" fill_value="missing") OrdinalEncoder(handle_unknown="use_encoded_value" unknown_value=-1) )<line_sep>preprocessor_tree=make_column_transformer((num_pipe selector(dtype_include="number")) (cat_pipe selector(dtype_include="category")) n_jobs=2 )<line_sep>rf_clf=make_pipeline(preprocessor_tree RandomForestClassifier(random_state=42 n_jobs=2))<line_sep># %% index<augadd>["Random forest"]<line_sep>cv_result=cross_validate(rf_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% [markdown] # The :class:`~sklearn.ensemble.RandomForestClassifier` is as well affected by # the class imbalanced, slightly less than the linear model. Now, we will # present different approach to improve the performance of these 2 models. # # Use `class_weight` # .................. # # Most of the models in `scikit-learn` have a parameter `class_weight`. This # parameter will affect the computation of the loss in linear model or the # criterion in the tree-based model to penalize differently a false # classification from the minority and majority class. We can set # `class_weight="balanced"` such that the weight applied is inversely # proportional to the class frequency. We test this parametrization in both # linear model and tree-based model. 
# %% lr_clf.set_params(logisticregression__class_weight="balanced")<line_sep>index<augadd>["Logistic regression with balanced class weights"]<line_sep>cv_result=cross_validate(lr_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% rf_clf.set_params(randomforestclassifier__class_weight="balanced")<line_sep>index<augadd>["Random forest with balanced class weights"]<line_sep>cv_result=cross_validate(rf_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% [markdown] # We can see that using `class_weight` was really effective for the linear # model, alleviating the issue of learning from imbalanced classes. However, # the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward # the majority class, mainly due to the criterion which is not suited enough to # fight the class imbalance. # # Resample the training set during learning # ......................................... # # Another way is to resample the training set by under-sampling or # over-sampling some of the samples. `imbalanced-learn` provides some samplers # to do such processing. 
# %% <import_from_stmt>imblearn.pipeline make_pipeline<as>make_pipeline_with_sampler<import_from_stmt>imblearn.under_sampling RandomUnderSampler<line_sep>lr_clf=make_pipeline_with_sampler(preprocessor_linear RandomUnderSampler(random_state=42) LogisticRegression(max_iter=1000) )<line_sep># %% index<augadd>["Under-sampling + Logistic regression"]<line_sep>cv_result=cross_validate(lr_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% rf_clf=make_pipeline_with_sampler(preprocessor_tree RandomUnderSampler(random_state=42) RandomForestClassifier(random_state=42 n_jobs=2) )<line_sep># %% index<augadd>["Under-sampling + Random forest"]<line_sep>cv_result=cross_validate(rf_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% [markdown] # Applying a random under-sampler before the training of the linear model or # random forest, allows to not focus on the majority class at the cost of # making more mistake for samples in the majority class (i.e. decreased # accuracy). # # We could apply any type of samplers and find which sampler is working best # on the current dataset. # # Instead, we will present another way by using classifiers which will apply # sampling internally. # # Use of specific balanced algorithms from imbalanced-learn # ......................................................... # # We already showed that random under-sampling can be effective on decision # tree. However, instead of under-sampling once the dataset, one could # under-sample the original dataset before to take a bootstrap sample. 
This is # the base of the :class:`imblearn.ensemble.BalancedRandomForestClassifier` and # :class:`~imblearn.ensemble.BalancedBaggingClassifier`. # %% <import_from_stmt>imblearn.ensemble BalancedRandomForestClassifier<line_sep>rf_clf=make_pipeline(preprocessor_tree BalancedRandomForestClassifier(random_state=42 n_jobs=2) )<line_sep># %% index<augadd>["Balanced random forest"]<line_sep>cv_result=cross_validate(rf_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% [markdown] # The performance with the # :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than # applying a single random under-sampling. We will use a gradient-boosting # classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`. <import_from_stmt>sklearn.experimental enable_hist_gradient_boosting# noqa <import_from_stmt>sklearn.ensemble HistGradientBoostingClassifier<import_from_stmt>imblearn.ensemble BalancedBaggingClassifier<line_sep>bag_clf=make_pipeline(preprocessor_tree BalancedBaggingClassifier(base_estimator=HistGradientBoostingClassifier(random_state=42) n_estimators=10 random_state=42 n_jobs=2 ) )<line_sep>index<augadd>["Balanced bag of histogram gradient boosting"]<line_sep>cv_result=cross_validate(bag_clf df_res y_res scoring=scoring)<line_sep>scores["Accuracy"].append(cv_result["test_accuracy"].mean())<line_sep>scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())<line_sep>df_scores=pd.DataFrame(scores index=index)<line_sep>df_scores<line_sep># %% [markdown] # This last approach is the most effective. The different under-sampling allows # to bring some diversity for the different GBDT to learn and not focus on a # portion of the majority class.
# This sample tests the checker's ability to enforce
# type invariance for type arguments.

# pyright: strict

from typing import Dict, Union

foo: Dict[Union[int, str], str] = {}
bar: Dict[str, str] = {}

# This should generate an error because
# both type parameters for Dict are invariant,
# and str isn't assignable to Union[int, str].
foo = bar
# coding: utf-8
# quote from kmaiya/HQAutomator
# The Google-search part is carried over from the original, unmodified.
import time
import json
import requests
import webbrowser

# Questions already seen, so we open at most one search per new question.
questions = []


def get_answer():
    """Poll the quiz API once and open a web search for any new question.

    Returns:
        str: a short status message describing what happened.
    """
    resp = requests.get('http://htpmsg.jiecaojingxuan.com/msg/current', timeout=4).text
    resp_dict = json.loads(resp)
    if resp_dict['msg'] == 'no data':
        return 'Waiting for question...'
    # FIX: the original re-parsed the payload with eval(str(resp)), which
    # executes untrusted network input; json.loads above is already sufficient.
    question = resp_dict['data']['event']['desc']
    # Strip the leading question number ("1.") and the trailing "?".
    question = question[question.find('.') + 1:question.find('?')]
    if question not in questions:
        questions.append(question)
        webbrowser.open("https://www.baidu.com/s?ie=UTF-8&wd=" + question)
        # FIX: the original fell through and returned None here, which main()
        # then printed as "None"; report what was done instead.
        return 'Opened search for new question.'
    return 'Waiting for new question...'


def main():
    """Poll once per second forever, printing a timestamp and the status."""
    while True:
        print(time.strftime('%H:%M:%S', time.localtime(time.time())))
        print(get_answer())
        time.sleep(1)


if __name__ == '__main__':
    main()
"""Classes representing color entries and mappings."""<line_sep># ============================================================================= # IMPORTS # ============================================================================= <import_from_future_stmt> annotations<line_sep># Standard Library <import_stmt>re<import_from_stmt>typing TYPE_CHECKING Optional Tuple<if_stmt>TYPE_CHECKING<block_start><import_stmt>hou<block_end># ============================================================================= # CLASSES # ============================================================================= <class_stmt>StyleConstant<block_start>"""This class represents a named constant style. :param name: The constant's name. :param color: The constant's color. :param color_type: The color type. :param shape: The constant's shape. :param file_path: The path to the definition file. :return: """<def_stmt>__init__ self name:str color:hou.Color color_type:str shape:Optional[str]=<none> file_path:Optional[str]=<none> <block_start>self._color=color<line_sep>self._color_type=color_type<line_sep>self._shape=shape<line_sep>self._file_path=file_path<line_sep>self._name=name<block_end># ------------------------------------------------------------------------- # SPECIAL METHODS # ------------------------------------------------------------------------- <def_stmt>__eq__ self other<block_start><if_stmt><not>isinstance(other StyleConstant)<block_start><return>NotImplemented<block_end># For our purposes we only care if the names match. 
<return>self.name<eq>other.name<block_end><def_stmt>__hash__ self<block_start><return>hash(self.name)<block_end><def_stmt>__ne__ self other<block_start><if_stmt><not>isinstance(other StyleConstant)<block_start><return>NotImplemented<block_end><return><not>self.__eq__(other)<block_end><def_stmt>__repr__ self<block_start><return>"<StyleConstant {} ({})>".format(self.name self.color)<block_end># ------------------------------------------------------------------------- # PROPERTIES # ------------------------------------------------------------------------- @property<def_stmt>color self<arrow>hou.Color<block_start>"""The mapped color."""<line_sep><return>self._color<block_end># ------------------------------------------------------------------------- @property<def_stmt>color_type self<arrow>str<block_start>"""The mapped color type."""<line_sep><return>self._color_type<block_end># ------------------------------------------------------------------------- @property<def_stmt>file_path self<arrow>Optional[str]<block_start>"""Path the definition was from."""<line_sep><return>self._file_path<block_end># ------------------------------------------------------------------------- @property<def_stmt>name self<arrow>str<block_start>"""The name the color is mapped to."""<line_sep><return>self._name<block_end># ------------------------------------------------------------------------- @property<def_stmt>shape self<arrow>Optional[str]<block_start>"""The mapped shape."""<line_sep><return>self._shape<block_end># ------------------------------------------------------------------------- # METHODS # ------------------------------------------------------------------------- <def_stmt>apply_to_node self node:hou.Node<block_start>"""Apply styling to a node. 
:param node: Node to apply to :return: """<if_stmt>self.color<is><not><none><block_start>node.setColor(self.color)<block_end><if_stmt>self.shape<is><not><none><block_start>node.setUserData("nodeshape" self.shape)<block_end><block_end><block_end><class_stmt>StyleRule<block_start>"""This class represents a color application bound to a name. :param name: The rule's name. :param color: The rule's color. :param color_type: The rule's color type. :param shape: The rule's shape. :param file_path: The path to the definition file. :return: """<def_stmt>__init__ self name:str color:hou.Color color_type:str shape:Optional[str]=<none> file_path:Optional[str]=<none> <block_start>self._color=color<line_sep>self._color_type=color_type<line_sep>self._shape=shape<line_sep>self._file_path=file_path<line_sep>self._name=name<block_end># ------------------------------------------------------------------------- # SPECIAL METHODS # ------------------------------------------------------------------------- <def_stmt>__eq__ self other<block_start><if_stmt><not>isinstance(other StyleRule)<block_start><return>NotImplemented<block_end># For our purposes we only care if the names match. 
<return>self.name<eq>other.name<block_end><def_stmt>__hash__ self<block_start><return>hash(self.name)<block_end><def_stmt>__ne__ self other<block_start><if_stmt><not>isinstance(other StyleRule)<block_start><return>NotImplemented<block_end><return><not>self.__eq__(other)<block_end><def_stmt>__repr__ self<block_start><return>"<StyleRule {} ({})>".format(self.name self.color)<block_end><def_stmt>__str__ self<block_start>value=self._get_typed_color_value()<line_sep>components=[re.sub("\\.*0+$" "" "{:0.3f}".format(val))<for>val value]<line_sep><return>"("+", ".join(components)+")"<block_end># ------------------------------------------------------------------------- # NON-PUBLIC METHODS # ------------------------------------------------------------------------- <def_stmt>_get_typed_color_value self<arrow>Tuple[float]<block_start>"""Get the appropriately typed color values. :return: The color value in the correct type. """<line_sep>to_func=getattr(self.color self.color_type.lower())<line_sep><return>to_func()<block_end># ------------------------------------------------------------------------- # PROPERTIES # ------------------------------------------------------------------------- @property<def_stmt>color self<arrow>hou.Color<block_start>"""The mapped color."""<line_sep><return>self._color<block_end>@property<def_stmt>color_type self<arrow>str<block_start>"""The mapped color type."""<line_sep><return>self._color_type<block_end>@property<def_stmt>shape self<arrow>Optional[str]<block_start>"""The mapped shape name."""<line_sep><return>self._shape<block_end>@property<def_stmt>file_path self<arrow>Optional[str]<block_start>"""Path the definition was from."""<line_sep><return>self._file_path<block_end>@property<def_stmt>name self<arrow>str<block_start>"""The name the style is mapped to."""<line_sep><return>self._name<block_end># ------------------------------------------------------------------------- # METHODS # 
------------------------------------------------------------------------- <def_stmt>apply_to_node self node:hou.Node<block_start>"""Apply styling to a node. :param node: Node to apply to :return: """<if_stmt>self.color<is><not><none><block_start>node.setColor(self.color)<block_end><if_stmt>self.shape<is><not><none><block_start>node.setUserData("nodeshape" self.shape)<block_end><block_end><block_end><class_stmt>ConstantRule<block_start>"""This class represents a style application bound to a named constant. :param name: The rule's name. :param constant_name: The constant name. :param file_path: The path to the definition file. :return: """<def_stmt>__init__ self name:str constant_name:str file_path:Optional[str]=<none><block_start>self._constant_name=constant_name<line_sep>self._file_path=file_path<line_sep>self._name=name<block_end># ------------------------------------------------------------------------- # SPECIAL METHODS # ------------------------------------------------------------------------- <def_stmt>__eq__ self other<block_start><if_stmt><not>isinstance(other ConstantRule)<block_start><return>NotImplemented<block_end># For our purposes we only care if the names match. 
<return>self.name<eq>other.name<block_end><def_stmt>__hash__ self<block_start><return>hash((self.constant_name self.name))<block_end><def_stmt>__ne__ self other<block_start><if_stmt><not>isinstance(other ConstantRule)<block_start><return>NotImplemented<block_end><return><not>self.__eq__(other)<block_end><def_stmt>__repr__ self<block_start><return>"<ConstantRule {} ({})>".format(self.name self.constant_name)<block_end># ------------------------------------------------------------------------- # PROPERTIES # ------------------------------------------------------------------------- @property<def_stmt>constant_name self<arrow>str<block_start>"""The mapped constant."""<line_sep><return>self._constant_name<block_end>@property<def_stmt>file_path self<arrow>Optional[str]<block_start>"""Path the definition was from."""<line_sep><return>self._file_path<block_end>@property<def_stmt>name self<arrow>str<block_start>"""The name the style is mapped to."""<line_sep><return>self._name<block_end><block_end>
""" Holds global celery application state and startup / shutdown handlers. """<import_from_stmt>celery Celery<import_from_stmt>celery.app app_or_default<import_from_stmt>celery.signals beat_init worker_process_init worker_process_shutdown setup_logging <import_from_stmt>ichnaea.log configure_logging<import_from_stmt>ichnaea.taskapp.config configure_celery init_beat init_worker shutdown_worker <line_sep>@setup_logging.connect<def_stmt>setup_logging_process loglevel logfile format colorize **kwargs<block_start>"""Called at scheduler and worker setup. Configures logging using the same configuration as the webapp. """<line_sep>configure_logging()<block_end>@beat_init.connect<def_stmt>init_beat_process signal sender **kw<block_start>""" Called automatically when `celery beat` is started. Calls :func:`ichnaea.taskapp.config.init_beat`. """<line_sep>celery_app=app_or_default()<line_sep>init_beat(sender celery_app)<block_end>@worker_process_init.connect<def_stmt>init_worker_process signal sender **kw<block_start>""" Called automatically when `celery worker` is started. This is executed inside each forked worker process. Calls :func:`ichnaea.taskapp.config.init_worker`. """<line_sep># get the app in the current worker process celery_app=app_or_default()<line_sep>init_worker(celery_app)<block_end>@worker_process_shutdown.connect<def_stmt>shutdown_worker_process signal sender **kw<block_start>""" Called automatically when `celery worker` is stopped. This is executed inside each forked worker process. Calls :func:`ichnaea.taskapp.config.shutdown_worker`. """<line_sep>celery_app=app_or_default()<line_sep>shutdown_worker(celery_app)<block_end>celery_app=Celery("ichnaea.taskapp.app")<line_sep>configure_celery(celery_app)<line_sep>
#===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================

load("@onedal//dev/bazel:repos.bzl", "repos")

# Prebuilt CPU micro-MKL static libraries (%{os} expands per host platform).
micromkl_repo = repos.prebuilt_libs_repo_rule(
    includes = [
        "include",
        "%{os}/include",
    ],
    libs = [
        "%{os}/lib/intel64/libdaal_mkl_thread.a",
        "%{os}/lib/intel64/libdaal_mkl_sequential.a",
        "%{os}/lib/intel64/libdaal_vmlipp_core.a",
    ],
    build_template = "@onedal//dev/bazel/deps:micromkl.tpl.BUILD",
)

# Prebuilt SYCL (DPC++) static library variant.
micromkl_dpc_repo = repos.prebuilt_libs_repo_rule(
    includes = [
        "include",
    ],
    libs = [
        "lib/intel64/libdaal_sycl.a",
    ],
    build_template = "@onedal//dev/bazel/deps:micromkldpc.tpl.BUILD",
)
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns

# generate map
df = pd.read_csv("datasets/euvotes.csv").set_index('country')
palette = tmap(RGBA, sns.cubehelix_palette(11, start=0.2, rot=-0.75))
ranges = [20000000, 10000000, 5000000, 2000000, 1000000, 500000, 200000, 100000, 0]


def vote_color(n):
    """Map a vote count onto the palette (more votes = darker)."""
    bucket = next(i for i, threshold in enumerate(ranges) if n >= threshold)
    return palette[8 - bucket]


def colorfn(c):
    """Fill color for a country: grey when no data, white for sea/borders."""
    if c not in df.index:
        return "white" if c in ['Sea', 'Borders'] else "grey"
    return vote_color(int(df.loc[c].votes))


def labelfn(c):
    """Country label: leader surname, two-digit year and votes in millions."""
    if c not in df.index:
        return None
    row = df.loc[c]
    label = "{name} '{year}\n({votes:.2g}M)".format(
        name=row.leader.split(" ")[-1],
        year=row.year[2:],
        votes=int(row.votes) / 1000000,
    )
    return Image.from_text(label, arial(14, bold=True), align="center", padding=2)


europe = map_chart("maps/Europe.png", colorfn, labelfn)


# legend
def box(c):
    return Image.new("RGBA", (30, 30), c).place(
        Image.from_text("", arial(16, bold=True), "black", bg=c))


def range_label(n):
    return "<0.1M" if n < 100000 else ">{:.2g}M".format(n / 1000000)


vote_arr = Image.from_array(
    [[box(vote_color(n)), Image.from_text(range_label(n), arial(16), padding=(10, 0))]
     for n in ranges],
    bg="white", xalign=0)
vote_leg = Image.from_column(
    [Image.from_text("# votes", arial(16, bold=True)), vote_arr],
    bg="white", xalign=0, padding=(0, 5))
note_leg = Image.from_text(
    "Multi-party national elections for executive head or party.",
    arial(16), max_width=100, bg="white", padding=(0, 2))
legend = Image.from_column(
    [vote_leg, note_leg], bg="white", xalign=0, padding=5).pad(1, "black")
chart = europe.place(legend, align=(1, 0), padding=10)

title = Image.from_column(
    [Image.from_text("EUROPEAN POPULAR VOTE RECORDS", arial(48, bold=True)),
     Image.from_text("candidate or party with the highest absolute popular vote", arial(36))],
    bg="white")

img = Image.from_column([title, chart], bg="white", padding=2)
img.place(
    Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white",
                    padding=5).pad((1, 1, 0, 0), "black"),
    align=1, padding=10, copy=False)
img.save("output/euvotes.png")
#! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
#   copyright notice, this list of conditions and the following
#   disclaimer in the documentation and/or other materials provided
#   with the distribution.
#
# * Neither the name of the Michigan State University nor the names
#   of its contributors may be used to endorse or promote products
#   derived from this software without specific prior written
#   permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: <EMAIL>
"""
Error correct reads based on a counting hash from a diginorm step.

Output sequences will be put in inputfile.corr.

% python scripts/error-correct-pass2 <counting.ct> <data1> [ <data2> <...> ]

Use '-h' for parameter help.
"""
import sys
import os

import screed
import khmer
from khmer import Countgraph
from khmer import khmer_args
from khmer.khmer_args import FileType as khFileType

DEFAULT_CUTOFF = 2


def output_single(read, new_sequence):
    """Format *read* with *new_sequence* as a FASTQ or FASTA record."""
    name = read.name
    sequence = new_sequence

    quality = None
    if hasattr(read, 'quality'):
        # Trim quality and sequence to a common length; the alignment may
        # have lengthened the sequence.
        quality = read.quality[:len(sequence)]
        sequence = sequence[:len(quality)]

    if quality:
        assert len(sequence) == len(quality), (sequence, quality)
        return "@%s\n%s\n+\n%s\n" % (name, sequence, quality)
    return ">%s\n%s\n" % (name, sequence)


def main():
    """Parse arguments, load the count table and correct each read."""
    parser = khmer_args.build_counting_args(
        "Correct reads against an already-computed table",
        citations=['counting', 'SeqAn'])
    parser.add_argument("--trusted-cov", dest="trusted_cov", type=int,
                        default=DEFAULT_CUTOFF)
    parser.add_argument("--theta", dest="bits_theta", type=float, default=1.0)
    parser.add_argument('-o', '--output', dest='output_file',
                        help="output file for histogram; defaults to "
                             "<first filename>.corr in cwd.",
                        type=khFileType('w'), default=None)
    parser.add_argument('counts_table')
    parser.add_argument('readfile')
    args = parser.parse_args()

    print('loading counts')
    counts = Countgraph.load(args.counts_table)

    aligner = khmer.ReadAligner(counts, args.trusted_cov, args.bits_theta)

    print("trusted:", args.trusted_cov)

    corrfp = args.output_file
    if not corrfp:
        # Default output path: <readfile basename>.corr in the cwd.
        corrfp = open(os.path.basename(args.readfile) + '.corr', 'w')

    n_corrected = 0
    for n, read in enumerate(screed.open(args.readfile)):
        if n % 10000 == 0:
            print('...', n, n_corrected, file=sys.stderr)

        seq = read.sequence.replace('N', 'A')

        # build the alignment...
        score, graph_alignment, read_alignment, truncated = aligner.align(seq)

        if not truncated:
            # Keep the graph-side sequence (gaps removed) when the alignment
            # completed; count it as a correction if it differs.
            graph_seq = graph_alignment.replace("-", "")
            if graph_seq != seq:
                n_corrected += 1
            seq = graph_seq

        corrfp.write(output_single(read, seq))


if __name__ == '__main__':
    main()
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#

import logging
import re

import attr
from packageurl import PackageURL
import toml

from commoncode import filetype
from commoncode import fileutils
from packagedcode import models

"""
Handle Rust cargo crates
"""

TRACE = False

logger = logging.getLogger(__name__)

if TRACE:
    import sys
    logging.basicConfig(stream=sys.stdout)
    logger.setLevel(logging.DEBUG)


@attr.s()
class RustCargoCrate(models.Package):
    # Base package type shared by Cargo.toml and Cargo.lock manifests.
    default_type = 'cargo'
    default_primary_language = 'Rust'
    default_web_baseurl = 'https://crates.io'
    default_download_baseurl = 'https://crates.io/api/v1'
    default_api_baseurl = 'https://crates.io/api/v1'

    @classmethod
    def get_package_root(cls, manifest_resource, codebase):
        # The crate root is the directory that holds the manifest file.
        return manifest_resource.parent(codebase)

    def repository_homepage_url(self, baseurl=default_web_baseurl):
        if self.name:
            return '{}/crates/{}'.format(baseurl, self.name)

    def repository_download_url(self, baseurl=default_download_baseurl):
        # A versioned download URL needs both a name and a version.
        if self.name and self.version:
            return '{}/crates/{}/{}/download'.format(baseurl, self.name, self.version)

    def api_data_url(self, baseurl=default_api_baseurl):
        if self.name:
            return '{}/crates/{}'.format(baseurl, self.name)


@attr.s()
class CargoToml(RustCargoCrate, models.PackageManifest):

    file_patterns = ('Cargo.toml',)
    extensions = ('.toml',)

    @classmethod
    def is_manifest(cls, location):
        """
        Return True if the file at ``location`` is likely a manifest of this type.
        """
        if not filetype.is_file(location):
            return False
        return fileutils.file_name(location).lower() == 'cargo.toml'

    @classmethod
    def recognize(cls, location):
        """
        Yield one or more Package manifest objects given a file ``location``
        pointing to a package archive, manifest or similar.
        """
        toml_data = toml.load(location, _dict=dict)

        # Crate metadata lives under the [package] table.
        pkg_section = toml_data.get('package', {})

        name = pkg_section.get('name')
        version = pkg_section.get('version')

        description = pkg_section.get('description')
        if description:
            description = description.strip()

        authors = pkg_section.get('authors')
        parties = list(party_mapper(authors, party_role='author'))
        declared_license = pkg_section.get('license')

        yield cls(
            name=name,
            version=version,
            description=description,
            parties=parties,
            declared_license=declared_license,
        )


@attr.s()
class CargoLock(RustCargoCrate, models.PackageManifest):

    file_patterns = ('Cargo.lock',)
    extensions = ('.lock',)

    @classmethod
    def is_manifest(cls, location):
        """
        Return True if the file at ``location`` is likely a manifest of this type.
        """
        if not filetype.is_file(location):
            return False
        return fileutils.file_name(location).lower() == 'cargo.lock'

    @classmethod
    def recognize(cls, location):
        """
        Yield one or more Package manifest objects given a file ``location``
        pointing to a package archive, manifest or similar.
        """
        lock_data = toml.load(location, _dict=dict)

        # Each [[package]] entry in the lock file is a resolved dependency.
        deps = []
        for entry in lock_data.get('package', []):
            purl = PackageURL(
                type='crates',
                name=entry.get('name'),
                version=entry.get('version'),
            ).to_string()
            deps.append(
                models.DependentPackage(
                    purl=purl,
                    requirement=entry.get('version'),
                    scope='dependency',
                    is_runtime=True,
                    is_optional=False,
                    is_resolved=True,
                )
            )

        yield cls(dependencies=deps)


def party_mapper(party, party_role):
    """
    Yields a Party object with party of `party_role`.
    https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
    """
    for person in party:
        name, email = parse_person(person)
        yield models.Party(
            type=models.party_person,
            name=name,
            role=party_role,
            email=email,
        )


def parse_person(person):
    """
    https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional

    A "person" is an object with an optional "name" or "email" field.

    A person can be in the form:
      "author": "<NAME> <<EMAIL>>"

    For example:
    >>> p = parse_person('<NAME> <<EMAIL>>')
    >>> assert p == ('<NAME>', '<EMAIL>')
    >>> p = parse_person('<NAME>')
    >>> assert p == ('<NAME>', None)
    >>> p = parse_person('<<EMAIL>>')
    >>> assert p == (None, '<EMAIL>')
    """
    match = person_parser(person)
    if not match:
        # No leading name: fall back to the email-only pattern.
        name = None
        match = person_parser_no_name(person)
    else:
        name = match.group('name')

    email = match.group('email')

    if name:
        name = name.strip()
    if email:
        email = email.strip('<> ')

    return name, email


person_parser = re.compile(
    r'^(?P<name>[^\(<]+)'
    r'\s?'
    r'(?P<email><([^>]+)>)?'
).match

person_parser_no_name = re.compile(
    r'(?P<email><([^>]+)>)?'
).match
from ctypes import c_int

from .dll import _bind

__all__ = [
    # Enums
    "SDL_BlendMode",
    "SDL_BLENDMODE_NONE", "SDL_BLENDMODE_BLEND", "SDL_BLENDMODE_ADD",
    "SDL_BLENDMODE_MOD", "SDL_BLENDMODE_MUL", "SDL_BLENDMODE_INVALID",

    "SDL_BlendOperation",
    "SDL_BLENDOPERATION_ADD", "SDL_BLENDOPERATION_SUBTRACT",
    "SDL_BLENDOPERATION_REV_SUBTRACT", "SDL_BLENDOPERATION_MINIMUM",
    "SDL_BLENDOPERATION_MAXIMUM",

    "SDL_BlendFactor",
    "SDL_BLENDFACTOR_ZERO", "SDL_BLENDFACTOR_ONE",
    "SDL_BLENDFACTOR_SRC_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR",
    "SDL_BLENDFACTOR_SRC_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA",
    "SDL_BLENDFACTOR_DST_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR",
    "SDL_BLENDFACTOR_DST_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA",

    # Functions
    "SDL_ComposeCustomBlendMode",
]

# SDL_BlendMode: how a source is blended onto a render target.
SDL_BlendMode = c_int
SDL_BLENDMODE_NONE = 0x00000000
SDL_BLENDMODE_BLEND = 0x00000001
SDL_BLENDMODE_ADD = 0x00000002
SDL_BLENDMODE_MOD = 0x00000004
SDL_BLENDMODE_MUL = 0x00000008
SDL_BLENDMODE_INVALID = 0x7FFFFFFF

# SDL_BlendOperation: the operation combining source and destination terms.
SDL_BlendOperation = c_int
SDL_BLENDOPERATION_ADD = 0x1
SDL_BLENDOPERATION_SUBTRACT = 0x2
SDL_BLENDOPERATION_REV_SUBTRACT = 0x3
SDL_BLENDOPERATION_MINIMUM = 0x4
SDL_BLENDOPERATION_MAXIMUM = 0x5

# SDL_BlendFactor: multipliers applied to source/destination color or alpha.
SDL_BlendFactor = c_int
SDL_BLENDFACTOR_ZERO = 0x1
SDL_BLENDFACTOR_ONE = 0x2
SDL_BLENDFACTOR_SRC_COLOR = 0x3
SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR = 0x4
SDL_BLENDFACTOR_SRC_ALPHA = 0x5
SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA = 0x6
SDL_BLENDFACTOR_DST_COLOR = 0x7
SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR = 0x8
SDL_BLENDFACTOR_DST_ALPHA = 0x9
SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA = 0xA

# ctypes binding for SDL_ComposeCustomBlendMode (available since SDL 2.0.6):
# (srcColorFactor, dstColorFactor, colorOperation,
#  srcAlphaFactor, dstAlphaFactor, alphaOperation) -> SDL_BlendMode
SDL_ComposeCustomBlendMode = _bind(
    "SDL_ComposeCustomBlendMode",
    [SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation,
     SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation],
    SDL_BlendMode,
    added='2.0.6')
from six import python_2_unicode_compatible
from .base import QuickbooksManagedObject, QuickbooksTransactionEntity, Ref, CustomField, MetaData


@python_2_unicode_compatible
class CompanyCurrency(QuickbooksManagedObject, QuickbooksTransactionEntity):
    """
    QBO definition: Applicable only for those companies that enable multicurrency, a companycurrency object
    defines a currency that is active in the QuickBooks Online company. One or more companycurrency objects
    are active based on the company's multicurrency business requirements and correspond to the list
    displayed by the Currency Center in the QuickBooks Online UI
    """

    # Maps nested JSON keys to the classes used to deserialize them.
    class_dict = {
        "CustomField": CustomField,
        "MetaData": MetaData,
    }

    qbo_object_name = "CompanyCurrency"

    def __init__(self):
        super(CompanyCurrency, self).__init__()
        # Default field values for a fresh, unsaved currency object.
        self.Id = None
        self.Code = ""
        self.Name = ""
        self.Active = True
        self.CustomField = None
        self.MetaData = None

    def __str__(self):
        return self.Name

    def to_ref(self):
        """Build a Ref pointing at this currency (name/type/value triple)."""
        ref = Ref()
        ref.value = self.Id
        ref.name = self.Name
        ref.type = self.qbo_object_name
        return ref
from foundations_spec import *

from unittest.mock import call


class TestArtifactDownloader(Spec):
    """Unit tests for ArtifactDownloader's artifact-fetching behaviour."""

    mock_archiver = let_mock()
    make_directory_mock = let_patch_mock('os.makedirs')

    @let
    def source_directory(self):
        return self.faker.uri_path()

    @let
    def download_directory(self):
        return self.faker.uri_path()

    @let
    def artifact_downloader(self):
        from foundations_contrib.archiving.artifact_downloader import ArtifactDownloader
        return ArtifactDownloader(self.mock_archiver)

    @let
    def mock_foundations_files(self):
        # Framework-owned files that must never be downloaded as artifacts.
        return [
            'foundations/a',
            'foundations/b',
            'foundations_contrib/c',
            'foundations_contrib/d',
            'foundations_events/e',
            'foundations_events/f',
            'foundations_internal/g',
            'foundations_internal/h',
            'jobs/i',
            'jobs/j',
            'model_serving/k',
            'model_serving/l',
            'venv/m',
            'venv/n',
            'docker_image_version.sh',
            'download_gui_images.sh',
            'foundations_gui.sh',
            'foundations_package_manifest.yaml',
            'foundations_requirements.txt',
            'job.tgz',
            'run.env',
            'run.sh',
            'p.bin',
            'q.bin',
            'template/t',
            'template/u',
        ]

    def test_downloads_single_file_to_specified_directory(self):
        self._mock_file_list(['path/to/my/file'])

        self.artifact_downloader.download_files('', self.download_directory)

        self.mock_archiver.fetch_persisted_file.assert_called_with(
            'path/to/my/file', self.download_directory + '/path/to/my/file')

    def test_downloads_multiple_files_to_specified_directory(self):
        self._mock_file_list(['different/file', 'other/different/file'])

        self.artifact_downloader.download_files('', self.download_directory)

        first_file_download = call(
            'different/file', self.download_directory + '/different/file')
        second_file_download = call(
            'other/different/file',
            self.download_directory + '/other/different/file')
        self.mock_archiver.fetch_persisted_file.assert_has_calls(
            [first_file_download, second_file_download])

    def test_ensures_target_directory_exists(self):
        self._mock_file_list(['path/to/my/file'])

        self.artifact_downloader.download_files('', self.download_directory)

        self.make_directory_mock.assert_called_with(
            self.download_directory + '/path/to/my', exist_ok=True)

    # BUG FIX: this test was originally defined with the same name as
    # test_downloads_multiple_files_to_specified_directory above; the second
    # definition silently replaced the first so only one of them ever ran.
    # Renamed to reflect what it actually verifies (directory creation).
    def test_ensures_target_directories_exist_for_multiple_files(self):
        self._mock_file_list(['different/file', 'other/different/file'])

        self.artifact_downloader.download_files('', self.download_directory)

        first_directory_creation = call(
            self.download_directory + '/different', exist_ok=True)
        second_directory_creation = call(
            self.download_directory + '/other/different', exist_ok=True)
        self.make_directory_mock.assert_has_calls(
            [first_directory_creation, second_directory_creation])

    def test_downloads_only_files_with_specified_source_directory(self):
        self._mock_file_list(['different/file', 'other/different/file'])

        self.artifact_downloader.download_files('other/', self.download_directory)

        self.mock_archiver.fetch_persisted_file.assert_called_once_with(
            'other/different/file',
            self.download_directory + '/other/different/file')

    def test_downloads_only_files_with_specified_source_directory_with_different_source_directory(self):
        self._mock_file_list(['different/file', 'other/different/file'])

        self.artifact_downloader.download_files('different/', self.download_directory)

        self.mock_archiver.fetch_persisted_file.assert_called_once_with(
            'different/file', self.download_directory + '/different/file')

    def test_download_does_not_include_foundations_files(self):
        for foundations_file in self.mock_foundations_files:
            self._mock_file_list(['path/to/some/file', foundations_file])

            self.artifact_downloader.download_files('', self.download_directory)

            self.mock_archiver.fetch_persisted_file.assert_called_with(
                'path/to/some/file',
                self.download_directory + '/path/to/some/file')

    def test_download_includes_config_yamls(self):
        for foundations_file in self.mock_foundations_files:
            self._mock_file_list(['a.config.yaml', foundations_file])

            self.artifact_downloader.download_files('', self.download_directory)

            self.mock_archiver.fetch_persisted_file.assert_called_with(
                'a.config.yaml', self.download_directory + '/a.config.yaml')

    def _mock_file_list(self, file_list):
        # Stub the archiver's artifact listing to return the given files.
        self.mock_archiver.fetch_miscellaneous = ConditionalReturn()
        self.mock_archiver.fetch_miscellaneous.return_when(
            file_list, 'job_artifact_listing.pkl')
# -*- coding: utf-8 -*-

import os

from vilya.models.project import CodeDoubanProject
from vilya.models import git

from tests.base import TestCase
from tests.utils import mkdtemp
from vilya.libs import gyt
from vilya.libs.permdir import get_repo_root


class TestGit(TestCase):
    """Exercises GitRepo commit helpers against throwaway repositories."""

    @property
    def u(self):
        return self.addUser()

    def _path(self, name):
        """Bare-repository path for ``name`` under the repo root."""
        return os.path.join(get_repo_root(), '%s.git' % name)

    def _path_work_tree(self, name):
        """Work-tree path for ``name`` under the repo root."""
        return os.path.join(get_repo_root(), '%s.work_tree' % name)

    def _repo(self, name, bare=True):
        """Create (or reuse) a test repository and return a GitRepo for it."""
        git_path = self._path(name)
        if bare:
            work_tree_path = None
        else:
            work_tree_path = self._path_work_tree(name)
            if not os.path.exists(work_tree_path):
                os.mkdir(work_tree_path)
        try:
            CodeDoubanProject.create_git_repo(git_path)
        except:
            # Repo may already exist from a previous call; that's fine.
            pass
        return git.GitRepo(git_path, work_tree=work_tree_path)

    def _commit(self, repo, filename, content='testcontent',
                message='testmessage'):
        # TODO allow commiting more than one file
        assert os.path.exists(repo.work_tree), "repo.work_tree must exist, check if repo has been created with bare=False"  # noqa
        path = os.path.join(repo.work_tree, filename)
        dir_ = os.path.dirname(path)
        if not os.path.exists(dir_):
            os.makedirs(os.path.dirname(path))
        with open(path, 'w') as f:
            f.write(content)
        work_repo = gyt.repo(repo.path, repo.work_tree, bare=False)
        work_repo.call(['add', filename])
        work_repo.call(['commit', filename, '-m', message],
                       _env=self.env_for_git)
        return gyt.repo(repo.path).sha()

    def test_simple_commit(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        assert repo.get_src('testfile1') == ('blob', u'content1')
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'))
        assert repo.get_src('testfile1') == ('blob', u'content1 modified')

    def test_simple_commit_do_not_delete_other_files(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        self._commit(repo, 'testfile2', 'content2', 'msg2')
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'))
        assert repo.get_src('testfile1') == ('blob', u'content1 modified')
        # The untouched file must survive the single-file commit.
        type_, files = repo.get_src('')
        assert any(d['path'] == 'testfile2' for d in files), \
            "testfile2 should exists in root tree"
        assert repo.get_src('testfile2') == ('blob', u'content2')

    def test_commit_in_inner_directory(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'test/file1', 'content1', 'msg1')
        assert repo.get_src('test/file1') == ('blob', u'content1')
        repo.commit_one_file('test/file1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'))
        assert repo.get_src('test/file1') == ('blob', u'content1 modified')

    def test_create_file(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'file1', 'content1', 'msg1')
        repo.commit_one_file('file2', 'content2 created', 'create1', self.u)
        assert repo.cat('HEAD:file1') == 'content1'
        assert repo.cat('HEAD:file2') == 'content2 created'

    def test_create_first_file(self):
        repo = self._repo('test', bare=False)
        repo.commit_one_file('file1', 'content1 created', 'create1', self.u)
        assert repo.cat('HEAD:file1') == 'content1 created'

    def test_create_first_file_and_more(self):
        repo = self._repo('test', bare=False)
        for idx in (1, 2, 3, 4):
            repo.commit_one_file('file%d' % idx,
                                 'content%d created' % idx,
                                 'create%d' % idx, self.u)
        assert repo.cat('HEAD:file1') == 'content1 created'
        assert repo.cat('HEAD:file2') == 'content2 created'
        assert repo.cat('HEAD:file3') == 'content3 created'
        assert repo.cat('HEAD:file4') == 'content4 created'

    def test_commit_file_on_dirty_index(self):
        repo = self._repo('test', bare=False)
        repo.commit_one_file('file1', 'content1 created', 'create1', self.u)
        repo.commit_one_file('file2', 'content2 created', 'create2', self.u)
        repo.commit_one_file('file1', 'content1 modified', 'modify1', self.u)
        # Now artificially rewind the index tree state
        repo.call('read-tree HEAD^')
        repo.commit_one_file('file2', 'content2 modified', 'modify2', self.u)
        # the latest commit should not have anything related to file1
        assert 'file1' not in repo.call('log -p -n1')

    def test_create_file_in_dir(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'test/file1', 'content1', 'msg1')
        repo.commit_one_file('test/file2', 'content2 created', 'create1',
                             self.u)
        assert repo.cat('HEAD:test/file1') == 'content1'
        assert repo.cat('HEAD:test/file2') == 'content2 created'

    def test_simple_commit_in_branch(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        tmp_branch = repo.temp_branch_name()
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=tmp_branch)
        # Clone and verify: master keeps the old content, the temp branch
        # carries the modification.
        with mkdtemp() as tmpdir:
            gyt.call(['git', 'clone', repo.path, tmpdir])
            clone = gyt.repo(tmpdir, bare=False)
            assert clone.call('show HEAD:testfile1') == u'content1'
            clone.call('checkout master')
            assert clone.call('show HEAD:testfile1') == u'content1'
            clone.call('checkout %s' % tmp_branch)
            assert clone.call('show HEAD:testfile1') == u'content1 modified'
            clone.call('checkout master')
            assert clone.call('show HEAD:testfile1') == u'content1'

    def test_simple_commit_in_branch_in_subdir(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'test/file1', 'content1', 'msg1')
        tmp_branch = repo.temp_branch_name()
        repo.commit_one_file('test/file1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=tmp_branch)
        with mkdtemp() as tmpdir:
            gyt.call(['git', 'clone', repo.path, tmpdir])
            clone = gyt.repo(tmpdir, bare=False)
            assert clone.call('show HEAD:test/file1') == u'content1'
            clone.call('checkout master')
            assert clone.call('show HEAD:test/file1') == u'content1'
            clone.call('checkout %s' % tmp_branch)
            assert clone.call('show HEAD:test/file1') == u'content1 modified'
            clone.call('checkout master')
            assert clone.call('show HEAD:test/file1') == u'content1'

    def test_simple_commit_in_branch_creates_branch(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        assert repo.get_branches() == ['master']
        tmp_branch = repo.temp_branch_name()
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=tmp_branch)
        assert repo.get_branches() == ['master', tmp_branch]

    def test_simple_commit_in_branch_and_delete_branch(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        tmp_branch = repo.temp_branch_name()
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=tmp_branch)
        assert tmp_branch in repo.get_branches()
        repo.remove_temp_branch(tmp_branch)
        assert tmp_branch not in repo.get_branches()
        assert repo.get_branches() == ['master']

    def test_simple_commit_in_another_branch(self):
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        branch = 'mybranch'
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=branch)
        assert branch in repo.get_branches()
        assert set(repo.get_branches()) == set(['master', branch])
import numpy as np
import sys
import json
import glob
import os.path as osp
import cv2


def compute_whdr(reflectance, judgements, delta=0.1):
    """Compute the Weighted Human Disagreement Rate (WHDR) for an
    estimated reflectance image against IIW human judgements.

    :param reflectance: HxWx(C) reflectance image (values > 0 expected).
    :param judgements: parsed IIW judgement JSON with 'intrinsic_points'
        and 'intrinsic_comparisons' entries.
    :param delta: relative-lightness threshold separating "equal" from
        "darker" judgements.
    :returns: (whdr, whdr_on_equal_pairs, whdr_on_inequal_pairs), or
        None when no comparison carried any weight.
    """
    points = judgements['intrinsic_points']
    comparisons = judgements['intrinsic_comparisons']
    id_to_points = {p['id']: p for p in points}
    rows, cols = reflectance.shape[0:2]

    error_sum = 0.0
    error_equal_sum = 0.0
    error_inequal_sum = 0.0
    weight_sum = 0.0
    weight_equal_sum = 0.0
    weight_inequal_sum = 0.0

    for c in comparisons:
        # "darker" is "J_i" in our paper
        darker = c['darker']
        if darker not in ('1', '2', 'E'):
            continue

        # "darker_score" is "w_i" in our paper
        weight = c['darker_score']
        # BUG FIX: the None check must come first -- the original tested
        # `weight <= 0.0 or weight is None`, but `None <= 0.0` raises
        # TypeError on Python 3 before the None check is ever reached.
        if weight is None or weight <= 0.0:
            continue

        point1 = id_to_points[c['point1']]
        point2 = id_to_points[c['point2']]
        if not point1['opaque'] or not point2['opaque']:
            continue

        # convert to grayscale and threshold
        l1 = max(1e-10, np.mean(
            reflectance[int(point1['y'] * rows), int(point1['x'] * cols), ...]))
        l2 = max(1e-10, np.mean(
            reflectance[int(point2['y'] * rows), int(point2['x'] * cols), ...]))

        # convert algorithm value to the same units as human judgements
        if l2 / l1 > 1.0 + delta:
            alg_darker = '1'
        elif l1 / l2 > 1.0 + delta:
            alg_darker = '2'
        else:
            alg_darker = 'E'

        # Track error separately for "equal" and "inequal" judgements,
        # plus the combined WHDR numerator/denominator.
        if darker == 'E':
            if darker != alg_darker:
                error_equal_sum += weight
            weight_equal_sum += weight
        else:
            if darker != alg_darker:
                error_inequal_sum += weight
            weight_inequal_sum += weight

        if darker != alg_darker:
            error_sum += weight
        weight_sum += weight

    if weight_sum:
        return (error_sum / weight_sum), \
            error_equal_sum / (weight_equal_sum + 1e-10), \
            error_inequal_sum / (weight_inequal_sum + 1e-10)
    else:
        return None


#root = './testReal_cascade0_black_height120_width160/cascade0/iiw/'
root = 'IIW_cascade1/results_brdf2_brdf1/'
rootGt = '/home/zhl/CVPR20/Resubmission/Dataset/IIW/iiw-dataset/data/'
suffix = 'albedoBS1.png'

count = 0.0
whdr_sum = 0.0
whdr_mean = 0.0

img_list = glob.glob(osp.join(root, '*_%s' % suffix))
for img_path in img_list:
    # load CGI precomputed file
    judgement_path = osp.join(
        rootGt, img_path.split('/')[-1].split('_')[0] + '.json')
    judgements = json.load(open(judgement_path))

    count += 1.0
    ourR = cv2.imread(img_path).astype(np.float32) / 255.0
    whdr, _, _ = compute_whdr(ourR, judgements)
    whdr_sum += whdr

    print('img_path: {0}, whdr: current {1} average {2}'.format(
        img_path.split('/')[-1].split('_')[0], whdr, whdr_sum / count))

whdr_mean = whdr_sum / count
print('whdr ours: {0}'.format(whdr_mean))
# Package identity metadata for The Onion Box.
__title__ = 'The Onion Box'
__description__ = 'Dashboard to monitor Tor node operations.'
__version__ = '20.2'
# Build stamp: date | time of the release build.
__stamp__ = '20200119|095654'
import glob
import pandas as pd
from configparser import ConfigParser
import os
from simba.drop_bp_cords import *


def multiplyFreeHand(inifile, currVid):
    """Copy the ROI definitions drawn for ``currVid`` to every video in the
    project, then de-duplicate and persist them back to ROI_definitions.h5.
    """
    _, CurrVidName, ext = get_fn_ext(currVid)
    config = ConfigParser()
    configFile = str(inifile)
    config.read(configFile)
    projectPath = config.get('General settings', 'project_path')
    videoPath = os.path.join(projectPath, 'videos')
    ROIcoordinatesPath = os.path.join(
        projectPath, 'logs', 'measures', 'ROI_definitions.h5')

    # Load the stored ROI tables; bail out when none have been defined yet.
    try:
        rectanglesInfo = pd.read_hdf(ROIcoordinatesPath, key='rectangles')
        circleInfo = pd.read_hdf(ROIcoordinatesPath, key='circleDf')
        polygonInfo = pd.read_hdf(ROIcoordinatesPath, key='polygons')
    except FileNotFoundError:
        print('Cannot apply to all: no ROI definitions exists')
        return

    # Keep only the rows that belong to the current video.
    rectangularDf = rectanglesInfo.loc[rectanglesInfo['Video'] == str(CurrVidName)]
    circleDf = circleInfo.loc[circleInfo['Video'] == str(CurrVidName)]
    polygonDf = polygonInfo.loc[polygonInfo['Video'] == str(CurrVidName)]

    if (len(rectangularDf) == 0) and (len(circleDf) == 0) and (len(polygonDf) == 0):
        print('Cannot apply ROIs to all: no records exist for ' + str(CurrVidName))
        return

    videofilesFound = glob.glob(videoPath + '/*.mp4') + glob.glob(videoPath + '/*.avi')
    duplicatedRec = rectangularDf.copy()
    duplicatedCirc = circleDf.copy()
    duplicatedPoly = polygonDf.copy()

    # Re-label the copied ROI rows once per project video and append them.
    for vids in videofilesFound:
        _, CurrVidName, ext = get_fn_ext(vids)
        duplicatedRec['Video'] = CurrVidName
        duplicatedCirc['Video'] = CurrVidName
        duplicatedPoly['Video'] = CurrVidName
        rectangularDf = rectangularDf.append(duplicatedRec, ignore_index=True)
        circleDf = circleDf.append(duplicatedCirc, ignore_index=True)
        polygonDf = polygonDf.append(duplicatedPoly, ignore_index=True)

    # Drop the duplicate rows created for the source video itself.
    rectangularDf = rectangularDf.drop_duplicates(subset=['Video', 'Name'], keep="first")
    circleDf = circleDf.drop_duplicates(subset=['Video', 'Name'], keep="first")
    polygonDf = polygonDf.drop_duplicates(subset=['Video', 'Name'], keep="first")

    store = pd.HDFStore(ROIcoordinatesPath, mode='w')
    store['rectangles'] = rectangularDf
    store['circleDf'] = circleDf
    store['polygons'] = polygonDf
    store.close()
    print('ROI(s) for ' + CurrVidName + ' applied to all videos')
    print('Next, click on "draw" to modify ROI location(s) or click on "reset" to remove ROI drawing(s)')
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS
Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""  # noqa
"""A distributed rate limiter rely on redis
based on `token bucket <https://en.wikipedia.org/wiki/Token_bucket>` algorithm

Usage
~~~~~

.. code-block:: python

    # Init a redis connection pool
    import redis
    redisdb = redis.Redis()

    rate = RateLimiter(redisdb, identifier='ip=127.0.0.1 path=/get_user_info/')
    # Allow 10 requests every 1 minute
    # period also accepts seconds/minutes/hours/days as key
    rate.add_rule(tokens=10, period={'minute': 1})

    # You could add multiple rules for on limiter
    # rate.add_rule(tokens=200, period={'hour': 1})

    print rate.acquire()
    # returns {'allowed': True, 'remaining_tokens': 9.0}

"""
import time
import logging

from redis import WatchError

logger = logging.getLogger('root')


class BaseRateLimiter(object):

    def __init__(self, redisdb, identifier, namespace='', tokens=None, period=None):
        """Init a RateLimiter class

        :param redisdb: a `redis.Redis` instance
        :param str identifier: identifier for the limiter, such as an user_id etc.
        :param str namespace: namespace for redis keys
        :param int tokens: maximum tokens for one time period
        :param dict period: dict, time period, such as {'minutes': 10}
        """
        self.redisdb = redisdb
        self.identifier = identifier
        self.namespace = namespace
        self.rules = []

        # Add rule
        if tokens is not None and period:
            self.add_rule(tokens, period)

        self.prepare()

    def prepare(self):
        """Prepare to work, subclasses may register scripts etc. here."""
        pass

    def add_rule(self, tokens, period):
        """Add multiple rules for this limiter, see `__init__` for parameter details"""
        rule = Rule(tokens, Rule.period_to_seonds(period))
        self.rules.append(rule)

    def acquire(self, tokens=1):
        """Acquire for a single request

        :param int tokens: tokens to consume for this request, default to 1
        :returns: dict with keys ``allowed`` (bool) and ``remaining_tokens``
        """
        # With no rules configured everything is allowed.
        if not self.rules:
            return {'allowed': True, 'remaining_tokens': 0}

        logger.debug('Start acquiring tokens by given rules, this operation may have several '
                     'communications with redis.')
        rets = []
        for rule in self.rules:
            logger.debug('Acquiring by single rule, rule=%s tokens=%s', rule, tokens)
            ret = self.acquire_by_single_rule(rule, tokens)
            logger.debug('Acquiring finished, result=%s', ret)
            if not ret['allowed']:
                # The first rule that denies the request wins.
                logger.debug('Acquiring denied by given rule, rule=%s.', rule)
                return ret

            rets.append(ret)

        # BUG FIX: log message typo ("successed" -> "succeeded").
        logger.debug('Acquiring succeeded.')
        return {
            'allowed': True,
            'remaining_tokens': min(x['remaining_tokens'] for x in rets)
        }


class RateLimiter(BaseRateLimiter):
    """Rate limiter class"""

    def acquire_by_single_rule(self, rule, tokens=1):
        """Acquire an request quota from limiter

        :param rule: `Rule` object
        :param int tokens: tokens to be consumed, default 1
        :returns: a dict of `allowed` and `remaining_tokens`
            - allowed: whether this request is allowed
            - remaining_tokens: remaining_tokens for this rule's period
        """
        rk_tokens = 'rlim::%s::tokens::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
        rk_last_ts = 'rlim::%s::last_ts::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
        rule_ttl_seconds = rule.period_seconds + 10

        try:
            rv_last_ts = float(self.redisdb.get(rk_last_ts))
            rv_tokens = float(self.redisdb.get(rk_tokens))
        except Exception:
            # Initialize values if not exists; float(None) raises TypeError
            # when either key is missing, which is also handled here.
            rv_last_ts = time.time()
            rv_tokens = rule.tokens
            self.redisdb.set(rk_tokens, rv_tokens, ex=rule_ttl_seconds)
            self.redisdb.set(rk_last_ts, '%.3f' % rv_last_ts, ex=rule_ttl_seconds)

        # Add fresh tokens since last timestamp
        with self.redisdb.pipeline() as pipe:
            pipe.watch(rk_last_ts)

            # Float precision may cause this value negative
            # Add token by passed time
            seconds_passed = max(time.time() - rv_last_ts, 0)
            fresh_tokens = rule.fresh_tokens_by_seconds(seconds_passed)
            remaining_tokens = rv_tokens
            # Only add fresh token when it's greater than 1
            # Passed time maybe less than 1, fresh_token more than 1
            if fresh_tokens >= 1 and remaining_tokens < rule.tokens:
                # Never let tokens exceed rule.tokens
                fresh_tokens = min(fresh_tokens, rule.tokens - remaining_tokens)
                pipe.multi()
                pipe.incrbyfloat(rk_tokens, fresh_tokens)
                pipe.expire(rk_tokens, rule_ttl_seconds)
                pipe.set(rk_last_ts, '%.3f' % time.time(), ex=rule_ttl_seconds)
                # Ignore WatchError
                try:
                    pipe.execute()
                except WatchError:
                    pass

        # Remove tokens, if tokens to consume are bigger than remaining tokens, do nothing
        # and return False
        remaining_tokens = self.redisdb.incrbyfloat(rk_tokens, -tokens)
        over_limit = False
        if remaining_tokens < 0:
            # Denied: refund the tokens we just took.
            remaining_tokens = self.redisdb.incrbyfloat(rk_tokens, tokens)
            over_limit = True

        return {
            'allowed': not over_limit,
            'remaining_tokens': max(remaining_tokens, 0)
        }


class SimpleLimiter(BaseRateLimiter):

    def prepare(self):
        # Atomic INCR-with-expiry-on-first-hit, registered as a Lua script.
        self.simple_incr = self.redisdb.register_script('''\
local current
current = redis.call("incr", KEYS[1])
if tonumber(current) == 1 then
    redis.call("expire", KEYS[1], ARGV[1])
end
return current''')

    def acquire_by_single_rule(self, rule, tokens=1):
        """Acquire an request quota from limiter

        :param rule: `Rule` object
        :param int tokens: tokens to be consumed, default 1
        :returns: a dict of `allowed` and `remaining_tokens`
            - allowed: whether this request is allowed
            - remaining_tokens: remaining_tokens for this rule's period
        """
        # TODO: Should we use ( current timestamp / period_seconds ) as part of the redis key?
        rk_counter = 'rlim::%s::scounter::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
        old_cnt = self.redisdb.get(rk_counter)
        if old_cnt is not None and int(old_cnt) >= rule.tokens:
            return {'allowed': False, 'remaining_tokens': 0.0}

        new_cnt = self.simple_incr(keys=[rk_counter], args=[rule.period_seconds])
        return {'allowed': True, 'remaining_tokens': max(0, rule.tokens - new_cnt)}


class Rule(object):
    """Rule class for RateLimiter"""

    time_unit_to_seconds = {
        'second': 1,
        'minute': 60,
        'hour': 3600,
        'day': 3600 * 24,
    }

    @classmethod
    def period_to_seonds(cls, period):
        """Convert a period dict like ``{'minute': 5}`` to seconds.

        NOTE: the method name carries a historical typo; kept for
        backward compatibility, with a correctly spelled alias below.
        """
        for unit, seconds in cls.time_unit_to_seconds.items():
            if unit in period:
                period_seconds = period[unit] * seconds
                break
        else:
            raise ValueError(('Invalid period %s given, should be '
                              '{"second/minute/hour/day": NUMBER}') % period)
        return period_seconds

    # Backward-compatible, correctly spelled alias.
    period_to_seconds = period_to_seonds

    def __init__(self, tokens, period_seconds):
        self.tokens = tokens
        # Precision of seconds only to second
        self.period_seconds = int(period_seconds)

        if tokens < 0:
            # BUG FIX: logger.warn is a deprecated alias of logger.warning.
            logger.warning('Will not allow any acquire because given tokens < 0')

    def to_string(self):
        return "%s_%s" % (self.tokens, self.period_seconds)

    def fresh_tokens_by_seconds(self, seconds):
        return int(self.rate_per_seconds * seconds)

    @property
    def rate_per_seconds(self):
        return self.tokens / float(self.period_seconds)

    def __repr__(self):
        return '<Rule %s>' % self.to_string()
### # Copyright (c) 2020-2021, The Limnoria Contributors # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
###

from supybot import conf, ircutils, ircmsgs, callbacks

from supybot.i18n import PluginInternationalization

_ = PluginInternationalization("Autocomplete")


REQUEST_TAG = "+draft/autocomplete-request"
RESPONSE_TAG = "+draft/autocomplete-response"


def _commonPrefix(L):
    """Takes a list of lists, and returns their longest common prefix."""
    assert L
    # Shrink the running prefix against each list in turn.  Unlike the
    # previous index-based implementation, this is correct when one list
    # is a strict prefix of another, and does not crash (assert False)
    # when all lists are equal.
    prefix = L[0]
    for item in L[1:]:
        n = 0
        for (a, b) in zip(prefix, item):
            if a != b:
                break
            n += 1
        prefix = prefix[:n]
    return prefix


def _getAutocompleteResponse(irc, msg, payload):
    """Returns the value of the +draft/autocomplete-response tag for the
    given +draft/autocomplete-request payload."""
    tokens = callbacks.tokenize(
        payload, channel=msg.channel, network=irc.network
    )
    normalized_payload = " ".join(tokens)

    candidate_commands = _getCandidates(irc, normalized_payload)

    if len(candidate_commands) == 0:
        # No result
        return None

    elif len(candidate_commands) == 1:
        # One result, return it directly
        commands = candidate_commands

    else:
        # Multiple results, return only the longest common prefix + one word
        tokenized_candidates = [
            callbacks.tokenize(c, channel=msg.channel, network=irc.network)
            for c in candidate_commands
        ]

        common_prefix = _commonPrefix(tokenized_candidates)

        # Only index past the prefix for candidates that actually extend
        # it; a candidate equal to the prefix would raise IndexError here.
        words_after_prefix = {
            candidate[len(common_prefix)]
            for candidate in tokenized_candidates
            if len(candidate) > len(common_prefix)
        }

        commands = [
            " ".join(common_prefix + [word]) for word in words_after_prefix
        ]
        # If one candidate *is* the common prefix, offer it as well.
        if any(len(c) == len(common_prefix) for c in tokenized_candidates):
            commands.append(" ".join(common_prefix))

    # strip what the user already typed
    assert all(command.startswith(normalized_payload) for command in commands)
    normalized_payload_length = len(normalized_payload)
    response_items = [
        command[normalized_payload_length:] for command in commands
    ]

    return "\t".join(sorted(response_items))


def _getCandidates(irc, normalized_payload):
    """Returns a list of commands starting with the normalized_payload."""
    candidates = set()
    for cb in irc.callbacks:
        cb_commands = cb.listCommands()

        # Consider each command both without and with the plugin name
        # (the plugin name is optional when calling a command).  Build a
        # new list instead of mutating the one returned by listCommands(),
        # in case the callback caches and reuses it.
        plugin_name = cb.canonicalName()
        cb_commands = cb_commands + [
            plugin_name + " " + command for command in cb_commands
        ]

        candidates |= {
            command
            for command in cb_commands
            if command.startswith(normalized_payload)
        }

    return candidates


class Autocomplete(callbacks.Plugin):
    """Provides command completion for IRC clients that support it."""

    def _enabled(self, irc, msg):
        # Gated behind both the global experimental-extensions switch and
        # this plugin's own per-channel/per-network setting.
        return (
            conf.supybot.protocols.irc.experimentalExtensions()
            and self.registryValue("enabled", msg.channel, irc.network)
        )

    def doTagmsg(self, irc, msg):
        if REQUEST_TAG not in msg.server_tags:
            return
        if "msgid" not in msg.server_tags:
            return
        if not self._enabled(irc, msg):
            return

        msgid = msg.server_tags["msgid"]

        text = msg.server_tags[REQUEST_TAG]

        # using callbacks._addressed instead of callbacks.addressed, as
        # callbacks.addressed would tag the msg object as addressed, which
        # we must avoid since we are not actually replying to it here.
        payload = callbacks._addressed(irc, msg, payload=text)

        if not payload:
            # not addressed
            return

        # marks used by '_addressed' are usually prefixes (char, string,
        # nick), but may also be suffixes (with
        # supybot.reply.whenAddressedBy.nick.atEnd); but there is no way to
        # have it in the middle of the message AFAIK.
        assert payload in text

        if not text.endswith(payload):
            # If there is a suffix, it means the end of the text is used to
            # address the bot, so it can't be a method to be completed.
            return

        autocomplete_response = _getAutocompleteResponse(irc, msg, payload)
        if not autocomplete_response:
            return

        target = msg.channel or ircutils.nickFromHostmask(msg.prefix)

        irc.queueMsg(
            ircmsgs.IrcMsg(
                server_tags={
                    "+draft/reply": msgid,
                    RESPONSE_TAG: autocomplete_response,
                },
                command="TAGMSG",
                args=[target],
            )
        )


Class = Autocomplete


# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: