import torch
import torch.nn as nn


class WordDropout(nn.Module):
    """ A word dropout layer that's designed for embedded inputs (e.g., any inputs to an LSTM layer).
    Given a batch of embedded inputs, this layer randomly sets some of them to be a replacement state.
    Note that this layer assumes the last dimension of the input to be the hidden dimension of a unit.
    """

    def __init__(self, dropprob):
        super().__init__()
        self.dropprob = dropprob

    def forward(self, x, replacement=None):
        if not self.training or self.dropprob == 0:
            return x

        masksize = [y for y in x.size()]
        masksize[-1] = 1
        dropmask = torch.rand(*masksize, device=x.device) < self.dropprob

        res = x.masked_fill(dropmask, 0)
        if replacement is not None:
            res = res + dropmask.float() * replacement

        return res

    def extra_repr(self):
        return 'p={}'.format(self.dropprob)


class LockedDropout(nn.Module):
    """ A variant of dropout layer that consistently drops out the same parameters over time.
    Also known as the variational dropout.
    This implementation was modified from the LockedDropout implementation in the flair library
    (https://github.com/zalandoresearch/flair).
    """

    def __init__(self, dropprob, batch_first=True):
        super().__init__()
        self.dropprob = dropprob
        self.batch_first = batch_first

    def forward(self, x):
        if not self.training or self.dropprob == 0:
            return x

        if not self.batch_first:
            m = x.new_empty(1, x.size(1), x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
        else:
            m = x.new_empty(x.size(0), 1, x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)

        mask = m.div(1 - self.dropprob).expand_as(x)
        return mask * x

    def extra_repr(self):
        return 'p={}'.format(self.dropprob)


class SequenceUnitDropout(nn.Module):
    """ A unit dropout layer that's designed for input of sequence units (e.g., word sequence, char sequence, etc.).
    Given a sequence of unit indices, this layer randomly sets some of them to be a replacement id (usually set to be <UNK>).
    """

    def __init__(self, dropprob, replacement_id):
        super().__init__()
        self.dropprob = dropprob
        self.replacement_id = replacement_id

    def forward(self, x):
        """ :param: x must be a LongTensor of unit indices. """
        if not self.training or self.dropprob == 0:
            return x

        masksize = [y for y in x.size()]
        dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
        res = x.masked_fill(dropmask, self.replacement_id)
        return res

    def extra_repr(self):
        return 'p={}, replacement_id={}'.format(self.dropprob, self.replacement_id)
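# A minimal usage sketch of the three layers above. The batch size, sequence
# length, hidden size, drop probabilities and the replacement id are
# illustrative assumptions, not values taken from this module.
if __name__ == '__main__':
    emb = torch.randn(4, 10, 8)           # batch of 4 sequences, length 10, hidden size 8
    ids = torch.randint(0, 100, (4, 10))  # batch of unit indices

    word_drop = WordDropout(dropprob=0.3)
    locked_drop = LockedDropout(dropprob=0.3, batch_first=True)
    unit_drop = SequenceUnitDropout(dropprob=0.1, replacement_id=1)  # 1 assumed to be the <UNK> id

    # All three layers are pass-throughs unless the module is in training mode.
    for layer in (word_drop, locked_drop, unit_drop):
        layer.train()

    dropped_words = word_drop(emb)      # whole hidden vectors zeroed at random positions
    dropped_locked = locked_drop(emb)   # a single mask shared across the time dimension
    dropped_ids = unit_drop(ids)        # some indices replaced by the replacement id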
# Copyright (c)  2020  Xiaomi Corporation (author: <NAME>)
# See ../../../LICENSE for clarification regarding multiple authors

import torch
from torch.utils.dlpack import to_dlpack

from .fsa import Fsa

from _k2host import _is_valid
from _k2host import _is_top_sorted
from _k2host import _is_arc_sorted
from _k2host import _has_self_loops
from _k2host import _is_acyclic
from _k2host import _is_deterministic
from _k2host import _is_epsilon_free
from _k2host import _is_connected
from _k2host import _is_empty


def is_valid(fsa: Fsa) -> bool:
    return _is_valid(fsa.get_base())


def is_top_sorted(fsa: Fsa) -> bool:
    return _is_top_sorted(fsa.get_base())


def is_arc_sorted(fsa: Fsa) -> bool:
    return _is_arc_sorted(fsa.get_base())


def has_self_loops(fsa: Fsa) -> bool:
    return _has_self_loops(fsa.get_base())


def is_acyclic(fsa: Fsa) -> bool:
    return _is_acyclic(fsa.get_base())


def is_deterministic(fsa: Fsa) -> bool:
    return _is_deterministic(fsa.get_base())


def is_epsilon_free(fsa: Fsa) -> bool:
    return _is_epsilon_free(fsa.get_base())


def is_connected(fsa: Fsa) -> bool:
    return _is_connected(fsa.get_base())


def is_empty(fsa: Fsa) -> bool:
    return _is_empty(fsa.get_base())
""" """<line_sep># Created on 2015.08.19 # # Author: <NAME> # # Copyright 2015 - 2018 <NAME> # # This file is part of ldap3. # # ldap3 is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ldap3 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with ldap3 in the COPYING and COPYING.LESSER files. # If not, see <http://www.gnu.org/licenses/>. <import_from_stmt>pyasn1 __version__<as>pyasn1_version<import_from_stmt>pyasn1.codec.ber decoder# for usage in other modules <import_from_stmt>pyasn1.codec.ber.encoder Encoder# for monkeypatching of boolean value <import_from_stmt>..core.results RESULT_CODES<import_from_stmt>..utils.conv to_unicode<import_from_stmt>..protocol.convert referrals_to_list<line_sep>CLASSES={(<false> <false>):0 # Universal (<false> <true>):1 # Application (<true> <false>):2 # Context (<true> <true>):3}<line_sep># Private # Monkeypatching of pyasn1 for encoding Boolean with the value 0xFF for TRUE # THIS IS NOT PART OF THE FAST BER DECODER <if_stmt>pyasn1_version<eq>'xxx0.2.3'<block_start><import_from_stmt>pyasn1.codec.ber.encoder tagMap BooleanEncoder encode<import_from_stmt>pyasn1.type.univ Boolean<import_from_stmt>pyasn1.compat.octets ints2octs<class_stmt>BooleanCEREncoder(BooleanEncoder)<block_start>_true=ints2octs((255 ))<block_end>tagMap[Boolean.tagSet]=BooleanCEREncoder()<block_end><else_stmt><block_start><import_from_stmt>pyasn1.codec.ber.encoder tagMap typeMap AbstractItemEncoder<import_from_stmt>pyasn1.type.univ Boolean<import_from_stmt>copy deepcopy<class_stmt>LDAPBooleanEncoder(AbstractItemEncoder)<block_start>supportIndefLenMode=<false><if_stmt>pyasn1_version<le>'0.2.3'<block_start><import_from_stmt>pyasn1.compat.octets ints2octs<line_sep>_true=ints2octs((255 ))<line_sep>_false=ints2octs((0 ))<def_stmt>encodeValue self encodeFun value defMode maxChunkSize<block_start><return>value<and>self._true<or>self._false 0<block_end><block_end><elif_stmt>pyasn1_version<le>'0.3.1'<block_start><def_stmt>encodeValue self encodeFun value defMode maxChunkSize<block_start><return>value<and>(255 )<or>(0 ) <false> <false><block_end><block_end><elif_stmt>pyasn1_version<le>'0.3.4'<block_start><def_stmt>encodeValue self encodeFun value defMode maxChunkSize ifNotEmpty=<false><block_start><return>value<and>(255 )<or>(0 ) <false> <false><block_end><block_end><elif_stmt>pyasn1_version<le>'0.3.7'<block_start><def_stmt>encodeValue self value encodeFun **options<block_start><return>value<and>(255 )<or>(0 ) <false> <false><block_end><block_end><else_stmt><block_start><def_stmt>encodeValue self value asn1Spec encodeFun **options<block_start><return>value<and>(255 )<or>(0 ) <false> <false><block_end><block_end><block_end>customTagMap=deepcopy(tagMap)<line_sep>customTypeMap=deepcopy(typeMap)<line_sep>customTagMap[Boolean.tagSet]=LDAPBooleanEncoder()<line_sep>customTypeMap[Boolean.typeId]=LDAPBooleanEncoder()<line_sep>encode=Encoder(customTagMap customTypeMap)<block_end># end of monkey patching # a fast BER decoder for LDAP responses only <def_stmt>compute_ber_size data<block_start>""" Compute size according to BER definite length rules Returns 
size of value and value offset """<if_stmt>data[1]<le>127# BER definite length - short form. Highest bit of byte 1 is 0, message length is in the last 7 bits - Value can be up to 127 bytes long <block_start><return>data[1] 2<block_end><else_stmt># BER definite length - long form. Highest bit of byte 1 is 1, last 7 bits counts the number of following octets containing the value length <block_start>bytes_length=data[1]-128<line_sep>value_length=0<line_sep>cont=bytes_length<for_stmt>byte data[2:2+bytes_length]<block_start>cont<augsub>1<line_sep>value_length<augadd>byte<times>(256<power>cont)<block_end><return>value_length bytes_length+2<block_end><block_end><def_stmt>decode_message_fast message<block_start>ber_len,ber_value_offset=compute_ber_size(get_bytes(message[:10]))# get start of sequence, at maximum 3 bytes for length decoded=decode_sequence(message ber_value_offset ber_len+ber_value_offset LDAP_MESSAGE_CONTEXT)<line_sep><return>{'messageID':decoded[0][3] 'protocolOp':decoded[1][2] 'payload':decoded[1][3] 'controls':decoded[2][3]<if>len(decoded)<eq>3<else><none>}<block_end><def_stmt>decode_sequence message start stop context_decoders=<none><block_start>decoded=[]<while_stmt>start<l>stop<block_start>octet=get_byte(message[start])<line_sep>ber_class=CLASSES[(bool(octet&0b10000000) bool(octet&0b01000000))]<line_sep>ber_constructed=bool(octet&0b00100000)<line_sep>ber_type=octet&0b00011111<line_sep>ber_decoder=DECODERS[(ber_class octet&0b00011111)]<if>ber_class<l>2<else><none><line_sep>ber_len,ber_value_offset=compute_ber_size(get_bytes(message[start:start+10]))<line_sep>start<augadd>ber_value_offset<if_stmt>ber_decoder<block_start>value=ber_decoder(message start start+ber_len context_decoders)# call value decode function <block_end><else_stmt># try: <block_start>value=context_decoders[ber_type](message start start+ber_len)# call value decode function for context class # except KeyError: # if ber_type == 3: # Referral in result # value = decode_sequence(message, start, start + ber_len) # else: # raise # re-raise, should never happen <block_end>decoded.append((ber_class ber_constructed ber_type value))<line_sep>start<augadd>ber_len<block_end><return>decoded<block_end><def_stmt>decode_integer message start stop context_decoders=<none><block_start>first=message[start]<line_sep>value=-1<if>get_byte(first)&0x80<else>0<for_stmt>octet message[start:stop]<block_start>value=value<lshift>8|get_byte(octet)<block_end><return>value<block_end><def_stmt>decode_octet_string message start stop context_decoders=<none><block_start><return>message[start:stop]<block_end><def_stmt>decode_boolean message start stop context_decoders=<none><block_start><return><false><if>message[start:stop]<eq>0<else><true><block_end><def_stmt>decode_bind_response message start stop context_decoders=<none><block_start><return>decode_sequence(message start stop BIND_RESPONSE_CONTEXT)<block_end><def_stmt>decode_extended_response message start stop context_decoders=<none><block_start><return>decode_sequence(message start stop EXTENDED_RESPONSE_CONTEXT)<block_end><def_stmt>decode_intermediate_response message start stop context_decoders=<none><block_start><return>decode_sequence(message start stop INTERMEDIATE_RESPONSE_CONTEXT)<block_end><def_stmt>decode_controls message start stop context_decoders=<none><block_start><return>decode_sequence(message start stop CONTROLS_CONTEXT)<block_end><def_stmt>ldap_result_to_dict_fast response<block_start>response_dict=dict()<line_sep>response_dict['result']=int(response[0][3])# resultCode 
response_dict['description']=RESULT_CODES[response_dict['result']]<line_sep>response_dict['dn']=to_unicode(response[1][3] from_server=<true>)# matchedDN response_dict['message']=to_unicode(response[2][3] from_server=<true>)# diagnosticMessage <if_stmt>len(response)<eq>4<block_start>response_dict['referrals']=referrals_to_list([to_unicode(referral[3] from_server=<true>)<for>referral response[3][3]])# referrals <block_end><else_stmt><block_start>response_dict['referrals']=<none><block_end><return>response_dict<block_end>###### <if_stmt>str<is><not>bytes# Python 3 <block_start><def_stmt>get_byte x<block_start><return>x<block_end><def_stmt>get_bytes x<block_start><return>x<block_end><block_end><else_stmt># Python 2 <block_start><def_stmt>get_byte x<block_start><return>ord(x)<block_end><def_stmt>get_bytes x<block_start><return>bytearray(x)<block_end><block_end>DECODERS={# Universal (0 1):decode_boolean # Boolean (0 2):decode_integer # Integer (0 4):decode_octet_string # Octet String (0 10):decode_integer # Enumerated (0 16):decode_sequence # Sequence (0 17):decode_sequence # Set # Application (1 1):decode_bind_response # Bind response (1 4):decode_sequence # Search result entry (1 5):decode_sequence # Search result done (1 7):decode_sequence # Modify response (1 9):decode_sequence # Add response (1 11):decode_sequence # Delete response (1 13):decode_sequence # ModifyDN response (1 15):decode_sequence # Compare response (1 19):decode_sequence # Search result reference (1 24):decode_extended_response # Extended response (1 25):decode_intermediate_response # intermediate response (2 3):decode_octet_string# }<line_sep>BIND_RESPONSE_CONTEXT={7:decode_octet_string# SaslCredentials }<line_sep>EXTENDED_RESPONSE_CONTEXT={10:decode_octet_string # ResponseName 11:decode_octet_string# Response Value }<line_sep>INTERMEDIATE_RESPONSE_CONTEXT={0:decode_octet_string # IntermediateResponseName 1:decode_octet_string# IntermediateResponseValue }<line_sep>LDAP_MESSAGE_CONTEXT={0:decode_controls # Controls 3:decode_sequence# Referral }<line_sep>CONTROLS_CONTEXT={0:decode_sequence# Control }<line_sep>
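# A small self-check of the BER definite-length rules implemented in
# compute_ber_size() above; the byte strings are made-up examples, not data
# taken from an actual LDAP exchange.
if __name__ == '__main__':
    # Short form: second octet 0x05 (< 128) is the value length itself,
    # and the value starts at offset 2.
    short_form = bytearray(b'\x30\x05\x02\x01\x07')
    assert compute_ber_size(get_bytes(short_form)) == (5, 2)

    # Long form: second octet 0x82 says the next 2 octets hold the length,
    # 0x01 0x2c -> 1 * 256 + 44 = 300, and the value starts at offset 4.
    long_form = bytearray(b'\x30\x82\x01\x2c') + bytearray(300)
    assert compute_ber_size(get_bytes(long_form)) == (300, 4)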
import filters as f

from iota import TransactionHash, Address
from iota.commands import FilterCommand, RequestFilter, ResponseFilter
from iota.filters import Trytes

__all__ = [
    'GetNodeInfoCommand',
]


class GetNodeInfoCommand(FilterCommand):
    """
    Executes `getNodeInfo` command.

    See :py:meth:`iota.api.StrictIota.get_node_info`.
    """
    command = 'getNodeInfo'

    def get_request_filter(self):
        return GetNodeInfoRequestFilter()

    def get_response_filter(self):
        return GetNodeInfoResponseFilter()


class GetNodeInfoRequestFilter(RequestFilter):
    def __init__(self) -> None:
        # ``getNodeInfo`` does not accept any parameters.
        # Using a filter here just to enforce that the request is empty.
        super(GetNodeInfoRequestFilter, self).__init__({})


class GetNodeInfoResponseFilter(ResponseFilter):
    def __init__(self) -> None:
        super(GetNodeInfoResponseFilter, self).__init__({
            'coordinatorAddress':
                f.ByteString(encoding='ascii') | Trytes(Address),

            'latestMilestone':
                f.ByteString(encoding='ascii') | Trytes(TransactionHash),

            'latestSolidSubtangleMilestone':
                f.ByteString(encoding='ascii') | Trytes(TransactionHash),
        })
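# A minimal sketch of how this command is typically reached through the public
# PyOTA API; the node URL below is an assumption for illustration only.
if __name__ == '__main__':
    from iota import Iota

    api = Iota('http://localhost:14265')  # hypothetical local node

    # Dispatches the `getNodeInfo` command defined above; the response filter
    # converts the coordinator/milestone fields into Address and
    # TransactionHash instances.
    node_info = api.get_node_info()
    print(node_info['latestMilestone'])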
try:
    from public_config import *
except ImportError:
    pass

PORT = 9028
SERVICE_NAME = 'interface'
<import_stmt>asyncio<import_stmt>logging<import_stmt>synapse.exc<as>s_exc<import_stmt>synapse.lib.types<as>s_types<import_stmt>synapse.lib.module<as>s_module<import_stmt>synapse.lib.version<as>s_version<line_sep>logger=logging.getLogger(__name__)<class_stmt>Cpe23Str(s_types.Str)<block_start>''' CPE 2.3 Formatted String https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf (Section 6.2) cpe:2.3: part : vendor : product : version : update : edition : language : sw_edition : target_sw : target_hw : other * = "any" - = N/A '''<def_stmt>__init__ self modl name info opts<block_start>opts['lower']=<true><line_sep>s_types.Str.__init__(self modl name info opts)<block_end><def_stmt>_splitCpe23 self text<block_start>part=''<line_sep>parts=[]<line_sep>genr=iter(text)<try_stmt><block_start><while_stmt><true><block_start>c=next(genr)<if_stmt>c<eq>'\\'<block_start>c<augadd>next(genr)<block_end><if_stmt>c<eq>':'<block_start>parts.append(part)<line_sep>part=''<line_sep><continue><block_end>part<augadd>c<block_end><block_end><except_stmt>StopIteration<block_start>parts.append(part)<block_end><return>parts<block_end><def_stmt>_normPyStr self valu<block_start><if_stmt><not>valu.startswith('cpe:2.3:')<block_start>mesg='CPE 2.3 string is expected to start with "cpe:2.3:"'<line_sep><raise>s_exc.BadTypeValu(valu=valu mesg=mesg)<block_end>text,info=s_types.Str._normPyStr(self valu)<line_sep>parts=self._splitCpe23(text)<if_stmt>len(parts)<ne>13<block_start>mesg=f'CPE 2.3 string has {len(parts)} parts, expected 13.'<line_sep><raise>s_exc.BadTypeValu(valu=valu mesg=mesg)<block_end>subs={'part':parts[2] 'vendor':parts[3] 'product':parts[4] 'version':parts[5] 'update':parts[6] 'edition':parts[7] 'language':parts[8] 'sw_edition':parts[9] 'target_sw':parts[10] 'target_hw':parts[11] 'other':parts[12] }<line_sep><return>':'.join(parts) {'subs':subs}<block_end><block_end><class_stmt>SemVer(s_types.Int)<block_start>''' Provides support for parsing a semantic version string into its component parts. This normalizes a version string into an integer to allow version ordering. Prerelease information is disregarded for integer comparison purposes, as we cannot map an arbitrary pre-release version into a integer value Major, minor and patch levels are represented as integers, with a max width of 20 bits. The comparable integer value representing the semver is the bitwise concatenation of the major, minor and patch levels. Prerelease and build information will be parsed out and available as strings if that information is present. 
'''<def_stmt>postTypeInit self<block_start>s_types.Int.postTypeInit(self)<line_sep>self.setNormFunc(str self._normPyStr)<line_sep>self.setNormFunc(int self._normPyInt)<block_end><def_stmt>_normPyStr self valu<block_start>valu=valu.strip()<if_stmt><not>valu<block_start><raise>s_exc.BadTypeValu(valu=valu name=self.name mesg='No text left after stripping whitespace')<block_end>subs=s_version.parseSemver(valu)<if_stmt>subs<is><none><block_start><raise>s_exc.BadTypeValu(valu=valu name=self.name mesg='Unable to parse string as a semver.')<block_end>valu=s_version.packVersion(subs.get('major') subs.get('minor') subs.get('patch'))<line_sep><return>valu {'subs':subs}<block_end><def_stmt>_normPyInt self valu<block_start><if_stmt>valu<l>0<block_start><raise>s_exc.BadTypeValu(valu=valu name=self.name mesg='Cannot norm a negative integer as a semver.')<block_end><if_stmt>valu<g>s_version.mask60<block_start><raise>s_exc.BadTypeValu(valu=valu name=self.name mesg='Cannot norm a integer larger than 1152921504606846975 as a semver.')<block_end>major,minor,patch=s_version.unpackVersion(valu)<line_sep>valu=s_version.packVersion(major minor patch)<line_sep>subs={'major':major 'minor':minor 'patch':patch}<line_sep><return>valu {'subs':subs}<block_end><def_stmt>repr self valu<block_start>major,minor,patch=s_version.unpackVersion(valu)<line_sep>valu=s_version.fmtVersion(major minor patch)<line_sep><return>valu<block_end><block_end>loglevels=((10 'debug') (20 'info') (30 'notice') (40 'warning') (50 'err') (60 'crit') (70 'alert') (80 'emerg') )<class_stmt>ItModule(s_module.CoreModule)<block_start><async_keyword><def_stmt>initCoreModule self<block_start>self.model.form('it:dev:str').onAdd(self._onFormItDevStr)<line_sep>self.model.form('it:dev:pipe').onAdd(self._onFormMakeDevStr)<line_sep>self.model.form('it:dev:mutex').onAdd(self._onFormMakeDevStr)<line_sep>self.model.form('it:dev:regkey').onAdd(self._onFormMakeDevStr)<line_sep>self.model.prop('it:prod:softver:arch').onSet(self._onPropSoftverArch)<line_sep>self.model.prop('it:prod:softver:vers').onSet(self._onPropSoftverVers)<line_sep>self.model.prop('it:prod:softver:software').onSet(self._onPropSoftverSoft)<block_end><def_stmt>bruteVersionStr self valu<block_start>''' Brute force the version out of a string. Args: valu (str): String to attempt to get version information for. Notes: This first attempts to parse strings using the it:semver normalization before attempting to extract version parts out of the string. Returns: int, dict: The system normalized version integer and a subs dictionary. 
'''<try_stmt><block_start>valu,info=self.core.model.type('it:semver').norm(valu)<line_sep>subs=info.get('subs')<line_sep><return>valu subs<block_end><except_stmt>s_exc.BadTypeValu# Try doing version part extraction by noming through the string <block_start>subs=s_version.parseVersionParts(valu)<if_stmt>subs<is><none><block_start><raise>s_exc.BadTypeValu(valu=valu name='bruteVersionStr' mesg='Unable to brute force version parts out of the string')<block_end><if_stmt>subs<block_start>valu=s_version.packVersion(subs.get('major') subs.get('minor' 0) subs.get('patch' 0))<line_sep><return>valu subs<block_end><block_end><block_end><async_keyword><def_stmt>_onFormItDevStr self node<block_start><await>node.set('norm' node.ndef[1])<block_end><async_keyword><def_stmt>_onFormMakeDevStr self node<block_start>pprop=node.ndef[1]<line_sep><await>node.snap.addNode('it:dev:str' pprop)<block_end><async_keyword><def_stmt>_onPropSoftverSoft self node oldv# Check to see if name is available and set it if possible <block_start>prop=node.get('software')<if_stmt>prop<block_start>opts={'vars':{'soft':prop}}<line_sep>nodes=<await>node.snap.nodes('it:prod:soft=$soft' opts=opts)<if_stmt>nodes<block_start>name=nodes[0].get('name')<if_stmt>name<block_start><await>node.set('software:name' name)<block_end><block_end><block_end><block_end><async_keyword><def_stmt>_onPropSoftverArch self node oldv# make it:dev:str for arch <block_start>prop=node.get('arch')<if_stmt>prop<block_start><await>node.snap.addNode('it:dev:str' prop)<block_end><block_end><async_keyword><def_stmt>_onPropSoftverVers self node oldv# Set vers:norm and make it's normed valu <block_start>prop=node.get('vers')<if_stmt><not>prop<block_start><return><block_end><await>node.set('vers:norm' prop)<line_sep># Make it:dev:str from version str <await>node.snap.addNode('it:dev:str' prop)<line_sep># form the semver properly or bruteforce parts <try_stmt><block_start>valu,subs=self.bruteVersionStr(prop)<line_sep><await>node.set('semver' valu)<for_stmt>k,v subs.items()<block_start><await>node.set(f'semver:{k}' v)<block_end><block_end><except_stmt>asyncio.CancelledError# pragma: no cover <block_start><raise><block_end><except_stmt>Exception<block_start>logger.exception('Failed to brute force version string [%s]' prop)<block_end><block_end><def_stmt>getModelDefs self<block_start>modl={'ctors':(('it:semver' 'synapse.models.infotech.SemVer' {} {'doc':'Semantic Version type.' }) ('it:sec:cpe' 'synapse.models.infotech.Cpe23Str' {} {'doc':'A NIST CPE 2.3 Formatted String' }) ) 'types':(('it:hostname' ('str' {'strip':<true> 'lower':<true>}) {'doc':'The name of a host or system.' }) ('it:host' ('guid' {}) {'doc':'A GUID that represents a host or system.'}) ('it:log:event' ('guid' {}) {'doc':'A GUID representing an individual log event.' 'interfaces':('it:host:activity' ) }) ('it:network' ('guid' {}) {'doc':'A GUID that represents a logical network.'}) ('it:domain' ('guid' {}) {'doc':'A logical boundary of authentication and configuration such as a windows domain.'}) ('it:account' ('guid' {}) {'doc':'A GUID that represents an account on a host or network.'}) ('it:group' ('guid' {}) {'doc':'A GUID that represents a group on a host or network.'}) ('it:logon' ('guid' {}) {'doc':'A GUID that represents an individual logon/logoff event.'}) ('it:hosturl' ('comp' {'fields':(('host' 'it:host') ('url' 'inet:url'))}) {'doc':'A url hosted on or served by a host or system.' 
}) ('it:sec:cve' ('str' {'lower':<true> 'regex':r'(?i)^CVE-[0-9]{4}-[0-9]{4,}$'}) {'doc':'A vulnerability as designated by a Common Vulnerabilities and Exposures (CVE) number.' 'ex':'cve-2012-0158'}) ('it:sec:cwe' ('str' {'regex':r'^CWE-[0-9]{1,8}$'}) {'doc':'NIST NVD Common Weaknesses Enumeration Specification' 'ex':'CWE-120' }) ('it:mitre:attack:status' ('str' {'enums':'current,deprecated,withdrawn'}) {'doc':'A Mitre ATT&CK element status.' 'ex':'current' }) ('it:mitre:attack:group' ('str' {'regex':r'^G[0-9]{4}$'}) {'doc':'A Mitre ATT&CK Group ID.' 'ex':'G0100' }) ('it:mitre:attack:tactic' ('str' {'regex':r'^TA[0-9]{4}$'}) {'doc':'A Mitre ATT&CK Tactic ID.' 'ex':'TA0040' }) ('it:mitre:attack:technique' ('str' {'regex':r'^T[0-9]{4}(.[0-9]{3})?$'}) {'doc':'A Mitre ATT&CK Technique ID.' 'ex':'T1548' }) ('it:mitre:attack:mitigation' ('str' {'regex':r'^M[0-9]{4}$'}) {'doc':'A Mitre ATT&CK Mitigation ID.' 'ex':'M1036' }) ('it:mitre:attack:software' ('str' {'regex':r'^S[0-9]{4}$'}) {'doc':'A Mitre ATT&CK Software ID.' 'ex':'S0154' }) ('it:dev:str' ('str' {}) {'doc':'A developer-selected string.'}) ('it:dev:pipe' ('str' {}) {'doc':'A string representing a named pipe.' }) ('it:dev:mutex' ('str' {}) {'doc':'A string representing a mutex.' }) ('it:dev:int' ('int' {}) {'doc':'A developer selected integer constant.' }) ('it:dev:regkey' ('str' {}) {'doc':'A Windows registry key.' 'ex':'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run' }) ('it:dev:regval' ('guid' {}) {'doc':'A Windows registry key/value pair.' }) ('it:prod:soft' ('guid' {}) {'doc':'A arbitrary, unversioned software product.' }) ('it:adid' ('str' {'lower':<true> 'strip':<true>}) {'doc':'An advertising identification string.'}) ('it:os:windows:sid' ('str' {'regex':r'^S-1-[0-59]-\d{2}-\d{8,10}-\d{8,10}-\d{8,10}-[1-9]\d{3}$'}) {'doc':'A Microsoft Windows Security Identifier.' 'ex':'S-1-5-21-1220945662-1202665555-839525555-5555' }) ('it:os:ios:idfa' ('it:adid' {}) {'doc':'An iOS advertising identification string.'}) ('it:os:android:aaid' ('it:adid' {}) {'doc':'An android advertising identification string.'}) ('it:os:android:perm' ('str' {}) {'doc':'An android permission string.'}) ('it:os:android:intent' ('str' {}) {'doc':'An android intent string.'}) ('it:os:android:reqperm' ('comp' {'fields':(('app' 'it:prod:soft') ('perm' 'it:os:android:perm'))}) {'doc':'The given software requests the android permission.'}) ('it:os:android:ilisten' ('comp' {'fields':(('app' 'it:prod:soft') ('intent' 'it:os:android:intent'))}) {'doc':'The given software listens for an android intent.'}) ('it:os:android:ibroadcast' ('comp' {'fields':(('app' 'it:prod:soft') ('intent' 'it:os:android:intent'))}) {'doc':'The given software broadcasts the given Android intent.'}) ('it:prod:softver' ('guid' {}) {'doc':'A specific version of a software product.'}) ('it:prod:softfile' ('comp' {'fields':(('soft' 'it:prod:softver') ('file' 'file:bytes'))}) {'doc':'A file is distributed by a specific software version.'}) ('it:prod:softlib' ('comp' {'fields':(('soft' 'it:prod:softver') ('lib' 'it:prod:softver'))}) {'doc':'A software version contains a library software version.'}) ('it:prod:softos' ('comp' {'fields':(('soft' 'it:prod:softver') ('os' 'it:prod:softver'))}) {'doc':'The software version is known to be compatible with the given os software version.'}) ('it:hostsoft' ('comp' {'fields':(('host' 'it:host') ('softver' 'it:prod:softver'))}) {'doc':'A version of a software product which is present on a given host.' 
}) ('it:av:sig' ('comp' {'fields':(('soft' 'it:prod:soft') ('name' ('str' {'lower':<true>})))}) {'doc':'A signature name within the namespace of an antivirus engine name.'}) ('it:av:filehit' ('comp' {'fields':(('file' 'file:bytes') ('sig' 'it:av:sig'))}) {'doc':'A file that triggered an alert on a specific antivirus signature.' }) ('it:av:prochit' ('guid' {}) {'doc':'An instance of a process triggering an alert on a specific antivirus signature.'}) ('it:auth:passwdhash' ('guid' {}) {'doc':'An instance of a password hash.' }) ('it:exec:proc' ('guid' {}) {'doc':'A process executing on a host. May be an actual (e.g., endpoint) or virtual (e.g., malware sandbox) host.' }) ('it:exec:thread' ('guid' {}) {'doc':'A thread executing in a process.' }) ('it:exec:loadlib' ('guid' {}) {'doc':'A library load event in a process.' }) ('it:exec:mmap' ('guid' {}) {'doc':'A memory mapped segment located in a process.' }) ('it:cmd' ('str' {'strip':<true>}) {'doc':'A unique command-line string.' 'ex':'foo.exe --dostuff bar' }) ('it:exec:mutex' ('guid' {}) {'doc':'A mutex created by a process at runtime.' }) ('it:exec:pipe' ('guid' {}) {'doc':'A named pipe created by a process at runtime.' }) ('it:exec:url' ('guid' {}) {'doc':'An instance of a host requesting a URL.' }) ('it:exec:bind' ('guid' {}) {'doc':'An instance of a host binding a listening port.' }) ('it:fs:file' ('guid' {}) {'doc':'A file on a host.'}) ('it:exec:file:add' ('guid' {}) {'doc':'An instance of a host adding a file to a filesystem.' }) ('it:exec:file:del' ('guid' {}) {'doc':'An instance of a host deleting a file from a filesystem.' }) ('it:exec:file:read' ('guid' {}) {'doc':'An instance of a host reading a file from a filesystem.' }) ('it:exec:file:write' ('guid' {}) {'doc':'An instance of a host writing a file to a filesystem.' }) ('it:exec:reg:get' ('guid' {}) {'doc':'An instance of a host getting a registry key.' }) ('it:exec:reg:set' ('guid' {}) {'doc':'An instance of a host creating or setting a registry key.' }) ('it:exec:reg:del' ('guid' {}) {'doc':'An instance of a host deleting a registry key.' }) ('it:app:yara:rule' ('guid' {}) {'doc':'A YARA rule unique identifier.' }) ('it:app:yara:match' ('comp' {'fields':(('rule' 'it:app:yara:rule') ('file' 'file:bytes'))}) {'doc':'A YARA rule match to a file.' }) ('it:app:yara:procmatch' ('guid' {}) {'doc':'An instance of a YARA rule match to a process.' }) ('it:app:snort:rule' ('guid' {}) {'doc':'A snort rule unique identifier.' }) ('it:app:snort:hit' ('guid' {}) {'doc':'An instance of a snort rule hit.' }) ('it:reveng:function' ('guid' {}) {'doc':'A function inside an executable.' }) ('it:reveng:filefunc' ('comp' {'fields':(('file' 'file:bytes') ('function' 'it:reveng:function'))}) {'doc':'An instance of a function in an executable.' }) ('it:reveng:funcstr' ('comp' {'fields':(('function' 'it:reveng:function') ('string' 'str'))}) {'deprecated':<true> 'doc':'A reference to a string inside a function.' }) ('it:reveng:impfunc' ('str' {'lower':1}) {'doc':'A function from an imported library.' 
}) ) 'interfaces':(('it:host:activity' {'props':(('exe' ('file:bytes' {}) {'doc':'The executable file which caused the activity.'}) ('proc' ('it:exec:proc' {}) {'doc':'The host process which caused the activity.'}) ('thread' ('it:exec:thread' {}) {'doc':'The host thread which caused the activity.'}) ('host' ('it:host' {}) {'doc':'The host on which the activity occurred.'}) ('time' ('time' {}) {'doc':'The time that the activity started.'}) ) }) ) 'forms':(('it:hostname' {} ()) ('it:host' {} (('name' ('it:hostname' {}) {'doc':'The name of the host or system.' }) ('desc' ('str' {}) {'doc':'A free-form description of the host.' }) ('domain' ('it:domain' {}) {'doc':'The authentication domain that the host is a member of.' }) ('ipv4' ('inet:ipv4' {}) {'doc':'The last known ipv4 address for the host.'}) ('latlong' ('geo:latlong' {}) {'doc':'The last known location for the host.'}) ('place' ('geo:place' {}) {'doc':'The place where the host resides.' }) ('loc' ('loc' {}) {'doc':'The geo-political location string for the node.' }) ('os' ('it:prod:softver' {}) {'doc':'The operating system of the host.'}) ('manu' ('str' {}) {'doc':'The manufacturer of the host.' }) ('model' ('str' {}) {'doc':'The product model of the host.' }) ('serial' ('str' {}) {'doc':'The serial number of the host.' }) ('operator' ('ps:contact' {}) {'doc':'The operator of the host.' }) ('org' ('ou:org' {}) {'doc':'The org that operates the given host.' }) )) ('it:log:event' {} (('mesg' ('str' {}) {'doc':'The log messsage text.' }) ('severity' ('int' {'enums':loglevels}) {'doc':'A log level integer that increases with severity.' }) ('data' ('data' {}) {'doc':'A raw JSON record of the log event.' }) )) ('it:domain' {} (('name' ('str' {'lower':<true> 'strip':<true> 'onespace':<true>}) {'doc':'The name of the domain.' }) ('desc' ('str' {}) {'doc':'A brief description of the domain.' }) ('org' ('ou:org' {}) {'doc':'The org that operates the given domain.' }) )) ('it:network' {} (('name' ('str' {'lower':<true> 'strip':<true> 'onespace':<true>}) {'doc':'The name of the network.' }) ('desc' ('str' {}) {'doc':'A brief description of the network.' }) ('org' ('ou:org' {}) {'doc':'The org that owns/operates the network.' }) ('net4' ('inet:net4' {}) {'doc':'The optional contiguous IPv4 address range of this network.' }) ('net6' ('inet:net6' {}) {'doc':'The optional contiguous IPv6 address range of this network.' }) )) ('it:account' {} (('user' ('inet:user' {}) {'doc':'The username associated with the account' }) ('contact' ('ps:contact' {}) {'doc':'Additional contact information associated with this account.' }) ('host' ('it:host' {}) {'doc':'The host where the account is registered.' }) ('domain' ('it:domain' {}) {'doc':'The authentication domain where the account is registered.' }) ('posix:uid' ('int' {}) {'doc':'The user ID of the account.' 'ex':'1001' }) ('posix:gid' ('int' {}) {'doc':'The primary group ID of the account.' 'ex':'1001' }) ('posix:gecos' ('int' {}) {'doc':'The GECOS field for the POSIX account.' }) ('posix:home' ('file:path' {}) {'doc':"The path to the POSIX account's home directory." 'ex':'/home/visi' }) ('posix:shell' ('file:path' {}) {'doc':"The path to the POSIX account's default shell." 'ex':'/bin/bash' }) ('windows:sid' ('it:os:windows:sid' {}) {'doc':'The Microsoft Windows Security Identifier of the account.' }) ('groups' ('array' {'type':'it:group'}) {'doc':'An array of groups that the account is a member of.' 
}) )) ('it:group' {} (('name' ('str' {'lower':<true> 'strip':<true> 'onespace':<true>}) {'doc':'The name of the group.' }) ('desc' ('str' {}) {'doc':'A brief description of the group.' }) ('host' ('it:host' {}) {'doc':'The host where the group is registered.' }) ('domain' ('it:domain' {}) {'doc':'The authentication domain where the group is registered.' }) ('groups' ('array' {'type':'it:group'}) {'doc':'Groups that are a member of this group.' }) ('posix:gid' ('int' {}) {'doc':'The primary group ID of the account.' 'ex':'1001' }) ('windows:sid' ('it:os:windows:sid' {}) {'doc':'The Microsoft Windows Security Identifier of the group.' }) )) ('it:logon' {} (('time' ('time' {}) {'doc':'The time the logon occured.' }) ('success' ('bool' {}) {'doc':'Set to false to indicate an unsuccessful logon attempt.' }) ('logoff:time' ('time' {}) {'doc':'The time the logon session ended.' }) ('host' ('it:host' {}) {'doc':'The host that the account logged in to.' }) ('account' ('it:account' {}) {'doc':'The account that logged in.' }) ('creds' ('auth:creds' {}) {'doc':'The credentials that were used for the logon.' }) ('duration' ('duration' {}) {'doc':'The duration of the logon session.' }) ('client:host' ('it:host' {}) {'doc':'The host where the logon originated.' }) ('client:ipv4' ('inet:ipv4' {}) {'doc':'The IPv4 where the logon originated.' }) ('client:ipv6' ('inet:ipv6' {}) {'doc':'The IPv6 where the logon originated.' }) )) ('it:hosturl' {} (('host' ('it:host' {}) {'ro':<true> 'doc':'Host serving a url.' }) ('url' ('inet:url' {}) {'ro':<true> 'doc':'URL available on the host.' }) )) ('it:dev:str' {} (('norm' ('str' {'lower':<true>}) {'doc':'Lower case normalized version of the it:dev:str.' }) )) ('it:sec:cve' {} (('desc' ('str' {}) {'doc':'A free-form description of the CVE vulnerability.' 'disp':{'hint':'text'} }) ('url' ('inet:url' {}) {'doc':'A URL linking this CVE to a full description.' }) ('references' ('array' {'type':'inet:url' 'uniq':<true>}) {'doc':'An array of URLs that document the CVE ID.' }) )) ('it:sec:cpe' {} (('part' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "part" field from the CPE 2.3 string.'}) ('vendor' ('ou:name' {}) {'ro':<true> 'doc':'The "vendor" field from the CPE 2.3 string.'}) ('product' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "product" field from the CPE 2.3 string.'}) ('version' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "version" field from the CPE 2.3 string.'}) ('update' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "update" field from the CPE 2.3 string.'}) ('edition' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "edition" field from the CPE 2.3 string.'}) ('language' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "language" field from the CPE 2.3 string.'}) ('sw_edition' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "sw_edition" field from the CPE 2.3 string.'}) ('target_sw' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "target_sw" field from the CPE 2.3 string.'}) ('target_hw' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "target_hw" field from the CPE 2.3 string.'}) ('other' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The "other" field from the CPE 2.3 string.'}) )) ('it:sec:cwe' {} (('name' ('str' {}) {'doc':'The CWE description field.' 'ex':'Buffer Copy without Checking Size of Input (Classic Buffer Overflow)' }) ('desc' ('str' {}) {'doc':'The CWE description field.' 
'disp':{'hint':'text'} }) ('url' ('inet:url' {}) {'doc':'A URL linking this CWE to a full description.' }) ('parents' ('array' {'type':'it:sec:cwe' 'uniq':<true> 'sorted':<true> 'split':','}) {'doc':'An array of ChildOf CWE Relationships.'}) )) ('it:mitre:attack:group' {} (('org' ('ou:org' {}) {'doc':'Used to map an ATT&CK group to a synapse ou:org.' }) ('name' ('ou:name' {}) {'doc':'The primary name for the ATT&CK group.' }) ('names' ('array' {'type':'ou:name' 'uniq':<true> 'sorted':<true>}) {'doc':'An array of alternate names for the ATT&CK group.' }) ('desc' ('str' {}) {'doc':'A description of the ATT&CK group.' 'disp':{'hint':'text'} }) ('url' ('inet:url' {}) {'doc':'The URL that documents the ATT&CK group.' }) ('tag' ('syn:tag' {}) {'doc':'The synapse tag used to annotate nodes included in this ATT&CK group ID.' 'ex':'cno.mitre.g0100' }) ('references' ('array' {'type':'inet:url' 'uniq':<true>}) {'doc':'An array of URLs that document the ATT&CK group.' }) ('techniques' ('array' {'type':'it:mitre:attack:technique' 'uniq':<true> 'sorted':<true> 'split':','}) {'doc':'An array of ATT&CK technique IDs used by the group.' }) ('software' ('array' {'type':'it:mitre:attack:software' 'uniq':<true> 'sorted':<true> 'split':','}) {'doc':'An array of ATT&CK software IDs used by the group.' }) )) ('it:mitre:attack:tactic' {} (('name' ('str' {'strip':<true>}) {'doc':'The primary name for the ATT&CK tactic.' }) ('desc' ('str' {}) {'doc':'A description of the ATT&CK tactic.' 'disp':{'hint':'text'} }) ('url' ('inet:url' {}) {'doc':'The URL that documents the ATT&CK tactic.' }) ('tag' ('syn:tag' {}) {'doc':'The synapse tag used to annotate nodes included in this ATT&CK tactic.' 'ex':'cno.mitre.ta0100' }) ('references' ('array' {'type':'inet:url' 'uniq':<true>}) {'doc':'An array of URLs that document the ATT&CK tactic.' }) )) ('it:mitre:attack:technique' {} (('name' ('str' {'strip':<true>}) {'doc':'The primary name for the ATT&CK technique.' }) ('status' ('it:mitre:attack:status' {}) {'doc':'The status of this ATT&CK technique.' }) ('isnow' ('it:mitre:attack:technique' {}) {'doc':'If deprecated, this field may contain the current value for the technique.' }) ('desc' ('str' {'strip':<true>}) {'doc':'A description of the ATT&CK technique.' 'disp':{'hint':'text'} }) ('url' ('inet:url' {}) {'doc':'The URL that documents the ATT&CK technique.' }) ('tag' ('syn:tag' {}) {'doc':'The synapse tag used to annotate nodes included in this ATT&CK technique.' 'ex':'cno.mitre.t0100' }) ('references' ('array' {'type':'inet:url' 'uniq':<true>}) {'doc':'An array of URLs that document the ATT&CK technique.' }) ('parent' ('it:mitre:attack:technique' {}) {'doc':'The parent ATT&CK technique on this sub-technique.' }) ('tactics' ('array' {'type':'it:mitre:attack:tactic' 'uniq':<true> 'sorted':<true> 'split':','}) {'doc':'An array of ATT&CK tactics that include this technique.' }) )) ('it:mitre:attack:software' {} (('software' ('it:prod:soft' {}) {'doc':'Used to map an ATT&CK software to a synapse it:prod:soft.' }) ('name' ('str' {'strip':<true>}) {'doc':'The primary name for the ATT&CK software.' }) ('names' ('array' {'type':'str' 'uniq':<true> 'sorted':<true>}) {'doc':'Associated names for the ATT&CK software.' }) ('desc' ('str' {'strip':<true>}) {'doc':'A description of the ATT&CK software.' 'disp':{'hint':'text'} }) ('url' ('inet:url' {}) {'doc':'The URL that documents the ATT&CK software.' }) ('tag' ('syn:tag' {}) {'doc':'The synapse tag used to annotate nodes included in this ATT&CK software.' 
'ex':'cno.mitre.s0100' }) ('references' ('array' {'type':'inet:url' 'uniq':<true>}) {'doc':'An array of URLs that document the ATT&CK software.' }) ('techniques' ('array' {'type':'it:mitre:attack:technique' 'uniq':<true> 'sorted':<true> 'split':','}) {'doc':'An array of techniques used by the software.' }) )) ('it:mitre:attack:mitigation' {} (# TODO map to an eventual risk:mitigation ('name' ('str' {'strip':<true>}) {'doc':'The primary name for the ATT&CK mitigation.' }) ('desc' ('str' {'strip':<true>}) {'doc':'A description of the ATT&CK mitigation.' 'disp':{'hint':'text'} }) ('url' ('inet:url' {}) {'doc':'The URL that documents the ATT&CK mitigation.' }) ('tag' ('syn:tag' {}) {'doc':'The synapse tag used to annotate nodes included in this ATT&CK mitigation.' 'ex':'cno.mitre.m0100' }) ('references' ('array' {'type':'inet:url' 'uniq':<true>}) {'doc':'An array of URLs that document the ATT&CK mitigation.' }) ('addresses' ('array' {'type':'it:mitre:attack:technique' 'uniq':<true> 'sorted':<true> 'split':','}) {'doc':'An array of ATT&CK technique IDs addressed by the mitigation.' }) )) ('it:dev:int' {} ()) ('it:dev:pipe' {} ()) ('it:dev:mutex' {} ()) ('it:dev:regkey' {} ()) ('it:dev:regval' {} (('key' ('it:dev:regkey' {}) {'doc':'The Windows registry key.' }) ('str' ('it:dev:str' {}) {'doc':'The value of the registry key, if the value is a string.' }) ('int' ('it:dev:int' {}) {'doc':'The value of the registry key, if the value is an integer.' }) ('bytes' ('file:bytes' {}) {'doc':'The file representing the value of the registry key, if the value is binary data.' }) )) ('it:prod:soft' {} (('name' ('str' {'lower':<true> 'strip':<true>}) {'doc':'Name of the software.' }) ('names' ('array' {'type':'it:dev:str' 'uniq':<true> 'sorted':<true>}) {'doc':'Observed/variant names for this software.' }) ('desc' ('str' {}) {'doc':'A description of the software.' 'disp':{'hint':'text'} }) ('desc:short' ('str' {'lower':<true>}) {'doc':'A short description of the software.' }) ('cpe' ('it:sec:cpe' {}) {'doc':'The NIST CPE 2.3 string specifying this software.' }) ('author' ('ps:contact' {}) {'doc':'The contact information of the org or person who authored the software.' }) ('author:org' ('ou:org' {}) {'deprecated':<true> 'doc':'Organization which authored the software.' }) ('author:acct' ('inet:web:acct' {}) {'deprecated':<true> 'doc':'Web account of the software author.' }) ('author:email' ('inet:email' {}) {'deprecated':<true> 'doc':'Email address of the sofware author.' }) ('author:person' ('ps:person' {}) {'deprecated':<true> 'doc':'Person who authored the software.' }) ('url' ('inet:url' {}) {'doc':'URL relevant for the software.' 
}) ('isos' ('bool' {}) {'doc':'Set to True if the software is an operating system.'}) ('islib' ('bool' {}) {'doc':'Set to True if the software is a library.'}) )) ('it:adid' {} ()) ('it:os:ios:idfa' {} ()) ('it:os:android:aaid' {} ()) ('it:os:android:perm' {} ()) ('it:os:android:intent' {} ()) ('it:os:android:reqperm' {} (('app' ('it:prod:softver' {}) {'ro':<true> 'doc':'The android app which requests the permission.'}) ('perm' ('it:os:android:perm' {}) {'ro':<true> 'doc':'The android permission requested by the app.'}) )) ('it:prod:softos' {} (('soft' ('it:prod:softver' {}) {'ro':<true> 'doc':'The software which can run on the operating system.'}) ('os' ('it:prod:softver' {}) {'ro':<true> 'doc':'The operating system which the software can run on.'}) )) ('it:os:android:ilisten' {} (('app' ('it:prod:softver' {}) {'ro':<true> 'doc':'The app software which listens for the android intent.'}) ('intent' ('it:os:android:intent' {}) {'ro':<true> 'doc':'The android intent which is listened for by the app.'}) )) ('it:os:android:ibroadcast' {} (('app' ('it:prod:softver' {}) {'ro':<true> 'doc':'The app software which broadcasts the android intent.'}) ('intent' ('it:os:android:intent' {}) {'ro':<true> 'doc':'The android intent which is broadcast by the app.'}) )) ('it:prod:softver' {} (('software' ('it:prod:soft' {}) {'doc':'Software associated with this version instance.' }) ('software:name' ('str' {'lower':<true> 'strip':<true>}) {'doc':'The name of the software at a particular version.' }) ('names' ('array' {'type':'it:dev:str' 'uniq':<true> 'sorted':<true>}) {'doc':'Observed/variant names for this software version.' }) ('cpe' ('it:sec:cpe' {}) {'doc':'The NIST CPE 2.3 string specifying this software version' }) ('cves' ('array' {'type':'it:sec:cve' 'uniq':<true> 'sorted':<true>}) {'doc':'A list of CVEs that apply to this software version.' }) ('vers' ('it:dev:str' {}) {'doc':'Version string associated with this version instance.' }) ('vers:norm' ('str' {'lower':<true>}) {'doc':'Normalized version of the version string.' }) ('arch' ('it:dev:str' {}) {'doc':'Software architecture.' }) ('released' ('time' {}) {'doc':'Timestamp for when this version of the software was released.' }) ('semver' ('it:semver' {}) {'doc':'System normalized semantic version number.' }) ('semver:major' ('int' {}) {'doc':'Version major number.' }) ('semver:minor' ('int' {}) {'doc':'Version minor number.' }) ('semver:patch' ('int' {}) {'doc':'Version patch number.' }) ('semver:pre' ('str' {}) {'doc':'Semver prerelease string.' }) ('semver:build' ('str' {}) {'doc':'Semver build string.' }) ('url' ('inet:url' {}) {'doc':'URL where a specific version of the software is available from.' }) )) ('it:prod:softlib' {} (('soft' ('it:prod:softver' {}) {'ro':<true> 'doc':'The software version that contains the library.'}) ('lib' ('it:prod:softver' {}) {'ro':<true> 'doc':'The library software version.'}) )) ('it:prod:softfile' {} (('soft' ('it:prod:softver' {}) {'ro':<true> 'doc':'The software which distributes the file.'}) ('file' ('file:bytes' {}) {'ro':<true> 'doc':'The file distributed by the software.'}) ('path' ('file:path' {}) {'doc':'The default installation path of the file.'}) )) ('it:hostsoft' {} (('host' ('it:host' {}) {'ro':<true> 'doc':'Host with the software.'}) ('softver' ('it:prod:softver' {}) {'ro':<true> 'doc':'Software on the host.'}))) ('it:av:sig' {} (('soft' ('it:prod:soft' {}) {'ro':<true> 'doc':'The anti-virus product which contains the signature.' 
}) ('name' ('str' {'lower':<true>}) {'ro':<true> 'doc':'The signature name.'}) ('desc' ('str' {}) {'doc':'A free-form description of the signature.' 'disp':{'hint':'text'} }) ('url' ('inet:url' {}) {'doc':'A reference URL for information about the signature.' }))) ('it:av:filehit' {} (('file' ('file:bytes' {}) {'ro':<true> 'doc':'The file that triggered the signature hit.' }) ('sig' ('it:av:sig' {}) {'ro':<true> 'doc':'The signature that the file triggered on.'}) ('sig:name' ('str' {'lower':<true>}) {'ro':<true> 'doc':'The signature name.' }) ('sig:soft' ('it:prod:soft' {}) {'ro':<true> 'doc':'The anti-virus product which contains the signature.' }) )) ('it:av:prochit' {} (('proc' ('it:exec:proc' {}) {'doc':'The file that triggered the signature hit.' }) ('sig' ('it:av:sig' {}) {'doc':'The signature that the file triggered on.'}) ('time' ('time' {}) {'doc':'The time that the AV engine detected the signature.'}) )) ('it:auth:passwdhash' {} (('salt' ('hex' {}) {'doc':'The (optional) hex encoded salt value used to calculate the password hash.' }) ('hash:md5' ('hash:md5' {}) {'doc':'The MD5 password hash value.' }) ('hash:sha1' ('hash:sha1' {}) {'doc':'The SHA1 password hash value.' }) ('hash:sha256' ('hash:sha256' {}) {'doc':'The SHA256 password hash value.' }) ('hash:sha512' ('hash:sha512' {}) {'doc':'The SHA512 password hash value.' }) ('hash:lm' ('hash:lm' {}) {'doc':'The LM password hash value.' }) ('hash:ntlm' ('hash:ntlm' {}) {'doc':'The NTLM password hash value.' }) ('passwd' ('inet:passwd' {}) {'doc':'The (optional) clear text password for this password hash.' }) )) ('it:cmd' {} ()) ('it:exec:proc' {} (('host' ('it:host' {}) {'doc':'The host that executed the process. May be an actual or a virtual / notional host.' }) ('exe' ('file:bytes' {}) {'doc':'The file considered the "main" executable for the process. For example, rundll32.exe may be considered the "main" executable for DLLs loaded by that program.' }) ('cmd' ('it:cmd' {}) {'doc':'The command string used to launch the process, including any command line parameters.' 'disp':{'hint':'text'} }) ('pid' ('int' {}) {'doc':'The process ID.' }) ('time' ('time' {}) {'doc':'The start time for the process.' }) ('exited' ('time' {}) {'doc':'The time the process exited.' }) ('exitcode' ('int' {}) {'doc':'The exit code for the process.' }) ('user' ('inet:user' {}) {'doc':'The user name of the process owner.' }) ('path' ('file:path' {}) {'doc':'The path to the executable of the process.' }) ('src:exe' ('file:path' {}) {'doc':'The path to the executable which started the process.' }) ('src:proc' ('it:exec:proc' {}) {'doc':'The process which created the process.'}) ('killedby' ('it:exec:proc' {}) {'doc':'The process which killed this process.' }) )) ('it:exec:thread' {} (('proc' ('it:exec:proc' {}) {'doc':'The process which contains the thread.' }) ('created' ('time' {}) {'doc':'The time the thread was created.' }) ('exited' ('time' {}) {'doc':'The time the thread exited.' }) ('exitcode' ('int' {}) {'doc':'The exit code or return value for the thread.' }) ('src:proc' ('it:exec:proc' {}) {'doc':'An external process which created the thread.' }) ('src:thread' ('it:exec:thread' {}) {'doc':'The thread which created this thread.' }) )) ('it:exec:loadlib' {} (('proc' ('it:exec:proc' {}) {'doc':'The process where the library was loaded.' }) ('va' ('int' {}) {'doc':'The base memory address where the library was loaded in the process.' }) ('loaded' ('time' {}) {'doc':'The time the library was loaded.' 
}) ('unloaded' ('time' {}) {'doc':'The time the library was unloaded.' }) ('path' ('file:path' {}) {'doc':'The path that the library was loaded from.' }) ('file' ('file:bytes' {}) {'doc':'The library file that was loaded.' }) )) ('it:exec:mmap' {} (('proc' ('it:exec:proc' {}) {'doc':'The process where the memory was mapped.' }) ('va' ('int' {}) {'doc':'The base memory address where the map was created in the process.' }) ('size' ('int' {}) {'doc':'The size of the memory map in bytes.' }) ('perms:read' ('bool' {}) {'doc':'True if the mmap is mapped with read permissions.' }) ('perms:write' ('bool' {}) {'doc':'True if the mmap is mapped with write permissions.' }) ('perms:execute' ('bool' {}) {'doc':'True if the mmap is mapped with execute permissions.' }) ('created' ('time' {}) {'doc':'The time the memory map was created.' }) ('deleted' ('time' {}) {'doc':'The time the memory map was deleted.' }) ('path' ('file:path' {}) {'doc':'The file path if the mmap is a mapped view of a file.' }) ('hash:sha256' ('hash:sha256' {}) {'doc':'A SHA256 hash of the memory map. Bytes may optionally be present in the axon.' }) )) ('it:exec:mutex' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that created the mutex.' }) ('host' ('it:host' {}) {'doc':'The host running the process that created the mutex. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that created the mutex. May or may not be the same :exe specified in :proc, if present.' }) ('time' ('time' {}) {'doc':'The time the mutex was created.' }) ('name' ('it:dev:mutex' {}) {'doc':'The mutex string.' }) )) ('it:exec:pipe' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that created the named pipe.' }) ('host' ('it:host' {}) {'doc':'The host running the process that created the named pipe. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that created the named pipe. May or may not be the same :exe specified in :proc, if present.' }) ('time' ('time' {}) {'doc':'The time the named pipe was created.' }) ('name' ('it:dev:pipe' {}) {'doc':'The named pipe string.' }) )) ('it:exec:url' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that requested the URL.' }) ('host' ('it:host' {}) {'doc':'The host running the process that requested the URL. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that requested the URL. May or may not be the same :exe specified in :proc, if present.' }) ('time' ('time' {}) {'doc':'The time the URL was requested.' }) ('url' ('inet:url' {}) {'doc':'The URL that was requested.' }) ('client' ('inet:client' {}) {'doc':'The address of the client during the URL retrieval.'}) ('client:ipv4' ('inet:ipv4' {}) {'doc':'The IPv4 of the client during the URL retrieval..'}) ('client:ipv6' ('inet:ipv6' {}) {'doc':'The IPv6 of the client during the URL retrieval..'}) ('client:port' ('inet:port' {}) {'doc':'The client port during the URL retrieval..'}) )) ('it:exec:bind' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that bound the listening port.' }) ('host' ('it:host' {}) {'doc':'The host running the process that bound the listening port. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that bound the listening port. 
May or may not be the same :exe specified in :proc, if present.' }) ('time' ('time' {}) {'doc':'The time the port was bound.' }) ('server' ('inet:server' {}) {'doc':'The inet:addr of the server when binding the port.'}) ('server:ipv4' ('inet:ipv4' {}) {'doc':'The IPv4 address specified to bind().'}) ('server:ipv6' ('inet:ipv6' {}) {'doc':'The IPv6 address specified to bind().'}) ('server:port' ('inet:port' {}) {'doc':'The bound (listening) TCP port.'}) )) ('it:fs:file' {} (('host' ('it:host' {}) {'doc':'The host containing the file.' }) ('path' ('file:path' {}) {'doc':'The path for the file.' }) ('path:dir' ('file:path' {}) {'ro':<true> 'doc':'The parent directory of the file path (parsed from :path).' }) ('path:ext' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The file extension of the file name (parsed from :path).' }) ('path:base' ('file:base' {}) {'ro':<true> 'doc':'The final component of the file path (parsed from :path).' }) ('file' ('file:bytes' {}) {'doc':'The file on the host.' }) ('ctime' ('time' {}) {'doc':'The file creation time.' }) ('mtime' ('time' {}) {'doc':'The file modification time.' }) ('atime' ('time' {}) {'doc':'The file access time.' }) ('user' ('inet:user' {}) {'doc':'The owner of the file.' }) ('group' ('inet:user' {}) {'doc':'The group owner of the file.' }) )) ('it:exec:file:add' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that created the new file.' }) ('host' ('it:host' {}) {'doc':'The host running the process that created the new file. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that created the new file. May or may not be the same :exe specified in :proc, if present.'}) ('time' ('time' {}) {'doc':'The time the file was created.' }) ('path' ('file:path' {}) {'doc':'The path where the file was created.' }) ('path:dir' ('file:path' {}) {'ro':<true> 'doc':'The parent directory of the file path (parsed from :path).' }) ('path:ext' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The file extension of the file name (parsed from :path).' }) ('path:base' ('file:base' {}) {'ro':<true> 'doc':'The final component of the file path (parsed from :path).' }) ('file' ('file:bytes' {}) {'doc':'The file that was created.' }) )) ('it:exec:file:del' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that deleted the file.' }) ('host' ('it:host' {}) {'doc':'The host running the process that deleted the file. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that deleted the file. May or may not be the same :exe specified in :proc, if present.'}) ('time' ('time' {}) {'doc':'The time the file was deleted.' }) ('path' ('file:path' {}) {'doc':'The path where the file was deleted.' }) ('path:dir' ('file:path' {}) {'ro':<true> 'doc':'The parent directory of the file path (parsed from :path).' }) ('path:ext' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The file extension of the file name (parsed from :path).' }) ('path:base' ('file:base' {}) {'ro':<true> 'doc':'The final component of the file path (parsed from :path).' }) ('file' ('file:bytes' {}) {'doc':'The file that was deleted.' }) )) ('it:exec:file:read' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that read the file.' }) ('host' ('it:host' {}) {'doc':'The host running the process that read the file. Typically the same host referenced in :proc, if present.' 
}) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that read the file. May or may not be the same :exe specified in :proc, if present.'}) ('time' ('time' {}) {'doc':'The time the file was read.' }) ('path' ('file:path' {}) {'doc':'The path where the file was read.' }) ('path:dir' ('file:path' {}) {'ro':<true> 'doc':'The parent directory of the file path (parsed from :path).' }) ('path:ext' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The file extension of the file name (parsed from :path).' }) ('path:base' ('file:base' {}) {'ro':<true> 'doc':'The final component of the file path (parsed from :path).' }) ('file' ('file:bytes' {}) {'doc':'The file that was read.' }) )) ('it:exec:file:write' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that wrote to / modified the existing file.' }) ('host' ('it:host' {}) {'doc':'The host running the process that wrote to the file. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that wrote to the file. May or may not be the same :exe specified in :proc, if present.'}) ('time' ('time' {}) {'doc':'The time the file was written to/modified.' }) ('path' ('file:path' {}) {'doc':'The path where the file was written to/modified.' }) ('path:dir' ('file:path' {}) {'ro':<true> 'doc':'The parent directory of the file path (parsed from :path).' }) ('path:ext' ('str' {'lower':<true> 'strip':<true>}) {'ro':<true> 'doc':'The file extension of the file name (parsed from :path).' }) ('path:base' ('file:base' {}) {'ro':<true> 'doc':'The final component of the file path (parsed from :path).' }) ('file' ('file:bytes' {}) {'doc':'The file that was modified.' }) )) ('it:exec:reg:get' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that read the registry.' }) ('host' ('it:host' {}) {'doc':'The host running the process that read the registry. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that read the registry. May or may not be the same :exe referenced in :proc, if present.' }) ('time' ('time' {}) {'doc':'The time the registry was read.' }) ('reg' ('it:dev:regval' {}) {'doc':'The registry key or value that was read.' }) )) ('it:exec:reg:set' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that wrote to the registry.' }) ('host' ('it:host' {}) {'doc':'The host running the process that wrote to the registry. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that wrote to the registry. May or may not be the same :exe referenced in :proc, if present.' }) ('time' ('time' {}) {'doc':'The time the registry was written to.' }) ('reg' ('it:dev:regval' {}) {'doc':'The registry key or value that was written to.' }) )) ('it:exec:reg:del' {} (('proc' ('it:exec:proc' {}) {'doc':'The main process executing code that deleted data from the registry.' }) ('host' ('it:host' {}) {'doc':'The host running the process that deleted data from the registry. Typically the same host referenced in :proc, if present.' }) ('exe' ('file:bytes' {}) {'doc':'The specific file containing code that deleted data from the registry. May or may not be the same :exe referenced in :proc, if present.' }) ('time' ('time' {}) {'doc':'The time the data from the registry was deleted.' }) ('reg' ('it:dev:regval' {}) {'doc':'The registry key or value that was deleted.' 
}) )) ('it:app:snort:rule' {} (('text' ('str' {}) {'doc':'The snort rule text.' 'disp':{'hint':'text'} }) ('name' ('str' {}) {'doc':'The name of the snort rule.'}) ('version' ('it:semver' {}) {'doc':'The current version of the rule.'}) )) ('it:app:snort:hit' {} (('rule' ('it:app:snort:rule' {}) {'doc':'The snort rule that matched the flow.'}) ('flow' ('inet:flow' {}) {'doc':'The inet:flow that matched the snort rule.'}) ('src' ('inet:addr' {}) {'doc':'The source address of the flow that caused the hit.'}) ('src:ipv4' ('inet:ipv4' {}) {'doc':'The source IPv4 address of the flow that caused the hit.'}) ('src:ipv6' ('inet:ipv6' {}) {'doc':'The source IPv6 address of the flow that caused the hit.'}) ('src:port' ('inet:port' {}) {'doc':'The source port of the flow that caused the hit.'}) ('dst' ('inet:addr' {}) {'doc':'The destination address of the trigger.'}) ('dst:ipv4' ('inet:ipv4' {}) {'doc':'The destination IPv4 address of the flow that caused the hit.'}) ('dst:ipv6' ('inet:ipv6' {}) {'doc':'The destination IPv6 address of the flow that caused the hit.'}) ('dst:port' ('inet:port' {}) {'doc':'The destination port of the flow that caused the hit.'}) ('time' ('time' {}) {'doc':'The time of the network flow that caused the hit.'}) ('sensor' ('it:host' {}) {'doc':'The sensor host node that produced the hit.'}) ('version' ('it:semver' {}) {'doc':'The version of the rule at the time of match.'}) )) ('it:app:yara:rule' {} (('text' ('str' {}) {'doc':'The YARA rule text.' 'disp':{'hint':'text'} }) ('name' ('str' {}) {'doc':'The name of the YARA rule.'}) ('author' ('ps:contact' {}) {'doc':'Contact info for the author of the YARA rule.'}) ('version' ('it:semver' {}) {'doc':'The current version of the rule.'}) ('enabled' ('bool' {}) {'doc':'The rule enabled status to be used for YARA evaluation engines.'}) )) ('it:app:yara:match' {} (('rule' ('it:app:yara:rule' {}) {'ro':<true> 'doc':'The YARA rule that matched the file.'}) ('file' ('file:bytes' {}) {'ro':<true> 'doc':'The file that matched the YARA rule.'}) ('version' ('it:semver' {}) {'doc':'The most recent version of the rule evaluated as a match.'}) )) ('it:app:yara:procmatch' {} (('rule' ('it:app:yara:rule' {}) {'doc':'The YARA rule that matched the process.'}) ('proc' ('it:exec:proc' {}) {'doc':'The process that matched the YARA rule.'}) ('time' ('time' {}) {'doc':'The time that the YARA engine matched the process to the rule.'}) ('version' ('it:semver' {}) {'doc':'The most recent version of the rule evaluated as a match.'}) )) ('it:reveng:function' {} (('name' ('str' {}) {'doc':'The name of the function.'}) ('description' ('str' {}) {'doc':'Notes concerning the function.'}) ('impcalls' ('array' {'type':'it:reveng:impfunc'}) {'doc':'Calls to imported library functions within the scope of the function.' }) ('strings' ('array' {'type':'it:dev:str' 'uniq':<true>}) {'doc':'An array of strings referenced within the function.' }) )) ('it:reveng:filefunc' {} (('function' ('it:reveng:function' {}) {'ro':<true> 'doc':'The guid matching the function.'}) ('file' ('file:bytes' {}) {'ro':<true> 'doc':'The file that contains the function.'}) ('va' ('int' {}) {'doc':'The virtual address of the first codeblock of the function.'}) ('rank' ('int' {}) {'doc':'The function rank score used to evaluate if it exhibits interesting behavior.'}) ('complexity' ('int' {}) {'doc':'The complexity of the function.'}) ('funccalls' ('array' {'type':'it:reveng:filefunc'}) {'doc':'Other function calls within the scope of the function.' 
}) )) ('it:reveng:funcstr' {} (('function' ('it:reveng:function' {}) {'ro':<true> 'doc':'The guid matching the function.'}) ('string' ('str' {}) {'ro':<true> 'doc':'The string that the function references.'}) )) ('it:reveng:impfunc' {} ()) ) }<line_sep>name='it'<line_sep><return>((name modl) )<block_end><block_end>
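# Hedged illustration, not part of the original model source (the helper name '_example_prop_tuple' is hypothetical): each property above is declared as a three-part tuple of (property name, (type name, type options), property info such as 'doc' and 'ro'); the sketch below just unpacks one declaration copied from the forms above. <def_stmt>_example_prop_tuple <block_start>prop=('server:port' ('inet:port' {}) {'doc':'The bound (listening) TCP port.'})<line_sep>propname,proptype,propinfo=prop<line_sep><return>propname proptype propinfo<block_end>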
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- <import_from_stmt>.available_operation_display AvailableOperationDisplay<import_from_stmt>.error_details_model ErrorDetailsModel<import_from_stmt>.error_error_model ErrorErrorModel<import_from_stmt>.error_model ErrorModel ErrorModelException<import_from_stmt>.operation_result OperationResult<import_from_stmt>.provisioned_resource_properties ProvisionedResourceProperties<import_from_stmt>.proxy_resource ProxyResource<import_from_stmt>.managed_proxy_resource ManagedProxyResource<import_from_stmt>.resource Resource<import_from_stmt>.tracked_resource TrackedResource<import_from_stmt>.secret_resource_properties SecretResourceProperties<import_from_stmt>.inlined_value_secret_resource_properties InlinedValueSecretResourceProperties<import_from_stmt>.secret_resource_properties_base SecretResourcePropertiesBase<import_from_stmt>.secret_resource_description SecretResourceDescription<import_from_stmt>.secret_value SecretValue<import_from_stmt>.secret_value_properties SecretValueProperties<import_from_stmt>.secret_value_resource_description SecretValueResourceDescription<import_from_stmt>.volume_provider_parameters_azure_file VolumeProviderParametersAzureFile<import_from_stmt>.volume_properties VolumeProperties<import_from_stmt>.volume_reference VolumeReference<import_from_stmt>.application_scoped_volume_creation_parameters ApplicationScopedVolumeCreationParameters<import_from_stmt>.application_scoped_volume ApplicationScopedVolume<import_from_stmt>.application_scoped_volume_creation_parameters_service_fabric_volume_disk ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk<import_from_stmt>.volume_resource_description VolumeResourceDescription<import_from_stmt>.network_resource_properties NetworkResourceProperties<import_from_stmt>.local_network_resource_properties LocalNetworkResourceProperties<import_from_stmt>.endpoint_ref EndpointRef<import_from_stmt>.network_ref NetworkRef<import_from_stmt>.network_resource_properties_base NetworkResourcePropertiesBase<import_from_stmt>.network_resource_description NetworkResourceDescription<import_from_stmt>.gateway_destination GatewayDestination<import_from_stmt>.tcp_config TcpConfig<import_from_stmt>.http_route_match_path HttpRouteMatchPath<import_from_stmt>.http_route_match_header HttpRouteMatchHeader<import_from_stmt>.http_route_match_rule HttpRouteMatchRule<import_from_stmt>.http_route_config HttpRouteConfig<import_from_stmt>.http_host_config HttpHostConfig<import_from_stmt>.http_config HttpConfig<import_from_stmt>.gateway_properties GatewayProperties<import_from_stmt>.gateway_resource_description GatewayResourceDescription<import_from_stmt>.image_registry_credential ImageRegistryCredential<import_from_stmt>.environment_variable EnvironmentVariable<import_from_stmt>.setting Setting<import_from_stmt>.container_label ContainerLabel<import_from_stmt>.endpoint_properties EndpointProperties<import_from_stmt>.resource_requests ResourceRequests<import_from_stmt>.resource_limits ResourceLimits<import_from_stmt>.resource_requirements ResourceRequirements<import_from_stmt>.diagnostics_ref 
DiagnosticsRef<import_from_stmt>.reliable_collections_ref ReliableCollectionsRef<import_from_stmt>.container_state ContainerState<import_from_stmt>.container_event ContainerEvent<import_from_stmt>.container_instance_view ContainerInstanceView<import_from_stmt>.container_code_package_properties ContainerCodePackageProperties<import_from_stmt>.auto_scaling_trigger AutoScalingTrigger<import_from_stmt>.auto_scaling_mechanism AutoScalingMechanism<import_from_stmt>.auto_scaling_policy AutoScalingPolicy<import_from_stmt>.service_resource_description ServiceResourceDescription<import_from_stmt>.diagnostics_sink_properties DiagnosticsSinkProperties<import_from_stmt>.diagnostics_description DiagnosticsDescription<import_from_stmt>.application_properties ApplicationProperties<import_from_stmt>.azure_internal_monitoring_pipeline_sink_description AzureInternalMonitoringPipelineSinkDescription<import_from_stmt>.application_resource_description ApplicationResourceDescription<import_from_stmt>.add_remove_replica_scaling_mechanism AddRemoveReplicaScalingMechanism<import_from_stmt>.auto_scaling_metric AutoScalingMetric<import_from_stmt>.auto_scaling_resource_metric AutoScalingResourceMetric<import_from_stmt>.service_properties ServiceProperties<import_from_stmt>.service_replica_properties ServiceReplicaProperties<import_from_stmt>.service_replica_description ServiceReplicaDescription<import_from_stmt>.average_load_scaling_trigger AverageLoadScalingTrigger<import_from_stmt>.container_logs ContainerLogs<import_from_stmt>.operation_result_paged OperationResultPaged<import_from_stmt>.secret_resource_description_paged SecretResourceDescriptionPaged<import_from_stmt>.secret_value_resource_description_paged SecretValueResourceDescriptionPaged<import_from_stmt>.volume_resource_description_paged VolumeResourceDescriptionPaged<import_from_stmt>.network_resource_description_paged NetworkResourceDescriptionPaged<import_from_stmt>.gateway_resource_description_paged GatewayResourceDescriptionPaged<import_from_stmt>.application_resource_description_paged ApplicationResourceDescriptionPaged<import_from_stmt>.service_resource_description_paged ServiceResourceDescriptionPaged<import_from_stmt>.service_replica_description_paged ServiceReplicaDescriptionPaged<import_from_stmt>.service_fabric_mesh_management_client_enums ResourceStatus HealthState SecretKind VolumeProvider SizeTypes ApplicationScopedVolumeKind NetworkKind HeaderMatchType OperatingSystemType DiagnosticsSinkKind AutoScalingMechanismKind AutoScalingMetricKind AutoScalingResourceMetricName AutoScalingTriggerKind <line_sep>__all__=['AvailableOperationDisplay' 'ErrorDetailsModel' 'ErrorErrorModel' 'ErrorModel' 'ErrorModelException' 'OperationResult' 'ProvisionedResourceProperties' 'ProxyResource' 'ManagedProxyResource' 'Resource' 'TrackedResource' 'SecretResourceProperties' 'InlinedValueSecretResourceProperties' 'SecretResourcePropertiesBase' 'SecretResourceDescription' 'SecretValue' 'SecretValueProperties' 'SecretValueResourceDescription' 'VolumeProviderParametersAzureFile' 'VolumeProperties' 'VolumeReference' 'ApplicationScopedVolumeCreationParameters' 'ApplicationScopedVolume' 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk' 'VolumeResourceDescription' 'NetworkResourceProperties' 'LocalNetworkResourceProperties' 'EndpointRef' 'NetworkRef' 'NetworkResourcePropertiesBase' 'NetworkResourceDescription' 'GatewayDestination' 'TcpConfig' 'HttpRouteMatchPath' 'HttpRouteMatchHeader' 'HttpRouteMatchRule' 'HttpRouteConfig' 'HttpHostConfig' 'HttpConfig' 
'GatewayProperties' 'GatewayResourceDescription' 'ImageRegistryCredential' 'EnvironmentVariable' 'Setting' 'ContainerLabel' 'EndpointProperties' 'ResourceRequests' 'ResourceLimits' 'ResourceRequirements' 'DiagnosticsRef' 'ReliableCollectionsRef' 'ContainerState' 'ContainerEvent' 'ContainerInstanceView' 'ContainerCodePackageProperties' 'AutoScalingTrigger' 'AutoScalingMechanism' 'AutoScalingPolicy' 'ServiceResourceDescription' 'DiagnosticsSinkProperties' 'DiagnosticsDescription' 'ApplicationProperties' 'AzureInternalMonitoringPipelineSinkDescription' 'ApplicationResourceDescription' 'AddRemoveReplicaScalingMechanism' 'AutoScalingMetric' 'AutoScalingResourceMetric' 'ServiceProperties' 'ServiceReplicaProperties' 'ServiceReplicaDescription' 'AverageLoadScalingTrigger' 'ContainerLogs' 'OperationResultPaged' 'SecretResourceDescriptionPaged' 'SecretValueResourceDescriptionPaged' 'VolumeResourceDescriptionPaged' 'NetworkResourceDescriptionPaged' 'GatewayResourceDescriptionPaged' 'ApplicationResourceDescriptionPaged' 'ServiceResourceDescriptionPaged' 'ServiceReplicaDescriptionPaged' 'ResourceStatus' 'HealthState' 'SecretKind' 'VolumeProvider' 'SizeTypes' 'ApplicationScopedVolumeKind' 'NetworkKind' 'HeaderMatchType' 'OperatingSystemType' 'DiagnosticsSinkKind' 'AutoScalingMechanismKind' 'AutoScalingMetricKind' 'AutoScalingResourceMetricName' 'AutoScalingTriggerKind' ]<line_sep>
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['WorkspaceArgs' 'Workspace']<line_sep>@pulumi.input_type<class_stmt>WorkspaceArgs<block_start><def_stmt>__init__ __self__ * resource_group_name:pulumi.Input[str] description:Optional[pulumi.Input[str]]=<none> friendly_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none><block_start>""" The set of arguments for constructing a Workspace resource. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. :param pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. :param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. """<line_sep>pulumi.set(__self__ "resource_group_name" resource_group_name)<if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>friendly_name<is><not><none><block_start>pulumi.set(__self__ "friendly_name" friendly_name)<block_end><if_stmt>location<is><not><none><block_start>pulumi.set(__self__ "location" location)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>tags<is><not><none><block_start>pulumi.set(__self__ "tags" tags)<block_end><block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Input[str]<block_start>""" The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. """<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:pulumi.Input[str]<block_start>pulumi.set(self "resource_group_name" value)<block_end>@property@pulumi.getter<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>""" A description for the Virtual Desktop Workspace. """<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end>@property@pulumi.getter(name="friendlyName")<def_stmt>friendly_name self<arrow>Optional[pulumi.Input[str]]<block_start>""" A friendly name for the Virtual Desktop Workspace. 
"""<line_sep><return>pulumi.get(self "friendly_name")<block_end>@friendly_name.setter<def_stmt>friendly_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "friendly_name" value)<block_end>@property@pulumi.getter<def_stmt>location self<arrow>Optional[pulumi.Input[str]]<block_start>""" The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. """<line_sep><return>pulumi.get(self "location")<block_end>@location.setter<def_stmt>location self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "location" value)<block_end>@property@pulumi.getter<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>""" The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. """<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@property@pulumi.getter<def_stmt>tags self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>""" A mapping of tags to assign to the resource. """<line_sep><return>pulumi.get(self "tags")<block_end>@tags.setter<def_stmt>tags self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "tags" value)<block_end><block_end>@pulumi.input_type<class_stmt>_WorkspaceState<block_start><def_stmt>__init__ __self__ * description:Optional[pulumi.Input[str]]=<none> friendly_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none><block_start>""" Input properties used for looking up and filtering Workspace resources. :param pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. :param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. """<if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>friendly_name<is><not><none><block_start>pulumi.set(__self__ "friendly_name" friendly_name)<block_end><if_stmt>location<is><not><none><block_start>pulumi.set(__self__ "location" location)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>resource_group_name<is><not><none><block_start>pulumi.set(__self__ "resource_group_name" resource_group_name)<block_end><if_stmt>tags<is><not><none><block_start>pulumi.set(__self__ "tags" tags)<block_end><block_end>@property@pulumi.getter<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>""" A description for the Virtual Desktop Workspace. 
"""<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end>@property@pulumi.getter(name="friendlyName")<def_stmt>friendly_name self<arrow>Optional[pulumi.Input[str]]<block_start>""" A friendly name for the Virtual Desktop Workspace. """<line_sep><return>pulumi.get(self "friendly_name")<block_end>@friendly_name.setter<def_stmt>friendly_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "friendly_name" value)<block_end>@property@pulumi.getter<def_stmt>location self<arrow>Optional[pulumi.Input[str]]<block_start>""" The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. """<line_sep><return>pulumi.get(self "location")<block_end>@location.setter<def_stmt>location self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "location" value)<block_end>@property@pulumi.getter<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>""" The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. """<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>Optional[pulumi.Input[str]]<block_start>""" The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. """<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "resource_group_name" value)<block_end>@property@pulumi.getter<def_stmt>tags self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>""" A mapping of tags to assign to the resource. """<line_sep><return>pulumi.get(self "tags")<block_end>@tags.setter<def_stmt>tags self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "tags" value)<block_end><block_end><class_stmt>Workspace(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> description:Optional[pulumi.Input[str]]=<none> friendly_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> __props__=<none><block_start>""" Manages a Virtual Desktop Workspace. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.core.ResourceGroup("example", location="West Europe") workspace = azure.desktopvirtualization.Workspace("workspace", location=example.location, resource_group_name=example.name, friendly_name="FriendlyName", description="A description of my workspace") ``` ## Import Virtual Desktop Workspaces can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. 
:param pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. :param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. """<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:WorkspaceArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>""" Manages a Virtual Desktop Workspace. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.core.ResourceGroup("example", location="West Europe") workspace = azure.desktopvirtualization.Workspace("workspace", location=example.location, resource_group_name=example.name, friendly_name="FriendlyName", description="A description of my workspace") ``` ## Import Virtual Desktop Workspaces can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The name of the resource. :param WorkspaceArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. 
"""<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(WorkspaceArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> description:Optional[pulumi.Input[str]]=<none> friendly_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=WorkspaceArgs.__new__(WorkspaceArgs)<line_sep>__props__.__dict__["description"]=description<line_sep>__props__.__dict__["friendly_name"]=friendly_name<line_sep>__props__.__dict__["location"]=location<line_sep>__props__.__dict__["name"]=name<if_stmt>resource_group_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'resource_group_name'")<block_end>__props__.__dict__["resource_group_name"]=resource_group_name<line_sep>__props__.__dict__["tags"]=tags<block_end>super(Workspace __self__).__init__('azure:desktopvirtualization/workspace:Workspace' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> description:Optional[pulumi.Input[str]]=<none> friendly_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none><arrow>'Workspace'<block_start>""" Get an existing Workspace resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. :param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. 
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. """<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_WorkspaceState.__new__(_WorkspaceState)<line_sep>__props__.__dict__["description"]=description<line_sep>__props__.__dict__["friendly_name"]=friendly_name<line_sep>__props__.__dict__["location"]=location<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["resource_group_name"]=resource_group_name<line_sep>__props__.__dict__["tags"]=tags<line_sep><return>Workspace(resource_name opts=opts __props__=__props__)<block_end>@property@pulumi.getter<def_stmt>description self<arrow>pulumi.Output[Optional[str]]<block_start>""" A description for the Virtual Desktop Workspace. """<line_sep><return>pulumi.get(self "description")<block_end>@property@pulumi.getter(name="friendlyName")<def_stmt>friendly_name self<arrow>pulumi.Output[Optional[str]]<block_start>""" A friendly name for the Virtual Desktop Workspace. """<line_sep><return>pulumi.get(self "friendly_name")<block_end>@property@pulumi.getter<def_stmt>location self<arrow>pulumi.Output[str]<block_start>""" The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. """<line_sep><return>pulumi.get(self "location")<block_end>@property@pulumi.getter<def_stmt>name self<arrow>pulumi.Output[str]<block_start>""" The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. """<line_sep><return>pulumi.get(self "name")<block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Output[str]<block_start>""" The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. """<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@property@pulumi.getter<def_stmt>tags self<arrow>pulumi.Output[Optional[Mapping[str str]]]<block_start>""" A mapping of tags to assign to the resource. """<line_sep><return>pulumi.get(self "tags")<block_end><block_end>
<import_stmt>os<import_stmt>pytest<import_stmt>torch<import_stmt>pytorch_pfn_extras.onnx<as>tou<import_from_stmt>tests.pytorch_pfn_extras_tests.onnx.test_export_testcase Net<line_sep>@pytest.mark.filterwarnings("ignore:Named tensors .* experimental:UserWarning")<def_stmt>test_onnx_load_model <block_start>model=Net()<line_sep>outdir="out/load_model_test"<line_sep>tou.export_testcase(model torch.rand(1 1 28 28) outdir training=<true> do_constant_folding=<false>)<line_sep>tou.load_model(os.path.join(outdir "model.onnx"))<block_end>@pytest.mark.filterwarnings("ignore:.*ONNX contains stripped .*:UserWarning")<def_stmt>test_stripped_onnx_load_model <block_start>model=Net()<line_sep>outdir="out/stripped_load_model_test"<line_sep>tou.export_testcase(model torch.rand(1 1 28 28) outdir strip_large_tensor_data=<true> training=<true> do_constant_folding=<false>)<line_sep>tou.load_model(os.path.join(outdir "model.onnx"))<block_end>
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Sentencize the raw wikitext103."""<import_stmt>tensorflow.compat.v1<as>tf<line_sep>app=tf.app<line_sep>flags=tf.flags<line_sep>gfile=tf.gfile<line_sep>logging=tf.logging<line_sep>flags.DEFINE_string("wiki103_raw" <none> "Path to raw wikitext103 train corpus.")<line_sep>flags.DEFINE_string("output_path" <none> "Path to output the processed dataset.")<line_sep>FLAGS=flags.FLAGS<def_stmt>main _<block_start><with_stmt>open(FLAGS.wiki103_raw "r")<as>f<block_start>data=f.read().strip().split("\n")<block_end>data=[x.split(" . ")<for>x data<if>x.strip()<and>x.strip()[0]<ne>"="]<line_sep>sentences=[]<for_stmt>para data<block_start><for_stmt>sent para<block_start>sentences.append(sent+".")<block_end><block_end>data="\n".join(sentences)<line_sep>data=data.replace(" @.@ " ".").replace(" @-@ " "-").replace(" ," ",")<line_sep>data=data.replace(" \'" "\'").replace(" )" ")").replace("( " "(")<line_sep>data=data.replace(" ;" ";")<line_sep>data="\n".join([x<for>x data.split("\n")<if>len(x.split())<g>3])<line_sep>logging.info("length = %d" len(data.split("\n")))<with_stmt>open(FLAGS.output_path "w")<as>f<block_start>f.write(data)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>app.run(main)<block_end>
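# Hedged usage note, not part of the original script (the script filename and both paths are hypothetical; the two flags come from the flag definitions above and both need values): python sentencize_wiki103.py --wiki103_raw=/path/to/wikitext103.train.txt --output_path=/path/to/wikitext103.sentencized.txt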
<import_from_stmt>.clip_sampler DistributedSampler UniformClipSampler RandomClipSampler<line_sep>__all__=("DistributedSampler" "UniformClipSampler" "RandomClipSampler")<line_sep>
# -*- coding: utf-8 -*- # Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:<EMAIL> # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Utilities for creating VCG and Dot diagrams"""<import_from_stmt>logilab.common.vcgutils VCGPrinter<import_from_stmt>logilab.common.graph DotBackend<import_from_stmt>pylint.pyreverse.utils is_exception<class_stmt>DiagramWriter(object)<block_start>"""base class for writing project diagrams """<def_stmt>__init__ self config styles<block_start>self.config=config<line_sep>self.pkg_edges,self.inh_edges,self.imp_edges,self.ass_edges=styles<line_sep>self.printer=<none><block_end># defined in set_printer <def_stmt>write self diadefs<block_start>"""write files for <project> according to <diadefs> """<for_stmt>diagram diadefs<block_start>basename=diagram.title.strip().replace(' ' '_')<line_sep>file_name='%s.%s'%(basename self.config.output_format)<line_sep>self.set_printer(file_name basename)<if_stmt>diagram.TYPE<eq>'class'<block_start>self.write_classes(diagram)<block_end><else_stmt><block_start>self.write_packages(diagram)<block_end>self.close_graph()<block_end><block_end><def_stmt>write_packages self diagram<block_start>"""write a package diagram"""<line_sep># sorted to get predictable (hence testable) results <for_stmt>i,obj enumerate(sorted(diagram.modules() key=<lambda>x:x.title))<block_start>self.printer.emit_node(i label=self.get_title(obj) shape='box')<line_sep>obj.fig_id=i<block_end># package dependencies <for_stmt>rel diagram.get_relationships('depends')<block_start>self.printer.emit_edge(rel.from_object.fig_id rel.to_object.fig_id **self.pkg_edges)<block_end><block_end><def_stmt>write_classes self diagram<block_start>"""write a class diagram"""<line_sep># sorted to get predictable (hence testable) results <for_stmt>i,obj enumerate(sorted(diagram.objects key=<lambda>x:x.title))<block_start>self.printer.emit_node(i **self.get_values(obj))<line_sep>obj.fig_id=i<block_end># inheritance links <for_stmt>rel diagram.get_relationships('specialization')<block_start>self.printer.emit_edge(rel.from_object.fig_id rel.to_object.fig_id **self.inh_edges)<block_end># implementation links <for_stmt>rel diagram.get_relationships('implements')<block_start>self.printer.emit_edge(rel.from_object.fig_id rel.to_object.fig_id **self.imp_edges)<block_end># generate associations <for_stmt>rel diagram.get_relationships('association')<block_start>self.printer.emit_edge(rel.from_object.fig_id rel.to_object.fig_id label=rel.name **self.ass_edges)<block_end><block_end><def_stmt>set_printer self file_name basename<block_start>"""set printer"""<line_sep><raise>NotImplementedError<block_end><def_stmt>get_title self obj<block_start>"""get project title"""<line_sep><raise>NotImplementedError<block_end><def_stmt>get_values self obj<block_start>"""get label and shape for 
classes."""<line_sep><raise>NotImplementedError<block_end><def_stmt>close_graph self<block_start>"""finalize the graph"""<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>DotWriter(DiagramWriter)<block_start>"""write dot graphs from a diagram definition and a project """<def_stmt>__init__ self config<block_start>styles=[dict(arrowtail='none' arrowhead="open") dict(arrowtail='none' arrowhead='empty') dict(arrowtail='node' arrowhead='empty' style='dashed') dict(fontcolor='green' arrowtail='none' arrowhead='diamond' style='solid') ]<line_sep>DiagramWriter.__init__(self config styles)<block_end><def_stmt>set_printer self file_name basename<block_start>"""initialize DotWriter and add options for layout. """<line_sep>layout=dict(rankdir="BT")<line_sep>self.printer=DotBackend(basename additionnal_param=layout)<line_sep>self.file_name=file_name<block_end><def_stmt>get_title self obj<block_start>"""get project title"""<line_sep><return>obj.title<block_end><def_stmt>get_values self obj<block_start>"""get label and shape for classes. The label contains all attributes and methods """<line_sep>label=obj.title<if_stmt>obj.shape<eq>'interface'<block_start>label=u'«interface»\\n%s'%label<block_end><if_stmt><not>self.config.only_classnames<block_start>label=r'%s|%s\l|'%(label r'\l'.join(obj.attrs))<for_stmt>func obj.methods<block_start>label=r'%s%s()\l'%(label func.name)<block_end>label='{%s}'%label<block_end><if_stmt>is_exception(obj.node)<block_start><return>dict(fontcolor='red' label=label shape='record')<block_end><return>dict(label=label shape='record')<block_end><def_stmt>close_graph self<block_start>"""print the dot graph into <file_name>"""<line_sep>self.printer.generate(self.file_name)<block_end><block_end><class_stmt>VCGWriter(DiagramWriter)<block_start>"""write vcg graphs from a diagram definition and a project """<def_stmt>__init__ self config<block_start>styles=[dict(arrowstyle='solid' backarrowstyle='none' backarrowsize=0) dict(arrowstyle='solid' backarrowstyle='none' backarrowsize=10) dict(arrowstyle='solid' backarrowstyle='none' linestyle='dotted' backarrowsize=10) dict(arrowstyle='solid' backarrowstyle='none' textcolor='green') ]<line_sep>DiagramWriter.__init__(self config styles)<block_end><def_stmt>set_printer self file_name basename<block_start>"""initialize VCGWriter for a UML graph"""<line_sep>self.graph_file=open(file_name 'w+')<line_sep>self.printer=VCGPrinter(self.graph_file)<line_sep>self.printer.open_graph(title=basename layoutalgorithm='dfs' late_edge_labels='yes' port_sharing='no' manhattan_edges='yes')<line_sep>self.printer.emit_node=self.printer.node<line_sep>self.printer.emit_edge=self.printer.edge<block_end><def_stmt>get_title self obj<block_start>"""get project title in vcg format"""<line_sep><return>r'\fb%s\fn'%obj.title<block_end><def_stmt>get_values self obj<block_start>"""get label and shape for classes. 
The label contains all attributes and methods """<if_stmt>is_exception(obj.node)<block_start>label=r'\fb\f09%s\fn'%obj.title<block_end><else_stmt><block_start>label=r'\fb%s\fn'%obj.title<block_end><if_stmt>obj.shape<eq>'interface'<block_start>shape='ellipse'<block_end><else_stmt><block_start>shape='box'<block_end><if_stmt><not>self.config.only_classnames<block_start>attrs=obj.attrs<line_sep>methods=[func.name<for>func obj.methods]<line_sep># box width for UML like diagram maxlen=max(len(name)<for>name [obj.title]+methods+attrs)<line_sep>line='_'<times>(maxlen+2)<line_sep>label=r'%s\n\f%s'%(label line)<for_stmt>attr attrs<block_start>label=r'%s\n\f08%s'%(label attr)<block_end><if_stmt>attrs<block_start>label=r'%s\n\f%s'%(label line)<block_end><for_stmt>func methods<block_start>label=r'%s\n\f10%s()'%(label func)<block_end><block_end><return>dict(label=label shape=shape)<block_end><def_stmt>close_graph self<block_start>"""close graph and file"""<line_sep>self.printer.close_graph()<line_sep>self.graph_file.close()<block_end><block_end>
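# Hedged usage sketch, not part of the original module ('_example_write_dot' and '_FakeConfig' are hypothetical; 'diadefs' is assumed to be the list of diagram definitions normally produced by pyreverse): the writers only need a config object exposing 'output_format' and 'only_classnames'. <def_stmt>_example_write_dot diadefs<block_start><class_stmt>_FakeConfig(object)<block_start>output_format='dot'<line_sep>only_classnames=<true><block_end>writer=DotWriter(_FakeConfig())<line_sep>writer.write(diadefs)<block_end># write() emits one '<diagram title>.dot' file per diagram definition.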
<import_from_stmt>typing Dict List Any<import_from_stmt>..df.types Definition<import_from_stmt>..df.base op<import_from_stmt>..util.data traverse_get<line_sep>MAPPING=Definition(name="mapping" primitive="map")<line_sep>MAPPING_TRAVERSE=Definition(name="mapping_traverse" primitive="List[str]")<line_sep>MAPPING_KEY=Definition(name="key" primitive="str")<line_sep>MAPPING_VALUE=Definition(name="value" primitive="generic")<line_sep>@op(name="dffml.mapping.extract" inputs={"mapping":MAPPING "traverse":MAPPING_TRAVERSE} outputs={"value":MAPPING_VALUE} )<def_stmt>mapping_extract_value mapping:Dict[str Any] traverse:List[str]<block_start>""" Extracts value from a given mapping. Parameters ---------- mapping : dict The mapping to extract the value from. traverse : list[str] A list of keys to traverse through the mapping dictionary and extract the values. Returns ------- dict A dictionary containing the value of the keys. Examples -------- >>> import asyncio >>> from dffml import * >>> >>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle) >>> >>> dataflow.seed.append( ... Input( ... value=[mapping_extract_value.op.outputs["value"].name], ... definition=GetSingle.op.inputs["spec"], ... ) ... ) >>> inputs = [ ... Input( ... value={"key1": {"key2": 42}}, ... definition=mapping_extract_value.op.inputs["mapping"], ... ), ... Input( ... value=["key1", "key2"], ... definition=mapping_extract_value.op.inputs["traverse"], ... ), ... ] >>> >>> async def main(): ... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'value': 42} """<line_sep><return>{"value":traverse_get(mapping *traverse)}<block_end>@op(name="dffml.mapping.create" inputs={"key":MAPPING_KEY "value":MAPPING_VALUE} outputs={"mapping":MAPPING} )<def_stmt>create_mapping key:str value:Any<block_start>""" Creates a mapping of a given key and value. Parameters ---------- key : str The key for the mapping. value : Any The value for the mapping. Returns ------- dict A dictionary containing the mapping created. Examples -------- >>> import asyncio >>> from dffml import * >>> >>> dataflow = DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append( ... Input( ... value=[create_mapping.op.outputs["mapping"].name], ... definition=GetSingle.op.inputs["spec"], ... ) ... ) >>> inputs = [ ... Input( ... value="key1", definition=create_mapping.op.inputs["key"], ... ), ... Input( ... value=42, definition=create_mapping.op.inputs["value"], ... ), ... ] >>> >>> async def main(): ... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'mapping': {'key1': 42}} """<line_sep><return>{"mapping":{key:value}}<block_end>
"""Scraper for Supreme Court of U.S. CourtID: scotus Court Short Name: scotus History: - 2014-07-20 - Created by <NAME>, reviewed by MLR - 2017-10-09 - Updated by MLR. """<import_from_stmt>datetime datetime<import_from_stmt>juriscraper.OralArgumentSite OralArgumentSite<class_stmt>Site(OralArgumentSite)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super(Site self).__init__(*args **kwargs)<line_sep>self.court_id=self.__module__<line_sep>self.url=("http://www.supremecourt.gov/oral_arguments/argument_audio.aspx")<line_sep>self.back_scrape_iterable=list(range(2010 2015))<block_end><def_stmt>_get_download_urls self<block_start>path="id('list')//tr//a/text()"<line_sep><return>list(map(self._return_download_url self.html.xpath(path)))<block_end>@staticmethod<def_stmt>_return_download_url d<block_start>file_type="mp3"# or 'wma' is also available for any case. download_url="http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}".format(type=file_type docket_number=d)<line_sep><return>download_url<block_end><def_stmt>_get_case_names self<block_start>path="id('list')//tr/td/span/text()"<line_sep><return>[s.lstrip(". ")<for>s self.html.xpath(path)]<block_end><def_stmt>_get_case_dates self<block_start>path="id('list')//tr/td[2]//text()"<line_sep><return>[datetime.strptime(s "%m/%d/%y").date()<for>s self.html.xpath(path)<if><not>"Date"<in>s]<block_end><def_stmt>_get_docket_numbers self<block_start>path="id('list')//tr//a/text()"<line_sep><return>list(self.html.xpath(path))<block_end><def_stmt>_download_backwards self year<block_start>self.url=("http://www.supremecourt.gov/oral_arguments/argument_audio/%s"%year)<line_sep>self.html=self._download()<block_end><block_end>
<import_stmt>unittest<import_stmt>base<class_stmt>Test(base.BaseScriptTest unittest.TestCase)<block_start>command_line="./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8."<line_sep>input_stdin=base.TestFile(filename="./test_data/maf_tests/dcking_ghp074.bed")<line_sep>output_stdout=base.TestFile(filename="./test_data/maf_tests/dcking_ghp074.maf")<block_end>
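# Hedged note, not part of the original test module: base.BaseScriptTest is assumed to run 'command_line' with 'input_stdin' piped in and to compare stdout against 'output_stdout', i.e. roughly: ./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8. < ./test_data/maf_tests/dcking_ghp074.bed | diff - ./test_data/maf_tests/dcking_ghp074.maf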
<import_stmt>svhn2mnist<import_stmt>usps<import_stmt>syn2gtrsb<import_stmt>syndig2svhn<def_stmt>Generator source target pixelda=<false><block_start><if_stmt>source<eq>'usps'<or>target<eq>'usps'<block_start><return>usps.Feature()<block_end><elif_stmt>source<eq>'svhn'<block_start><return>svhn2mnist.Feature()<block_end><elif_stmt>source<eq>'synth'<block_start><return>syn2gtrsb.Feature()<block_end><block_end><def_stmt>Classifier source target<block_start><if_stmt>source<eq>'usps'<or>target<eq>'usps'<block_start><return>usps.Predictor()<block_end><if_stmt>source<eq>'svhn'<block_start><return>svhn2mnist.Predictor()<block_end><if_stmt>source<eq>'synth'<block_start><return>syn2gtrsb.Predictor()<block_end><block_end>
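# Hedged usage sketch, not part of the original module (the function name and the 'svhn' -> 'mnist' pair are hypothetical examples): both factories dispatch on the dataset names and return freshly constructed model objects. <def_stmt>_example_build_svhn_models <block_start>G=Generator('svhn' 'mnist')<line_sep>C=Classifier('svhn' 'mnist')<line_sep><return>G C<block_end># Here G is svhn2mnist.Feature() and C is svhn2mnist.Predictor(); dataset pairs not handled above fall through and the factories return None.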
# Lint as: python3 # coding=utf-8 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Split data into train, validation and test dataset according to person. That is, use some people's data as train, some other people's data as validation, and the rest ones' data as test. These data would be saved separately under "/person_split". It will generate new files with the following structure: ├──person_split │   ├── test │   ├── train │   └──valid """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>random<import_from_stmt>data_split read_data<import_from_stmt>data_split write_data<def_stmt>person_split whole_data train_names valid_names test_names<block_start>"""Split data by person."""<line_sep>random.seed(30)<line_sep>random.shuffle(whole_data)<line_sep>train_data=[]<line_sep>valid_data=[]<line_sep>test_data=[]<for_stmt>idx,data enumerate(whole_data)# pylint: disable=unused-variable <block_start><if_stmt>data["name"]<in>train_names<block_start>train_data.append(data)<block_end><elif_stmt>data["name"]<in>valid_names<block_start>valid_data.append(data)<block_end><elif_stmt>data["name"]<in>test_names<block_start>test_data.append(data)<block_end><block_end>print("train_length:"+str(len(train_data)))<line_sep>print("valid_length:"+str(len(valid_data)))<line_sep>print("test_length:"+str(len(test_data)))<line_sep><return>train_data valid_data test_data<block_end><if_stmt>__name__<eq>"__main__"<block_start>data=read_data("./data/complete_data")<line_sep>train_names=["hyw" "shiyun" "tangsy" "dengyl" "jiangyh" "xunkai" "negative3" "negative4" "negative5" "negative6"]<line_sep>valid_names=["lsj" "pengxl" "negative2" "negative7"]<line_sep>test_names=["liucx" "zhangxy" "negative1" "negative8"]<line_sep>train_data,valid_data,test_data=person_split(data train_names valid_names test_names)<if_stmt><not>os.path.exists("./person_split")<block_start>os.makedirs("./person_split")<block_end>write_data(train_data "./person_split/train")<line_sep>write_data(valid_data "./person_split/valid")<line_sep>write_data(test_data "./person_split/test")<block_end>
<import_from_stmt>datetime datetime<import_from_stmt>kubernetes client<import_from_stmt>kubernetes.client.rest ApiException<import_stmt>os<import_stmt>time<import_stmt>yaml<import_from_stmt>tests config<as>conf<import_stmt>tests.utils<as>ut<def_stmt>remove_clusterrole_binding shipper_name crb_name# remove clusterrolebind <block_start>k8s_client=client.RbacAuthorizationV1Api()<try_stmt><block_start>k8s_client.delete_cluster_role_binding(crb_name)<line_sep>print(f"\nsuccessfully deleted: {crb_name}")<block_end><except_stmt>Exception<as>e<block_start>print(f"\n{shipper_name} cluster role binding deletion has failed, please manually delete {crb_name}:")<line_sep>print(f"kubectl delete clusterrolebinding {crb_name}")<block_end><block_end><def_stmt>filebeat_teardown namespace# remove clusterrolebind # TODO: find a solution for sharing the name both here and in the kube object <block_start>crb_name=f"filebeat-cluster-role-binding-{namespace}"<line_sep>remove_clusterrole_binding("filebeat" crb_name)<block_end><def_stmt>fluent_bit_teardown namespace# remove clusterrolebind # TODO: find a solution for sharing the name both here and in the kube object <block_start>crb_name=f"fluent-bit-clusterrole-binding-{namespace}"<line_sep>remove_clusterrole_binding("fluent-bit" crb_name)<block_end><def_stmt>add_elastic_cluster namespace<block_start>print("\nDeploying ElasticSearch\n")<line_sep>add_deployment_dir(namespace conf.ELASTIC_CONF_DIR)<block_end><def_stmt>add_filebeat_cluster namespace<block_start>print("\nDeploying FileBeat\n")<line_sep>add_deployment_dir(namespace conf.FILEBEAT_CONF_DIR)<block_end><def_stmt>add_fluent_bit_cluster namespace<block_start>print("\nDeploying Fluent-bit\n")<line_sep>add_deployment_dir(namespace conf.FLUENT_BIT_CONF_DIR)<block_end><def_stmt>add_kibana_cluster namespace<block_start>print("\nDeploying Kibana\n")<line_sep>add_deployment_dir(namespace conf.KIBANA_CONF_DIR)<block_end><def_stmt>add_logstash_cluster namespace<block_start>print("\nDeploying LogStash\n")<line_sep>add_deployment_dir(namespace conf.LOGSTASH_CONF_DIR)<block_end><def_stmt>add_deployment_dir namespace dir_path delete=<false><block_start><with_stmt>open(os.path.join(dir_path 'dep_order.txt'))<as>f<block_start>dep_order=f.readline()<line_sep>dep_lst=[x.strip()<for>x dep_order.split(',')]<line_sep>print(dep_lst)<block_end>phrases_to_replace=["(?<!_)NAMESPACE" "REP_ES_USER" "REP_ES_PASS"]<line_sep>values_for_replacement=[namespace conf.ES_USER_LOCAL conf.ES_PASS_LOCAL]<for_stmt>filename dep_lst# replace all phrases with the actual values if exists <block_start>modified_file_path,is_change=ut.duplicate_file_and_replace_phrases(dir_path filename f"{namespace}_{filename}" phrases_to_replace values_for_replacement)<line_sep>print(f"applying file: {filename}")<with_stmt>open(modified_file_path)<as>f<block_start>dep=yaml.safe_load(f)<if_stmt>modified_file_path<ne>os.path.join(dir_path filename)<and>is_change# remove modified file <block_start>ut.delete_file(modified_file_path)<block_end>name=dep["metadata"]["name"]<if_stmt>dep['kind']<eq>'StatefulSet'<block_start>k8s_client=client.AppsV1Api()<if_stmt><not>delete<block_start>k8s_client.create_namespaced_stateful_set(body=dep namespace=namespace)<block_end><else_stmt><block_start>k8s_client.delete_namespaced_stateful_set(name=name namespace=namespace)<block_end><block_end><elif_stmt>dep['kind']<eq>'DaemonSet'<block_start>k8s_client=client.AppsV1Api()<line_sep>k8s_client.create_namespaced_daemon_set(body=dep 
namespace=namespace)<block_end><elif_stmt>dep['kind']<eq>'Deployment'<block_start>k8s_client=client.AppsV1Api()<line_sep>k8s_client.create_namespaced_deployment(body=dep namespace=namespace)<block_end><elif_stmt>dep['kind']<eq>'Service'<block_start><try_stmt><block_start>k8s_client=client.CoreV1Api()<line_sep>k8s_client.create_namespaced_service(body=dep namespace=namespace)<block_end><except_stmt>ApiException<as>e<block_start><if_stmt>e.status<eq>409<block_start>print(f"Service exists: {dep['metadata']['name']}")<line_sep><continue><block_end><raise>e<block_end><block_end><elif_stmt>dep['kind']<eq>'PodDisruptionBudget'<block_start>k8s_client=client.PolicyV1beta1Api()<line_sep>k8s_client.create_namespaced_pod_disruption_budget(body=dep namespace=namespace)<block_end><elif_stmt>dep["kind"]<eq>'Role'<block_start>k8s_client=client.RbacAuthorizationV1Api()<line_sep>k8s_client.create_namespaced_role(body=dep namespace=namespace)<block_end><elif_stmt>dep["kind"]<eq>'ClusterRole'<block_start><try_stmt><block_start>k8s_client=client.RbacAuthorizationV1Api()<line_sep>k8s_client.create_cluster_role(body=dep)<block_end><except_stmt>ApiException<as>e<block_start><if_stmt>e.status<eq>409<block_start>print(f"cluster role already exists")<line_sep><continue><block_end><raise>e<block_end><block_end><elif_stmt>dep["kind"]<eq>'RoleBinding'<block_start>k8s_client=client.RbacAuthorizationV1Api()<line_sep>dep["subjects"][0]["namespace"]=namespace<line_sep>k8s_client.create_namespaced_role_binding(body=dep namespace=namespace)<block_end><elif_stmt>dep["kind"]<eq>'ClusterRoleBinding'<block_start>k8s_client=client.RbacAuthorizationV1Api()<try_stmt><block_start>k8s_client.create_cluster_role_binding(body=dep)<block_end><except_stmt>ApiException<as>e<block_start><if_stmt>e.status<eq>409<block_start>print(f"cluster role binding already exists")<line_sep><continue><block_end><raise>e<block_end><block_end><elif_stmt>dep["kind"]<eq>'ConfigMap'<block_start>k8s_client=client.CoreV1Api()<line_sep>k8s_client.create_namespaced_config_map(body=dep namespace=namespace)<block_end><elif_stmt>dep["kind"]<eq>'ServiceAccount'<block_start>k8s_client=client.CoreV1Api()<line_sep>k8s_client.create_namespaced_service_account(body=dep namespace=namespace)<block_end><block_end><block_end>print("\nDone\n")<block_end><def_stmt>remove_deployment_dir namespace dir_path<block_start><with_stmt>open(os.path.join(dir_path 'dep_order.txt'))<as>f<block_start>dep_order=f.readline()<line_sep>dep_lst=[x.strip()<for>x dep_order.split(',')]<line_sep>print(dep_lst)<block_end><for_stmt>filename dep_lst<block_start>print(f"deleting {filename}")<with_stmt>open(os.path.join(dir_path filename))<as>f<block_start>dep=yaml.safe_load(f)<line_sep>name=dep["metadata"]["name"]<if_stmt>dep['kind']<eq>'StatefulSet'<block_start>k8s_client=client.AppsV1Api()<line_sep>k8s_client.delete_namespaced_stateful_set(name=name namespace=namespace)<block_end><elif_stmt>dep['kind']<eq>'DaemonSet'<block_start>k8s_client=client.AppsV1Api()<line_sep>k8s_client.delete_namespaced_daemon_set(name=name namespace=namespace)<block_end><elif_stmt>dep['kind']<eq>'Deployment'<block_start>k8s_client=client.AppsV1Api()<line_sep>k8s_client.delete_namespaced_deployment(name=name namespace=namespace)<block_end><elif_stmt>dep['kind']<eq>'Service'<block_start>k8s_client=client.CoreV1Api()<line_sep>k8s_client.delete_namespaced_service(name=name namespace=namespace 
grace_period_seconds=0)<line_sep>delete_func=k8s_client.delete_namespaced_service<line_sep>list_func=k8s_client.list_namespaced_service<line_sep>wait_for_namespaced_deletion(name namespace delete_func list_func)<block_end><elif_stmt>dep['kind']<eq>'PodDisruptionBudget'<block_start>k8s_client=client.PolicyV1beta1Api()<line_sep>k8s_client.delete_namespaced_pod_disruption_budget(name=name namespace=namespace)<block_end><elif_stmt>dep["kind"]<eq>'Role'<block_start>k8s_client=client.RbacAuthorizationV1Api()<line_sep>k8s_client.delete_namespaced_role(name=name namespace=namespace)<block_end><elif_stmt>dep["kind"]<eq>'RoleBinding'<block_start>k8s_client=client.RbacAuthorizationV1Api()<line_sep>k8s_client.delete_namespaced_role_binding(name=name namespace=namespace)<block_end><elif_stmt>dep["kind"]<eq>'ClusterRoleBinding'<block_start>k8s_client=client.RbacAuthorizationV1Api()<line_sep>k8s_client.delete_cluster_role_binding(name=name)<block_end><elif_stmt>dep["kind"]<eq>'ConfigMap'<block_start>k8s_client=client.CoreV1Api()<line_sep>k8s_client.delete_namespaced_config_map(name=name namespace=namespace)<block_end><elif_stmt>dep["kind"]<eq>'ServiceAccount'<block_start>k8s_client=client.CoreV1Api()<line_sep>k8s_client.delete_namespaced_service_account(name=name namespace=namespace)<block_end><block_end><block_end>print("\nDone\n")<block_end><def_stmt>wait_for_namespaced_deletion name namespace deletion_func list_func timeout=15<block_start>deleted=<false><line_sep>orig_timeout=timeout<while_stmt><not>deleted# find by name and delete requested item <block_start><for_stmt>item list_func(namespace).items<block_start><if_stmt>item.metadata.name<eq>name<block_start><if_stmt>timeout<l>0<block_start><raise>TimeoutError(f"{orig_timeout} was not enough for deleting item:\n{item}\n")<block_end>deletion_func(name=name namespace=namespace)<line_sep>print(f"service {name} was not deleted, retrying")<line_sep>time.sleep(1)<line_sep>timeout<augsub>1<block_end><block_end># validate item was deleted <for_stmt>item list_func(namespace).items<block_start>deleted=<true><if_stmt>item.metadata.name<eq>name<block_start>deleted=<false><block_end><block_end><block_end><return>deleted<block_end><def_stmt>wait_for_daemonset_to_be_ready name namespace timeout=<none><block_start>wait_for_to_be_ready("daemonset" name namespace timeout=timeout)<block_end><def_stmt>resolve_read_status_func obj_name<block_start><if_stmt>obj_name<eq>"daemonset"<block_start><return>client.AppsV1Api().read_namespaced_daemon_set_status<block_end><else_stmt><block_start><raise>ValueError(f"resolve_read_status_func: {obj_name} is not a valid value")<block_end><block_end><def_stmt>wait_for_to_be_ready obj_name name namespace timeout=<none><block_start>start=datetime.now()<while_stmt><true><block_start>read_func=resolve_read_status_func(obj_name)<line_sep>resp=read_func(name=name namespace=namespace)<line_sep>total_sleep_time=(datetime.now()-start).total_seconds()<line_sep>number_ready=resp.status.number_ready<line_sep>updated_number_scheduled=resp.status.updated_number_scheduled<if_stmt>number_ready<and>updated_number_scheduled<and>number_ready<eq>updated_number_scheduled<block_start>print("Total time waiting for {3} {0} [size: {1}]: {2} sec".format(name number_ready total_sleep_time obj_name))<line_sep><break><block_end>print("{0}/{1} pods ready {2} sec ".format(number_ready updated_number_scheduled total_sleep_time) end="\r")<line_sep>time.sleep(1)<if_stmt>timeout<and>total_sleep_time<g>timeout<block_start><raise>Exception(f"Timeout waiting for {obj_name} 
to be ready")<block_end><block_end><block_end>
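For context, a minimal sketch of how the deletion helper above could be driven against a cluster; loading a local kubeconfig, the "logging" namespace, and the "elasticsearch" service name are assumptions added for the example, not part of the original module.

from kubernetes import client, config

config.load_kube_config()  # assumes a reachable cluster and a local kubeconfig
core_v1 = client.CoreV1Api()

# Retries deletion of the Service until it disappears or a TimeoutError is raised.
wait_for_namespaced_deletion(
    name="elasticsearch",                         # hypothetical service name
    namespace="logging",                          # hypothetical namespace
    deletion_func=core_v1.delete_namespaced_service,
    list_func=core_v1.list_namespaced_service,
    timeout=30,
)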
<import_stmt>json<import_stmt>inspect<import_stmt>hashlib<import_from_stmt>_plotly_utils.utils PlotlyJSONEncoder<import_from_stmt>dash.long_callback.managers BaseLongCallbackManager<class_stmt>CeleryLongCallbackManager(BaseLongCallbackManager)<block_start><def_stmt>__init__ self celery_app cache_by=<none> expire=<none><block_start>""" Long callback manager that runs callback logic on a celery task queue, and stores results using a celery result backend. :param celery_app: A celery.Celery application instance that must be configured with a result backend. See the celery documentation for information on configuration options. :param cache_by: A list of zero-argument functions. When provided, caching is enabled and the return values of these functions are combined with the callback function's input arguments and source code to generate cache keys. :param expire: If provided, a cache entry will be removed when it has not been accessed for ``expire`` seconds. If not provided, the lifetime of cache entries is determined by the default behavior of the celery result backend. """<try_stmt><block_start><import_stmt>celery# pylint: disable=import-outside-toplevel,import-error <import_from_stmt>celery.backends.base # pylint: disable=import-outside-toplevel,import-error DisabledBackend <block_end><except_stmt>ImportError<as>missing_imports<block_start><raise>ImportError("""\ CeleryLongCallbackManager requires extra dependencies which can be installed doing $ pip install "dash[celery]"\n""")<from>missing_imports<block_end><if_stmt><not>isinstance(celery_app celery.Celery)<block_start><raise>ValueError("First argument must be a celery.Celery object")<block_end><if_stmt>isinstance(celery_app.backend DisabledBackend)<block_start><raise>ValueError("Celery instance must be configured with a result backend")<block_end>super().__init__(cache_by)<line_sep>self.handle=celery_app<line_sep>self.expire=expire<block_end><def_stmt>terminate_job self job<block_start><if_stmt>job<is><none><block_start><return><block_end>self.handle.control.terminate(job)<block_end><def_stmt>terminate_unhealthy_job self job<block_start>task=self.get_task(job)<if_stmt>task<and>task.status<in>("FAILURE" "REVOKED")<block_start><return>self.terminate_job(job)<block_end><return><false><block_end><def_stmt>job_running self job<block_start>future=self.get_task(job)<line_sep><return>future<and>future.status<in>("PENDING" "RECEIVED" "STARTED" "RETRY" "PROGRESS" )<block_end><def_stmt>make_job_fn self fn progress args_deps<block_start><return>_make_job_fn(fn self.handle progress args_deps)<block_end><def_stmt>get_task self job<block_start><if_stmt>job<block_start><return>self.handle.AsyncResult(job)<block_end><return><none><block_end><def_stmt>clear_cache_entry self key<block_start>self.handle.backend.delete(key)<block_end><def_stmt>call_job_fn self key job_fn args<block_start>task=job_fn.delay(key self._make_progress_key(key) args)<line_sep><return>task.task_id<block_end><def_stmt>get_progress self key<block_start>progress_key=self._make_progress_key(key)<line_sep>progress_data=self.handle.backend.get(progress_key)<if_stmt>progress_data<block_start><return>json.loads(progress_data)<block_end><return><none><block_end><def_stmt>result_ready self key<block_start><return>self.handle.backend.get(key)<is><not><none><block_end><def_stmt>get_result self key job# Get result value <block_start>result=self.handle.backend.get(key)<if_stmt>result<is><none><block_start><return><none><block_end>result=json.loads(result)<line_sep># Clear result if not caching 
<if_stmt>self.cache_by<is><none><block_start>self.clear_cache_entry(key)<block_end><else_stmt><block_start><if_stmt>self.expire# Set/update expiration time <block_start>self.handle.backend.expire(key self.expire)<block_end><block_end>self.clear_cache_entry(self._make_progress_key(key))<line_sep>self.terminate_job(job)<line_sep><return>result<block_end><block_end><def_stmt>_make_job_fn fn celery_app progress args_deps<block_start>cache=celery_app.backend<line_sep># Hash function source and module to create a unique (but stable) celery task name fn_source=inspect.getsource(fn)<line_sep>fn_str=fn_source<line_sep>fn_hash=hashlib.sha1(fn_str.encode("utf-8")).hexdigest()<line_sep>@celery_app.task(name=f"long_callback_{fn_hash}")<def_stmt>job_fn result_key progress_key user_callback_args fn=fn<block_start><def_stmt>_set_progress progress_value<block_start>cache.set(progress_key json.dumps(progress_value cls=PlotlyJSONEncoder))<block_end>maybe_progress=[_set_progress]<if>progress<else>[]<if_stmt>isinstance(args_deps dict)<block_start>user_callback_output=fn(*maybe_progress **user_callback_args)<block_end><elif_stmt>isinstance(args_deps (list tuple))<block_start>user_callback_output=fn(*maybe_progress *user_callback_args)<block_end><else_stmt><block_start>user_callback_output=fn(*maybe_progress user_callback_args)<block_end>cache.set(result_key json.dumps(user_callback_output cls=PlotlyJSONEncoder))<block_end><return>job_fn<block_end>
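A minimal wiring sketch for the manager above, following the documented Dash long_callback pattern; the Redis broker and result-backend URLs are placeholder assumptions.

import dash
from celery import Celery
from dash.long_callback import CeleryLongCallbackManager

celery_app = Celery(
    __name__,
    broker="redis://localhost:6379/0",   # assumed broker URL
    backend="redis://localhost:6379/1",  # a result backend is required by the manager
)
long_callback_manager = CeleryLongCallbackManager(celery_app, expire=60)

app = dash.Dash(__name__, long_callback_manager=long_callback_manager)
# @app.long_callback(...) handlers registered on `app` will now run on the Celery queue.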
# Generated by Django 3.0.4 on 2020-04-06 09:56 <import_from_stmt>django.db migrations<import_from_stmt>saleor.order OrderStatus<def_stmt>match_orders_with_users apps *_args **_kwargs<block_start>Order=apps.get_model("order" "Order")<line_sep>User=apps.get_model("account" "User")<line_sep>orders_without_user=Order.objects.filter(user_email__isnull=<false> user=<none>).exclude(status=OrderStatus.DRAFT)<for_stmt>order orders_without_user<block_start><try_stmt><block_start>new_user=User.objects.get(email=order.user_email)<block_end><except_stmt>User.DoesNotExist<block_start><continue><block_end>order.user=new_user<line_sep>order.save(update_fields=["user"])<block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("order" "0080_invoice") ]<line_sep>operations=[migrations.RunPython(match_orders_with_users) ]<block_end>
""" Copyright 2019 Samsung SDS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_from_stmt>brightics.common.utils check_required_parameters<import_from_stmt>brightics.common.exception BrighticsFunctionException<import_from_stmt>.data regex_format_dict<import_stmt>re<def_stmt>regex table **params<block_start>check_required_parameters(_regex params ['table'])<line_sep><return>_regex(table **params)<block_end><def_stmt>_regex table input_cols transformation_mode='extract' find_mode='all' pattern='' user_dict_pattern='' custom_pattern='' replacement_string='' user_dict=<none><block_start>out_table=table.copy()<line_sep>pattern_dict=regex_format_dict.pattern_dict<line_sep>user_pattern_dict={}<if_stmt>user_dict<is><not><none><block_start>user_patterns=user_dict.values<for_stmt>user_pattern user_patterns<block_start>user_pattern_name=user_pattern[0]<line_sep>user_pattern_content=user_pattern[1]<line_sep>user_pattern_dict[user_pattern_name]=user_pattern_dict.get(user_pattern_name [])+[user_pattern_content]<block_end><block_end>user_pattern_dict={key:r'|'.join(value)<for>key,value user_pattern_dict.items()}<if_stmt>pattern<eq>''<block_start><raise>BrighticsFunctionException.from_errors([{'0100':"Please choose a pattern."}])<block_end><if_stmt>pattern<eq>'custom'<block_start>raw_pattern=custom_pattern<block_end><elif_stmt>pattern<eq>'user_dictionary'<block_start>raw_pattern=user_pattern_dict.get(user_dict_pattern)<if_stmt>raw_pattern<is><none><block_start><raise>BrighticsFunctionException.from_errors([{'0100':user_dict_pattern+" is not a valid pattern name in the user dictionary."}])<block_end><block_end><else_stmt><block_start>raw_pattern=pattern_dict.get(pattern)<block_end>regex_pattern=re.compile(raw_pattern)<def_stmt>transformation text<block_start><if_stmt>transformation_mode<eq>'extract'<block_start><if_stmt>find_mode<eq>'first'<block_start>result=regex_pattern.search(text)<if_stmt>result<is><none><block_start><return>""<block_end><else_stmt><block_start><return>result.group()<block_end><block_end><else_stmt># find_mode == 'all' <block_start><return>regex_pattern.findall(text)<block_end><block_end><elif_stmt>transformation_mode<eq>'replace'<block_start><if_stmt>find_mode<eq>'first'<block_start><return>regex_pattern.sub(replacement_string text 1)<block_end><else_stmt># find_mode == 'all' <block_start><return>regex_pattern.sub(replacement_string text)<block_end><block_end><elif_stmt>transformation_mode<eq>'remove'<block_start><if_stmt>find_mode<eq>'first'<block_start><return>regex_pattern.sub("" text 1)<block_end><else_stmt># find_mode == 'all' <block_start><return>regex_pattern.sub("" text)<block_end><block_end><else_stmt># transformation_mode == 'split' <block_start><if_stmt>find_mode<eq>'first'<block_start><return>regex_pattern.split(text 1)<block_end><else_stmt># find_mode == 'all' <block_start><return>regex_pattern.split(text)<block_end><block_end><block_end><for_stmt>col 
input_cols<block_start>result_col=table[col].apply(transformation)<line_sep>out_table['regex_'+col]=result_col<block_end><return>{'out_table':out_table}<block_end>
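To illustrate the extract mode above, a small sketch over a pandas frame; the column name and custom pattern are invented for the example, and it assumes the module is importable inside a Brightics environment.

import pandas as pd

df = pd.DataFrame({'text': ['order 123 shipped on 2020-04-06', 'no digits here']})
result = regex(table=df,
               input_cols=['text'],
               transformation_mode='extract',
               find_mode='all',
               pattern='custom',
               custom_pattern=r'\d+')
out = result['out_table']  # gains a 'regex_text' column holding the matched digit runs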
# import Kratos <import_stmt>KratosMultiphysics<import_stmt>KratosMultiphysics.StructuralMechanicsApplication<as>StructuralMechanicsApplication<import_stmt>KratosMultiphysics.CSharpWrapperApplication<as>CSharpWrapperApplication<import_stmt>run_cpp_unit_tests<line_sep># Import Kratos "wrapper" for unittests <import_stmt>KratosMultiphysics.KratosUnittest<as>KratosUnittest<line_sep># Import subprocess <import_stmt>subprocess<line_sep># Using kratos_utilities <import_stmt>KratosMultiphysics.kratos_utilities<as>kratos_utilities<if_stmt>kratos_utilities.CheckIfApplicationsAvailable("ExternalSolversApplication")<block_start>has_external_solvers_application=<true><block_end><else_stmt><block_start>has_external_solvers_application=<false><block_end># Import the tests or test_classes to create the suites ## SMALL TESTS ## NIGHTLY TESTS ## VALIDATION TESTS <def_stmt>AssembleTestSuites <block_start>''' Populates the test suites to run. At least, it should populate the suites: "small", "nightly" and "all" Return ------ suites: A dictionary of suites The set of suites with its test_cases added. '''<line_sep>suites=KratosUnittest.KratosSuites<line_sep># Create a test suite with the selected tests (Small tests): smallSuite=suites['small']<line_sep># Create a test suite with the selected tests plus all small tests nightlySuite=suites['nightly']<line_sep>### BEGIN SMALL SUITE ### ### END SMALL SUITE ### ### BEGIN NIGHTLY SUITE ### ### END NIGHTLY SUITE ### ### BEGIN VALIDATION SUITE ### # For very long tests that should not be in nightly and that you can use to validate validationSuite=suites['validation']<line_sep>validationSuite.addTests(nightlySuite)<line_sep>### END VALIDATION ### # Create a test suite that contains all the tests: allSuite=suites['all']<line_sep>allSuite.addTests(nightlySuite)# Already contains the smallSuite validationSuite.addTests(allSuite)# Validation contains all # Manual list for debugging #allSuite.addTests( #KratosUnittest.TestLoader().loadTestsFromTestCases([ #### STANDALONE #### SMALL #### NIGHTLY #### VALIDATION #]) #) <return>suites<block_end><if_stmt>__name__<eq>'__main__'<block_start>KratosMultiphysics.Logger.PrintInfo("Unittests" "\nRunning cpp unit tests ...")<line_sep>run_cpp_unit_tests.run()<line_sep>KratosMultiphysics.Logger.PrintInfo("Unittests" "Finished running cpp unit tests!")<line_sep>KratosMultiphysics.Logger.PrintInfo("Unittests" "\nRunning python tests ...")<line_sep>KratosUnittest.runTests(AssembleTestSuites())<line_sep>KratosMultiphysics.Logger.PrintInfo("Unittests" "Finished python tests!")<block_end>
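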
<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>numpy<as>np<import_from_stmt>joblib Parallel delayed<import_from_stmt>tensorflow.keras.utils get_file<import_from_stmt>tqdm tqdm<import_from_stmt>spektral.data Dataset Graph<import_from_stmt>spektral.utils label_to_one_hot sparse<import_from_stmt>spektral.utils.io load_csv load_sdf<line_sep>ATOM_TYPES=[1 6 7 8 9]<line_sep>BOND_TYPES=[1 2 3 4]<class_stmt>QM9(Dataset)<block_start>""" The QM9 chemical data set of small molecules. In this dataset, nodes represent atoms and edges represent chemical bonds. There are 5 possible atom types (H, C, N, O, F) and 4 bond types (single, double, triple, aromatic). Node features represent the chemical properties of each atom and include: - The atomic number, one-hot encoded; - The atom's position in the X, Y, and Z dimensions; - The atomic charge; - The mass difference from the monoisotope; The edge features represent the type of chemical bond between two atoms, one-hot encoded. Each graph has an 19-dimensional label for regression. **Arguments** - `amount`: int, load this many molecules instead of the full dataset (useful for debugging). - `n_jobs`: number of CPU cores to use for reading the data (-1, to use all available cores). """<line_sep>url="https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb9.tar.gz"<def_stmt>__init__ self amount=<none> n_jobs=1 **kwargs<block_start>self.amount=amount<line_sep>self.n_jobs=n_jobs<line_sep>super().__init__(**kwargs)<block_end><def_stmt>download self<block_start>get_file("qm9.tar.gz" self.url extract=<true> cache_dir=self.path cache_subdir=self.path )<line_sep>os.remove(osp.join(self.path "qm9.tar.gz"))<block_end><def_stmt>read self<block_start>print("Loading QM9 dataset.")<line_sep>sdf_file=osp.join(self.path "gdb9.sdf")<line_sep>data=load_sdf(sdf_file amount=self.amount)# Internal SDF format <def_stmt>read_mol mol<block_start>x=np.array([atom_to_feature(atom)<for>atom mol["atoms"]])<line_sep>a,e=mol_to_adj(mol)<line_sep><return>x a e<block_end>data=Parallel(n_jobs=self.n_jobs)(delayed(read_mol)(mol)<for>mol tqdm(data ncols=80))<line_sep>x_list,a_list,e_list=list(zip(*data))<line_sep># Load labels labels_file=osp.join(self.path "gdb9.sdf.csv")<line_sep>labels=load_csv(labels_file)<line_sep>labels=labels.set_index("mol_id").values<if_stmt>self.amount<is><not><none><block_start>labels=labels[:self.amount]<block_end><return>[Graph(x=x a=a e=e y=y)<for>x,a,e,y zip(x_list a_list e_list labels)]<block_end><block_end><def_stmt>atom_to_feature atom<block_start>atomic_num=label_to_one_hot(atom["atomic_num"] ATOM_TYPES)<line_sep>coords=atom["coords"]<line_sep>charge=atom["charge"]<line_sep>iso=atom["iso"]<line_sep><return>np.concatenate((atomic_num coords [charge iso]) -1)<block_end><def_stmt>mol_to_adj mol<block_start>row,col,edge_features=[] [] []<for_stmt>bond mol["bonds"]<block_start>start,end=bond["start_atom"] bond["end_atom"]<line_sep>row<augadd>[start end]<line_sep>col<augadd>[end start]<line_sep>edge_features<augadd>[bond["type"]]<times>2<block_end>a,e=sparse.edge_index_to_matrix(edge_index=np.array((row col)).T edge_weight=np.ones_like(row) edge_features=label_to_one_hot(edge_features BOND_TYPES) )<line_sep><return>a e<block_end>
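A short usage sketch for the dataset class above; the companion DisjointLoader and the shape of its batches follow Spektral's usual layout, which is an assumption here.

from spektral.data import DisjointLoader

dataset = QM9(amount=1000, n_jobs=1)      # read only the first 1000 molecules
print(dataset)
loader = DisjointLoader(dataset, batch_size=32, epochs=1)
# loader.load() yields ((x, a, e, i), y) batches: node features, adjacency,
# edge features, graph indices, and the 19 regression targets.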
# coding=utf-8 # Copyright 2020 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Generic TF-Agents training function for bandits."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_from_stmt>absl logging<import_stmt>tensorflow<as>tf# pylint: disable=g-explicit-tensorflow-version-import <import_from_stmt>tf_agents.drivers dynamic_step_driver<import_from_stmt>tf_agents.eval metric_utils<import_from_stmt>tf_agents.metrics tf_metrics<import_from_stmt>tf_agents.policies policy_saver<import_from_stmt>tf_agents.replay_buffers tf_uniform_replay_buffer<line_sep>tf=tf.compat.v2<line_sep>AGENT_CHECKPOINT_NAME='agent'<line_sep>STEP_CHECKPOINT_NAME='step'<line_sep>CHECKPOINT_FILE_PREFIX='ckpt'<def_stmt>get_replay_buffer data_spec batch_size steps_per_loop<block_start>"""Return a `TFUniformReplayBuffer` for the given `agent`."""<line_sep>buf=tf_uniform_replay_buffer.TFUniformReplayBuffer(data_spec=data_spec batch_size=batch_size max_length=steps_per_loop)<line_sep><return>buf<block_end><def_stmt>set_expected_shape experience num_steps<block_start><def_stmt>set_time_dim input_tensor steps<block_start>tensor_shape=input_tensor.shape.as_list()<line_sep>tensor_shape[1]=steps<line_sep>input_tensor.set_shape(tensor_shape)<block_end>tf.nest.map_structure(<lambda>t:set_time_dim(t num_steps) experience)<block_end><def_stmt>get_training_loop_fn driver replay_buffer agent steps<block_start>"""Returns a `tf.function` that runs the driver and training loops. Args: driver: an instance of `Driver`. replay_buffer: an instance of `ReplayBuffer`. agent: an instance of `TFAgent`. steps: an integer indicating how many driver steps should be executed and presented to the trainer during each training loop. """<def_stmt>training_loop <block_start>"""Returns a `tf.function` that runs the training loop."""<line_sep>driver.run()<line_sep>batch_size=driver.env.batch_size<line_sep>dataset=replay_buffer.as_dataset(sample_batch_size=batch_size num_steps=steps single_deterministic_pass=<true>)<line_sep>experience,unused_info=tf.data.experimental.get_single_element(dataset)<line_sep>set_expected_shape(experience steps)<line_sep>loss_info=agent.train(experience)<line_sep>replay_buffer.clear()<line_sep><return>loss_info<block_end><return>training_loop<block_end><def_stmt>restore_and_get_checkpoint_manager root_dir agent metrics step_metric<block_start>"""Restores from `root_dir` and returns a function that writes checkpoints."""<line_sep>trackable_objects={metric.name:metric<for>metric metrics}<line_sep>trackable_objects[AGENT_CHECKPOINT_NAME]=agent<line_sep>trackable_objects[STEP_CHECKPOINT_NAME]=step_metric<line_sep>checkpoint=tf.train.Checkpoint(**trackable_objects)<line_sep>checkpoint_manager=tf.train.CheckpointManager(checkpoint=checkpoint directory=root_dir max_to_keep=5)<line_sep>latest=checkpoint_manager.latest_checkpoint<if_stmt>latest<is><not><none><block_start>logging.info('Restoring checkpoint from %s.' 
latest)<line_sep>checkpoint.restore(latest)<line_sep>logging.info('Successfully restored to step %s.' step_metric.result())<block_end><else_stmt><block_start>logging.info('Did not find a pre-existing checkpoint. '<concat>'Starting from scratch.')<block_end><return>checkpoint_manager<block_end><def_stmt>train root_dir agent environment training_loops steps_per_loop additional_metrics=() training_data_spec_transformation_fn=<none><block_start>"""Perform `training_loops` iterations of training. Checkpoint results. If one or more baseline_reward_fns are provided, the regret is computed against each one of them. Here is example baseline_reward_fn: def baseline_reward_fn(observation, per_action_reward_fns): rewards = ... # compute reward for each arm optimal_action_reward = ... # take the maximum reward return optimal_action_reward Args: root_dir: path to the directory where checkpoints and metrics will be written. agent: an instance of `TFAgent`. environment: an instance of `TFEnvironment`. training_loops: an integer indicating how many training loops should be run. steps_per_loop: an integer indicating how many driver steps should be executed and presented to the trainer during each training loop. additional_metrics: Tuple of metric objects to log, in addition to default metrics `NumberOfEpisodes`, `AverageReturnMetric`, and `AverageEpisodeLengthMetric`. training_data_spec_transformation_fn: Optional function that transforms the data items before they get to the replay buffer. """<line_sep># TODO(b/127641485): create evaluation loop with configurable metrics. <if_stmt>training_data_spec_transformation_fn<is><none><block_start>data_spec=agent.policy.trajectory_spec<block_end><else_stmt><block_start>data_spec=training_data_spec_transformation_fn(agent.policy.trajectory_spec)<block_end>replay_buffer=get_replay_buffer(data_spec environment.batch_size steps_per_loop)<line_sep># `step_metric` records the number of individual rounds of bandit interaction; # that is, (number of trajectories) * batch_size. 
step_metric=tf_metrics.EnvironmentSteps()<line_sep>metrics=[tf_metrics.NumberOfEpisodes() tf_metrics.AverageEpisodeLengthMetric(batch_size=environment.batch_size)]+list(additional_metrics)<if_stmt>isinstance(environment.reward_spec() dict)<block_start>metrics<augadd>[tf_metrics.AverageReturnMultiMetric(reward_spec=environment.reward_spec() batch_size=environment.batch_size)]<block_end><else_stmt><block_start>metrics<augadd>[tf_metrics.AverageReturnMetric(batch_size=environment.batch_size)]<block_end><if_stmt>training_data_spec_transformation_fn<is><not><none><block_start>add_batch_fn=<lambda>data:replay_buffer.add_batch(# pylint: disable=g-long-lambda training_data_spec_transformation_fn(data))<block_end><else_stmt><block_start>add_batch_fn=replay_buffer.add_batch<block_end>observers=[add_batch_fn step_metric]+metrics<line_sep>driver=dynamic_step_driver.DynamicStepDriver(env=environment policy=agent.collect_policy num_steps=steps_per_loop<times>environment.batch_size observers=observers)<line_sep>training_loop=get_training_loop_fn(driver replay_buffer agent steps_per_loop)<line_sep>checkpoint_manager=restore_and_get_checkpoint_manager(root_dir agent metrics step_metric)<line_sep>train_step_counter=tf.compat.v1.train.get_or_create_global_step()<line_sep>saver=policy_saver.PolicySaver(agent.policy train_step=train_step_counter)<line_sep>summary_writer=tf.summary.create_file_writer(root_dir)<line_sep>summary_writer.set_as_default()<for_stmt>i range(training_loops)<block_start>training_loop()<line_sep>metric_utils.log_metrics(metrics)<for_stmt>metric metrics<block_start>metric.tf_summaries(train_step=step_metric.result())<block_end>checkpoint_manager.save()<if_stmt>i%100<eq>0<block_start>saver.save(os.path.join(root_dir 'policy_%d'%step_metric.result()))<block_end><block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># handle normal mixing or premixing <def_stmt>getHcalDigitizer process<block_start><if_stmt>hasattr(process 'mixData')<block_start><return>process.mixData<block_end><if_stmt>hasattr(process 'mix')<and>hasattr(process.mix 'digitizers')<and>hasattr(process.mix.digitizers 'hcal')<block_start><return>process.mix.digitizers.hcal<block_end><return><none><block_end><def_stmt>getHGCalDigitizer process section<block_start><if_stmt>hasattr(process 'mix')<and>hasattr(process.mix 'digitizers')<block_start><if_stmt>section<eq>'EE'<and>hasattr(process.mix.digitizers 'hgceeDigitizer')<block_start><return>process.mix.digitizers.hgceeDigitizer<block_end><elif_stmt>section<eq>'FH'<and>hasattr(process.mix.digitizers 'hgchefrontDigitizer')<block_start><return>process.mix.digitizers.hgchefrontDigitizer<block_end><elif_stmt>section<eq>'BH'<and>hasattr(process.mix.digitizers 'hgchebackDigitizer')<block_start><return>process.mix.digitizers.hgchebackDigitizer<block_end><elif_stmt>section<eq>'HFNose'<and>hasattr(process.mix.digitizers 'hfnoseDigitizer')<block_start><return>process.mix.digitizers.hfnoseDigitizer<block_end><block_end><return><none><block_end># change assumptions about lumi rate <def_stmt>setScenarioHLLHC module scenarioHLLHC<block_start><if_stmt>scenarioHLLHC<eq>"nominal"<block_start><import_from_stmt>CalibCalorimetry.HcalPlugins.HBHEDarkening_cff _years_LHC _years_HLLHC_nominal<line_sep>module.years=_years_LHC+_years_HLLHC_nominal<block_end><elif_stmt>scenarioHLLHC<eq>"ultimate"<block_start><import_from_stmt>CalibCalorimetry.HcalPlugins.HBHEDarkening_cff _years_LHC _years_HLLHC_ultimate<line_sep>module.years=_years_LHC+_years_HLLHC_ultimate<block_end><return>module<block_end># turnon = True enables default, False disables # recalibration and darkening always together <def_stmt>ageHB process turnon scenarioHLLHC<block_start><if_stmt>turnon<block_start><import_from_stmt>CalibCalorimetry.HcalPlugins.HBHEDarkening_cff HBDarkeningEP<line_sep>process.HBDarkeningEP=HBDarkeningEP<line_sep>process.HBDarkeningEP=setScenarioHLLHC(process.HBDarkeningEP scenarioHLLHC)<block_end>hcaldigi=getHcalDigitizer(process)<if_stmt>hcaldigi<is><not><none><block_start>hcaldigi.HBDarkening=cms.bool(turnon)<block_end><if_stmt>hasattr(process 'es_hardcode')<block_start>process.es_hardcode.HBRecalibration=cms.bool(turnon)<block_end><return>process<block_end><def_stmt>ageHE process turnon scenarioHLLHC<block_start><if_stmt>turnon<block_start><import_from_stmt>CalibCalorimetry.HcalPlugins.HBHEDarkening_cff HEDarkeningEP<line_sep>process.HEDarkeningEP=HEDarkeningEP<line_sep>process.HEDarkeningEP=setScenarioHLLHC(process.HEDarkeningEP scenarioHLLHC)<block_end>hcaldigi=getHcalDigitizer(process)<if_stmt>hcaldigi<is><not><none><block_start>hcaldigi.HEDarkening=cms.bool(turnon)<block_end><if_stmt>hasattr(process 'es_hardcode')<block_start>process.es_hardcode.HERecalibration=cms.bool(turnon)<block_end><return>process<block_end><def_stmt>ageHF process turnon<block_start>hcaldigi=getHcalDigitizer(process)<if_stmt>hcaldigi<is><not><none><block_start>hcaldigi.HFDarkening=cms.bool(turnon)<block_end><if_stmt>hasattr(process 'es_hardcode')<block_start>process.es_hardcode.HFRecalibration=cms.bool(turnon)<block_end><return>process<block_end><def_stmt>agedHFNose process algo=0<block_start><import_from_stmt>SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi HFNose_setEndOfLifeNoise<line_sep>process=HFNose_setEndOfLifeNoise(process byDose=<true> 
byDoseAlgo=algo)<line_sep><return>process<block_end><def_stmt>agedHGCal process algo=0<block_start><import_from_stmt>SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi HGCal_setEndOfLifeNoise<line_sep>process=HGCal_setEndOfLifeNoise(process byDose=<true> byDoseAlgo=algo)<line_sep><return>process<block_end><def_stmt>realisticHGCalStartup process<block_start><import_from_stmt>SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi HGCal_setRealisticStartupNoise<line_sep>process=HGCal_setRealisticStartupNoise(process)<line_sep><return>process<block_end># needs lumi to set proper ZS thresholds (tbd) <def_stmt>ageSiPM process turnon lumi<block_start>process.es_hardcode.hbUpgrade.doRadiationDamage=turnon<line_sep>process.es_hardcode.heUpgrade.doRadiationDamage=turnon<line_sep># todo: determine ZS threshold adjustments # adjust PF thresholds for increased noise # based on: https://baylor.box.com/s/w32ja75krcbxcycyifexu28dwlgrj7wg hcal_lumis=[300 1000 3000 4500 1e10]<line_sep>hcal_thresholds={300:{"seed":[0.5 0.625 0.75 0.75] "rec":[0.4 0.5 0.6 0.6] } 1000:{"seed":[1.0 1.5 1.5 1.5] "rec":[0.8 1.2 1.2 1.2] } 3000:{"seed":[1.25 2.5 2.5 2.5] "rec":[1.0 2.0 2.0 2.0] } 4500:{"seed":[1.5 3.0 3.0 3.0] "rec":[1.25 2.5 2.5 2.5] } }<line_sep>ctmodules=['calotowermaker' 'caloTowerForTrk' 'caloTowerForTrkPreSplitting' 'towerMaker' 'towerMakerWithHO']<for_stmt>ilumi,hcal_lumi enumerate(hcal_lumis[:-1])<block_start><if_stmt>lumi<ge>hcal_lumi<and>lumi<l>hcal_lumis[ilumi+1]<block_start><if_stmt>hasattr(process 'particleFlowClusterHBHE')<block_start>process.particleFlowClusterHBHE.seedFinder.thresholdsByDetector[0].seedingThreshold=hcal_thresholds[hcal_lumi]["seed"]<line_sep>process.particleFlowClusterHBHE.initialClusteringStep.thresholdsByDetector[0].gatheringThreshold=hcal_thresholds[hcal_lumi]["rec"]<line_sep>process.particleFlowClusterHBHE.pfClusterBuilder.recHitEnergyNorms[0].recHitEnergyNorm=hcal_thresholds[hcal_lumi]["rec"]<line_sep>process.particleFlowClusterHBHE.pfClusterBuilder.positionCalc.logWeightDenominatorByDetector[0].logWeightDenominator=hcal_thresholds[hcal_lumi]["rec"]<line_sep>process.particleFlowClusterHBHE.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator=hcal_thresholds[hcal_lumi]["rec"]<block_end><if_stmt>hasattr(process 'particleFlowClusterHCAL')<block_start>process.particleFlowClusterHCAL.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector[0].logWeightDenominator=hcal_thresholds[hcal_lumi]["rec"]<block_end><if_stmt>hasattr(process 'particleFlowRecHitHBHE')<block_start>process.particleFlowRecHitHBHE.producers[0].qualityTests[0].cuts[0].threshold=hcal_thresholds[hcal_lumi]["rec"]<block_end><for_stmt>ctmod ctmodules<block_start><if_stmt>hasattr(process ctmod)<block_start>getattr(process ctmod).HBThreshold1=hcal_thresholds[hcal_lumi]["rec"][0]<line_sep>getattr(process ctmod).HBThreshold2=hcal_thresholds[hcal_lumi]["rec"][1]<line_sep>getattr(process ctmod).HBThreshold=hcal_thresholds[hcal_lumi]["rec"][-1]<block_end><block_end><break><block_end><block_end><return>process<block_end><def_stmt>ageHcal process lumi instLumi scenarioHLLHC<block_start>hcaldigi=getHcalDigitizer(process)<if_stmt>hcaldigi<is><not><none><block_start>hcaldigi.DelivLuminosity=cms.double(float(lumi))<block_end># integrated lumi in fb-1 # these lines need to be further activated by turning on 'complete' aging for HF <if_stmt>hasattr(process 
'g4SimHits')<block_start>process.g4SimHits.HCalSD.InstLuminosity=cms.double(float(instLumi))<line_sep>process.g4SimHits.HCalSD.DelivLuminosity=cms.double(float(lumi))<block_end># recalibration and darkening always together <if_stmt>hasattr(process 'es_hardcode')<block_start>process.es_hardcode.iLumi=cms.double(float(lumi))<block_end># functions to enable individual subdet aging process=ageHB(process <true> scenarioHLLHC)<line_sep>process=ageHE(process <true> scenarioHLLHC)<line_sep>process=ageHF(process <true>)<line_sep>process=ageSiPM(process <true> lumi)<line_sep><return>process<block_end><def_stmt>turn_on_HB_aging process<block_start>process=ageHB(process <true> "")<line_sep><return>process<block_end><def_stmt>turn_off_HB_aging process<block_start>process=ageHB(process <false> "")<line_sep><return>process<block_end><def_stmt>turn_on_HE_aging process<block_start>process=ageHE(process <true> "")<line_sep><return>process<block_end><def_stmt>turn_off_HE_aging process<block_start>process=ageHE(process <false> "")<line_sep><return>process<block_end><def_stmt>turn_on_HF_aging process<block_start>process=ageHF(process <true>)<line_sep><return>process<block_end><def_stmt>turn_off_HF_aging process<block_start>process=ageHF(process <false>)<line_sep><return>process<block_end><def_stmt>turn_off_SiPM_aging process<block_start>process=ageSiPM(process <false> 0.0)<line_sep><return>process<block_end><def_stmt>hf_complete_aging process<block_start><if_stmt>hasattr(process 'g4SimHits')<block_start>process.g4SimHits.HCalSD.HFDarkening=cms.untracked.bool(<true>)<block_end>hcaldigi=getHcalDigitizer(process)<if_stmt>hcaldigi<is><not><none><block_start>hcaldigi.HFDarkening=cms.untracked.bool(<false>)<block_end><return>process<block_end><def_stmt>ageEcal process lumi instLumi<block_start><if_stmt>hasattr(process 'g4SimHits')#these lines need to be further activiated by tuning on 'complete' aging for ecal <block_start>process.g4SimHits.ECalSD.InstLuminosity=cms.double(instLumi)<line_sep>process.g4SimHits.ECalSD.DelivLuminosity=cms.double(float(lumi))<block_end># available conditions ecal_lumis=[300 1000 3000 4500]<line_sep>ecal_conditions=[['EcalIntercalibConstantsRcd' 'EcalIntercalibConstants_TL{:d}_upgrade_8deg_v2_mc'] ['EcalIntercalibConstantsMCRcd' 'EcalIntercalibConstantsMC_TL{:d}_upgrade_8deg_v2_mc'] ['EcalLaserAPDPNRatiosRcd' 'EcalLaserAPDPNRatios_TL{:d}_upgrade_8deg_mc'] ['EcalPedestalsRcd' 'EcalPedestals_TL{:d}_upgradeTIA_8deg_mc'] ['EcalTPGLinearizationConstRcd' 'EcalTPGLinearizationConst_TL{:d}_upgrade_8deg_mc'] ]<line_sep># update PF thresholds, based on https://indico.cern.ch/event/653123/contributions/2659235/attachments/1491385/2318364/170711_upsg_ledovskoy.pdf ecal_thresholds={300:0.103 1000:0.175 3000:0.435 4500:0.707 }<line_sep>ecal_seed_multiplier=2.5<line_sep># try to get conditions <if_stmt>int(lumi)<in>ecal_lumis<block_start><if_stmt><not>hasattr(process.GlobalTag 'toGet')<block_start>process.GlobalTag.toGet=cms.VPSet()<block_end><for_stmt>ecal_condition ecal_conditions<block_start>process.GlobalTag.toGet.append(cms.PSet(record=cms.string(ecal_condition[0]) tag=cms.string(ecal_condition[1].format(int(lumi))) connect=cms.string("frontier://FrontierProd/CMS_CONDITIONS")))<block_end><if_stmt>hasattr(process "particleFlowClusterECALUncorrected")<block_start>_seeds=process.particleFlowClusterECALUncorrected.seedFinder.thresholdsByDetector<for_stmt>iseed range(0 
len(_seeds))<block_start><if_stmt>_seeds[iseed].detector.value()<eq>"ECAL_BARREL"<block_start>_seeds[iseed].seedingThreshold=cms.double(ecal_thresholds[int(lumi)]<times>ecal_seed_multiplier)<block_end><block_end>_clusters=process.particleFlowClusterECALUncorrected.initialClusteringStep.thresholdsByDetector<for_stmt>icluster range(0 len(_clusters))<block_start><if_stmt>_clusters[icluster].detector.value()<eq>"ECAL_BARREL"<block_start>_clusters[icluster].gatheringThreshold=cms.double(ecal_thresholds[int(lumi)])<block_end><block_end><block_end><block_end><return>process<block_end><def_stmt>ecal_complete_aging process<block_start><if_stmt>hasattr(process 'g4SimHits')<block_start>process.g4SimHits.ECalSD.AgeingWithSlopeLY=cms.untracked.bool(<true>)<block_end><if_stmt>hasattr(process 'ecal_digi_parameters')<block_start>process.ecal_digi_parameters.UseLCcorrection=cms.untracked.bool(<false>)<block_end><return>process<block_end><def_stmt>customise_aging_300 process<block_start>process=ageHcal(process 300 5.0e34 "nominal")<line_sep>process=ageEcal(process 300 5.0e34)<line_sep><return>process<block_end><def_stmt>customise_aging_1000 process<block_start>process=ageHcal(process 1000 5.0e34 "nominal")<line_sep>process=turn_off_HE_aging(process)#avoid conflict between HGCal and Hcal in phase2 geom configuration process=ageEcal(process 1000 5.0e34)<line_sep><return>process<block_end><def_stmt>customise_aging_3000 process<block_start>process=ageHcal(process 3000 5.0e34 "nominal")<line_sep>process=turn_off_HE_aging(process)#avoid conflict between HGCal and Hcal in phase2 geom configuration process=ageEcal(process 3000 5.0e34)<line_sep>process=agedHGCal(process)<line_sep>process=agedHFNose(process)<line_sep><return>process<block_end><def_stmt>customise_aging_3000_ultimate process<block_start>process=ageHcal(process 3000 7.5e34 "ultimate")<line_sep>process=turn_off_HE_aging(process)#avoid conflict between HGCal and Hcal in phase2 geom configuration process=ageEcal(process 3000 7.5e34)<line_sep>process=agedHGCal(process)<line_sep>process=agedHFNose(process)<line_sep><return>process<block_end><def_stmt>customise_aging_4500_ultimate process<block_start>process=ageHcal(process 4500 7.5e34 "ultimate")<line_sep>process=turn_off_HE_aging(process)#avoid conflict between HGCal and Hcal in phase2 geom configuration process=ageEcal(process 4500 7.5e34)<line_sep>process=agedHGCal(process)<line_sep>process=agedHFNose(process)<line_sep><return>process<block_end>
"""Validate that number of threads in thread pools is set to 1."""<import_stmt>numexpr<import_stmt>blosc<import_stmt>threadpoolctl<line_sep># APIs that return previous number of threads: <assert_stmt>numexpr.set_num_threads(2)<eq>1<assert_stmt>blosc.set_nthreads(2)<eq>1<for_stmt>d threadpoolctl.threadpool_info()<block_start><assert_stmt>d["num_threads"]<eq>1 d<block_end>
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <import_from_stmt>typing Any List Optional<import_from_stmt>azure.core.exceptions HttpResponseError<import_stmt>msrest.serialization<class_stmt>ErrorAdditionalInfo(msrest.serialization.Model)<block_start>"""The resource management error additional info. Variables are only populated by the server, and will be ignored when sending a request. :ivar type: The additional info type. :vartype type: str :ivar info: The additional info. :vartype info: any """<line_sep>_validation={'type':{'readonly':<true>} 'info':{'readonly':<true>} }<line_sep>_attribute_map={'type':{'key':'type' 'type':'str'} 'info':{'key':'info' 'type':'object'} }<def_stmt>__init__ self **kwargs<block_start>super(ErrorAdditionalInfo self).__init__(**kwargs)<line_sep>self.type=<none><line_sep>self.info=<none><block_end><block_end><class_stmt>ErrorDetail(msrest.serialization.Model)<block_start>"""The error detail. Variables are only populated by the server, and will be ignored when sending a request. :ivar code: The error code. :vartype code: str :ivar message: The error message. :vartype message: str :ivar target: The error target. :vartype target: str :ivar details: The error details. :vartype details: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorDetail] :ivar additional_info: The error additional info. :vartype additional_info: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorAdditionalInfo] """<line_sep>_validation={'code':{'readonly':<true>} 'message':{'readonly':<true>} 'target':{'readonly':<true>} 'details':{'readonly':<true>} 'additional_info':{'readonly':<true>} }<line_sep>_attribute_map={'code':{'key':'code' 'type':'str'} 'message':{'key':'message' 'type':'str'} 'target':{'key':'target' 'type':'str'} 'details':{'key':'details' 'type':'[ErrorDetail]'} 'additional_info':{'key':'additionalInfo' 'type':'[ErrorAdditionalInfo]'} }<def_stmt>__init__ self **kwargs<block_start>super(ErrorDetail self).__init__(**kwargs)<line_sep>self.code=<none><line_sep>self.message=<none><line_sep>self.target=<none><line_sep>self.details=<none><line_sep>self.additional_info=<none><block_end><block_end><class_stmt>ErrorResponse(msrest.serialization.Model)<block_start>"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). :param error: The error object. :type error: ~azure.mgmt.authorization.v2018_01_01_preview.models.ErrorDetail """<line_sep>_attribute_map={'error':{'key':'error' 'type':'ErrorDetail'} }<def_stmt>__init__ self * error:Optional["ErrorDetail"]=<none> **kwargs<block_start>super(ErrorResponse self).__init__(**kwargs)<line_sep>self.error=error<block_end><block_end><class_stmt>Permission(msrest.serialization.Model)<block_start>"""Role definition permissions. :param actions: Allowed actions. :type actions: list[str] :param not_actions: Denied actions. :type not_actions: list[str] :param data_actions: Allowed Data actions. :type data_actions: list[str] :param not_data_actions: Denied Data actions. 
:type not_data_actions: list[str] """<line_sep>_attribute_map={'actions':{'key':'actions' 'type':'[str]'} 'not_actions':{'key':'notActions' 'type':'[str]'} 'data_actions':{'key':'dataActions' 'type':'[str]'} 'not_data_actions':{'key':'notDataActions' 'type':'[str]'} }<def_stmt>__init__ self * actions:Optional[List[str]]=<none> not_actions:Optional[List[str]]=<none> data_actions:Optional[List[str]]=<none> not_data_actions:Optional[List[str]]=<none> **kwargs<block_start>super(Permission self).__init__(**kwargs)<line_sep>self.actions=actions<line_sep>self.not_actions=not_actions<line_sep>self.data_actions=data_actions<line_sep>self.not_data_actions=not_data_actions<block_end><block_end><class_stmt>PermissionGetResult(msrest.serialization.Model)<block_start>"""Permissions information. :param value: An array of permissions. :type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.Permission] :param next_link: The URL to use for getting the next set of results. :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[Permission]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self * value:Optional[List["Permission"]]=<none> next_link:Optional[str]=<none> **kwargs<block_start>super(PermissionGetResult self).__init__(**kwargs)<line_sep>self.value=value<line_sep>self.next_link=next_link<block_end><block_end><class_stmt>ProviderOperation(msrest.serialization.Model)<block_start>"""Operation. :param name: The operation name. :type name: str :param display_name: The operation display name. :type display_name: str :param description: The operation description. :type description: str :param origin: The operation origin. :type origin: str :param properties: The operation properties. :type properties: any :param is_data_action: The dataAction flag to specify the operation type. :type is_data_action: bool """<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'display_name':{'key':'displayName' 'type':'str'} 'description':{'key':'description' 'type':'str'} 'origin':{'key':'origin' 'type':'str'} 'properties':{'key':'properties' 'type':'object'} 'is_data_action':{'key':'isDataAction' 'type':'bool'} }<def_stmt>__init__ self * name:Optional[str]=<none> display_name:Optional[str]=<none> description:Optional[str]=<none> origin:Optional[str]=<none> properties:Optional[Any]=<none> is_data_action:Optional[bool]=<none> **kwargs<block_start>super(ProviderOperation self).__init__(**kwargs)<line_sep>self.name=name<line_sep>self.display_name=display_name<line_sep>self.description=description<line_sep>self.origin=origin<line_sep>self.properties=properties<line_sep>self.is_data_action=is_data_action<block_end><block_end><class_stmt>ProviderOperationsMetadata(msrest.serialization.Model)<block_start>"""Provider Operations metadata. :param id: The provider id. :type id: str :param name: The provider name. :type name: str :param type: The provider type. :type type: str :param display_name: The provider display name. :type display_name: str :param resource_types: The provider resource types. :type resource_types: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ResourceType] :param operations: The provider operations. 
:type operations: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperation] """<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'display_name':{'key':'displayName' 'type':'str'} 'resource_types':{'key':'resourceTypes' 'type':'[ResourceType]'} 'operations':{'key':'operations' 'type':'[ProviderOperation]'} }<def_stmt>__init__ self * id:Optional[str]=<none> name:Optional[str]=<none> type:Optional[str]=<none> display_name:Optional[str]=<none> resource_types:Optional[List["ResourceType"]]=<none> operations:Optional[List["ProviderOperation"]]=<none> **kwargs<block_start>super(ProviderOperationsMetadata self).__init__(**kwargs)<line_sep>self.id=id<line_sep>self.name=name<line_sep>self.type=type<line_sep>self.display_name=display_name<line_sep>self.resource_types=resource_types<line_sep>self.operations=operations<block_end><block_end><class_stmt>ProviderOperationsMetadataListResult(msrest.serialization.Model)<block_start>"""Provider operations metadata list. :param value: The list of providers. :type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperationsMetadata] :param next_link: The URL to use for getting the next set of results. :type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[ProviderOperationsMetadata]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self * value:Optional[List["ProviderOperationsMetadata"]]=<none> next_link:Optional[str]=<none> **kwargs<block_start>super(ProviderOperationsMetadataListResult self).__init__(**kwargs)<line_sep>self.value=value<line_sep>self.next_link=next_link<block_end><block_end><class_stmt>ResourceType(msrest.serialization.Model)<block_start>"""Resource Type. :param name: The resource type name. :type name: str :param display_name: The resource type display name. :type display_name: str :param operations: The resource type operations. :type operations: list[~azure.mgmt.authorization.v2018_01_01_preview.models.ProviderOperation] """<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'display_name':{'key':'displayName' 'type':'str'} 'operations':{'key':'operations' 'type':'[ProviderOperation]'} }<def_stmt>__init__ self * name:Optional[str]=<none> display_name:Optional[str]=<none> operations:Optional[List["ProviderOperation"]]=<none> **kwargs<block_start>super(ResourceType self).__init__(**kwargs)<line_sep>self.name=name<line_sep>self.display_name=display_name<line_sep>self.operations=operations<block_end><block_end><class_stmt>RoleAssignment(msrest.serialization.Model)<block_start>"""Role Assignments. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The role assignment ID. :vartype id: str :ivar name: The role assignment name. :vartype name: str :ivar type: The role assignment type. :vartype type: str :param scope: The role assignment scope. :type scope: str :param role_definition_id: The role definition ID. :type role_definition_id: str :param principal_id: The principal ID. :type principal_id: str :param can_delegate: The Delegation flag for the role assignment. 
:type can_delegate: bool """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'scope':{'key':'properties.scope' 'type':'str'} 'role_definition_id':{'key':'properties.roleDefinitionId' 'type':'str'} 'principal_id':{'key':'properties.principalId' 'type':'str'} 'can_delegate':{'key':'properties.canDelegate' 'type':'bool'} }<def_stmt>__init__ self * scope:Optional[str]=<none> role_definition_id:Optional[str]=<none> principal_id:Optional[str]=<none> can_delegate:Optional[bool]=<none> **kwargs<block_start>super(RoleAssignment self).__init__(**kwargs)<line_sep>self.id=<none><line_sep>self.name=<none><line_sep>self.type=<none><line_sep>self.scope=scope<line_sep>self.role_definition_id=role_definition_id<line_sep>self.principal_id=principal_id<line_sep>self.can_delegate=can_delegate<block_end><block_end><class_stmt>RoleAssignmentCreateParameters(msrest.serialization.Model)<block_start>"""Role assignment create parameters. All required parameters must be populated in order to send to Azure. :param role_definition_id: Required. The role definition ID used in the role assignment. :type role_definition_id: str :param principal_id: Required. The principal ID assigned to the role. This maps to the ID inside the Active Directory. It can point to a user, service principal, or security group. :type principal_id: str :param can_delegate: The delegation flag used for creating a role assignment. :type can_delegate: bool """<line_sep>_validation={'role_definition_id':{'required':<true>} 'principal_id':{'required':<true>} }<line_sep>_attribute_map={'role_definition_id':{'key':'properties.roleDefinitionId' 'type':'str'} 'principal_id':{'key':'properties.principalId' 'type':'str'} 'can_delegate':{'key':'properties.canDelegate' 'type':'bool'} }<def_stmt>__init__ self * role_definition_id:str principal_id:str can_delegate:Optional[bool]=<none> **kwargs<block_start>super(RoleAssignmentCreateParameters self).__init__(**kwargs)<line_sep>self.role_definition_id=role_definition_id<line_sep>self.principal_id=principal_id<line_sep>self.can_delegate=can_delegate<block_end><block_end><class_stmt>RoleAssignmentFilter(msrest.serialization.Model)<block_start>"""Role Assignments filter. :param principal_id: Returns role assignment of the specific principal. :type principal_id: str :param can_delegate: The Delegation flag for the role assignment. :type can_delegate: bool """<line_sep>_attribute_map={'principal_id':{'key':'principalId' 'type':'str'} 'can_delegate':{'key':'canDelegate' 'type':'bool'} }<def_stmt>__init__ self * principal_id:Optional[str]=<none> can_delegate:Optional[bool]=<none> **kwargs<block_start>super(RoleAssignmentFilter self).__init__(**kwargs)<line_sep>self.principal_id=principal_id<line_sep>self.can_delegate=can_delegate<block_end><block_end><class_stmt>RoleAssignmentListResult(msrest.serialization.Model)<block_start>"""Role assignment list operation result. :param value: Role assignment list. :type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.RoleAssignment] :param next_link: The URL to use for getting the next set of results. 
:type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[RoleAssignment]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self * value:Optional[List["RoleAssignment"]]=<none> next_link:Optional[str]=<none> **kwargs<block_start>super(RoleAssignmentListResult self).__init__(**kwargs)<line_sep>self.value=value<line_sep>self.next_link=next_link<block_end><block_end><class_stmt>RoleDefinition(msrest.serialization.Model)<block_start>"""Role definition. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The role definition ID. :vartype id: str :ivar name: The role definition name. :vartype name: str :ivar type: The role definition type. :vartype type: str :param role_name: The role name. :type role_name: str :param description: The role definition description. :type description: str :param role_type: The role type. :type role_type: str :param permissions: Role definition permissions. :type permissions: list[~azure.mgmt.authorization.v2018_01_01_preview.models.Permission] :param assignable_scopes: Role definition assignable scopes. :type assignable_scopes: list[str] """<line_sep>_validation={'id':{'readonly':<true>} 'name':{'readonly':<true>} 'type':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'role_name':{'key':'properties.roleName' 'type':'str'} 'description':{'key':'properties.description' 'type':'str'} 'role_type':{'key':'properties.type' 'type':'str'} 'permissions':{'key':'properties.permissions' 'type':'[Permission]'} 'assignable_scopes':{'key':'properties.assignableScopes' 'type':'[str]'} }<def_stmt>__init__ self * role_name:Optional[str]=<none> description:Optional[str]=<none> role_type:Optional[str]=<none> permissions:Optional[List["Permission"]]=<none> assignable_scopes:Optional[List[str]]=<none> **kwargs<block_start>super(RoleDefinition self).__init__(**kwargs)<line_sep>self.id=<none><line_sep>self.name=<none><line_sep>self.type=<none><line_sep>self.role_name=role_name<line_sep>self.description=description<line_sep>self.role_type=role_type<line_sep>self.permissions=permissions<line_sep>self.assignable_scopes=assignable_scopes<block_end><block_end><class_stmt>RoleDefinitionFilter(msrest.serialization.Model)<block_start>"""Role Definitions filter. :param role_name: Returns role definition with the specific name. :type role_name: str :param type: Returns role definition with the specific type. :type type: str """<line_sep>_attribute_map={'role_name':{'key':'roleName' 'type':'str'} 'type':{'key':'type' 'type':'str'} }<def_stmt>__init__ self * role_name:Optional[str]=<none> type:Optional[str]=<none> **kwargs<block_start>super(RoleDefinitionFilter self).__init__(**kwargs)<line_sep>self.role_name=role_name<line_sep>self.type=type<block_end><block_end><class_stmt>RoleDefinitionListResult(msrest.serialization.Model)<block_start>"""Role definition list operation result. :param value: Role definition list. :type value: list[~azure.mgmt.authorization.v2018_01_01_preview.models.RoleDefinition] :param next_link: The URL to use for getting the next set of results. 
:type next_link: str """<line_sep>_attribute_map={'value':{'key':'value' 'type':'[RoleDefinition]'} 'next_link':{'key':'nextLink' 'type':'str'} }<def_stmt>__init__ self * value:Optional[List["RoleDefinition"]]=<none> next_link:Optional[str]=<none> **kwargs<block_start>super(RoleDefinitionListResult self).__init__(**kwargs)<line_sep>self.value=value<line_sep>self.next_link=next_link<block_end><block_end>
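As a quick illustration of the request model above, a sketch that builds create parameters for a role assignment; the GUIDs and the scope embedded in the role definition ID are placeholders.

create_params = RoleAssignmentCreateParameters(
    role_definition_id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/providers/Microsoft.Authorization/roleDefinitions/11111111-1111-1111-1111-111111111111"
    ),
    principal_id="22222222-2222-2222-2222-222222222222",
    can_delegate=False,
)
body = create_params.serialize()  # msrest Model.serialize() produces the wire-format dict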
# coding: utf-8 """ Import all required libraries and functions """<line_sep>#---------------------------------------------------------------------- <def_stmt>klSigmode self<block_start>"""Toggle between the deal and open-signal display modes"""<if_stmt>self.mode<eq>'deal'<block_start>self.canvas.updateSig(self.signalsOpen)<line_sep>self.mode='dealOpen'<block_end><else_stmt><block_start>self.canvas.updateSig(self.signals)<line_sep>self.mode='deal'<block_end><block_end>
<import_stmt>base64<import_from_stmt>google.protobuf json_format<import_from_stmt>importlib import_module<import_stmt>json<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>mmdnn.conversion.caffe.errors ConversionError<import_from_stmt>mmdnn.conversion.caffe.common_graph fetch_attr_value<import_from_stmt>mmdnn.conversion.caffe.utils get_lower_case get_upper_case get_real_name<class_stmt>JsonFormatter(object)<block_start>'''Dumpt a DL graph into a Json file.'''<def_stmt>__init__ self graph<block_start>self.graph_def=graph.as_graph_def()<block_end><def_stmt>dump self json_path<block_start>json_txt=json_format.MessageToJson(self.graph_def)<line_sep>parsed=json.loads(json_txt)<line_sep>formatted=json.dumps(parsed indent=4 sort_keys=<true>)<with_stmt>open(json_path 'w')<as>f<block_start>f.write(formatted)<block_end><block_end><block_end><class_stmt>PyWriter(object)<block_start>'''Dumpt a DL graph into a Python script.'''<def_stmt>__init__ self graph data target<block_start>self.graph=graph<line_sep>self.data=data<line_sep>self.tab=' '<times>4<line_sep>self.prefix=''<line_sep>target=target.lower()<if_stmt>target<eq>'tensorflow'<block_start>self.target=target<line_sep>self.net='TensorFlowNetwork'<block_end><elif_stmt>target<eq>'keras'<block_start>self.target=target<line_sep>self.net='KerasNetwork'<block_end><elif_stmt>target<eq>'caffe'<block_start>self.target=target<line_sep>self.net='CaffeNetwork'<block_end><else_stmt><block_start><raise>ConversionError('Target %s is not supported yet.'%target)<block_end><block_end><def_stmt>indent self<block_start>self.prefix<augadd>self.tab<block_end><def_stmt>outdent self<block_start>self.prefix=self.prefix[:-len(self.tab)]<block_end><def_stmt>statement self s<block_start><return>self.prefix+s+'\n'<block_end><def_stmt>emit_imports self<block_start><return>self.statement('from dlconv.%s import %s\n'%(self.target self.net))<block_end><def_stmt>emit_class_def self name<block_start><return>self.statement('class %s(%s):'%(name self.net))<block_end><def_stmt>emit_setup_def self<block_start><return>self.statement('def setup(self):')<block_end><def_stmt>emit_node self node<block_start>'''Emits the Python source for this node.'''<def_stmt>pair key value<block_start><return>'%s=%s'%(key value)<block_end>args=[]<for_stmt>input node.input<block_start>input=input.strip().split(':')<line_sep>name=''.join(input[:-1])<line_sep>idx=int(input[-1])<assert_stmt>name<in>self.graph.node_dict<line_sep>parent=self.graph.get_node(name)<line_sep>args.append(parent.output[idx])<block_end>#FIXME: output=[node.output[0]]<line_sep># output = node.output <for_stmt>k,v node.attr<block_start><if_stmt>k<eq>'cell_type'<block_start>args.append(pair(k "'"+fetch_attr_value(v)+"'"))<block_end><else_stmt><block_start>args.append(pair(k fetch_attr_value(v)))<block_end><block_end>args.append(pair('name' "'"+node.name+"'"))# Set the node name args=', '.join(args)<line_sep><return>self.statement('%s = self.%s(%s)'%(', '.join(output) node.op args))<block_end><def_stmt>dump self code_output_dir<block_start><if_stmt><not>os.path.exists(code_output_dir)<block_start>os.makedirs(code_output_dir)<block_end>file_name=get_lower_case(self.graph.name)<line_sep>code_output_path=os.path.join(code_output_dir file_name+'.py')<line_sep>data_output_path=os.path.join(code_output_dir file_name+'.npy')<with_stmt>open(code_output_path 'w')<as>f<block_start>f.write(self.emit())<block_end><with_stmt>open(data_output_path 'wb')<as>f<block_start>np.save(f self.data)<block_end><return>code_output_path 
data_output_path<block_end><def_stmt>emit self# Decompose DAG into chains <block_start>chains=[]<for_stmt>node self.graph.topologically_sorted()<block_start>attach_to_chain=<none><if_stmt>len(node.input)<eq>1<block_start>parent=get_real_name(node.input[0])<for_stmt>chain chains<block_start><if_stmt>chain[-1].name<eq>parent# Node is part of an existing chain. <block_start>attach_to_chain=chain<line_sep><break><block_end><block_end><block_end><if_stmt>attach_to_chain<is><none># Start a new chain for this node. <block_start>attach_to_chain=[]<line_sep>chains.append(attach_to_chain)<block_end>attach_to_chain.append(node)<block_end># Generate Python code line by line source=self.emit_imports()<line_sep>source<augadd>self.emit_class_def(self.graph.name)<line_sep>self.indent()<line_sep>source<augadd>self.emit_setup_def()<line_sep>self.indent()<line_sep>blocks=[]<for_stmt>chain chains<block_start>b=''<for_stmt>node chain<block_start>b<augadd>self.emit_node(node)<block_end>blocks.append(b[:-1])<block_end>source<augadd>'\n\n'.join(blocks)<line_sep><return>source<block_end><block_end><class_stmt>ModelSaver(object)<block_start><def_stmt>__init__ self code_output_path data_output_path<block_start>self.code_output_path=code_output_path<line_sep>self.data_output_path=data_output_path<block_end><def_stmt>dump self model_output_dir<block_start>'''Return the file path containing graph in generated model files.'''<if_stmt><not>os.path.exists(model_output_dir)<block_start>os.makedirs(model_output_dir)<block_end>sys.path.append(os.path.dirname(self.code_output_path))<line_sep>file_name=os.path.splitext(os.path.basename(self.code_output_path))[0]<line_sep>module=import_module(file_name)<line_sep>class_name=get_upper_case(file_name)<line_sep>net=getattr(module class_name)<line_sep><return>net.dump(self.data_output_path model_output_dir)<block_end><block_end><class_stmt>GraphDrawer(object)<block_start><def_stmt>__init__ self toolkit meta_path<block_start>self.toolkit=toolkit.lower()<line_sep>self.meta_path=meta_path<block_end><def_stmt>dump self graph_path<block_start><if_stmt>self.toolkit<eq>'tensorflow'<block_start><import_from_stmt>dlconv.tensorflow.visualizer TensorFlowVisualizer<if_stmt>self._is_web_page(graph_path)<block_start>TensorFlowVisualizer(self.meta_path).dump_html(graph_path)<block_end><else_stmt><block_start><raise>NotImplementedError('Image format or %s is unsupported!'%graph_path)<block_end><block_end><elif_stmt>self.toolkit<eq>'keras'<block_start><import_from_stmt>dlconv.keras.visualizer KerasVisualizer<line_sep>png_path,html_path=(<none> <none>)<if_stmt>graph_path.endswith('.png')<block_start>png_path=graph_path<block_end><elif_stmt>self._is_web_page(graph_path)<block_start>png_path=graph_path+".png"<line_sep>html_path=graph_path<block_end><else_stmt><block_start><raise>NotImplementedError('Image format or %s is unsupported!'%graph_path)<block_end>KerasVisualizer(self.meta_path).dump_png(png_path)<if_stmt>html_path<block_start>self._png_to_html(png_path html_path)<line_sep>os.remove(png_path)<block_end><block_end><else_stmt><block_start><raise>NotImplementedError('Visualization of %s is unsupported!'%self.toolkit)<block_end><block_end><def_stmt>_is_web_page self path<block_start><return>path.split('.')[-1]<in>('html' 'htm')<block_end><def_stmt>_png_to_html self png_path html_path<block_start><with_stmt>open(png_path "rb")<as>f<block_start>encoded=base64.b64encode(f.read()).decode('utf-8')<block_end>source="""<!DOCTYPE> <html> <head> <meta charset="utf-8"> <title>Keras</title> </head> <body> 
<img alt="Model Graph" src="data:image/png;base64,{base64_str}" /> </body> </html>""".format(base64_str=encoded)<with_stmt>open(html_path 'w' encoding='utf-8')<as>f<block_start>f.write(source)<block_end><block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<line_sep># start tutorial <import_from_stmt>django.db models<import_from_stmt>djng.forms NgModelFormMixin NgFormValidationMixin<import_from_stmt>djng.styling.bootstrap3.forms Bootstrap3ModelForm<class_stmt>SubscribeUser(models.Model)<block_start>full_name=models.CharField("<NAME>" max_length=99)<line_sep>avatar=models.ImageField("Avatar" blank=<false> null=<true>)<line_sep>permit=models.FileField("Permit" blank=<true> null=<true>)<block_end><class_stmt>SubscribeForm(NgModelFormMixin NgFormValidationMixin Bootstrap3ModelForm)<block_start>use_required_attribute=<false><line_sep>scope_prefix='subscribe_data'<line_sep>form_name='my_form'<class_stmt>Meta<block_start>model=SubscribeUser<line_sep>fields=['full_name' 'avatar' 'permit']<block_end><block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name """Compute and schedule for add, multiply, subtract slice op Please note the following assumptions made by the implementation: 1) The inputs will be multiple of crouton layout except for the axis that needs broadcasting."""<import_from_stmt>tvm te<import_from_stmt>tvm tir<import_from_stmt>tvm topi<import_from_stmt>..utils get_layout_transform_fn<def_stmt>add_broadcast_compute input_a input_b<block_start>"""Call the add op from topi"""<line_sep><return>topi.add(input_a input_b)<block_end><def_stmt>subtract_broadcast_compute input_a input_b<block_start>"""Call the subtract op from topi"""<line_sep><return>topi.subtract(input_a input_b)<block_end><def_stmt>multiply_broadcast_compute input_a input_b<block_start>"""Call the multiply op from topi"""<line_sep><return>topi.multiply(input_a input_b)<block_end><def_stmt>tir_broadcast_schedule out_m input_a input_b output_layout:str input_a_layout:str input_b_layout:str op_name:str <block_start>"""Schedule for input and output layout nhwc-8h2w32c2w-2d considering broadcast"""<line_sep>func=te.create_prim_func([input_a input_b out_m])<line_sep>s=tir.Schedule(func)<line_sep>block_dict={"add":"T_add" "subtract":"T_subtract" "multiply":"T_multiply"}<line_sep>block=s.get_block(block_dict[op_name])<if_stmt>input_a_layout<eq>"nhwc-8h2w32c2w-2d"<block_start>input_a_transformed_layout=get_layout_transform_fn(input_a_layout)<line_sep>s.transform_layout(block buffer=("read" 0) index_map=input_a_transformed_layout)<block_end><if_stmt>input_b_layout<eq>"nhwc-8h2w32c2w-2d"<block_start>input_b_transformed_layout=get_layout_transform_fn(input_b_layout)<line_sep>s.transform_layout(block buffer=("read" 1) index_map=input_b_transformed_layout)<block_end>output_transformed_layout=get_layout_transform_fn(output_layout)<line_sep>s.transform_layout(block buffer=("write" 0) index_map=output_transformed_layout)<line_sep>n,h,w,c=s.get_loops(block)<line_sep>h_o,h_i=s.split(h [<none> 8])<line_sep>w_o,w_i=s.split(w [<none> 4])<line_sep>c_o,c_i=s.split(c [<none> 32])<line_sep>wio,wii=s.split(w_i [<none> 2])<line_sep>s.reorder(n h_o w_o c_o h_i wio c_i wii)<line_sep>fused=s.fuse(c_i wii)<line_sep>s.vectorize(fused)<line_sep><return>s<block_end>
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>unittest<import_from_stmt>telemetry.core browser_finder<import_from_stmt>telemetry.core exceptions<import_from_stmt>telemetry.core extension_to_load<import_from_stmt>telemetry.core util<import_from_stmt>telemetry.core.backends.chrome cros_interface<import_from_stmt>telemetry.unittest options_for_unittests<class_stmt>CrOSAutoTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>options=options_for_unittests.GetCopy()<line_sep>self._cri=cros_interface.CrOSInterface(options.cros_remote options.cros_ssh_identity)<line_sep>self._is_guest=options.browser_type<eq>'cros-chrome-guest'<line_sep>self._username=''<if>self._is_guest<else>options.browser_options.username<line_sep>self._password=options.browser_options.password<block_end><def_stmt>_IsCryptohomeMounted self<block_start>"""Returns True if cryptohome is mounted"""<line_sep>cryptohomeJSON,_=self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome' '--action=status'])<line_sep>cryptohomeStatus=json.loads(cryptohomeJSON)<line_sep><return>(cryptohomeStatus['mounts']<and>cryptohomeStatus['mounts'][0]['mounted'])<block_end><def_stmt>_CreateBrowser self autotest_ext=<false> auto_login=<true><block_start>"""Finds and creates a browser for tests. if autotest_ext is True, also loads the autotest extension"""<line_sep>options=options_for_unittests.GetCopy()<if_stmt>autotest_ext<block_start>extension_path=os.path.join(os.path.dirname(__file__) 'autotest_ext')<line_sep>self._load_extension=extension_to_load.ExtensionToLoad(path=extension_path browser_type=options.browser_type is_component=<true>)<line_sep>options.extensions_to_load=[self._load_extension]<block_end>browser_to_create=browser_finder.FindBrowser(options)<line_sep>self.assertTrue(browser_to_create)<line_sep>options.browser_options.create_browser_with_oobe=<true><line_sep>options.browser_options.auto_login=auto_login<line_sep>b=browser_to_create.Create()<line_sep>b.Start()<line_sep><return>b<block_end><def_stmt>_GetAutotestExtension self browser<block_start>"""Returns the autotest extension instance"""<line_sep>extension=browser.extensions[self._load_extension]<line_sep>self.assertTrue(extension)<line_sep><return>extension<block_end><def_stmt>_GetLoginStatus self browser<block_start>extension=self._GetAutotestExtension(browser)<line_sep>self.assertTrue(extension.EvaluateJavaScript("typeof('chrome.autotestPrivate') != 'undefined'"))<line_sep>extension.ExecuteJavaScript(''' window.__login_status = null; chrome.autotestPrivate.loginStatus(function(s) { window.__login_status = s; }); ''')<line_sep><return>util.WaitFor(<lambda>:extension.EvaluateJavaScript('window.__login_status') 10)<block_end><def_stmt>testCryptohomeMounted self<block_start>"""Verifies cryptohome mount status for regular and guest user and when logged out"""<with_stmt>self._CreateBrowser()<as>b<block_start>self.assertEquals(1 len(b.tabs))<line_sep>self.assertTrue(b.tabs[0].url)<line_sep>self.assertTrue(self._IsCryptohomeMounted())<line_sep>chronos_fs=self._cri.FilesystemMountedAt('/home/chronos/user')<line_sep>self.assertTrue(chronos_fs)<if_stmt>self._is_guest<block_start>self.assertEquals(chronos_fs 'guestfs')<block_end><else_stmt><block_start>home,_=self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome-path' 'user' 
self._username])<line_sep>self.assertEquals(self._cri.FilesystemMountedAt(home.rstrip()) chronos_fs)<block_end><block_end>self.assertFalse(self._IsCryptohomeMounted())<line_sep>self.assertEquals(self._cri.FilesystemMountedAt('/home/chronos/user') '/dev/mapper/encstateful')<block_end><def_stmt>testLoginStatus self<block_start>"""Tests autotestPrivate.loginStatus"""<with_stmt>self._CreateBrowser(autotest_ext=<true>)<as>b<block_start>login_status=self._GetLoginStatus(b)<line_sep>self.assertEquals(type(login_status) dict)<line_sep>self.assertEquals(<not>self._is_guest login_status['isRegularUser'])<line_sep>self.assertEquals(self._is_guest login_status['isGuest'])<line_sep>self.assertEquals(login_status['email'] self._username)<line_sep>self.assertFalse(login_status['isScreenLocked'])<block_end><block_end><def_stmt>_IsScreenLocked self browser<block_start><return>self._GetLoginStatus(browser)['isScreenLocked']<block_end><def_stmt>_LockScreen self browser<block_start>self.assertFalse(self._IsScreenLocked(browser))<line_sep>extension=self._GetAutotestExtension(browser)<line_sep>self.assertTrue(extension.EvaluateJavaScript("typeof chrome.autotestPrivate.lockScreen == 'function'"))<line_sep>logging.info('Locking screen')<line_sep>extension.ExecuteJavaScript('chrome.autotestPrivate.lockScreen();')<line_sep>logging.info('Waiting for the lock screen')<def_stmt>ScreenLocked <block_start><return>(browser.oobe<and>browser.oobe.EvaluateJavaScript("typeof Oobe == 'function'")<and>browser.oobe.EvaluateJavaScript("typeof Oobe.authenticateForTesting == 'function'"))<block_end>util.WaitFor(ScreenLocked 10)<line_sep>self.assertTrue(self._IsScreenLocked(browser))<block_end><def_stmt>_AttemptUnlockBadPassword self browser<block_start>logging.info('Trying a bad password')<def_stmt>ErrorBubbleVisible <block_start><return><not>browser.oobe.EvaluateJavaScript(''' document.getElementById('bubble').hidden ''')<block_end>self.assertFalse(ErrorBubbleVisible())<line_sep>browser.oobe.ExecuteJavaScript(''' Oobe.authenticateForTesting('%s', 'bad'); '''%self._username)<line_sep>util.WaitFor(ErrorBubbleVisible 10)<line_sep>self.assertTrue(self._IsScreenLocked(browser))<block_end><def_stmt>_UnlockScreen self browser<block_start>logging.info('Unlocking')<line_sep>browser.oobe.ExecuteJavaScript(''' Oobe.authenticateForTesting('%s', '%s'); '''%(self._username self._password))<line_sep>util.WaitFor(<lambda>:<not>browser.oobe 10)<line_sep>self.assertFalse(self._IsScreenLocked(browser))<block_end><def_stmt>testScreenLock self<block_start>"""Tests autotestPrivate.screenLock"""<with_stmt>self._CreateBrowser(autotest_ext=<true>)<as>browser<block_start>self._LockScreen(browser)<line_sep>self._AttemptUnlockBadPassword(browser)<line_sep>self._UnlockScreen(browser)<block_end><block_end><def_stmt>testLogout self<block_start>"""Tests autotestPrivate.logout"""<with_stmt>self._CreateBrowser(autotest_ext=<true>)<as>b<block_start>extension=self._GetAutotestExtension(b)<try_stmt><block_start>extension.ExecuteJavaScript('chrome.autotestPrivate.logout();')<block_end><except_stmt>(exceptions.BrowserConnectionGoneException exceptions.BrowserGoneException)<block_start><pass><block_end>util.WaitFor(<lambda>:<not>self._IsCryptohomeMounted() 20)<block_end><block_end><def_stmt>_SwitchRegion self region<block_start>self._cri.RunCmdOnDevice(['stop' 'ui'])<line_sep># Change VPD (requires RW-enabled firmware). # To save time, region and initial_timezone are not set. 
vpd={'initial_locale':region.language_code 'keyboard_layout':region.keyboard}<for_stmt>(key value) vpd.items()<block_start>self._cri.RunCmdOnDevice(['vpd' '-s' '"%s"="%s"'%(key value)])<block_end># Remove cached files to clear initial locale info and force regeneration. self._cri.RunCmdOnDevice(['rm' '/home/chronos/Local\ State'])<line_sep>self._cri.RunCmdOnDevice(['rm' '/home/chronos/.oobe_completed'])<line_sep>self._cri.RunCmdOnDevice(['dump_vpd_log' '--force'])<line_sep>self._cri.RunCmdOnDevice(['start' 'ui'])<block_end><def_stmt>_OobeHasOption self browser selectId value<block_start>hasOptionJs=''' // Check that the option is present, and selected if it is the default. (function hasOption(selectId, value, isDefault) { var options = document.getElementById(selectId).options; for (var i = 0; i < options.length; i++) { if (options[i].value == value) { // The option is present. Make sure it's selected if necessary. return !isDefault || options.selectedIndex == i; } } return false; })("%s", "%s", %s); '''<line_sep><return>browser.oobe.EvaluateJavaScript(hasOptionJs%(selectId value 'true'))<block_end><def_stmt>_ResolveLanguage self locale# If the locale matches a language but not the country, fall back to # an existing locale. See ui/base/l10n/l10n_util.cc. <block_start>lang,_,region=map(str.lower locale.partition('-'))<if_stmt><not>region<block_start><return>""<block_end># Map from other countries to a localized country <if_stmt>lang<eq>'es'<and>region<eq>'es'<block_start><return>'es-419'<block_end><if_stmt>lang<eq>'zh'<block_start><if_stmt>region<in>('hk' 'mo')<block_start><return>'zh-TW'<block_end><return>'zh-CN'<block_end><if_stmt>lang<eq>'en'<block_start><if_stmt>region<in>('au' 'ca' 'nz' 'za')<block_start><return>'en-GB'<block_end><return>'en-US'<block_end># No mapping found <return>""<block_end><def_stmt>testOobeLocalization self<block_start>"""Tests different region configurations at OOBE"""<line_sep># Save the original device localization settings. # To save time, only read initial_locale and keyboard_layout. initial_region=self.Region('' '' '' '' '')<line_sep>initial_region.language_code,_=self._cri.RunCmdOnDevice(['vpd' '-g' 'initial_locale'])<line_sep>initial_region.keyboard,_=self._cri.RunCmdOnDevice(['vpd' '-g' 'keyboard_layout'])<for_stmt>region self.REGIONS_LIST<block_start>self._SwitchRegion(region)<with_stmt>self._CreateBrowser(auto_login=<false>)<as>browser# Ensure the dropdown lists have been created. <block_start>util.WaitFor(<lambda>:browser.oobe.EvaluateJavaScript('document.getElementById("language-select") != null') 10)<line_sep># Find the language, or an acceptable fallback value. languageFound=self._OobeHasOption(browser 'language-select' region.language_code)<if_stmt><not>languageFound<block_start>fallback=self._ResolveLanguage(region.language_code)<line_sep>self.assertTrue(fallback<and>self._OobeHasOption(browser 'language-select' fallback))<block_end># Find the keyboard layout. self.assertTrue(self._OobeHasOption(browser 'keyboard-select' region.keyboard))<block_end><block_end># Test is finished. Restore original region settings. self._SwitchRegion(initial_region)<block_end># The Region class and region list will be available in regions.py. 
<class_stmt>Region(object)<block_start><def_stmt>__init__ self region_code keyboard time_zone language_code keyboard_mechanical_layout description=<none> notes=<none><block_start>self.region_code=region_code<line_sep>self.keyboard=keyboard<line_sep>self.time_zone=time_zone<line_sep>self.language_code=language_code<line_sep>self.keyboard_mechanical_layout=keyboard_mechanical_layout<line_sep>self.description=description<or>region_code<line_sep>self.notes=notes<block_end><block_end><class_stmt>Enum(frozenset)<block_start><def_stmt>__getattr__ self name<block_start><if_stmt>name<in>self<block_start><return>name<block_end><raise>AttributeError<block_end><block_end>KeyboardMechanicalLayout=Enum(['ANSI' 'ISO' 'JIS' 'ABNT2'])<line_sep>_KML=KeyboardMechanicalLayout<line_sep>REGIONS_LIST=[Region('au' 'xkb:us::eng' 'Australia/Sydney' 'en-AU' _KML.ANSI 'Australia') Region('ca.ansi' 'xkb:us::eng' 'America/Toronto' 'en-CA' _KML.ANSI 'Canada (US keyboard)' 'Canada with US (ANSI) keyboard; see http://goto/cros-canada') Region('ca.fr' 'xkb:ca::fra' 'America/Toronto' 'fr-CA' _KML.ISO 'Canada (French keyboard)' ('Canadian French (ISO) keyboard. The most common configuration for '<concat>'Canadian French SKUs. See http://goto/cros-canada')) Region('ca.hybrid' 'xkb:ca:eng:eng' 'America/Toronto' 'en-CA' _KML.ISO 'Canada (hybrid)' ('Canada with hybrid xkb:ca:eng:eng + xkb:ca::fra keyboard (ISO), '<concat>'defaulting to English language and keyboard. Used only if there '<concat>'needs to be a single SKU for all of Canada. See '<concat>'http://goto/cros-canada')) Region('ca.multix' 'xkb:ca:multix:fra' 'America/Toronto' 'fr-CA' _KML.ISO 'Canada (multilingual)' ("Canadian Multilingual keyboard; you probably don't want this. See "<concat>"http://goto/cros-canada")) Region('de' 'xkb:de::ger' 'Europe/Berlin' 'de' _KML.ISO 'Germany') Region('fi' 'xkb:fi::fin' 'Europe/Helsinki' 'fi' _KML.ISO 'Finland') Region('fr' 'xkb:fr::fra' 'Europe/Paris' 'fr' _KML.ISO 'France') Region('gb' 'xkb:gb:extd:eng' 'Europe/London' 'en-GB' _KML.ISO 'UK') Region('ie' 'xkb:gb:extd:eng' 'Europe/Dublin' 'en-GB' _KML.ISO 'Ireland') Region('in' 'xkb:us::eng' 'Asia/Calcutta' 'en-US' _KML.ANSI 'India') Region('my' 'xkb:us::eng' 'Asia/Kuala_Lumpur' 'ms' _KML.ANSI 'Malaysia') Region('nl' 'xkb:us:intl:eng' 'Europe/Amsterdam' 'nl' _KML.ANSI 'Netherlands') Region('nordic' 'xkb:se::swe' 'Europe/Stockholm' 'en-US' _KML.ISO 'Nordics' ('Unified SKU for Sweden, Norway, and Denmark. This defaults '<concat>'to Swedish keyboard layout, but starts with US English language '<concat>'for neutrality. Use if there is a single combined SKU for Nordic '<concat>'countries.')) Region('se' 'xkb:se::swe' 'Europe/Stockholm' 'sv' _KML.ISO 'Sweden' ("Use this if there separate SKUs for Nordic countries (Sweden, "<concat>"Norway, and Denmark), or the device is only shipping to Sweden. "<concat>"If there is a single unified SKU, use 'nordic' instead.")) Region('sg' 'xkb:us::eng' 'Asia/Singapore' 'en-GB' _KML.ANSI 'Singapore') Region('us' 'xkb:us::eng' 'America/Los_Angeles' 'en-US' _KML.ANSI 'United States') ]<block_end>
"""Monte Carlo receding horizon control."""<import_from_stmt>abc ABC abstractmethod<import_from_stmt>multiprocessing Pipe Process<import_stmt>gym<import_from_stmt>stable_baselines.common.vec_env CloudpickleWrapper<import_from_stmt>aprl.common.mujoco MujocoState ResettableEnv<class_stmt>MujocoResettableWrapper(ResettableEnv gym.Wrapper)<block_start>"""Converts a MujocoEnv into a ResettableEnv. Note all MuJoCo environments are resettable."""<def_stmt>__init__ self env<block_start>"""Wraps a MujocoEnv, adding get_state and set_state methods. :param env: a MujocoEnv. NOTE: it must not be wrapped in a TimeLimit."""<if_stmt>hasattr(env "_max_episode_steps")<block_start><raise>TypeError("Environment must not have a time limit "<concat>"(try passing in env.unwrapped instead).")<block_end>gym.Wrapper.__init__(self env)<line_sep>self.sim=env.unwrapped.sim<block_end><def_stmt>get_state self<block_start>"""Serializes the qpos and qvel state of the MuJoCo emulator."""<line_sep><return>MujocoState.from_mjdata(self.sim.data).flatten()<block_end><def_stmt>set_state self x<block_start>"""Restores qpos and qvel, calling forward() to derive other values."""<line_sep>state=MujocoState.from_flattened(x self.sim)<line_sep>state.set_mjdata(self.sim.data)<line_sep>self.sim.forward()<block_end># put mjData in consistent state <def_stmt>reset self<block_start>"""See base class."""<line_sep><return>self.env.reset()<block_end><def_stmt>step self a<block_start>"""See base class."""<line_sep><return>self.env.step(a)<block_end><block_end><class_stmt>MonteCarlo(ABC)<block_start>"""Selects an action for a ResettableEnv by random search. Randomly samples fixed-length sequences of actions. Evaluates each trajectory in the environment, resetting the state to the original after each trajectory."""<line_sep>@abstractmethod<def_stmt>__init__ self horizon trajectories<block_start>"""Constructs a MonteCarlo instance for env. :param horizon: the length of the trajectories to search over. :param trajectories: the number of trajectories to evaluate."""<line_sep>self.horizon=horizon<line_sep>self.trajectories=trajectories<block_end>@abstractmethod<def_stmt>seed self seed<block_start>"""Sets a seed for the PRNG for the action sequences. :param seed (int): a seed."""<line_sep><pass><block_end>@abstractmethod<def_stmt>best_action self state<block_start>"""Returns the best action out of a random search of action sequences. Generates self.trajectories action sequences, each of length self.horizon. The cumulative reward of each action sequence is computed, starting from state. The function returns the first action and the cumulative reward of the action sequences with the largest cumulative reward. :param state: a value returned by env.get_state(). :return (action, reward): the best action found and associated reward."""<line_sep><pass><block_end><block_end><class_stmt>MonteCarloSingle(MonteCarlo)<block_start>"""Selects an action for a ResettableEnv by random search. See base class for details. This implementation is not parallelized."""<def_stmt>__init__ self env horizon trajectories<block_start>"""See base class."""<line_sep>super().__init__(horizon trajectories)<line_sep>self.env=env<block_end><def_stmt>seed self seed<block_start>"""Sets a seed for the PRNG for the action sequences. :param seed (int): a seed."""<line_sep>self.env.action_space.np_random.seed(seed)<block_end><def_stmt>best_action self state<block_start>"""Returns the best action out of a random search of action sequences. See base class for details. 
Search takes place in a single environment, which is reset to state before evaluating each action sequence."""<line_sep>res=[]<for_stmt>_ range(self.trajectories)<block_start>self.env.set_state(state)<line_sep>us=[self.env.action_space.sample()<for>_ range(self.horizon)]<line_sep>total_rew=0<for_stmt>u us<block_start>_ob,rew,done,_info=self.env.step(u)<line_sep>total_rew<augadd>rew<if_stmt>done<block_start><break><block_end><block_end>res.append((us[0] total_rew))<block_end>self.env.set_state(state)<line_sep>best=max(res key=<lambda>x:x[1])<line_sep><return>best<block_end><block_end><def_stmt>_worker remote parent_remote dynamic_fn_wrapper horizon trajectories<block_start>parent_remote.close()<line_sep>dynamics=dynamic_fn_wrapper.var()<line_sep>dynamics.reset()<line_sep>mc=MonteCarloSingle(dynamics horizon trajectories)<try_stmt><block_start><while_stmt><true><block_start>cmd,x=remote.recv()<if_stmt>cmd<eq>"seed"<block_start>mc.seed(x)<block_end><elif_stmt>cmd<eq>"search"<block_start>best_u,best_r=mc.best_action(x)<line_sep>remote.send((best_u best_r))<block_end><elif_stmt>cmd<eq>"close"<block_start>remote.close()<line_sep><break><block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end><block_end><except_stmt>KeyboardInterrupt<block_start>print("MonteCarloParallel worker: got KeyboardInterrupt")<block_end><finally_stmt><block_start>dynamics.close()<block_end><block_end><class_stmt>MonteCarloParallel(MonteCarlo)<block_start>"""Like MonteCarlo, but performs the random search in parallel."""<line_sep># This implementation is inspired by Baselines SubprocVecEnv. <def_stmt>__init__ self env_fns horizon trajectories seed=0<block_start>"""Launch subprocess workers and store configuration parameters. :param env_fns (list<()->ResettableEnv>): list of thunks. :param horizon (int): length of trajectories to search over. :param trajectories (int): minimum number of trajectories to evaluate. 
It will be rounded up to the nearest multiple of len(make_env)."""<line_sep>super().__init__(horizon trajectories)<line_sep>nremotes=len(env_fns)<line_sep># Integer ceiling of self.trajectories / nworkers traj_per_worker=(self.trajectories-1)<floordiv>nremotes+1<line_sep>pipes=[Pipe()<for>_ range(nremotes)]<line_sep>self.remotes,self.work_remotes=zip(*pipes)<line_sep>worker_cfgs=zip(self.work_remotes self.remotes env_fns)<line_sep>self.ps=[]<for_stmt>i,(work_remote remote dynamic_fn) enumerate(worker_cfgs)<block_start>args=(work_remote remote CloudpickleWrapper(dynamic_fn) horizon traj_per_worker)<line_sep>process=Process(target=_worker args=args)<line_sep>process.daemon=<true><line_sep># If the main process crashes, we should not cause things to hang process.start()<line_sep>self.ps.append(process)<block_end><for_stmt>remote self.work_remotes<block_start>remote.close()<block_end><block_end><def_stmt>seed self seed<block_start>"""See base class."""<for_stmt>i,remote enumerate(self.remotes)<block_start>remote.send(("seed" seed+i))<block_end><block_end><def_stmt>best_action self state<block_start>"""Returns the best action out of a random search of action sequences."""<for_stmt>remote self.remotes<block_start>remote.send(("search" state))<block_end>results=[remote.recv()<for>remote self.remotes]<line_sep>best=max(results key=<lambda>x:x[1])<line_sep><return>best<block_end><def_stmt>close self<block_start>"""Shuts down parallel workers."""<for_stmt>remote self.remotes<block_start>remote.send(("close" <none>))<block_end><for_stmt>p self.ps<block_start>p.join()<block_end><block_end><block_end><def_stmt>receding_horizon monte_carlo env<block_start>"""Receding horizon control :param monte_carlo(MonteCarlo): a Monte Carlo controller for env or a clone of env. :param env(ResettableEnv): a resettable environment."""<while_stmt><true><block_start>state=env.get_state()<line_sep>a,_seq_rew=monte_carlo.best_action(state)<line_sep>ob,rew,done,info=env.step(a)<line_sep><yield>a ob rew done info<if_stmt>done<block_start><break><block_end><block_end><block_end>
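# A minimal end-to-end sketch of how MujocoResettableWrapper, MonteCarloSingle and receding_horizon fit together; make_env is a hypothetical thunk returning an unwrapped gym MujocoEnv, and the horizon/trajectory counts are arbitrary choices. <def_stmt>_receding_horizon_sketch make_env horizon=10 trajectories=64<block_start>env=MujocoResettableWrapper(make_env())<line_sep>env.reset()<line_sep># plan by random search from the current state before every step mc=MonteCarloSingle(env horizon trajectories)<line_sep>mc.seed(0)<line_sep>total_reward=0<for_stmt>_action,_ob,rew,_done,_info receding_horizon(mc env)<block_start>total_reward<augadd>rew<block_end><return>total_reward<block_end>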
# -*- coding: utf-8 -*- """ The `TreeNode` class provides many helper functions that make the work done in the `BinarySearchTree` class methods much easier. The constructor for a `TreeNode`, along with these helper functions, is shown below. As you can see, many of these helper functions help to classify a node according to its own position as a child, (left or right) and the kind of children the node has. The `TreeNode` class will also explicitly keep track of the parent as an attribute of each node. You will see why this is important when we discuss the implementation for the `del` operator. One of the more interesting methods of `TreeNode` provides an interface to simply iterate over all the keys in the tree in order. You already know how to traverse a binary tree in order, using the `inorder` traversal algorithm. However, because we want our iterator to operate lazily, in this case we use the `yield` keyword to define our `__iter__` method as a Python generator. Pay close attention to the `__iter__` implementation as at first glance you might think that the code is not recursive: in fact, because `__iter__` overrides the `for x in` operation for iteration, it really is recursive! Our full implementation of `TreeNode` is provided below. It includes three further methods `find_successor`, `find_min` and `splice_out` which you can ignore for now as we will return to them later when discussing deletion. """<class_stmt>TreeNode(object)<block_start><def_stmt>__init__ self key val left=<none> right=<none> parent=<none><block_start>self.key=key<line_sep>self.val=val<line_sep>self.left=left<line_sep>self.right=right<line_sep>self.parent=parent<block_end><def_stmt>is_left_child self<block_start><return>self.parent<and>self.parent.left<eq>self<block_end><def_stmt>is_right_child self<block_start><return>self.parent<and>self.parent.right<eq>self<block_end><def_stmt>is_leaf self<block_start><return><not>(self.right<or>self.left)<block_end><def_stmt>has_any_children self<block_start><return>self.right<or>self.left<block_end><def_stmt>has_both_children self<block_start><return>self.right<and>self.left<block_end><def_stmt>has_one_child self<block_start><return>self.has_any_children()<and><not>self.has_both_children()<block_end><def_stmt>replace_node_data self key val left right<block_start>self.key=key<line_sep>self.val=val<line_sep>self.left=left<line_sep>self.right=right<if_stmt>self.left<block_start>self.left.parent=self<block_end><if_stmt>self.right<block_start>self.right.parent=self<block_end><block_end><def_stmt>__iter__ self<block_start><if_stmt>self<is><none><block_start><return><block_end><if_stmt>self.left# `in` calls `__iter__` so is recursive <block_start><for_stmt>elem self.left<block_start><yield>elem<block_end><block_end><yield>self.key<if_stmt>self.right# recurse again <block_start><for_stmt>elem self.right<block_start><yield>elem<block_end><block_end><block_end><def_stmt>find_successor self<block_start><if_stmt>self.right<block_start><return>self.right.find_min()<block_end><if_stmt>self.parent<is><none><block_start><return><none><block_end><if_stmt>self.is_left_child()<block_start><return>self.parent<block_end>self.parent.right=<none><line_sep>successor=self.parent.find_successor()<line_sep>self.parent.right=self<line_sep><return>successor<block_end><def_stmt>find_min self<block_start>current=self<while_stmt>current.left<block_start>current=current.left<block_end><return>current<block_end><def_stmt>splice_out 
self<block_start><if_stmt>self.is_leaf()<block_start><if_stmt>self.is_left_child()<block_start>self.parent.left=<none><block_end><else_stmt><block_start>self.parent.right=<none><block_end><block_end><else_stmt><block_start>promoted_node=self.left<or>self.right<if_stmt>self.is_left_child()<block_start>self.parent.left=promoted_node<block_end><else_stmt><block_start>self.parent.right=promoted_node<block_end>promoted_node.parent=self.parent<block_end><block_end>""" Now that we have our `TreeNode` class we can begin to write `BinarySearchTree` itself. Recall that the core functionality of this class will be to enable `put`ing to and `get`ing from the tree, so we begin our implementation with the `put` functionality. In order to enable the `tree[1] = 'foo'` style assignment interface for our `BinarySearchTree` instances, we override the `__setitem__` magic method. In this method we first check to see if the tree already has a root. If there is not a root then we create a new `TreeNode` and set it as the root of the tree. If a root node is already in place then `put` calls the private, recursive, helper function `_put` to search the tree according to the following algorithm: - Starting at the root of the tree, search the binary tree comparing the new key to the key in the current node. If the new key is less than the current node, search the left subtree. If the new key is greater than the current node, search the right subtree. - When there is no left (or right) child to search, we have found the position in the tree where the new node should be installed. - To add a node to the tree, create a new `TreeNode` object and insert the object at the point discovered in the previous step. The code below shows the Python code for inserting a new node in the tree. The `_put` function is written recursively following the steps outlined above. Notice that when a new child is inserted into the tree, the `node` is passed to the new tree as the parent. One important problem with our implementation of insert is that duplicate keys are not handled properly. As our tree is implemented a duplicate key will create a new node with the same key value in the right subtree of the node having the original key. The result of this is that the node with the new key will never be found during a search. A better way to handle the insertion of a duplicate key is for the value associated with the new key to replace the old value. We leave fixing this bug as an exercise for you. 
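To see the problem concretely: after `tree[5] = 'a'` followed by `tree[5] = 'b'`, the second assignment creates a second node with key 5 in the right subtree of the first; since a search always stops at the first matching key, `tree[5]` still returns `'a'` and the value `'b'` is effectively unreachable.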
"""<block_end><class_stmt>BinarySearchTree(object)<block_start>TreeNodeClass=TreeNode<def_stmt>__init__ self<block_start>self.root=<none><line_sep>self.size=0<block_end><def_stmt>__len__ self<block_start><return>self.size<block_end><def_stmt>__iter__ self<block_start><return>self.root.__iter__()<block_end><def_stmt>__setitem__ self key val<block_start><if_stmt>self.root<block_start>self._put(key val self.root)<block_end><else_stmt><block_start>self.root=self.TreeNodeClass(key val)<block_end>self.size=self.size+1<block_end><def_stmt>_put self key val node<block_start><if_stmt>key<l>node.key<block_start><if_stmt>node.left<block_start>self._put(key val node.left)<block_end><else_stmt><block_start>node.left=self.TreeNodeClass(key val parent=node)<block_end><block_end><else_stmt><block_start><if_stmt>node.right<block_start>self._put(key val node.right)<block_end><else_stmt><block_start>node.right=self.TreeNodeClass(key val parent=node)<block_end><block_end><block_end>""" The diagram below illustrates the process for inserting a new node into a binary search tree. The lightly shaded nodes indicate the nodes that were visited during the insertion process. ![Inserting a node with key = 19](figures/binary-search-tree-put.png) Once the tree is constructed, the next task is to implement the retrieval of a value for a given key. The `get` functionality is even easier than the `put` functionality because we simply search the tree recursively until we get to a non-matching leaf node or find a matching key. When a matching key is found, the value stored in the val of the node is returned. Again, inorder to enable a `tree[1]` retrieval interface, we overload one of Python’s magic methods—in this case `__getitem__`. Just like with `__setitem__`, the primary purpose of this method is to handle presence and absence of a root node, and delegates the core `get` functionality to `_get`. The search code in the `_get` method uses the same logic for choosing the left or right child as the `_put` method. Notice that the `_get` method returns a `TreeNode` to `__getitem__`, this allows `_get` to be used as a flexible helper method for other `BinarySearchTree` methods that may need to make use of other data from the `TreeNode` besides the val. """<def_stmt>__getitem__ self key<block_start><if_stmt>self.root<block_start>result=self._get(key self.root)<if_stmt>result<block_start><return>result.val<block_end><block_end><raise>KeyError<block_end><def_stmt>_get self key node<block_start><if_stmt><not>node<block_start><return><none><block_end><if_stmt>node.key<eq>key<block_start><return>node<block_end><if_stmt>key<l>node.key<block_start><return>self._get(key node.left)<block_end><return>self._get(key node.right)<block_end>""" Using `_get`, we can implement the `in` operation by writing a `__contains__` method for the `BinarySearchTree`. The `__contains__` method will simply call `_get` and return `True` if `_get` returns a value, or `False` if it returns `None`. The code for `__contains__` is shown below. """<def_stmt>__contains__ self key<block_start><return>bool(self._get(key self.root))<block_end>""" Finally, we turn our attention to the most challenging method in the binary search tree: the deletion of a key. The first task is to find the node to delete by searching the tree. If the tree has more than one node we search using the `_get` method to find the `TreeNode` that needs to be removed. 
If the tree only has a single node, that means we are removing the root of the tree, but we still must check to make sure the key of the root matches the key that is to be deleted. In either case if the key is not found the `del` operator raises an error. """<def_stmt>delete self key<block_start><if_stmt>self.size<g>1<block_start>node_to_remove=self._get(key self.root)<if_stmt>node_to_remove<block_start>self.remove(node_to_remove)<line_sep>self.size=self.size-1<line_sep><return><block_end><block_end><elif_stmt>self.size<eq>1<and>self.root.key<eq>key<block_start>self.root=<none><line_sep>self.size=self.size-1<line_sep><return><block_end><raise>KeyError('Error, key not in tree')<block_end><def_stmt>__delitem__ self key<block_start>self.delete(key)<line_sep>""" Once we’ve found the node containing the key we want to delete, there are three cases that we must consider: 1. The node to be deleted has no children 2. The node to be deleted has only one child 3. The node to be deleted has two children The first case is straightforward. If the current node has no children all we need to do is delete the node and remove the reference to this node in the parent. The code for this case is shown below. """<block_end><def_stmt>remove self node<block_start><if_stmt>node.is_leaf()<and>node.parent<is><not><none><block_start><if_stmt>node<eq>node.parent.left<block_start>node.parent.left=<none><block_end><else_stmt><block_start>node.parent.right=<none><block_end>""" ![Deleting Node 16, a node without children](figures/binary-search-tree-delete-1.png) The second case is only slightly more complicated (see below). If a node has only a single child, then we can simply promote the child to take the place of its parent. The code for this case is shown in the next code sample. As you look at this code you will see that there are six cases to consider. Since the cases are symmetric with respect to either having a left or right child we will just discuss the case where the current node has a left child. The decision proceeds as follows: 1. If the current node is a left child then we only need to update the parent reference of the left child to point to the parent of the current node, and then update the left child reference of the parent to point to the current node’s left child. 2. If the current node is a right child then we only need to update the parent reference of the right child to point to the parent of the current node, and then update the right child reference of the parent to point to the current node’s right child. 3. If the current node has no parent, it must be the root. In this case we will just replace the `key`, `val`, `left`, and `right` data by calling the `replace_node_data` method on the root. Code for this decision process may look like: """<block_end><elif_stmt>node.has_one_child()<block_start>promoted_node=node.left<or>node.right<if_stmt>node.is_left_child()<block_start>promoted_node.parent=node.parent<line_sep>node.parent.left=promoted_node<block_end><elif_stmt>node.is_right_child()<block_start>promoted_node.parent=node.parent<line_sep>node.parent.right=promoted_node<block_end><else_stmt><block_start>node.replace_node_data(promoted_node.key promoted_node.val promoted_node.left promoted_node.right)<block_end>""" ![Deleting node 25, a node that has a single child](figures/binary-search-tree-delete-2.png) The third case is the most difficult case to handle (see below). If a node has two children, then it is unlikely that we can simply promote one of them to take the node’s place. 
We can, however, search the tree for a node that can be used to replace the one scheduled for deletion. What we need is a node that will preserve the binary search tree relationships for both of the existing left and right subtrees. The node that will do this is the node that has the next-largest key in the tree. We call this node the **successor**, and we will look at a way to find the successor shortly. The successor is guaranteed to have no more than one child, so we know how to remove it using the two cases for deletion that we have already implemented. Once the successor has been removed, we simply put it in the tree in place of the node to be deleted. ![Deleting node 5, a node with two children](figures/binary-search-tree-delete-3.png) The code to handle the third case is shown below. Notice that we make use of the helper methods `find_successor` and `find_min` to find the successor. To remove the successor, we make use of the method `splice_out`. The reason we use `splice_out` is that it goes directly to the node we want to splice out and makes the right changes. We could call `delete` recursively, but then we would waste time re-searching for the key node. """<block_end><else_stmt># has both children <block_start>successor=node.find_successor()<if_stmt>successor<block_start>successor.splice_out()<line_sep>node.key=successor.key<line_sep>node.val=successor.val<block_end><block_end><block_end>""" The code to find the successor is shown above and as you can see is a method of the `TreeNode` class. This code makes use of the same properties of binary search trees that cause an inorder traversal to print out the nodes in the tree from smallest to largest. There are three cases to consider when looking for the successor: 1. If the node has a right child, then the successor is the smallest key in the right subtree. 2. If the node has no right child and is the left child of its parent, then the parent is the successor. 3. If the node is the right child of its parent, and itself has no right child, then the successor to this node is the successor of its parent, excluding this node. The first condition is the only one that matters for us when deleting a node from a binary search tree. The `find_min` method is called to find the minimum key in a subtree. You should convince yourself that the minimum valued key in any binary search tree is the leftmost child of the tree. Therefore the `find_min` method simply follows the `left` references in each node of the subtree until it reaches a node that does not have a left child. """<block_end>
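# A minimal usage sketch of the finished BinarySearchTree, exercising only the methods defined above; the helper name, keys and values are arbitrary sample data. <def_stmt>_binary_search_tree_sketch <block_start>tree=BinarySearchTree()<line_sep># __setitem__ drives put, __getitem__ drives get tree[17]='red'<line_sep>tree[5]='blue'<line_sep>tree[35]='yellow'<assert_stmt>tree[5]<eq>'blue'<assert_stmt>35<in>tree<assert_stmt>len(tree)<eq>3<assert_stmt>[key<for>key tree]<eq>[5 17 35]<line_sep># delete handles the leaf / one-child / two-children cases discussed above tree.delete(5)<assert_stmt>5<not><in>tree<block_end>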
<import_stmt>base64<def_stmt>parse_basic_auth header_value<block_start>""" Attempts to parse the given header value as a Base64-encoded Basic auth header. Returns the [username, password] pair on success, or None for a missing or malformed header. """<if_stmt><not>header_value<block_start><return><none><block_end>parts=header_value.split(" ")<if_stmt>len(parts)<ne>2<or>parts[0].lower()<ne>"basic"<block_start><return><none><block_end><try_stmt># b64decode returns bytes under Python 3, so decode before splitting on ":" <block_start>basic_parts=base64.b64decode(parts[1]).decode("utf-8").split(":" 1)<if_stmt>len(basic_parts)<ne>2<block_start><return><none><block_end><return>basic_parts<block_end><except_stmt>ValueError<block_start><return><none><block_end><block_end>
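# A short sanity check of the behaviour described above; the credentials are made-up sample values. <def_stmt>_parse_basic_auth_sketch <block_start>encoded=base64.b64encode(b"alice:secret").decode("ascii")<assert_stmt>parse_basic_auth("Basic "+encoded)<eq>["alice" "secret"]<assert_stmt>parse_basic_auth("Bearer abc123")<is><none><assert_stmt>parse_basic_auth(<none>)<is><none><block_end>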
# Copyright (c) 2016-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>numpy<as>np<import_from_stmt>caffe2.python core workspace<import_from_stmt>caffe2.python.test_util TestCase rand_array<class_stmt>TestPartitionOps(TestCase)<block_start><def_stmt>test_configs self# (main dims, partitions, main type, [list of (extra dims, type)]) <block_start>configs=[((10 ) 3) ((4 ) 10) ((10 10) 4) ((100 ) 2) ((5 ) 1) ((1 ) 1) ((2 10) 2) ]<line_sep>suffixes=[[] [((2 2) np.float32)] [((3 ) np.int64) ((2 ) np.float32)] ]<line_sep><return>[(main_dims parts main_type extra pack)<for>main_dims,parts configs<for>main_type [np.int32 np.int64]<for>extra suffixes<for>pack [<false> <true>]]<block_end><def_stmt>testPartition self<block_start><for_stmt>main_dims,parts,main_type,extra_ins,pack self.test_configs()<block_start>ins=['in'+str(i)<for>i range(1+len(extra_ins))]<line_sep>outs=['in{}_p{}'.format(j i)<for>i range(parts)<for>j range(1+len(extra_ins))]<line_sep>op=core.CreateOperator('Partition' ins outs pack_first_input=(1<if>pack<else>0))<line_sep>x=[]<for_stmt>i,(dims t) enumerate([(() main_type)]+extra_ins)<block_start><if_stmt>t<in>[np.float32 np.float64]<block_start>d=rand_array(*(main_dims+dims))<block_end><else_stmt><block_start>d=np.random.randint(-100 100 (main_dims+dims))<block_end>d=d.astype(t)<line_sep>workspace.FeedBlob(ins[i] d)<line_sep>x.append(d)<block_end><def_stmt>sharding x# numpy has proper modulo op that yields non-negative results <block_start>shards=(x[0]%parts).reshape([-1])<line_sep>out=[]<for_stmt>i range(parts)<block_start><for_stmt>ind,v enumerate(x)<block_start>suffix_shape=v.shape[len(x[0].shape):]<line_sep>accum=[]<line_sep>data=v.reshape((-1 )+suffix_shape)<if_stmt>pack<and>ind<eq>0<block_start>data=data<floordiv>parts<block_end><for_stmt>j,s enumerate(shards)<block_start><if_stmt>s<eq>i<block_start>accum.append(data[j])<block_end><block_end><def_stmt>join a<block_start><if_stmt><not>a<block_start><return>np.empty(shape=(0 )+suffix_shape)<block_end><return>np.stack(a)<block_end>out.append(join(accum))<block_end><block_end><return>out<block_end>workspace.RunOperatorOnce(op)<line_sep>ref=sharding(x)<line_sep>print(x)<line_sep>print(ref)<for_stmt>name,expected zip(outs ref)<block_start>np.testing.assert_array_equal(expected workspace.FetchBlob(name))<block_end># test inverse operation (GatherByKey) <if_stmt>len(main_dims)<eq>1# currently only 1D key tensor supported <block_start><for_stmt>i range(len(extra_ins))<block_start>expected_out=ins[i+1]<line_sep>gather_ins=[ins[0]]+[outs[len(ins)<times>p+i+1]<for>p range(parts)]<line_sep>actual_out=expected_out+'_actual'<line_sep>op=core.CreateOperator('GatherByKey' gather_ins 
actual_out)<line_sep>workspace.RunOperatorOnce(op)<line_sep>expected=workspace.FetchBlob(expected_out)<line_sep>actual=workspace.FetchBlob(actual_out)<line_sep>np.testing.assert_array_equal(expected actual)<block_end><block_end><block_end><block_end><def_stmt>testLengthsPartition self<block_start><for_stmt>main_dims,parts,main_type,extra_ins,pack self.test_configs()# For LengthsSharding only 1-D tensors supported as a first input <block_start><if_stmt>len(main_dims)<g>1<block_start><continue><block_end>ins=['in'+str(i)<for>i range(2+len(extra_ins))]<line_sep>outs=['in{}_p{}'.format(j i)<for>i range(parts)<for>j range(2+len(extra_ins))]<line_sep>op=core.CreateOperator('LengthsPartition' ins outs pack_first_input=(1<if>pack<else>0))<line_sep>x=[]<for_stmt>i,(dims t) enumerate([(() main_type)]+extra_ins)<block_start><if_stmt>t<in>[np.float32 np.float64]<block_start>d=rand_array(*(main_dims+dims))<block_end><else_stmt><block_start>d=np.random.randint(-100 100 (main_dims+dims))<block_end>d=d.astype(t)<line_sep>workspace.FeedBlob(ins[i+1] d)<line_sep>x.append(d)<block_end># Randomly generate length tensor as well elements=np.random.randint(2 10)<line_sep>lengths=[]<line_sep>total_length=0<for_stmt>_ range(elements-1)<block_start>lengths.append(np.random.randint(main_dims[0]-total_length))<line_sep>total_length<augadd>lengths[-1]<block_end>lengths.append(main_dims[0]-total_length)<line_sep>workspace.FeedBlob(ins[0] np.array(lengths dtype=np.int32))<def_stmt>sharding x# numpy has proper modulo op that yields non-negative results <block_start>shards=(x[0]%parts).reshape([-1])<line_sep>out=[]<for_stmt>i range(parts)<block_start>idx=0<line_sep>sharded_lengths=np.zeros(elements)<for_stmt>ind,length enumerate(lengths)<block_start><for_stmt>_ range(length)<block_start><if_stmt>shards[idx]<eq>i<block_start>sharded_lengths[ind]<augadd>1<block_end>idx<augadd>1<block_end><block_end>out.append(sharded_lengths)<for_stmt>ind,v enumerate(x)<block_start>suffix_shape=v.shape[len(x[0].shape):]<line_sep>accum=[]<line_sep>data=v.reshape((-1 )+suffix_shape)<if_stmt>pack<and>ind<eq>0<block_start>data=data<floordiv>parts<block_end><for_stmt>j,s enumerate(shards)<block_start><if_stmt>s<eq>i<block_start>accum.append(data[j])<block_end><block_end><def_stmt>join a<block_start><if_stmt><not>a<block_start><return>np.empty(shape=(0 )+suffix_shape)<block_end><return>np.stack(a)<block_end>out.append(join(accum))<block_end><block_end><return>out<block_end>workspace.RunOperatorOnce(op)<line_sep>ref=sharding(x)<for_stmt>name,expected zip(outs ref)<block_start>np.testing.assert_array_equal(expected workspace.FetchBlob(name))<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>unittest<line_sep>unittest.main()<block_end>
"""Find kernel specifications for a given language"""<import_stmt>os<import_stmt>sys<import_from_stmt>.languages same_language<import_from_stmt>.reraise reraise<try_stmt># I prefer not to take a dependency on jupyter_client <block_start><import_from_stmt>jupyter_client.kernelspec find_kernel_specs get_kernel_spec<block_end><except_stmt>ImportError<as>err<block_start>find_kernel_specs=reraise(err)<line_sep>get_kernel_spec=reraise(err)<block_end><def_stmt>set_kernelspec_from_language notebook<block_start>"""Set the kernel specification based on the 'main_language' metadata"""<line_sep>language=notebook.metadata.get("jupytext" {}).get("main_language")<if_stmt>"kernelspec"<not><in>notebook.metadata<and>language<block_start><try_stmt><block_start>kernelspec=kernelspec_from_language(language)<block_end><except_stmt>ValueError<block_start><return><block_end>notebook.metadata["kernelspec"]=kernelspec<line_sep>notebook.metadata.get("jupytext" {}).pop("main_language")<block_end><block_end><def_stmt>kernelspec_from_language language<block_start>"""Return the python kernel that matches the current env, or the first kernel that matches the given language"""<if_stmt>language<eq>"python"# Return the kernel that matches the current Python executable <block_start><for_stmt>name find_kernel_specs()<block_start>kernel_specs=get_kernel_spec(name)<line_sep>cmd=kernel_specs.argv[0]<if_stmt>(kernel_specs.language<eq>"python"<and>os.path.isfile(cmd)<and>os.path.samefile(cmd sys.executable))<block_start><return>{"name":name "language":language "display_name":kernel_specs.display_name }<block_end><block_end><raise>ValueError("No kernel found that matches the current python executable {}\n".format(sys.executable)+"Install one with 'python -m ipykernel install --name kernel_name [--user]'")<block_end><for_stmt>name find_kernel_specs()<block_start>kernel_specs=get_kernel_spec(name)<if_stmt>same_language(kernel_specs.language language)<block_start><return>{"name":name "language":language "display_name":kernel_specs.display_name }<block_end><block_end><raise>ValueError("No kernel found for the language {}".format(language))<block_end>
<import_stmt>pytest<import_from_stmt>datar stats<import_from_stmt>datar.base *<import_from_stmt>datar f<import_from_stmt>datar.datasets warpbreaks state_division state_region airquality<import_from_stmt>.conftest assert_iterable_equal<def_stmt>test_table # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table <block_start>z=stats.rpois(100 5)<line_sep>x=table(z)<assert_stmt>sum(x.values.flatten())<eq>100<line_sep>#----------------- <with_stmt>data_context(warpbreaks)<as>_<block_start>tab=table(f.wool f.tension)<block_end><assert_stmt>tab.columns.tolist()<eq>['H' 'L' 'M']<assert_stmt>tab.index.tolist()<eq>['A' 'B']<line_sep>assert_iterable_equal(tab.values.flatten() [9]<times>6)<line_sep>tab=table(warpbreaks.loc[: ['wool' 'tension']])<assert_stmt>tab.columns.tolist()<eq>['H' 'L' 'M']<assert_stmt>tab.index.tolist()<eq>['A' 'B']<line_sep>assert_iterable_equal(tab.values.flatten() [9]<times>6)<line_sep>#----------------- tab=table(state_division state_region)<assert_stmt>tab.loc['New England' 'Northeast']<eq>6<line_sep>#----------------- <with_stmt>data_context(airquality)<as>_<block_start>qt=stats.quantile(f.Temp)<line_sep>ct=cut(f.Temp qt)<line_sep>tab=table(ct f.Month)<block_end><assert_stmt>tab.iloc[0 0]<eq>24<line_sep>#----------------- a=letters[:3]<line_sep>tab=table(a sample(a))<assert_stmt>sum(tab.values.flatten())<eq>3<line_sep>#----------------- tab=table(a sample(a) dnn=['x' 'y'])<assert_stmt>tab.index.name<eq>'x'<assert_stmt>tab.columns.name<eq>'y'<line_sep>#----------------- a=c(NA Inf (1.0/(i+1)<for>i range(3)))<line_sep>a=a<times>10<line_sep># tab = table(a) # assert_iterable_equal(tab.values.flatten(), [10] * 4) tab=table(a exclude=<none>)<line_sep>assert_iterable_equal(tab.values.flatten() [10]<times>5)<line_sep>#------------------ b=as_factor(rep(c("A" "B" "C") 10))<line_sep>tab=table(b)<assert_stmt>tab.shape<eq>(1 3)<line_sep>assert_iterable_equal(tab.values.flatten() [10]<times>3)<line_sep>tab=table(b exclude="B")<assert_stmt>tab.shape<eq>(1 2)<line_sep>assert_iterable_equal(tab.values.flatten() [10]<times>2)<assert_stmt>'B'<not><in>tab.columns<line_sep>#------------------- d=factor(rep(c("A" "B" "C") 10) levels=c("A" "B" "C" "D" "E"))<line_sep>tab=table(d exclude="B" dnn=['x'])<line_sep>assert_iterable_equal(tab.columns.to_list() ["A" "C" "D" "E"])<line_sep>assert_iterable_equal(tab.values.flatten() [10 10 0 0])<line_sep>d2=factor(rep(c("A" "B" "C") 10) levels=c("A" "B" "C" "D" "E"))<line_sep>tab=table(d d2 exclude="B")<assert_stmt>tab.shape<eq>(4 4)<line_sep>tab=table("abc" "cba" dnn='x')<assert_stmt>tab.shape<eq>(3 3)<assert_stmt>sum(tab.values.flatten())<eq>3<with_stmt>data_context(airquality)<as>_<block_start>tab=table(f.Ozone f.Solar_R exclude=<none>)<block_end><assert_stmt>'<NA>'<in>tab.columns<assert_stmt>'<NA>'<in>tab.index<block_end><def_stmt>test_table_error <block_start><import_from_stmt>datar.datasets iris warpbreaks<with_stmt>pytest.raises(ValueError)<block_start>table(iris)<block_end><with_stmt>pytest.raises(ValueError)<block_start>table(warpbreaks iris)<block_end><with_stmt>pytest.raises(ValueError)<block_start>table(warpbreaks.wool iris)<block_end><with_stmt>pytest.raises(ValueError)<block_start>table(iris.iloc[: []])<block_end><with_stmt>pytest.raises(ValueError)<block_start>table(iris.iloc[: [1 2]] iris)<block_end><with_stmt>pytest.raises(ValueError)<block_start>table(iris.iloc[: [1]] iris iris)<block_end><with_stmt>pytest.raises(ValueError)<block_start>table(iris.iloc[: [1]] iris.iloc[: []])<block_end><block_end>
<import_from_stmt>common.make_tx make_swap_tx<import_from_stmt>sol.handle_simple handle_unknown_detect_transfers<def_stmt>handle_metaplex exporter txinfo<block_start>transfers_in,transfers_out,_=txinfo.transfers_net<if_stmt>len(transfers_in)<eq>1<and>len(transfers_out)<eq>1<block_start>sent_amount,sent_currency,_,_=transfers_out[0]<line_sep>received_amount,received_currency,_,_=transfers_in[0]<line_sep>row=make_swap_tx(txinfo sent_amount sent_currency received_amount received_currency)<line_sep>exporter.ingest_row(row)<block_end><else_stmt><block_start>handle_unknown_detect_transfers(exporter txinfo)<block_end><block_end><def_stmt>is_nft_mint txinfo<block_start>log_instructions=txinfo.log_instructions<line_sep>transfers_in,transfers_out,_=txinfo.transfers_net<if_stmt>"MintTo"<in>log_instructions<and>len(transfers_out)<eq>1<and>len(transfers_in)<eq>0<block_start><return><true><block_end><elif_stmt>("MintTo"<in>log_instructions<and>len(transfers_out)<eq>1<and>len(transfers_in)<eq>1<and>transfers_in[0][0]<eq>1)<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>handle_nft_mint exporter txinfo<block_start>transfers_in,transfers_out,transfers_unknown=txinfo.transfers_net<if_stmt>len(transfers_in)<eq>1<and>len(transfers_out)<eq>1<block_start>sent_amount,sent_currency,_,_=transfers_out[0]<line_sep>received_amount,received_currency,_,_=transfers_in[0]<line_sep>row=make_swap_tx(txinfo sent_amount sent_currency received_amount received_currency)<line_sep>exporter.ingest_row(row)<line_sep><return><block_end>handle_unknown_detect_transfers(exporter txinfo)<block_end>
<import_stmt>warnings<import_from_stmt>typing Any Callable Optional Tuple<import_from_stmt>tensorboard.backend.event_processing event_accumulator<import_from_stmt>torch.utils.tensorboard SummaryWriter<import_from_stmt>tianshou.utils.logger.base LOG_DATA_TYPE BaseLogger<class_stmt>TensorboardLogger(BaseLogger)<block_start>"""A logger that relies on tensorboard SummaryWriter by default to visualize \ and log statistics. :param SummaryWriter writer: the writer to log data. :param int train_interval: the log interval in log_train_data(). Default to 1000. :param int test_interval: the log interval in log_test_data(). Default to 1. :param int update_interval: the log interval in log_update_data(). Default to 1000. :param int save_interval: the save interval in save_data(). Default to 1 (save at the end of each epoch). """<def_stmt>__init__ self writer:SummaryWriter train_interval:int=1000 test_interval:int=1 update_interval:int=1000 save_interval:int=1 <arrow><none><block_start>super().__init__(train_interval test_interval update_interval)<line_sep>self.save_interval=save_interval<line_sep>self.last_save_step=-1<line_sep>self.writer=writer<block_end><def_stmt>write self step_type:str step:int data:LOG_DATA_TYPE<arrow><none><block_start><for_stmt>k,v data.items()<block_start>self.writer.add_scalar(k v global_step=step)<block_end><block_end><def_stmt>save_data self epoch:int env_step:int gradient_step:int save_checkpoint_fn:Optional[Callable[[int int int] <none>]]=<none> <arrow><none><block_start><if_stmt>save_checkpoint_fn<and>epoch-self.last_save_step<ge>self.save_interval<block_start>self.last_save_step=epoch<line_sep>save_checkpoint_fn(epoch env_step gradient_step)<line_sep>self.write("save/epoch" epoch {"save/epoch":epoch})<line_sep>self.write("save/env_step" env_step {"save/env_step":env_step})<line_sep>self.write("save/gradient_step" gradient_step {"save/gradient_step":gradient_step})<block_end><block_end><def_stmt>restore_data self<arrow>Tuple[int int int]<block_start>ea=event_accumulator.EventAccumulator(self.writer.log_dir)<line_sep>ea.Reload()<try_stmt># epoch / gradient_step <block_start>epoch=ea.scalars.Items("save/epoch")[-1].step<line_sep>self.last_save_step=self.last_log_test_step=epoch<line_sep>gradient_step=ea.scalars.Items("save/gradient_step")[-1].step<line_sep>self.last_log_update_step=gradient_step<block_end><except_stmt>KeyError<block_start>epoch,gradient_step=0 0<block_end><try_stmt># offline trainer doesn't have env_step <block_start>env_step=ea.scalars.Items("save/env_step")[-1].step<line_sep>self.last_log_train_step=env_step<block_end><except_stmt>KeyError<block_start>env_step=0<block_end><return>epoch env_step gradient_step<block_end><block_end><class_stmt>BasicLogger(TensorboardLogger)<block_start>"""BasicLogger has changed its name to TensorboardLogger in #427. This class is for compatibility. """<def_stmt>__init__ self *args:Any **kwargs:Any<arrow><none><block_start>warnings.warn("Deprecated soon: BasicLogger has renamed to TensorboardLogger in #427.")<line_sep>super().__init__(*args **kwargs)<block_end><block_end>
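A minimal usage sketch for the logger class above, written in plain Python rather than the tokenized form of the surrounding files. The log directory, the scalar values, and the `from tianshou.utils import TensorboardLogger` path are assumptions for illustration; the constructor arguments and method calls follow the signatures defined in the class above.

# Hedged usage sketch; log directory, values and the tianshou.utils import
# path are assumptions, not taken from the file above.
from torch.utils.tensorboard import SummaryWriter
from tianshou.utils import TensorboardLogger

writer = SummaryWriter("log/dqn_experiment")   # hypothetical log directory
logger = TensorboardLogger(writer, train_interval=1000, update_interval=1000)

# write() takes a step type, the step counter and a dict of scalar values.
logger.write("train/env_step", 1000, {"train/reward": 123.4})

# restore_data() re-reads the event file and returns (epoch, env_step,
# gradient_step) so a resumed run can pick up from the last save_data() call.
epoch, env_step, gradient_step = logger.restore_data()

In practice the logger object is handed to a tianshou trainer, which then drives log_train_data/log_test_data/log_update_data; only the low-level calls are shown here.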
# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_from_stmt>tempest.lib.services.compute security_group_default_rules_client<import_from_stmt>tempest.tests.lib fake_auth_provider<import_from_stmt>tempest.tests.lib.services base<class_stmt>TestSecurityGroupDefaultRulesClient(base.BaseServiceTest)<block_start>FAKE_RULE={"from_port":80 "id":1 "ip_protocol":"TCP" "ip_range":{"cidr":"10.10.10.0/24"} "to_port":80}<def_stmt>setUp self<block_start>super(TestSecurityGroupDefaultRulesClient self).setUp()<line_sep>fake_auth=fake_auth_provider.FakeAuthProvider()<line_sep>self.client=(security_group_default_rules_client.SecurityGroupDefaultRulesClient(fake_auth 'compute' 'regionOne'))<block_end><def_stmt>_test_list_security_group_default_rules self bytes_body=<false><block_start>self.check_service_client_function(self.client.list_security_group_default_rules 'tempest.lib.common.rest_client.RestClient.get' {"security_group_default_rules":[self.FAKE_RULE]} to_utf=bytes_body)<block_end><def_stmt>test_list_security_group_default_rules_with_str_body self<block_start>self._test_list_security_group_default_rules()<block_end><def_stmt>test_list_security_group_default_rules_with_bytes_body self<block_start>self._test_list_security_group_default_rules(bytes_body=<true>)<block_end><def_stmt>_test_show_security_group_default_rule self bytes_body=<false><block_start>self.check_service_client_function(self.client.show_security_group_default_rule 'tempest.lib.common.rest_client.RestClient.get' {"security_group_default_rule":self.FAKE_RULE} to_utf=bytes_body security_group_default_rule_id=1)<block_end><def_stmt>test_show_security_group_default_rule_with_str_body self<block_start>self._test_show_security_group_default_rule()<block_end><def_stmt>test_show_security_group_default_rule_with_bytes_body self<block_start>self._test_show_security_group_default_rule(bytes_body=<true>)<block_end><def_stmt>_test_create_security_default_group_rule self bytes_body=<false><block_start>request_body={"to_port":80 "from_port":80 "ip_protocol":"TCP" "cidr":"10.10.10.0/24"}<line_sep>self.check_service_client_function(self.client.create_security_default_group_rule 'tempest.lib.common.rest_client.RestClient.post' {"security_group_default_rule":self.FAKE_RULE} to_utf=bytes_body **request_body)<block_end><def_stmt>test_create_security_default_group_rule_with_str_body self<block_start>self._test_create_security_default_group_rule()<block_end><def_stmt>test_create_security_default_group_rule_with_bytes_body self<block_start>self._test_create_security_default_group_rule(bytes_body=<true>)<block_end><def_stmt>test_delete_security_group_default_rule self<block_start>self.check_service_client_function(self.client.delete_security_group_default_rule 'tempest.lib.common.rest_client.RestClient.delete' {} status=204 security_group_default_rule_id=1)<block_end><block_end>
<import_stmt>numpy<as>np<import_from_stmt>keras backend<as>K<import_stmt>os<import_stmt>sys<line_sep>K.set_image_dim_ordering('tf')<def_stmt>patch_path path<block_start><return>os.path.join(os.path.dirname(__file__) path)<block_end><def_stmt>main <block_start>sys.path.append(patch_path('..'))<line_sep>data_dir_path=patch_path('very_large_data')<line_sep>model_dir_path=patch_path('models/UCF-101')<import_from_stmt>keras_video_classifier.library.convolutional CnnVideoClassifier<import_from_stmt>keras_video_classifier.library.utility.ucf.UCF101_loader load_ucf scan_ucf_with_labels<line_sep>config_file_path=CnnVideoClassifier.get_config_file_path(model_dir_path)<line_sep>weight_file_path=CnnVideoClassifier.get_weight_file_path(model_dir_path)<line_sep>np.random.seed(42)<line_sep>load_ucf(data_dir_path)<line_sep>predictor=CnnVideoClassifier()<line_sep>predictor.load_model(config_file_path weight_file_path)<line_sep>videos=scan_ucf_with_labels(data_dir_path [label<for>(label label_index) predictor.labels.items()])<line_sep>video_file_path_list=np.array([file_path<for>file_path videos.keys()])<line_sep>np.random.shuffle(video_file_path_list)<for_stmt>video_file_path video_file_path_list<block_start>label=videos[video_file_path]<line_sep>predicted_label=predictor.predict(video_file_path)<line_sep>print('predicted: '+predicted_label+' actual: '+label)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# -*- coding: utf-8 -*- <import_from_stmt>scipy stats<import_stmt>numpy<as>np<import_stmt>warnings<import_from_stmt>...compat check_is_fitted pmdarima<as>pm_compat<import_from_stmt>.base BaseEndogTransformer<line_sep>__all__=['BoxCoxEndogTransformer']<class_stmt>BoxCoxEndogTransformer(BaseEndogTransformer)<block_start>r"""Apply the Box-Cox transformation to an endogenous array The Box-Cox transformation is applied to non-normal data to coerce it more towards a normal distribution. It's specified as:: (((y + lam2) ** lam1) - 1) / lam1, if lmbda != 0, else log(y + lam2) Parameters ---------- lmbda : float or None, optional (default=None) The lambda value for the Box-Cox transformation, if known. If not specified, it will be estimated via MLE. lmbda2 : float, optional (default=0.) The value to add to ``y`` to make it non-negative. If, after adding ``lmbda2``, there are still negative values, a ValueError will be raised. neg_action : str, optional (default="raise") How to respond if any values in ``y <= 0`` after adding ``lmbda2``. One of ('raise', 'warn', 'ignore'). If anything other than 'raise', values <= 0 will be truncated to the value of ``floor``. floor : float, optional (default=1e-16) A positive value that truncate values to if there are values in ``y`` that are zero or negative and ``neg_action`` is not 'raise'. Note that if values are truncated, invertibility will not be preserved, and the transformed array may not be perfectly inverse-transformed. """<def_stmt>__init__ self lmbda=<none> lmbda2=0 neg_action="raise" floor=1e-16<block_start>self.lmbda=lmbda<line_sep>self.lmbda2=lmbda2<line_sep>self.neg_action=neg_action<line_sep>self.floor=floor<block_end><def_stmt>fit self y X=<none> **kwargs# TODO: kwargs go away <block_start>"""Fit the transformer Learns the value of ``lmbda``, if not specified in the constructor. If defined in the constructor, is not re-learned. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. X : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays. """<line_sep>lam1=self.lmbda<line_sep>lam2=self.lmbda2<line_sep># Temporary shim until we remove `exogenous` support completely X,_=pm_compat.get_X(X **kwargs)<if_stmt>lam2<l>0<block_start><raise>ValueError("lmbda2 must be a non-negative scalar value")<block_end><if_stmt>lam1<is><none><block_start>y,_=self._check_y_X(y X)<line_sep>_,lam1=stats.boxcox(y+lam2 lmbda=<none> alpha=<none>)<block_end>self.lam1_=lam1<line_sep>self.lam2_=lam2<line_sep><return>self<block_end><def_stmt>transform self y X=<none> **kwargs<block_start>"""Transform the new array Apply the Box-Cox transformation to the array after learning the lambda parameter. Parameters ---------- y : array-like or None, shape=(n_samples,) The endogenous (time-series) array. X : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays. 
Returns ------- y_transform : array-like or None The Box-Cox transformed y array X : array-like or None The X array """<line_sep>check_is_fitted(self "lam1_")<line_sep># Temporary shim until we remove `exogenous` support completely X,_=pm_compat.get_X(X **kwargs)<line_sep>lam1=self.lam1_<line_sep>lam2=self.lam2_<line_sep>y,exog=self._check_y_X(y X)<line_sep>y<augadd>lam2<line_sep>neg_mask=y<le>0.<if_stmt>neg_mask.any()<block_start>action=self.neg_action<line_sep>msg="Negative or zero values present in y"<if_stmt>action<eq>"raise"<block_start><raise>ValueError(msg)<block_end><elif_stmt>action<eq>"warn"<block_start>warnings.warn(msg UserWarning)<block_end>y[neg_mask]=self.floor<block_end><if_stmt>lam1<eq>0<block_start><return>np.log(y) exog<block_end><return>(y<power>lam1-1)/lam1 exog<block_end><def_stmt>inverse_transform self y X=<none> **kwargs# TODO: kwargs go away <block_start>"""Inverse transform a transformed array Inverse the Box-Cox transformation on the transformed array. Note that if truncation happened in the ``transform`` method, invertibility will not be preserved, and the transformed array may not be perfectly inverse-transformed. Parameters ---------- y : array-like or None, shape=(n_samples,) The transformed endogenous (time-series) array. X : array-like or None, shape=(n_samples, n_features), optional The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays. Returns ------- y : array-like or None The inverse-transformed y array X : array-like or None The inverse-transformed X array """<line_sep>check_is_fitted(self "lam1_")<line_sep># Temporary shim until we remove `exogenous` support completely X,_=pm_compat.get_X(X **kwargs)<line_sep>lam1=self.lam1_<line_sep>lam2=self.lam2_<line_sep>y,exog=self._check_y_X(y X)<if_stmt>lam1<eq>0<block_start><return>np.exp(y)-lam2 exog<block_end>numer=y<times>lam1# remove denominator numer<augadd>1.# add 1 back to it de_exp=numer<power>(1./lam1)# de-exponentiate <return>de_exp-lam2 exog<block_end><block_end>
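A short, hedged usage sketch for the transformer defined above, in plain Python. The sample series and the `pmdarima.preprocessing` import path are assumptions; the fit/transform/inverse_transform calls mirror the methods shown in this file.

# Hedged usage sketch; the data and the import path are assumptions.
import numpy as np
from pmdarima.preprocessing import BoxCoxEndogTransformer

y = np.array([1.0, 2.0, 4.0, 8.0, 16.0])

trans = BoxCoxEndogTransformer(lmbda2=1e-6)   # lmbda is estimated via MLE in fit()
trans.fit(y)
y_t, _ = trans.transform(y)                   # (((y + lam2) ** lam1) - 1) / lam1
y_back, _ = trans.inverse_transform(y_t)      # ~ y, since no truncation happened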
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
<import_from_stmt>alembic op<import_from_stmt>oslo_utils uuidutils<import_stmt>sqlalchemy<as>sa<import_from_stmt>neutron.db rbac_db_models<line_sep>"""rbac_qos_policy

Revision ID: c6c112992c9
Revises: e3278ee65050
Create Date: 2015-11-25 18:45:03.831359

"""<line_sep># revision identifiers, used by Alembic.
revision='c6c112992c9'<line_sep>down_revision='e3278ee65050'<line_sep>depends_on=('15e43b934f81' )<line_sep>qos_rbacs=sa.Table('qospolicyrbacs' sa.MetaData() sa.Column('id' sa.String(length=36) nullable=<false>) sa.Column('tenant_id' sa.String(length=255) nullable=<true>) sa.Column('target_tenant' sa.String(length=255) nullable=<false>) sa.Column('action' sa.String(length=255) nullable=<false>) sa.Column('object_id' sa.String(length=36) nullable=<false>))<line_sep># A simple model of the qos_policies table with only the fields needed for
# the migration.
qos_policy=sa.Table('qos_policies' sa.MetaData() sa.Column('id' sa.String(length=36) nullable=<false>) sa.Column('tenant_id' sa.String(length=255)) sa.Column('shared' sa.Boolean() nullable=<false>))<def_stmt>upgrade <block_start>op.bulk_insert(qos_rbacs get_values())<line_sep>op.drop_column('qos_policies' 'shared')<block_end><def_stmt>get_values <block_start>session=sa.orm.Session(bind=op.get_bind())<line_sep>values=[]<for_stmt>row session.query(qos_policy).filter(qos_policy.c.shared).all()<block_start>values.append({'id':uuidutils.generate_uuid() 'object_id':row[0] 'tenant_id':row[1] 'target_tenant':'*' 'action':rbac_db_models.ACCESS_SHARED})<block_end>session.commit()<line_sep><return>values<block_end>
<import_from_stmt>typing Optional<import_from_stmt>botocore.client BaseClient<import_from_stmt>typing Dict<import_from_stmt>typing Union<import_from_stmt>botocore.paginate Paginator<import_from_stmt>botocore.waiter Waiter<import_from_stmt>typing List<class_stmt>Client(BaseClient)<block_start><def_stmt>accept_invitation self DetectorId:str InvitationId:str MasterId:str<arrow>Dict<block_start><pass><block_end><def_stmt>archive_findings self DetectorId:str FindingIds:List<arrow>Dict<block_start><pass><block_end><def_stmt>can_paginate self operation_name:str=<none><block_start><pass><block_end><def_stmt>create_detector self Enable:bool ClientToken:str=<none> FindingPublishingFrequency:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_filter self DetectorId:str FindingCriteria:Dict Name:str Action:str=<none> ClientToken:str=<none> Description:str=<none> Rank:int=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_ip_set self Activate:bool DetectorId:str Format:str Location:str Name:str ClientToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_members self AccountDetails:List DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>create_sample_findings self DetectorId:str FindingTypes:List=<none><arrow>Dict<block_start><pass><block_end><def_stmt>create_threat_intel_set self Activate:bool DetectorId:str Format:str Location:str Name:str ClientToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>decline_invitations self AccountIds:List<arrow>Dict<block_start><pass><block_end><def_stmt>delete_detector self DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>delete_filter self DetectorId:str FilterName:str<arrow>Dict<block_start><pass><block_end><def_stmt>delete_invitations self AccountIds:List<arrow>Dict<block_start><pass><block_end><def_stmt>delete_ip_set self DetectorId:str IpSetId:str<arrow>Dict<block_start><pass><block_end><def_stmt>delete_members self AccountIds:List DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>delete_threat_intel_set self DetectorId:str ThreatIntelSetId:str<arrow>Dict<block_start><pass><block_end><def_stmt>disassociate_from_master_account self DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>disassociate_members self AccountIds:List DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>generate_presigned_url self ClientMethod:str=<none> Params:Dict=<none> ExpiresIn:int=<none> HttpMethod:str=<none><block_start><pass><block_end><def_stmt>get_detector self DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>get_filter self DetectorId:str FilterName:str<arrow>Dict<block_start><pass><block_end><def_stmt>get_findings self DetectorId:str FindingIds:List SortCriteria:Dict=<none><arrow>Dict<block_start><pass><block_end><def_stmt>get_findings_statistics self DetectorId:str FindingStatisticTypes:List FindingCriteria:Dict=<none><arrow>Dict<block_start><pass><block_end><def_stmt>get_invitations_count self<arrow>Dict<block_start><pass><block_end><def_stmt>get_ip_set self DetectorId:str IpSetId:str<arrow>Dict<block_start><pass><block_end><def_stmt>get_master_account self DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>get_members self AccountIds:List DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>get_paginator self operation_name:str=<none><arrow>Paginator<block_start><pass><block_end><def_stmt>get_threat_intel_set self DetectorId:str 
ThreatIntelSetId:str<arrow>Dict<block_start><pass><block_end><def_stmt>get_waiter self waiter_name:str=<none><arrow>Waiter<block_start><pass><block_end><def_stmt>invite_members self AccountIds:List DetectorId:str DisableEmailNotification:bool=<none> Message:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>list_detectors self MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>list_filters self DetectorId:str MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>list_findings self DetectorId:str FindingCriteria:Dict=<none> MaxResults:int=<none> NextToken:str=<none> SortCriteria:Dict=<none><arrow>Dict<block_start><pass><block_end><def_stmt>list_invitations self MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>list_ip_sets self DetectorId:str MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>list_members self DetectorId:str MaxResults:int=<none> NextToken:str=<none> OnlyAssociated:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>list_threat_intel_sets self DetectorId:str MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>start_monitoring_members self AccountIds:List DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>stop_monitoring_members self AccountIds:List DetectorId:str<arrow>Dict<block_start><pass><block_end><def_stmt>unarchive_findings self DetectorId:str FindingIds:List<arrow>Dict<block_start><pass><block_end><def_stmt>update_detector self DetectorId:str Enable:bool=<none> FindingPublishingFrequency:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_filter self DetectorId:str FilterName:str Action:str=<none> Description:str=<none> FindingCriteria:Dict=<none> Rank:int=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_findings_feedback self DetectorId:str Feedback:str FindingIds:List Comments:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_ip_set self DetectorId:str IpSetId:str Activate:bool=<none> Location:str=<none> Name:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>update_threat_intel_set self DetectorId:str ThreatIntelSetId:str Activate:bool=<none> Location:str=<none> Name:str=<none><arrow>Dict<block_start><pass><block_end><block_end>
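The class above is a type stub mirroring the boto3 GuardDuty client. Below is a hedged sketch of calling a few of the same operations through the real client; the region, credentials, and the assumption that findings already exist are illustrative only.

# Hedged sketch against the real boto3 GuardDuty client; region and the
# expectation that findings exist are assumptions.
import boto3

client = boto3.client("guardduty", region_name="us-east-1")

detector_id = client.create_detector(Enable=True)["DetectorId"]

# List finding ids for the detector, then fetch their details, mirroring the
# list_findings/get_findings stubs above.
finding_ids = client.list_findings(DetectorId=detector_id)["FindingIds"]
if finding_ids:
    findings = client.get_findings(DetectorId=detector_id,
                                   FindingIds=finding_ids)["Findings"]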
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=cell-var-from-loop,invalid-name """ Measurement correction filters. """<import_from_stmt>typing List Union<import_from_stmt>copy deepcopy<import_from_stmt>scipy.optimize minimize<import_stmt>scipy.linalg<as>la<import_stmt>numpy<as>np<import_stmt>qiskit<import_from_stmt>qiskit QiskitError<import_from_stmt>qiskit.tools parallel_map<import_from_stmt>qiskit.ignis.verification.tomography count_keys<class_stmt>MeasurementFilter()<block_start>""" Measurement error mitigation filter. Produced from a measurement calibration fitter and can be applied to data. """<def_stmt>__init__ self cal_matrix:np.matrix state_labels:list<block_start>""" Initialize a measurement error mitigation filter using the cal_matrix from a measurement calibration fitter. Args: cal_matrix: the calibration matrix for applying the correction state_labels: the states for the ordering of the cal matrix """<line_sep>self._cal_matrix=cal_matrix<line_sep>self._state_labels=state_labels<block_end>@property<def_stmt>cal_matrix self<block_start>"""Return cal_matrix."""<line_sep><return>self._cal_matrix<block_end>@property<def_stmt>state_labels self<block_start>"""return the state label ordering of the cal matrix"""<line_sep><return>self._state_labels<block_end>@state_labels.setter<def_stmt>state_labels self new_state_labels<block_start>"""set the state label ordering of the cal matrix"""<line_sep>self._state_labels=new_state_labels<block_end>@cal_matrix.setter<def_stmt>cal_matrix self new_cal_matrix<block_start>"""Set cal_matrix."""<line_sep>self._cal_matrix=new_cal_matrix<block_end><def_stmt>apply self raw_data method='least_squares'<block_start>"""Apply the calibration matrix to results. Args: raw_data (dict or list): The data to be corrected. Can be in a number of forms: Form 1: a counts dictionary from results.get_counts Form 2: a list of counts of `length==len(state_labels)` Form 3: a list of counts of `length==M*len(state_labels)` where M is an integer (e.g. for use with the tomography data) Form 4: a qiskit Result method (str): fitting method. If `None`, then least_squares is used. ``pseudo_inverse``: direct inversion of the A matrix ``least_squares``: constrained to have physical probabilities Returns: dict or list: The corrected data in the same form as `raw_data` Raises: QiskitError: if `raw_data` is not an integer multiple of the number of calibrated states. 
"""<line_sep># check forms of raw_data <if_stmt>isinstance(raw_data dict)# counts dictionary <block_start><for_stmt>data_label raw_data.keys()<block_start><if_stmt>data_label<not><in>self._state_labels<block_start><raise>QiskitError("Unexpected state label '"+data_label+"', verify the fitter's state labels "<concat>"correspond to the input data")<block_end><block_end>data_format=0<line_sep># convert to form2 raw_data2=[np.zeros(len(self._state_labels) dtype=float)]<for_stmt>stateidx,state enumerate(self._state_labels)<block_start>raw_data2[0][stateidx]=raw_data.get(state 0)<block_end><block_end><elif_stmt>isinstance(raw_data list)<block_start>size_ratio=len(raw_data)/len(self._state_labels)<if_stmt>len(raw_data)<eq>len(self._state_labels)<block_start>data_format=1<line_sep>raw_data2=[raw_data]<block_end><elif_stmt>int(size_ratio)<eq>size_ratio<block_start>data_format=2<line_sep>size_ratio=int(size_ratio)<line_sep># make the list into chunks the size of state_labels for easier # processing raw_data2=np.zeros([size_ratio len(self._state_labels)])<for_stmt>i range(size_ratio)<block_start>raw_data2[i][:]=raw_data[i<times>len(self._state_labels):(i+1)<times>len(self._state_labels)]<block_end><block_end><else_stmt><block_start><raise>QiskitError("Data list is not an integer multiple "<concat>"of the number of calibrated states")<block_end><block_end><elif_stmt>isinstance(raw_data qiskit.result.result.Result)# extract out all the counts, re-call the function with the # counts and push back into the new result <block_start>new_result=deepcopy(raw_data)<line_sep>new_counts_list=parallel_map(self._apply_correction [resultidx<for>resultidx,_ enumerate(raw_data.results)] task_args=(raw_data method))<for_stmt>resultidx,new_counts new_counts_list<block_start>new_result.results[resultidx].data.counts=new_counts<block_end><return>new_result<block_end><else_stmt><block_start><raise>QiskitError("Unrecognized type for raw_data.")<block_end><if_stmt>method<eq>'pseudo_inverse'<block_start>pinv_cal_mat=la.pinv(self._cal_matrix)<block_end># Apply the correction <for_stmt>data_idx,_ enumerate(raw_data2)<block_start><if_stmt>method<eq>'pseudo_inverse'<block_start>raw_data2[data_idx]=np.dot(pinv_cal_mat raw_data2[data_idx])<block_end><elif_stmt>method<eq>'least_squares'<block_start>nshots=sum(raw_data2[data_idx])<def_stmt>fun x<block_start><return>sum((raw_data2[data_idx]-np.dot(self._cal_matrix x))<power>2)<block_end>x0=np.random.rand(len(self._state_labels))<line_sep>x0=x0/sum(x0)<line_sep>cons=({'type':'eq' 'fun':<lambda>x:nshots-sum(x)})<line_sep>bnds=tuple((0 nshots)<for>x x0)<line_sep>res=minimize(fun x0 method='SLSQP' constraints=cons bounds=bnds tol=1e-6)<line_sep>raw_data2[data_idx]=res.x<block_end><else_stmt><block_start><raise>QiskitError("Unrecognized method.")<block_end><block_end><if_stmt>data_format<eq>2# flatten back out the list <block_start>raw_data2=raw_data2.flatten()<block_end><elif_stmt>data_format<eq>0# convert back into a counts dictionary <block_start>new_count_dict={}<for_stmt>stateidx,state enumerate(self._state_labels)<block_start><if_stmt>raw_data2[0][stateidx]<ne>0<block_start>new_count_dict[state]=raw_data2[0][stateidx]<block_end><block_end>raw_data2=new_count_dict<block_end><else_stmt># TODO: should probably change to: # raw_data2 = raw_data2[0].tolist() <block_start>raw_data2=raw_data2[0]<block_end><return>raw_data2<block_end><def_stmt>_apply_correction self resultidx raw_data method<block_start>"""Wrapper to call apply with a counts 
dictionary."""<line_sep>new_counts=self.apply(raw_data.get_counts(resultidx) method=method)<line_sep><return>resultidx new_counts<block_end><block_end><class_stmt>TensoredFilter()<block_start>""" Tensored measurement error mitigation filter. Produced from a tensored measurement calibration fitter and can be applied to data. """<def_stmt>__init__ self cal_matrices:np.matrix substate_labels_list:list mit_pattern:list<block_start>""" Initialize a tensored measurement error mitigation filter using the cal_matrices from a tensored measurement calibration fitter. A simple usage this class is explained [here] (https://qiskit.org/documentation/tutorials/noise/3_measurement_error_mitigation.html). Args: cal_matrices: the calibration matrices for applying the correction. substate_labels_list: for each calibration matrix a list of the states (as strings, states in the subspace) mit_pattern: for each calibration matrix a list of the logical qubit indices (as int, states in the subspace) """<line_sep>self._cal_matrices=cal_matrices<line_sep>self._qubit_list_sizes=[]<line_sep>self._indices_list=[]<line_sep>self._substate_labels_list=[]<line_sep>self.substate_labels_list=substate_labels_list<line_sep>self._mit_pattern=mit_pattern<block_end>@property<def_stmt>cal_matrices self<block_start>"""Return cal_matrices."""<line_sep><return>self._cal_matrices<block_end>@cal_matrices.setter<def_stmt>cal_matrices self new_cal_matrices<block_start>"""Set cal_matrices."""<line_sep>self._cal_matrices=deepcopy(new_cal_matrices)<block_end>@property<def_stmt>substate_labels_list self<block_start>"""Return _substate_labels_list"""<line_sep><return>self._substate_labels_list<block_end>@substate_labels_list.setter<def_stmt>substate_labels_list self new_substate_labels_list<block_start>"""Return _substate_labels_list"""<line_sep>self._substate_labels_list=new_substate_labels_list<line_sep># get the number of qubits in each subspace self._qubit_list_sizes=[]<for_stmt>_,substate_label_list enumerate(self._substate_labels_list)<block_start>self._qubit_list_sizes.append(int(np.log2(len(substate_label_list))))<block_end># get the indices in the calibration matrix self._indices_list=[]<for_stmt>_,sub_labels enumerate(self._substate_labels_list)<block_start>self._indices_list.append({lab:ind<for>ind,lab enumerate(sub_labels)})<block_end><block_end>@property<def_stmt>qubit_list_sizes self<block_start>"""Return _qubit_list_sizes."""<line_sep><return>self._qubit_list_sizes<block_end>@property<def_stmt>nqubits self<block_start>"""Return the number of qubits. See also MeasurementFilter.apply() """<line_sep><return>sum(self._qubit_list_sizes)<block_end><def_stmt>apply self raw_data:Union[qiskit.result.result.Result dict] method:str='least_squares' meas_layout:List[int]=<none><block_start>""" Apply the calibration matrices to results. Args: raw_data (dict or Result): The data to be corrected. Can be in one of two forms: * A counts dictionary from results.get_counts * A Qiskit Result method (str): fitting method. The following methods are supported: * 'pseudo_inverse': direct inversion of the cal matrices. Mitigated counts can contain negative values and the sum of counts would not equal to the shots. Mitigation is conducted qubit wise: For each qubit, mitigate the whole counts using the calibration matrices which affect the corresponding qubit. For example, assume we are mitigating the 3rd bit of the 4-bit counts using '2\times 2' calibration matrix `A_3`. 
When mitigating the count of '0110' in this step, the following formula is applied: `count['0110'] = A_3^{-1}[1, 0]*count['0100'] + A_3^{-1}[1, 1]*count['0110']`. The total time complexity of this method is `O(m2^{n + t})`, where `n` is the size of calibrated qubits, `m` is the number of sets in `mit_pattern`, and `t` is the size of largest set of mit_pattern. If the `mit_pattern` is shaped like `[[0], [1], [2], ..., [n-1]]`, which corresponds to the tensor product noise model without cross-talk, then the time complexity would be `O(n2^n)`. If the `mit_pattern` is shaped like `[[0, 1, 2, ..., n-1]]`, which exactly corresponds to the complete error mitigation, then the time complexity would be `O(2^(n+n)) = O(4^n)`. * 'least_squares': constrained to have physical probabilities. Instead of directly applying inverse calibration matrices, this method solve a constrained optimization problem to find the closest probability vector to the result from 'pseudo_inverse' method. Sequential least square quadratic programming (SLSQP) is used in the internal process. Every updating step in SLSQP takes `O(m2^{n+t})` time. Since this method is using the SLSQP optimization over the vector with lenght `2^n`, the mitigation for 8 bit counts with the `mit_pattern = [[0], [1], [2], ..., [n-1]]` would take 10 seconds or more. * If `None`, 'least_squares' is used. meas_layout (list of int): the mapping from classical registers to qubits * If you measure qubit `2` to clbit `0`, `0` to `1`, and `1` to `2`, the list becomes `[2, 0, 1]` * If `None`, flatten(mit_pattern) is used. Returns: dict or Result: The corrected data in the same form as raw_data Raises: QiskitError: if raw_data is not in a one of the defined forms. """<line_sep>all_states=count_keys(self.nqubits)<line_sep>num_of_states=2<power>self.nqubits<if_stmt>meas_layout<is><none><block_start>meas_layout=[]<for_stmt>qubits self._mit_pattern<block_start>meas_layout<augadd>qubits<block_end><block_end># check forms of raw_data <if_stmt>isinstance(raw_data dict)# counts dictionary # convert to list <block_start>raw_data2=[np.zeros(num_of_states dtype=float)]<for_stmt>state,count raw_data.items()<block_start>stateidx=int(state 2)<line_sep>raw_data2[0][stateidx]=count<block_end><block_end><elif_stmt>isinstance(raw_data qiskit.result.result.Result)# extract out all the counts, re-call the function with the # counts and push back into the new result <block_start>new_result=deepcopy(raw_data)<line_sep>new_counts_list=parallel_map(self._apply_correction [resultidx<for>resultidx,_ enumerate(raw_data.results)] task_args=(raw_data method meas_layout))<for_stmt>resultidx,new_counts new_counts_list<block_start>new_result.results[resultidx].data.counts=new_counts<block_end><return>new_result<block_end><else_stmt><block_start><raise>QiskitError("Unrecognized type for raw_data.")<block_end><if_stmt>method<eq>'pseudo_inverse'<block_start>pinv_cal_matrices=[]<for_stmt>cal_mat self._cal_matrices<block_start>pinv_cal_matrices.append(la.pinv(cal_mat))<block_end><block_end>meas_layout=meas_layout[::-1]# reverse endian qubits_to_clbits=[-1<for>_ range(max(meas_layout)+1)]<for_stmt>i,qubit enumerate(meas_layout)<block_start>qubits_to_clbits[qubit]=i<block_end># Apply the correction <for_stmt>data_idx,_ enumerate(raw_data2)<block_start><if_stmt>method<eq>'pseudo_inverse'<block_start><for_stmt>pinv_cal_mat,pos_qubits,indices zip(pinv_cal_matrices self._mit_pattern self._indices_list)<block_start>inv_mat_dot_x=np.zeros([num_of_states] 
dtype=float)<line_sep>pos_clbits=[qubits_to_clbits[qubit]<for>qubit pos_qubits]<for_stmt>state_idx,state enumerate(all_states)<block_start>first_index=self.compute_index_of_cal_mat(state pos_clbits indices)<for_stmt>i range(len(pinv_cal_mat))# i is index of pinv_cal_mat <block_start>source_state=self.flip_state(state i pos_clbits)<line_sep>second_index=self.compute_index_of_cal_mat(source_state pos_clbits indices)<line_sep>inv_mat_dot_x[state_idx]<augadd>pinv_cal_mat[first_index second_index]<times>raw_data2[data_idx][int(source_state 2)]<block_end><block_end>raw_data2[data_idx]=inv_mat_dot_x<block_end><block_end><elif_stmt>method<eq>'least_squares'<block_start><def_stmt>fun x<block_start>mat_dot_x=deepcopy(x)<for_stmt>cal_mat,pos_qubits,indices zip(self._cal_matrices self._mit_pattern self._indices_list)<block_start>res_mat_dot_x=np.zeros([num_of_states] dtype=float)<line_sep>pos_clbits=[qubits_to_clbits[qubit]<for>qubit pos_qubits]<for_stmt>state_idx,state enumerate(all_states)<block_start>second_index=self.compute_index_of_cal_mat(state pos_clbits indices)<for_stmt>i range(len(cal_mat))<block_start>target_state=self.flip_state(state i pos_clbits)<line_sep>first_index=self.compute_index_of_cal_mat(target_state pos_clbits indices)<line_sep>res_mat_dot_x[int(target_state 2)]<augadd>cal_mat[first_index second_index]<times>mat_dot_x[state_idx]<block_end><block_end>mat_dot_x=res_mat_dot_x<block_end><return>sum((raw_data2[data_idx]-mat_dot_x)<power>2)<block_end>x0=np.random.rand(num_of_states)<line_sep>x0=x0/sum(x0)<line_sep>nshots=sum(raw_data2[data_idx])<line_sep>cons=({'type':'eq' 'fun':<lambda>x:nshots-sum(x)})<line_sep>bnds=tuple((0 nshots)<for>x x0)<line_sep>res=minimize(fun x0 method='SLSQP' constraints=cons bounds=bnds tol=1e-6)<line_sep>raw_data2[data_idx]=res.x<block_end><else_stmt><block_start><raise>QiskitError("Unrecognized method.")<block_end><block_end># convert back into a counts dictionary new_count_dict={}<for_stmt>state_idx,state enumerate(all_states)<block_start><if_stmt>raw_data2[0][state_idx]<ne>0<block_start>new_count_dict[state]=raw_data2[0][state_idx]<block_end><block_end><return>new_count_dict<block_end><def_stmt>flip_state self state:str mat_index:int flip_poses:List[int]<arrow>str<block_start>"""Flip the state according to the chosen qubit positions"""<line_sep>flip_poses=[pos<for>i,pos enumerate(flip_poses)<if>(mat_index<rshift>i)&1]<line_sep>flip_poses=sorted(flip_poses)<line_sep>new_state=""<line_sep>pos=0<for_stmt>flip_pos flip_poses<block_start>new_state<augadd>state[pos:flip_pos]<line_sep>new_state<augadd>str(int(state[flip_pos] 2)^1)# flip the state pos=flip_pos+1<block_end>new_state<augadd>state[pos:]<line_sep><return>new_state<block_end><def_stmt>compute_index_of_cal_mat self state:str pos_qubits:List[int] indices:dict<arrow>int<block_start>"""Return the index of (pseudo inverse) calibration matrix for the input quantum state"""<line_sep>sub_state=""<for_stmt>pos pos_qubits<block_start>sub_state<augadd>state[pos]<block_end><return>indices[sub_state]<block_end><def_stmt>_apply_correction self resultidx:int raw_data:qiskit.result.result.Result method:str meas_layout:List[int]<block_start>"""Wrapper to call apply with a counts dictionary."""<line_sep>new_counts=self.apply(raw_data.get_counts(resultidx) method=method meas_layout=meas_layout)<line_sep><return>resultidx new_counts<block_end><block_end>
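A hedged usage sketch for the MeasurementFilter class defined above. The 2x2 calibration matrix and the raw counts are invented for illustration (in a real workflow the matrix would come from a measurement-calibration fitter), and the qiskit.ignis import path is assumed.

# Hedged sketch; calibration matrix, counts and import path are assumptions.
import numpy as np
from qiskit.ignis.mitigation.measurement import MeasurementFilter

cal_matrix = np.array([[0.98, 0.04],
                       [0.02, 0.96]])          # column j: prepared state j
meas_filter = MeasurementFilter(cal_matrix, state_labels=['0', '1'])

raw_counts = {'0': 480, '1': 544}              # e.g. from results.get_counts()
mitigated = meas_filter.apply(raw_counts, method='least_squares')
print(mitigated)                               # corrected counts dictionary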
# # All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or # its licensors. # # For complete copyright and license terms please see the LICENSE at the root of this # distribution (the "License"). All use of this software is governed by the License, # or, if provided, by the license below or the license accompanying this file. Do not # remove or modify any license notices. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # <import_stmt>os<import_from_stmt>az_code_gen.base *<import_from_stmt>AzReflectionCpp format_cpp_annotations<class_stmt>AZEBusInline_Driver(TemplateDriver)<block_start><def_stmt>apply_transformations self json_object<block_start>format_cpp_annotations(json_object)<block_end><def_stmt>render_templates self input_file **template_kwargs<block_start>input_file_name,input_file_ext=os.path.splitext(input_file)<line_sep>self.render_template_to_file("AzEBusInline.tpl" template_kwargs '{}.generated.inline'.format(input_file_name))<block_end><block_end># Factory function - called from launcher <def_stmt>create_drivers env<block_start><return>[AZEBusInline_Driver(env)]<block_end>
# coding: utf-8 <import_from_future_stmt> unicode_literals<import_stmt>re<import_from_stmt>.adobepass AdobePassIE<import_from_stmt>..compat compat_str<import_from_stmt>..utils fix_xml_ampersands xpath_text int_or_none determine_ext float_or_none parse_duration xpath_attr update_url_query ExtractorError strip_or_none url_or_none <class_stmt>TurnerBaseIE(AdobePassIE)<block_start>_AKAMAI_SPE_TOKEN_CACHE={}<def_stmt>_extract_timestamp self video_data<block_start><return>int_or_none(xpath_attr(video_data 'dateCreated' 'uts'))<block_end><def_stmt>_add_akamai_spe_token self tokenizer_src video_url content_id ap_data custom_tokenizer_query=<none><block_start>secure_path=self._search_regex(r'https?://[^/]+(.+/)' video_url 'secure path')+'*'<line_sep>token=self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)<if_stmt><not>token<block_start>query={'path':secure_path }<if_stmt>custom_tokenizer_query<block_start>query.update(custom_tokenizer_query)<block_end><else_stmt><block_start>query['videoId']=content_id<block_end><if_stmt>ap_data.get('auth_required')<block_start>query['accessToken']=self._extract_mvpd_auth(ap_data['url'] content_id ap_data['site_name'] ap_data['site_name'])<block_end>auth=self._download_xml(tokenizer_src content_id query=query)<line_sep>error_msg=xpath_text(auth 'error/msg')<if_stmt>error_msg<block_start><raise>ExtractorError(error_msg expected=<true>)<block_end>token=xpath_text(auth 'token')<if_stmt><not>token<block_start><return>video_url<block_end>self._AKAMAI_SPE_TOKEN_CACHE[secure_path]=token<block_end><return>video_url+'?hdnea='+token<block_end><def_stmt>_extract_cvp_info self data_src video_id path_data={} ap_data={} fatal=<false><block_start>video_data=self._download_xml(data_src video_id transform_source=<lambda>s:fix_xml_ampersands(s).strip() fatal=fatal)<if_stmt><not>video_data<block_start><return>{}<block_end>video_id=video_data.attrib['id']<line_sep>title=xpath_text(video_data 'headline' fatal=<true>)<line_sep>content_id=xpath_text(video_data 'contentId')<or>video_id<line_sep># rtmp_src = xpath_text(video_data, 'akamai/src') # if rtmp_src: # split_rtmp_src = rtmp_src.split(',') # if len(split_rtmp_src) == 2: # rtmp_src = split_rtmp_src[1] # aifp = xpath_text(video_data, 'akamai/aifp', default='') urls=[]<line_sep>formats=[]<line_sep>thumbnails=[]<line_sep>subtitles={}<line_sep>rex=re.compile(r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')<line_sep># Possible formats locations: files/file, files/groupFiles/files # and maybe others <for_stmt>video_file video_data.findall('.//file')<block_start>video_url=url_or_none(video_file.text.strip())<if_stmt><not>video_url<block_start><continue><block_end>ext=determine_ext(video_url)<if_stmt>video_url.startswith('/mp4:protected/')<block_start><continue><line_sep># TODO Correct extraction for these files # protected_path_data = path_data.get('protected') # if not protected_path_data or not rtmp_src: # continue # protected_path = self._search_regex( # r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path') # auth = self._download_webpage( # protected_path_data['tokenizer_src'], query={ # 'path': protected_path, # 'videoId': content_id, # 'aifp': aifp, # }) # token = xpath_text(auth, 'token') # if not token: # continue # video_url = rtmp_src + video_url + '?' 
+ token <block_end><elif_stmt>video_url.startswith('/secure/')<block_start>secure_path_data=path_data.get('secure')<if_stmt><not>secure_path_data<block_start><continue><block_end>video_url=self._add_akamai_spe_token(secure_path_data['tokenizer_src'] secure_path_data['media_src']+video_url content_id ap_data)<block_end><elif_stmt><not>re.match('https?://' video_url)<block_start>base_path_data=path_data.get(ext path_data.get('default' {}))<line_sep>media_src=base_path_data.get('media_src')<if_stmt><not>media_src<block_start><continue><block_end>video_url=media_src+video_url<block_end><if_stmt>video_url<in>urls<block_start><continue><block_end>urls.append(video_url)<line_sep>format_id=video_file.get('bitrate')<if_stmt>ext<in>('scc' 'srt' 'vtt')<block_start>subtitles.setdefault('en' []).append({'ext':ext 'url':video_url })<block_end><elif_stmt>ext<eq>'png'<block_start>thumbnails.append({'id':format_id 'url':video_url })<block_end><elif_stmt>ext<eq>'smil'<block_start>formats.extend(self._extract_smil_formats(video_url video_id fatal=<false>))<block_end><elif_stmt>re.match(r'https?://[^/]+\.akamaihd\.net/[iz]/' video_url)<block_start>formats.extend(self._extract_akamai_formats(video_url video_id {'hds':path_data.get('f4m' {}).get('host') # nba.cdn.turner.com, ht.cdn.turner.com, ht2.cdn.turner.com # ht3.cdn.turner.com, i.cdn.turner.com, s.cdn.turner.com # ssl.cdn.turner.com 'http':'pmd.cdn.turner.com' }))<block_end><elif_stmt>ext<eq>'m3u8'<block_start>m3u8_formats=self._extract_m3u8_formats(video_url video_id 'mp4' m3u8_id=format_id<or>'hls' fatal=<false>)<if_stmt>'/secure/'<in>video_url<and>'?hdnea='<in>video_url<block_start><for_stmt>f m3u8_formats<block_start>f['_seekable']=<false><block_end><block_end>formats.extend(m3u8_formats)<block_end><elif_stmt>ext<eq>'f4m'<block_start>formats.extend(self._extract_f4m_formats(update_url_query(video_url {'hdcore':'3.7.0'}) video_id f4m_id=format_id<or>'hds' fatal=<false>))<block_end><else_stmt><block_start>f={'format_id':format_id 'url':video_url 'ext':ext }<line_sep>mobj=rex.search(video_url)<if_stmt>mobj<block_start>f.update({'width':int(mobj.group('width')) 'height':int(mobj.group('height')) 'tbr':int_or_none(mobj.group('bitrate')) })<block_end><elif_stmt>isinstance(format_id compat_str)<block_start><if_stmt>format_id.isdigit()<block_start>f['tbr']=int(format_id)<block_end><else_stmt><block_start>mobj=re.match(r'ios_(audio|[0-9]+)$' format_id)<if_stmt>mobj<block_start><if_stmt>mobj.group(1)<eq>'audio'<block_start>f.update({'vcodec':'none' 'ext':'m4a' })<block_end><else_stmt><block_start>f['tbr']=int(mobj.group(1))<block_end><block_end><block_end><block_end>formats.append(f)<block_end><block_end>self._sort_formats(formats)<for_stmt>source video_data.findall('closedCaptions/source')<block_start><for_stmt>track source.findall('track')<block_start>track_url=url_or_none(track.get('url'))<if_stmt><not>track_url<or>track_url.endswith('/big')<block_start><continue><block_end>lang=track.get('lang')<or>track.get('label')<or>'en'<line_sep>subtitles.setdefault(lang []).append({'url':track_url 'ext':{'scc':'scc' 'webvtt':'vtt' 'smptett':'tt' }.get(source.get('format'))})<block_end><block_end>thumbnails.extend({'id':image.get('cut')<or>image.get('name') 'url':image.text 'width':int_or_none(image.get('width')) 'height':int_or_none(image.get('height')) }<for>image video_data.findall('images/image'))<line_sep>is_live=xpath_text(video_data 'isLive')<eq>'true'<line_sep><return>{'id':video_id 'title':self._live_title(title)<if>is_live<else>title 'formats':formats 
'subtitles':subtitles 'thumbnails':thumbnails 'thumbnail':xpath_text(video_data 'poster') 'description':strip_or_none(xpath_text(video_data 'description')) 'duration':parse_duration(xpath_text(video_data 'length')<or>xpath_text(video_data 'trt')) 'timestamp':self._extract_timestamp(video_data) 'upload_date':xpath_attr(video_data 'metas' 'version') 'series':xpath_text(video_data 'showTitle') 'season_number':int_or_none(xpath_text(video_data 'seasonNumber')) 'episode_number':int_or_none(xpath_text(video_data 'episodeNumber')) 'is_live':is_live }<block_end><def_stmt>_extract_ngtv_info self media_id tokenizer_query ap_data=<none><block_start>streams_data=self._download_json('http://medium.ngtv.io/media/%s/tv'%media_id media_id)['media']['tv']<line_sep>duration=<none><line_sep>chapters=[]<line_sep>formats=[]<for_stmt>supported_type ('unprotected' 'bulkaes')<block_start>stream_data=streams_data.get(supported_type {})<line_sep>m3u8_url=stream_data.get('secureUrl')<or>stream_data.get('url')<if_stmt><not>m3u8_url<block_start><continue><block_end><if_stmt>stream_data.get('playlistProtection')<eq>'spe'<block_start>m3u8_url=self._add_akamai_spe_token('http://token.ngtv.io/token/token_spe' m3u8_url media_id ap_data<or>{} tokenizer_query)<block_end>formats.extend(self._extract_m3u8_formats(m3u8_url media_id 'mp4' m3u8_id='hls' fatal=<false>))<line_sep>duration=float_or_none(stream_data.get('totalRuntime'))<if_stmt><not>chapters<block_start><for_stmt>chapter stream_data.get('contentSegments' [])<block_start>start_time=float_or_none(chapter.get('start'))<line_sep>chapter_duration=float_or_none(chapter.get('duration'))<if_stmt>start_time<is><none><or>chapter_duration<is><none><block_start><continue><block_end>chapters.append({'start_time':start_time 'end_time':start_time+chapter_duration })<block_end><block_end><block_end>self._sort_formats(formats)<line_sep><return>{'formats':formats 'chapters':chapters 'duration':duration }<block_end><block_end>
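A hedged sketch of how a hypothetical site extractor could build on TurnerBaseIE. The URL pattern, CVP feed URL, and media_src host are placeholders rather than real endpoints, and the youtube_dl.extractor.turner import path is assumed; only the _extract_cvp_info call follows the signature defined above.

# Hedged sketch of a hypothetical subclass; all URLs below are placeholders.
from youtube_dl.extractor.turner import TurnerBaseIE


class ExampleTurnerIE(TurnerBaseIE):
    _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>[^/?#]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Hand the CVP metadata URL to the base class, which assembles the
        # formats, thumbnails and subtitles dictionaries.
        return self._extract_cvp_info(
            'http://example.com/cvp/videos/%s.xml' % video_id, video_id,
            path_data={
                'default': {'media_src': 'http://example.com/media'},
            })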
# Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 <import_stmt>fnmatch<import_from_stmt>io StringIO<import_stmt>json<import_stmt>os<import_stmt>shutil<import_stmt>zipfile<import_stmt>re<import_from_stmt>datetime datetime timedelta tzinfo<import_from_stmt>distutils.util strtobool<import_stmt>boto3<import_stmt>placebo<import_from_stmt>botocore.response StreamingBody<import_from_stmt>placebo pill<import_from_stmt>c7n.testing CustodianTestCore<import_from_stmt>.constants ACCOUNT_ID<line_sep># Custodian Test Account. This is used only for testing. # Access is available for community project maintainers. ########################################################################### # BEGIN PLACEBO MONKEY PATCH # # Placebo is effectively abandoned upstream, since mitch went back to work at AWS, irony... # These monkeypatch patches represent fixes on trunk of that repo that have not been released # into an extant version, we carry them here. We can drop this when this issue is resolved # # https://github.com/garnaat/placebo/issues/63 # # License - Apache 2.0 # Copyright (c) 2015 <NAME> <class_stmt>UTC(tzinfo)<block_start>"""UTC"""<def_stmt>utcoffset self dt<block_start><return>timedelta(0)<block_end><def_stmt>tzname self dt<block_start><return>"UTC"<block_end><def_stmt>dst self dt<block_start><return>timedelta(0)<block_end><block_end>utc=UTC()<def_stmt>deserialize obj<block_start>"""Convert JSON dicts back into objects."""<line_sep># Be careful of shallow copy here target=dict(obj)<line_sep>class_name=<none><if_stmt>"__class__"<in>target<block_start>class_name=target.pop("__class__")<block_end><if_stmt>"__module__"<in>obj<block_start>obj.pop("__module__")<block_end># Use getattr(module, class_name) for custom types if needed <if_stmt>class_name<eq>"datetime"<block_start><return>datetime(tzinfo=utc **target)<block_end><if_stmt>class_name<eq>"StreamingBody"<block_start><return>StringIO(target["body"])<block_end># Return unrecognized structures as-is <return>obj<block_end><def_stmt>serialize obj<block_start>"""Convert objects into JSON structures."""<line_sep># Record class and module information for deserialization result={"__class__":obj.__class__.__name__}<try_stmt><block_start>result["__module__"]=obj.__module__<block_end><except_stmt>AttributeError<block_start><pass><block_end># Convert objects to dictionary representation based on type <if_stmt>isinstance(obj datetime)<block_start>result["year"]=obj.year<line_sep>result["month"]=obj.month<line_sep>result["day"]=obj.day<line_sep>result["hour"]=obj.hour<line_sep>result["minute"]=obj.minute<line_sep>result["second"]=obj.second<line_sep>result["microsecond"]=obj.microsecond<line_sep><return>result<block_end><if_stmt>isinstance(obj StreamingBody)<block_start>result["body"]=obj.read()<line_sep>obj._raw_stream=StringIO(result["body"])<line_sep>obj._amount_read=0<line_sep><return>result<block_end><if_stmt>isinstance(obj bytes)<block_start><return>obj.decode('utf8')<block_end># Raise a TypeError if the object isn't recognized <raise>TypeError("Type not serializable")<block_end>pill.FakeHttpResponse.raw=<none><line_sep>placebo.pill.serialize=serialize<line_sep>placebo.pill.deserialize=deserialize<line_sep># END PLACEBO MONKEY ########################################################################## <class_stmt>BluePill(pill.Pill)<block_start><def_stmt>playback self<block_start>super(BluePill self).playback()<line_sep>self._avail=self.get_available()<block_end><def_stmt>get_available 
self<block_start><return>{os.path.join(self.data_path n)<for>n fnmatch.filter(os.listdir(self.data_path) "*.json")}<block_end><def_stmt>get_next_file_path self service operation<block_start>fn,format=super(BluePill self).get_next_file_path(service operation)<line_sep># couple of double use cases <if_stmt>fn<in>self._avail<block_start>self._avail.remove(fn)<block_end><else_stmt><block_start>print("\ndouble use %s\n"%fn)<block_end><return>(fn format)<block_end><def_stmt>stop self<block_start>result=super(BluePill self).stop()<if_stmt>self._avail<block_start>print("Unused json files \n %s"%("\n".join(sorted(self._avail))))<block_end><return>result<block_end><block_end><class_stmt>ZippedPill(pill.Pill)<block_start><def_stmt>__init__ self path prefix=<none> debug=<false><block_start>super(ZippedPill self).__init__(prefix debug)<line_sep>self.path=path<line_sep>self._used=set()<line_sep>self.archive=<none><block_end><def_stmt>playback self<block_start>self.archive=zipfile.ZipFile(self.path "r")<line_sep>self._files=set(self.archive.namelist())<line_sep><return>super(ZippedPill self).playback()<block_end><def_stmt>record self<block_start>self.archive=zipfile.ZipFile(self.path "a" zipfile.ZIP_DEFLATED)<line_sep>self._files=set()<line_sep>files={n<for>n self.archive.namelist()<if>n.startswith(self.prefix)}<if_stmt><not>files<block_start><return>super(ZippedPill self).record()<block_end># We can't update files in a zip, so copy self.archive.close()<line_sep>os.rename(self.path "%s.tmp"%self.path)<line_sep>src=zipfile.ZipFile("%s.tmp"%self.path "r")<line_sep>self.archive=zipfile.ZipFile(self.path "w" zipfile.ZIP_DEFLATED)<for_stmt>n src.namelist()<block_start><if_stmt>n<in>files<block_start><continue><block_end>self.archive.writestr(n src.read(n))<block_end>os.remove("%s.tmp"%self.path)<line_sep><return>super(ZippedPill self).record()<block_end><def_stmt>stop self<block_start>super(ZippedPill self).stop()<if_stmt>self.archive<block_start>self.archive.close()<block_end><block_end><def_stmt>save_response self service operation response_data http_response=200<block_start>filepath=self.get_new_file_path(service operation)<line_sep>pill.LOG.debug("save_response: path=%s" filepath)<line_sep>json_data={"status_code":http_response "data":response_data}<line_sep>self.archive.writestr(filepath json.dumps(json_data indent=4 default=pill.serialize) zipfile.ZIP_DEFLATED )<line_sep>self._files.add(filepath)<block_end><def_stmt>load_response self service operation<block_start>response_file=self.get_next_file_path(service operation)<line_sep>self._used.add(response_file)<line_sep>pill.LOG.debug("load_responses: %s" response_file)<line_sep>response_data=json.loads(self.archive.read(response_file) object_hook=pill.deserialize)<line_sep><return>(pill.FakeHttpResponse(response_data["status_code"]) response_data["data"])<block_end><def_stmt>get_new_file_path self service operation<block_start>base_name="{0}.{1}".format(service operation)<if_stmt>self.prefix<block_start>base_name="{0}.{1}".format(self.prefix base_name)<block_end>pill.LOG.debug("get_new_file_path: %s" base_name)<line_sep>index=0<line_sep>glob_pattern=os.path.join(self._data_path base_name+"*")<for_stmt>file_path fnmatch.filter(self._files glob_pattern)<block_start>file_name=os.path.basename(file_path)<line_sep>m=self.filename_re.match(file_name)<if_stmt>m<block_start>i=int(m.group("index"))<if_stmt>i<g>index<block_start>index=i<block_end><block_end><block_end>index<augadd>1<line_sep><return>os.path.join(self._data_path "{0}_{1}.json".format(base_name 
index))<block_end><def_stmt>get_next_file_path self service operation<block_start>base_name="{0}.{1}".format(service operation)<if_stmt>self.prefix<block_start>base_name="{0}.{1}".format(self.prefix base_name)<block_end>pill.LOG.debug("get_next_file_path: %s" base_name)<line_sep>next_file=<none><while_stmt>next_file<is><none><block_start>index=self._index.setdefault(base_name 1)<line_sep>fn=os.path.join(self._data_path base_name+"_{0}.json".format(index))<line_sep>fn=fn.replace('\\' '/')<if_stmt>fn<in>self._files<block_start>next_file=fn<line_sep>self._index[base_name]<augadd>1<line_sep>self._files.add(fn)<block_end><elif_stmt>index<ne>1<block_start>self._index[base_name]=1<block_end><else_stmt># we are looking for the first index and it's not here <block_start><raise>IOError("response file ({0}) not found".format(fn))<block_end><block_end><return>fn<block_end><block_end><def_stmt>attach session data_path prefix=<none> debug=<false><block_start>pill=ZippedPill(data_path prefix=prefix debug=debug)<line_sep>pill.attach(session prefix)<line_sep><return>pill<block_end><class_stmt>RedPill(pill.Pill)<block_start><def_stmt>datetime_converter self obj<block_start><if_stmt>isinstance(obj datetime)<block_start><return>obj.isoformat()<block_end><block_end><def_stmt>save_response self service operation response_data http_response=200<block_start>""" Override to sanitize response metadata and account_ids """<line_sep># aws sso setups involve a short lived credential transfer <if_stmt>service<eq>"portal.sso"<block_start><return><block_end><if_stmt>'ResponseMetadata'<in>response_data<block_start>response_data['ResponseMetadata']={}<block_end>response_data=json.dumps(response_data default=serialize)<line_sep>response_data=re.sub(r"\b\d{12}\b" ACCOUNT_ID response_data)# noqa response_data=json.loads(response_data object_hook=deserialize)<line_sep>super(RedPill self).save_response(service operation response_data http_response)<block_end><block_end><class_stmt>PillTest(CustodianTestCore)<block_start>archive_path=os.path.join(os.path.dirname(os.path.abspath(__file__)) "placebo_data.zip")<line_sep>placebo_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)) "data" "placebo")<line_sep>output_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)) "data" "output")<line_sep>recording=<false><def_stmt>cleanUp self<block_start>self.pill=<none><block_end><def_stmt>record_flight_data self test_case zdata=<false> augment=<false> region=<none><block_start>self.recording=<true><line_sep>test_dir=os.path.join(self.placebo_dir test_case)<if_stmt><not>(zdata<or>augment)<block_start><if_stmt>os.path.exists(test_dir)<block_start>shutil.rmtree(test_dir)<block_end>os.makedirs(test_dir)<block_end>session=boto3.Session(region_name=region)<line_sep>default_region=session.region_name<if_stmt><not>zdata<block_start>pill=RedPill()<line_sep>pill.attach(session test_dir)<block_end><else_stmt><block_start>pill=attach(session self.archive_path test_case debug=<true>)<block_end>pill.record()<line_sep>self.pill=pill<line_sep>self.addCleanup(pill.stop)<line_sep>self.addCleanup(self.cleanUp)<class_stmt>FakeFactory<block_start><def_stmt>__call__ fake region=<none> assume=<none><block_start>new_session=<none><line_sep># slightly experimental for test recording, using # cross account assumes, note this will record sts # assume role api calls creds into test data, they will # go stale, but its best to modify before commiting. # Disabled by default. 
<if_stmt>0<and>(assume<is><not><false><and>fake.assume_role)<block_start>client=session.client('sts')<line_sep>creds=client.assume_role(RoleArn=fake.assume_role RoleSessionName='CustodianTest')['Credentials']<line_sep>new_session=boto3.Session(aws_access_key_id=creds['AccessKeyId'] aws_secret_access_key=creds['SecretAccessKey'] aws_session_token=creds['SessionToken'] region_name=region<or>fake.region<or>default_region)<block_end><elif_stmt>region<and>region<ne>default_region<block_start>new_session=boto3.Session(region_name=region)<block_end><if_stmt>new_session<block_start><assert_stmt><not>zdata<line_sep>new_pill=placebo.attach(new_session test_dir debug=<true>)<line_sep>new_pill.record()<line_sep>self.addCleanup(new_pill.stop)<line_sep><return>new_session<block_end><return>session<block_end><block_end><return>FakeFactory()<block_end><def_stmt>replay_flight_data self test_case zdata=<false> region=<none><block_start>""" The `region` argument is to allow functional tests to override the default region. It is unused when replaying stored data. """<if_stmt>strtobool(os.environ.get('C7N_FUNCTIONAL' 'no'))<block_start>self.recording=<true><line_sep><return><lambda>region=region assume=<none>:boto3.Session(region_name=region)<block_end><if_stmt><not>zdata<block_start>test_dir=os.path.join(self.placebo_dir test_case)<if_stmt><not>os.path.exists(test_dir)<block_start><raise>RuntimeError("Invalid Test Dir for flight data %s"%test_dir)<block_end><block_end>session=boto3.Session(region_name=region)<if_stmt><not>zdata<block_start>pill=placebo.attach(session test_dir)<line_sep># pill = BluePill() # pill.attach(session, test_dir) <block_end><else_stmt><block_start>pill=attach(session self.archive_path test_case <false>)<block_end>pill.playback()<line_sep>self.addCleanup(pill.stop)<line_sep>self.addCleanup(self.cleanUp)<line_sep><return><lambda>region=<none> assume=<none>:session<block_end><block_end>
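The pill classes above are normally consumed through PillTest rather than instantiated directly. A minimal sketch of a test using the replay/record helpers, assuming a flight-data directory named "test_describe_instances" already exists under data/placebo; the test name, the service call, and the asserted key are illustrative only.

class ExampleTest(PillTest):

    def test_describe_instances_replay(self):
        # replay_flight_data() returns a session factory; unless C7N_FUNCTIONAL
        # is set, the session answers from the stored placebo JSON responses.
        session_factory = self.replay_flight_data("test_describe_instances")
        client = session_factory().client("ec2")
        response = client.describe_instances()
        self.assertIn("Reservations", response)

    def test_describe_instances_record(self):
        # record_flight_data() hits live AWS and writes sanitized responses
        # (via RedPill) into data/placebo/test_describe_instances/.
        session_factory = self.record_flight_data("test_describe_instances")
        session_factory().client("ec2").describe_instances()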
# Test definitions for Lit, the LLVM test runner. # # This is reusing the LLVM Lit test runner in the interim until the new build # rules are upstreamed. # TODO(b/136126535): remove this custom rule. """Lit runner globbing test """<line_sep>load("//tensorflow:tensorflow.bzl" "filegroup")<line_sep>load("@bazel_skylib//lib:paths.bzl" "paths")<line_sep>load("//tensorflow:tensorflow.bzl" "tf_cc_test" "tf_native_cc_binary" "tf_copts")<line_sep># Default values used by the test runner. _default_test_file_exts=["mlir" ".pbtxt" ".td"]<line_sep>_default_driver="@llvm-project//mlir:run_lit.sh"<line_sep>_default_size="small"<line_sep>_default_tags=[]<line_sep># These are patterns which we should never match, for tests, subdirectories, or # test input data files. _ALWAYS_EXCLUDE=["**/LICENSE.txt" "**/README.txt" "**/lit.local.cfg" # Exclude input files that have spaces in their names, since bazel # cannot cope with such "targets" in the srcs list. "**/* *" "**/* */**" ]<def_stmt>_run_lit_test name test_file data size tags driver features exec_properties<block_start>"""Runs lit on all tests it can find in `data` under tensorflow/compiler/mlir. Note that, due to Bazel's hermetic builds, lit only sees the tests that are included in the `data` parameter, regardless of what other tests might exist in the directory searched. Args: name: str, the name of the test, including extension. data: [str], the data input to the test. size: str, the size of the test. tags: [str], tags to attach to the test. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. """<line_sep>name_without_suffix=test_file[0].split('.')[0]<line_sep>local_test_files=name+".test_files"<line_sep>filegroup(name=local_test_files srcs=native.glob(["data/"+name_without_suffix+"*.mlir" ]) )<line_sep>tf_cc_test(name=name srcs=test_file size=size deps=["//tensorflow/compiler/mlir/disc/tests:mlir_feature_test" "//tensorflow/core:test" "//tensorflow/core:test_main" "//tensorflow/core:testlib" ] data=[":"+local_test_files]+data+["//tensorflow/compiler/mlir/disc:disc_compiler_main" "//tensorflow/compiler/mlir:tf-mlir-translate" "//tensorflow/compiler/mlir:tf-opt" ] )<block_end><def_stmt>glob_op_tests exclude=[] test_file_exts=_default_test_file_exts default_size=_default_size size_override={} data=[] per_test_extra_data={} default_tags=_default_tags tags_override={} driver=_default_driver features=[] exec_properties={}<block_start>"""Creates all plausible Lit tests (and their inputs) under this directory. Args: exclude: [str], paths to exclude (for tests and inputs). test_file_exts: [str], extensions for files that are tests. default_size: str, the test size for targets not in "size_override". size_override: {str: str}, sizes to use for specific tests. data: [str], additional input data to the test. per_test_extra_data: {str: [str]}, extra data to attach to a given file. default_tags: [str], additional tags to attach to the test. tags_override: {str: str}, tags to add to specific tests. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. exec_properties: a dictionary of properties to pass on. """<line_sep># Ignore some patterns by default for tests and input data. 
exclude=_ALWAYS_EXCLUDE+exclude<line_sep>tests=native.glob(["*."+ext<for>ext test_file_exts] exclude=exclude )<line_sep># Run tests individually such that errors can be attributed to a specific # failure. <for_stmt>i range(len(tests))<block_start>curr_test=tests[i]<line_sep># Instantiate this test with updated parameters. lit_test(name=curr_test data=data+per_test_extra_data.get(curr_test []) size=size_override.get(curr_test default_size) tags=default_tags+tags_override.get(curr_test []) driver=driver features=features exec_properties=exec_properties )<block_end><block_end><def_stmt>lit_test name data=[] size=_default_size tags=_default_tags driver=_default_driver features=[] exec_properties={}<block_start>"""Runs test files under lit. Args: name: str, the name of the test. data: [str], labels that should be provided as data inputs. size: str, the size of the test. tags: [str], tags to attach to the test. driver: str, label of the driver shell script. Note: use of a custom driver is not currently supported and specifying a default driver will abort the tests. features: [str], list of extra features to enable. """<line_sep>_run_lit_test(name+".test" [name] data size tags driver features exec_properties)<block_end>
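A BUILD file consuming these macros could look like the sketch below; the load label for this .bzl file and the test file names are assumptions, while the keyword arguments mirror the signatures defined above.

# BUILD file sketch (the .bzl label and file names are hypothetical).
load("//tensorflow/compiler/mlir/disc/tests:glob_lit_test.bzl", "glob_op_tests", "lit_test")

glob_op_tests(
    exclude = ["flaky_case.mlir"],
    test_file_exts = ["mlir"],
    default_size = "medium",
    size_override = {"big_case.mlir": "large"},
    tags_override = {"gpu_case.mlir": ["requires-gpu"]},
)

# A single test can also be declared explicitly.
lit_test(
    name = "simple_case.mlir",
    data = ["//some/package:extra_test_data"],
)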
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, <NAME>. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch - TF 2.0 general utilities."""<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>numpy<line_sep>logger=logging.getLogger(__name__)<def_stmt>convert_tf_weight_name_to_pt_weight_name tf_name start_prefix_to_remove=""<block_start>""" Convert a TF 2.0 model variable name in a pytorch model weight name. Conventions for TF2.0 scopes -> PyTorch attribute names conversions: - '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) - '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) return tuple with: - pytorch model weight name - transpose: boolean indicating weither TF2.0 and PyTorch weights matrices are transposed with regards to each other """<line_sep>tf_name=tf_name.replace(":0" "")# device ids tf_name=re.sub(r"/[^/]*___([^/]*)/" r"/\1/" tf_name)<line_sep># '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) tf_name=tf_name.replace("_._" "/")<line_sep># '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) tf_name=re.sub(r"//+" "/" tf_name)# Remove empty levels at the end tf_name=tf_name.split("/")# Convert from TF2.0 '/' separators to PyTorch '.' separators tf_name=tf_name[1:]# Remove level zero # When should we transpose the weights transpose=bool(tf_name[-1]<eq>"kernel"<or>"emb_projs"<in>tf_name<or>"out_projs"<in>tf_name)<line_sep># Convert standard TF2.0 names in PyTorch names <if_stmt>tf_name[-1]<eq>"kernel"<or>tf_name[-1]<eq>"embeddings"<or>tf_name[-1]<eq>"gamma"<block_start>tf_name[-1]="weight"<block_end><if_stmt>tf_name[-1]<eq>"beta"<block_start>tf_name[-1]="bias"<block_end># Remove prefix if needed tf_name=".".join(tf_name)<if_stmt>start_prefix_to_remove<block_start>tf_name=tf_name.replace(start_prefix_to_remove "" 1)<block_end><return>tf_name transpose<block_end>##################### # PyTorch => TF 2.0 # ##################### <def_stmt>load_pytorch_checkpoint_in_tf2_model tf_model pytorch_checkpoint_path tf_inputs=<none> allow_missing_keys=<false><block_start>""" Load pytorch checkpoints in a TF 2.0 model """<try_stmt><block_start><import_stmt>tensorflow<as>tf# noqa: F401 <import_stmt>torch# noqa: F401 <block_end><except_stmt>ImportError<block_start>logger.error("Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. 
Please see "<concat>"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")<line_sep><raise><block_end>pt_path=os.path.abspath(pytorch_checkpoint_path)<line_sep>logger.info("Loading PyTorch weights from {}".format(pt_path))<line_sep>pt_state_dict=torch.load(pt_path map_location="cpu")<line_sep>logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel()<for>t pt_state_dict.values())))<line_sep><return>load_pytorch_weights_in_tf2_model(tf_model pt_state_dict tf_inputs=tf_inputs allow_missing_keys=allow_missing_keys)<block_end><def_stmt>load_pytorch_model_in_tf2_model tf_model pt_model tf_inputs=<none> allow_missing_keys=<false><block_start>""" Load pytorch checkpoints in a TF 2.0 model """<line_sep>pt_state_dict=pt_model.state_dict()<line_sep><return>load_pytorch_weights_in_tf2_model(tf_model pt_state_dict tf_inputs=tf_inputs allow_missing_keys=allow_missing_keys)<block_end><def_stmt>load_pytorch_weights_in_tf2_model tf_model pt_state_dict tf_inputs=<none> allow_missing_keys=<false><block_start>""" Load pytorch state_dict in a TF 2.0 model. """<try_stmt><block_start><import_stmt>torch# noqa: F401 <import_stmt>tensorflow<as>tf# noqa: F401 <import_from_stmt>tensorflow.python.keras backend<as>K<block_end><except_stmt>ImportError<block_start>logger.error("Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "<concat>"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")<line_sep><raise><block_end><if_stmt>tf_inputs<is><none><block_start>tf_inputs=tf_model.dummy_inputs<block_end><if_stmt>tf_inputs<is><not><none><block_start>tf_model(tf_inputs training=<false>)<block_end># Make sure model is built # Adapt state dict - TODO remove this and update the AWS weights files instead # Convert old format to new format if needed from a PyTorch state_dict old_keys=[]<line_sep>new_keys=[]<for_stmt>key pt_state_dict.keys()<block_start>new_key=<none><if_stmt>"gamma"<in>key<block_start>new_key=key.replace("gamma" "weight")<block_end><if_stmt>"beta"<in>key<block_start>new_key=key.replace("beta" "bias")<block_end><if_stmt>new_key<block_start>old_keys.append(key)<line_sep>new_keys.append(new_key)<block_end><block_end><for_stmt>old_key,new_key zip(old_keys new_keys)<block_start>pt_state_dict[new_key]=pt_state_dict.pop(old_key)<block_end># Make sure we are able to load PyTorch base models as well as derived models (with heads) # TF models always have a prefix, some of PyTorch models (base ones) don't start_prefix_to_remove=""<if_stmt><not>any(s.startswith(tf_model.base_model_prefix)<for>s pt_state_dict.keys())<block_start>start_prefix_to_remove=tf_model.base_model_prefix+"."<block_end>symbolic_weights=tf_model.trainable_weights+tf_model.non_trainable_weights<line_sep>tf_loaded_numel=0<line_sep>weight_value_tuples=[]<line_sep>all_pytorch_weights=set(list(pt_state_dict.keys()))<for_stmt>symbolic_weight symbolic_weights<block_start>sw_name=symbolic_weight.name<line_sep>name,transpose=convert_tf_weight_name_to_pt_weight_name(sw_name start_prefix_to_remove=start_prefix_to_remove)<line_sep># Find associated numpy array in pytorch model state dict <if_stmt>name<not><in>pt_state_dict<block_start><if_stmt>allow_missing_keys<block_start><continue><block_end><raise>AttributeError("{} not found in PyTorch 
model".format(name))<block_end>array=pt_state_dict[name].numpy()<if_stmt>transpose<block_start>array=numpy.transpose(array)<block_end><if_stmt>len(symbolic_weight.shape)<l>len(array.shape)<block_start>array=numpy.squeeze(array)<block_end><elif_stmt>len(symbolic_weight.shape)<g>len(array.shape)<block_start>array=numpy.expand_dims(array axis=0)<block_end><try_stmt><block_start><assert_stmt>list(symbolic_weight.shape)<eq>list(array.shape)<block_end><except_stmt>AssertionError<as>e<block_start>e.args<augadd>(symbolic_weight.shape array.shape)<line_sep><raise>e<block_end>tf_loaded_numel<augadd>array.size<line_sep># logger.warning("Initialize TF weight {}".format(symbolic_weight.name)) weight_value_tuples.append((symbolic_weight array))<line_sep>all_pytorch_weights.discard(name)<block_end>K.batch_set_value(weight_value_tuples)<if_stmt>tf_inputs<is><not><none><block_start>tf_model(tf_inputs training=<false>)<block_end># Make sure restore ops are run logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))<line_sep>logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))<line_sep><return>tf_model<block_end>##################### # TF 2.0 => PyTorch # ##################### <def_stmt>load_tf2_checkpoint_in_pytorch_model pt_model tf_checkpoint_path tf_inputs=<none> allow_missing_keys=<false><block_start>""" Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). """<try_stmt><block_start><import_stmt>tensorflow<as>tf# noqa: F401 <import_stmt>torch# noqa: F401 <block_end><except_stmt>ImportError<block_start>logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "<concat>"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")<line_sep><raise><block_end><import_stmt>transformers<line_sep>logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))<line_sep># Instantiate and load the associated TF 2.0 model tf_model_class_name="TF"+pt_model.__class__.__name__# Add "TF" at the beggining tf_model_class=getattr(transformers tf_model_class_name)<line_sep>tf_model=tf_model_class(pt_model.config)<if_stmt>tf_inputs<is><none><block_start>tf_inputs=tf_model.dummy_inputs<block_end><if_stmt>tf_inputs<is><not><none><block_start>tf_model(tf_inputs training=<false>)<block_end># Make sure model is built tf_model.load_weights(tf_checkpoint_path by_name=<true>)<line_sep><return>load_tf2_model_in_pytorch_model(pt_model tf_model allow_missing_keys=allow_missing_keys)<block_end><def_stmt>load_tf2_model_in_pytorch_model pt_model tf_model allow_missing_keys=<false><block_start>""" Load TF 2.0 model in a pytorch model """<line_sep>weights=tf_model.weights<line_sep><return>load_tf2_weights_in_pytorch_model(pt_model weights allow_missing_keys=allow_missing_keys)<block_end><def_stmt>load_tf2_weights_in_pytorch_model pt_model tf_weights allow_missing_keys=<false><block_start>""" Load TF2.0 symbolic weights in a PyTorch model """<try_stmt><block_start><import_stmt>tensorflow<as>tf# noqa: F401 <import_stmt>torch# noqa: F401 <block_end><except_stmt>ImportError<block_start>logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. 
Please see "<concat>"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")<line_sep><raise><block_end>new_pt_params_dict={}<line_sep>current_pt_params_dict=dict(pt_model.named_parameters())<line_sep># Make sure we are able to load PyTorch base models as well as derived models (with heads) # TF models always have a prefix, some of PyTorch models (base ones) don't start_prefix_to_remove=""<if_stmt><not>any(s.startswith(pt_model.base_model_prefix)<for>s current_pt_params_dict.keys())<block_start>start_prefix_to_remove=pt_model.base_model_prefix+"."<block_end># Build a map from potential PyTorch weight names to TF 2.0 Variables tf_weights_map={}<for_stmt>tf_weight tf_weights<block_start>pt_name,transpose=convert_tf_weight_name_to_pt_weight_name(tf_weight.name start_prefix_to_remove=start_prefix_to_remove)<line_sep>tf_weights_map[pt_name]=(tf_weight.numpy() transpose)<block_end>all_tf_weights=set(list(tf_weights_map.keys()))<line_sep>loaded_pt_weights_data_ptr={}<line_sep>missing_keys_pt=[]<for_stmt>pt_weight_name,pt_weight current_pt_params_dict.items()# Handle PyTorch shared weight ()not duplicated in TF 2.0 <block_start><if_stmt>pt_weight.data_ptr()<in>loaded_pt_weights_data_ptr<block_start>new_pt_params_dict[pt_weight_name]=loaded_pt_weights_data_ptr[pt_weight.data_ptr()]<line_sep><continue><block_end># Find associated numpy array in pytorch model state dict <if_stmt>pt_weight_name<not><in>tf_weights_map<block_start><if_stmt>allow_missing_keys<block_start>missing_keys_pt.append(pt_weight_name)<line_sep><continue><block_end><raise>AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))<block_end>array,transpose=tf_weights_map[pt_weight_name]<if_stmt>transpose<block_start>array=numpy.transpose(array)<block_end><if_stmt>len(pt_weight.shape)<l>len(array.shape)<block_start>array=numpy.squeeze(array)<block_end><elif_stmt>len(pt_weight.shape)<g>len(array.shape)<block_start>array=numpy.expand_dims(array axis=0)<block_end><try_stmt><block_start><assert_stmt>list(pt_weight.shape)<eq>list(array.shape)<block_end><except_stmt>AssertionError<as>e<block_start>e.args<augadd>(pt_weight.shape array.shape)<line_sep><raise>e<block_end># logger.warning("Initialize PyTorch weight {}".format(pt_weight_name)) new_pt_params_dict[pt_weight_name]=torch.from_numpy(array)<line_sep>loaded_pt_weights_data_ptr[pt_weight.data_ptr()]=torch.from_numpy(array)<line_sep>all_tf_weights.discard(pt_weight_name)<block_end>missing_keys,unexpected_keys=pt_model.load_state_dict(new_pt_params_dict strict=<false>)<line_sep>missing_keys<augadd>missing_keys_pt<if_stmt>len(missing_keys)<g>0<block_start>logger.info("Weights of {} not initialized from TF 2.0 model: {}".format(pt_model.__class__.__name__ missing_keys))<block_end><if_stmt>len(unexpected_keys)<g>0<block_start>logger.info("Weights from TF 2.0 model not used in {}: {}".format(pt_model.__class__.__name__ unexpected_keys))<block_end>logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))<line_sep><return>pt_model<block_end>
<import_stmt>sys<import_stmt>pandas<as>pd<import_from_stmt>simpletransformers.classification ClassificationModel<line_sep>prefix="data/"<line_sep>train_df=pd.read_csv(prefix+"train.csv" header=<none>)<line_sep>train_df.head()<line_sep>eval_df=pd.read_csv(prefix+"test.csv" header=<none>)<line_sep>eval_df.head()<line_sep>train_df[0]=(train_df[0]<eq>2).astype(int)<line_sep>eval_df[0]=(eval_df[0]<eq>2).astype(int)<line_sep>train_df=pd.DataFrame({"text":train_df[1].replace(r"\n" " " regex=<true>) "labels":train_df[0]})<line_sep>print(train_df.head())<line_sep>eval_df=pd.DataFrame({"text":eval_df[1].replace(r"\n" " " regex=<true>) "labels":eval_df[0]})<line_sep>print(eval_df.head())<line_sep>model_type=sys.argv[1]<if_stmt>model_type<eq>"bert"<block_start>model_name="bert-base-cased"<block_end><elif_stmt>model_type<eq>"roberta"<block_start>model_name="roberta-base"<block_end><elif_stmt>model_type<eq>"distilbert"<block_start>model_name="distilbert-base-cased"<block_end><elif_stmt>model_type<eq>"distilroberta"<block_start>model_type="roberta"<line_sep>model_name="distilroberta-base"<block_end><elif_stmt>model_type<eq>"electra-base"<block_start>model_type="electra"<line_sep>model_name="google/electra-base-discriminator"<block_end><elif_stmt>model_type<eq>"electra-small"<block_start>model_type="electra"<line_sep>model_name="google/electra-small-discriminator"<block_end><elif_stmt>model_type<eq>"xlnet"<block_start>model_name="xlnet-base-cased"<block_end>train_args={"reprocess_input_data":<true> "overwrite_output_dir":<true> "use_cached_eval_features":<true> "output_dir":f"outputs/{model_type}" "best_model_dir":f"outputs/{model_type}/best_model" "evaluate_during_training":<true> "max_seq_length":128 "num_train_epochs":3 "evaluate_during_training_steps":1000 "wandb_project":"Classification Model Comparison" "wandb_kwargs":{"name":model_name} "save_model_every_epoch":<false> "save_eval_checkpoints":<false> # "use_early_stopping": True, # "early_stopping_metric": "mcc", # "n_gpu": 2, # "manual_seed": 4, # "use_multiprocessing": False, "train_batch_size":128 "eval_batch_size":64 # "config": { # "output_hidden_states": True # } }<if_stmt>model_type<eq>"xlnet"<block_start>train_args["train_batch_size"]=64<line_sep>train_args["gradient_accumulation_steps"]=2<block_end># Create a ClassificationModel model=ClassificationModel(model_type model_name args=train_args)<line_sep># Train the model model.train_model(train_df eval_df=eval_df)<line_sep># # # Evaluate the model # result, model_outputs, wrong_predictions = model.eval_model(eval_df)
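The evaluation call is left commented out at the end of the script. A possible version of it is sketched below, assuming the same eval_df and the (result, model_outputs, wrong_predictions) return shape of ClassificationModel.eval_model.

import numpy as np

result, model_outputs, wrong_predictions = model.eval_model(eval_df)
print(result)  # aggregate metrics, e.g. mcc and eval_loss

# Convert raw outputs to hard predictions and report a simple accuracy.
preds = np.argmax(model_outputs, axis=1)
accuracy = (preds == eval_df["labels"].values).mean()
print("accuracy: %.4f (%d wrong predictions)" % (accuracy, len(wrong_predictions)))

The script itself selects the model from its first command-line argument (e.g. bert, roberta, electra-small, xlnet), as handled in the if/elif chain above.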
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 # pylint: disable= arguments-differ # pylint: disable= missing-docstring "Addtional image transforms."<import_stmt>random<import_stmt>math<import_stmt>numpy<as>np<import_from_stmt>mxnet image nd<import_from_stmt>mxnet.gluon Block<line_sep>__all__=['RandomCrop' 'RandomErasing']<class_stmt>RandomCrop(Block)<block_start>"""Randomly crop `src` with `size` (width, height). Padding is optional. Upsample result if `src` is smaller than `size`. Parameters ---------- size : int or tuple of (W, H) Size of the final output. pad: int or tuple if int, size of the zero-padding if tuple, number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis. ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. interpolation : int Interpolation method for resizing. By default uses bilinear interpolation. See OpenCV's resize function for available choices. Inputs: - **data**: input tensor with (Hi x Wi x C) shape. Outputs: - **out**: output tensor with (size[0] x size[1] x C) or (size x size x C) shape. """<def_stmt>__init__ self size pad=<none> interpolation=2<block_start>super(RandomCrop self).__init__()<line_sep>numeric_types=(float int np.generic)<if_stmt>isinstance(size numeric_types)<block_start>size=(size size)<block_end>self._args=(size interpolation)<line_sep>self.pad=((pad pad) (pad pad) (0 0))<if>isinstance(pad int)<else>pad<block_end><def_stmt>forward self x<block_start><if_stmt>self.pad<block_start><return>image.random_crop(nd.array(np.pad(x.asnumpy() self.pad mode='constant' constant_values=0)) *self._args)[0]<block_end><else_stmt><block_start><return>image.random_crop(x *self._args)[0]<block_end><block_end><block_end><class_stmt>RandomErasing(Block)<block_start>"""Randomly erasing the area in `src` between `s_min` and `s_max` with `probability`. `ratio` controls the ratio between width and height. `mean` means the value in erasing area. Parameters ---------- probability : float Probability of erasing. s_min : float Min area to all area. s_max : float Max area to all area. ratio : float The ratio between width and height. mean : int or tuple of (R, G, B) The value in erasing area. Inputs: - **data**: input tensor with (Hi x Wi x C) shape. Outputs: - **out**: output tensor with (Hi x Wi x C) shape. 
"""<def_stmt>__init__ self probability=0.5 s_min=0.02 s_max=0.4 ratio=0.3 mean=(125.31 122.96 113.86)<block_start>super(RandomErasing self).__init__()<line_sep>self.probability=probability<line_sep>self.mean=mean<line_sep>self.s_min=s_min<line_sep>self.s_max=s_max<line_sep>self.ratio=ratio<block_end><def_stmt>forward self x<block_start><if_stmt><not>isinstance(self.probability float)<block_start><raise>TypeError('Got inappropriate size arg')<block_end><if_stmt><not>isinstance(self.s_min float)<block_start><raise>TypeError('Got inappropriate size arg')<block_end><if_stmt><not>isinstance(self.s_max float)<block_start><raise>TypeError('Got inappropriate size arg')<block_end><if_stmt><not>isinstance(self.ratio float)<block_start><raise>TypeError('Got inappropriate size arg')<block_end><if_stmt><not>isinstance(self.mean (int tuple))<block_start><raise>TypeError('Got inappropriate size arg')<block_end><if_stmt>random.uniform(0 1)<g>self.probability<block_start><return>x<block_end>width,height,_=x.shape<line_sep>area=width<times>height<line_sep>target_area=random.uniform(self.s_min self.s_max)<times>area<line_sep>aspect_ratio=random.uniform(self.ratio 1/self.ratio)<line_sep>w=int(round(math.sqrt(target_area<times>aspect_ratio)))<line_sep>h=int(round(math.sqrt(target_area/aspect_ratio)))<if_stmt>w<l>width<and>h<l>height<block_start>x1=random.randint(0 width-w)<line_sep>y1=random.randint(0 height-h)<line_sep>x[x1:x1+w y1:y1+h 0]=self.mean[0]<line_sep>x[x1:x1+w y1:y1+h 1]=self.mean[1]<line_sep>x[x1:x1+w y1:y1+h 2]=self.mean[2]<block_end><return>x<block_end><block_end>
# -*- coding: utf-8 -*- """Classes (Python) to compute the Bandit UCB (Upper Confidence Bound) arm allocation and choosing the arm to pull next. See :mod:`moe.bandit.bandit_interface` for further details on bandit. """<import_stmt>copy<import_from_stmt>abc abstractmethod<import_from_stmt>moe.bandit.bandit_interface BanditInterface<import_from_stmt>moe.bandit.utils get_winning_arm_names_from_payoff_arm_name_list get_equal_arm_allocations<class_stmt>UCBInterface(BanditInterface)<block_start>r"""Implementation of the constructor of UCB (Upper Confidence Bound) and method allocate_arms. The method get_ucb_payoff is implemented in subclass. A class to encapsulate the computation of bandit UCB. The Algorithm: http://moodle.technion.ac.il/pluginfile.php/192340/mod_resource/content/0/UCB.pdf To inherit this class, a subclass needs to implement get_ucb_payoff (see :func:`moe.bandit.ucb.ucb1.UCB1.get_ucb_payoff` for an example), everything else is already implemented. See :mod:`moe.bandit.bandit_interface` docs for further details. """<def_stmt>__init__ self historical_info subtype=<none> <block_start>"""Construct a UCB object. :param historical_info: a dictionary of arms sampled :type historical_info: dictionary of (str, SampleArm()) pairs (see :class:`moe.bandit.data_containers.SampleArm` for more details) :param subtype: subtype of the UCB bandit algorithm (default: None) :type subtype: str """<line_sep>self._historical_info=copy.deepcopy(historical_info)<line_sep>self._subtype=subtype<block_end>@staticmethod<def_stmt>get_unsampled_arm_names arms_sampled<block_start>r"""Compute the set of unsampled arm names based on the given ``arms_sampled``.. Throws an exception when arms_sampled is empty. :param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm` :type arms_sampled: dictionary of (str, SampleArm()) pairs :return: set of names of the unsampled arms :rtype: frozenset(str) :raise: ValueError when ``arms_sampled`` are empty. """<if_stmt><not>arms_sampled<block_start><raise>ValueError('arms_sampled is empty!')<block_end>unsampled_arm_name_list=[name<for>name,sampled_arm arms_sampled.iteritems()<if>sampled_arm.total<eq>0]<line_sep><return>frozenset(unsampled_arm_name_list)<block_end>@abstractmethod<def_stmt>get_ucb_payoff self sampled_arm number_sampled<block_start>r"""Compute the expected upper confidence bound payoff using the UCB subtype formula. See definition in subclasses for details. :param sampled_arm: a sampled arm :type sampled_arm: :class:`moe.bandit.data_containers.SampleArm` :param number_sampled: the overall number of pulls so far :type number_sampled: int :return: ucb payoff :rtype: float64 :raise: ValueError when ``sampled_arm`` is empty. """<line_sep><pass><block_end><def_stmt>allocate_arms self<block_start>r"""Compute the allocation to each arm given ``historical_info``, running bandit ``subtype`` endpoint. Computes the allocation to each arm based on the given subtype, and, historical info. Works with k-armed bandits (k >= 1). The Algorithm: http://moodle.technion.ac.il/pluginfile.php/192340/mod_resource/content/0/UCB.pdf If there is at least one unsampled arm, this method will choose to pull the unsampled arm (randomly choose an unsampled arm if there are multiple unsampled arms). If all arms are pulled at least once, this method will pull the optimal arm (best expected upper confidence bound payoff). 
See :func:`moe.bandit.ucb.ucb_interface.UCBInterface.get_ucb_payoff` for details on how to compute the expected upper confidence bound payoff (expected UCB payoff) In case of a tie, the method will split the allocation among the optimal arms. For example, if we have three arms (arm1, arm2, and arm3) with expected UCB payoff 0.5, 0.5, and 0.1 respectively. We split the allocation between the optimal arms arm1 and arm2. ``{arm1: 0.5, arm2: 0.5, arm3: 0.0}`` :return: the dictionary of (arm, allocation) key-value pairs :rtype: a dictionary of (str, float64) pairs :raise: ValueError when ``sample_arms`` are empty. """<line_sep>arms_sampled=self._historical_info.arms_sampled<if_stmt><not>arms_sampled<block_start><raise>ValueError('sample_arms are empty!')<block_end><return>get_equal_arm_allocations(arms_sampled self.get_winning_arm_names(arms_sampled))<block_end><def_stmt>get_winning_arm_names self arms_sampled<block_start>r"""Compute the set of winning arm names based on the given ``arms_sampled``.. Throws an exception when arms_sampled is empty. :param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm` :type arms_sampled: dictionary of (str, SampleArm()) pairs :return: set of names of the winning arms :rtype: frozenset(str) :raise: ValueError when ``arms_sampled`` are empty. """<if_stmt><not>arms_sampled<block_start><raise>ValueError('arms_sampled is empty!')<block_end># If there exists an unsampled arm, return the names of the unsampled arms unsampled_arm_names=self.get_unsampled_arm_names(arms_sampled)<if_stmt>unsampled_arm_names<block_start><return>unsampled_arm_names<block_end>number_sampled=sum([sampled_arm.total<for>sampled_arm arms_sampled.itervalues()])<line_sep>ucb_payoff_arm_name_list=[(self.get_ucb_payoff(sampled_arm number_sampled) arm_name)<for>arm_name,sampled_arm arms_sampled.iteritems()]<line_sep><return>get_winning_arm_names_from_payoff_arm_name_list(ucb_payoff_arm_name_list)<block_end><block_end>
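Only get_ucb_payoff has to be supplied by a subclass. A hedged sketch of a UCB1-style implementation is shown below; it assumes SampleArm exposes a win count in addition to the total field used above, which matches the usual payoff bookkeeping but is not shown in this file.

import math

class SimpleUCB1(UCBInterface):

    def get_ucb_payoff(self, sampled_arm, number_sampled):
        # Empirical mean payoff plus the UCB1 exploration bonus
        # sqrt(2 * ln(N) / n). allocate_arms only reaches this method once
        # every arm has been pulled, so sampled_arm.total is positive here.
        avg_payoff = sampled_arm.win / float(sampled_arm.total)
        return avg_payoff + math.sqrt(2.0 * math.log(number_sampled) / sampled_arm.total)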
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # [START forms_delete_watch] <import_from_future_stmt> print_function<import_from_stmt>apiclient discovery<import_from_stmt>httplib2 Http<import_from_stmt>oauth2client client file tools<line_sep>SCOPES="https://www.googleapis.com/auth/drive"<line_sep>API_KEY="<YOUR_API_KEY>"<line_sep>DISCOVERY_DOC=f"https://forms.googleapis.com/$discovery/rest?version=v1beta&key={API_KEY}&labels=FORMS_BETA_TESTERS"<line_sep>store=file.Storage('credentials.json')<line_sep>creds=<none><if_stmt><not>creds<or>creds.invalid<block_start>flow=client.flow_from_clientsecrets('client_secret.json' SCOPES)<line_sep>creds=tools.run_flow(flow store)<block_end>service=discovery.build('forms' 'v1beta' http=creds.authorize(Http()) discoveryServiceUrl=DISCOVERY_DOC static_discovery=<false>)<line_sep>form_id='<YOUR_FORM_ID>'<line_sep>watch_id='<YOUR_WATCH_ID>'<line_sep># Print JSON response after deleting a form watch result=service.forms().watches().delete(formId=form_id watchId=watch_id).execute()<line_sep>print(result)<line_sep># [END forms_delete_watch]
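If the watch ID is not known ahead of time, it can be looked up with the list endpoint first. A small sketch reusing the authorized service object from above; the response and field names follow the documented Watch resource and should be treated as illustrative.

watch_list = service.forms().watches().list(formId=form_id).execute()
for watch in watch_list.get('watches', []):
    # Each entry carries the id to pass as watchId, plus its event type and state.
    print(watch['id'], watch.get('eventType'), watch.get('state'))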
<import_stmt>inspect<import_stmt>json<import_stmt>uuid<import_from_stmt>collections Counter<import_from_stmt>datetime datetime<import_from_stmt>io StringIO<import_stmt>mock<import_from_stmt>django.contrib.admin.utils NestedObjects<import_from_stmt>django.db transaction IntegrityError<import_from_stmt>django.db.models.signals post_delete post_save<import_from_stmt>django.test SimpleTestCase TestCase<import_from_stmt>nose.tools nottest<import_from_stmt>casexml.apps.case.mock CaseFactory CaseIndex CaseStructure<import_from_stmt>corehq.apps.commtrack.helpers make_product<import_from_stmt>corehq.apps.commtrack.tests.util get_single_balance_block<import_from_stmt>corehq.apps.domain.models Domain<import_from_stmt>corehq.apps.dump_reload.sql SqlDataDumper SqlDataLoader<import_from_stmt>corehq.apps.dump_reload.sql.dump get_model_iterator_builders_to_dump get_objects_to_dump <import_from_stmt>corehq.apps.dump_reload.sql.load DefaultDictWithKey constraint_checks_deferred <import_from_stmt>corehq.apps.hqcase.utils submit_case_blocks<import_from_stmt>corehq.apps.products.models SQLProduct<import_from_stmt>corehq.apps.zapier.consts EventTypes<import_from_stmt>corehq.apps.zapier.models ZapierSubscription<import_from_stmt>corehq.apps.zapier.signals.receivers zapier_subscription_post_delete <import_from_stmt>corehq.blobs.models BlobMeta<import_from_stmt>corehq.form_processor.backends.sql.dbaccessors LedgerAccessorSQL<import_from_stmt>corehq.form_processor.interfaces.dbaccessors CaseAccessors FormAccessors <import_from_stmt>corehq.form_processor.models CaseTransaction CommCareCaseIndexSQL CommCareCaseSQL LedgerTransaction LedgerValue XFormInstanceSQL <import_from_stmt>corehq.form_processor.tests.utils FormProcessorTestUtils create_form_for_test sharded <import_from_stmt>corehq.messaging.scheduling.scheduling_partitioned.models AlertScheduleInstance <class_stmt>BaseDumpLoadTest(TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>post_delete.disconnect(zapier_subscription_post_delete sender=ZapierSubscription)<line_sep>super(BaseDumpLoadTest cls).setUpClass()<line_sep>cls.domain_name=uuid.uuid4().hex<line_sep>cls.domain=Domain(name=cls.domain_name)<line_sep>cls.domain.save()<line_sep>cls.default_objects_counts=Counter({})<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>cls.domain.delete()<line_sep>super(BaseDumpLoadTest cls).tearDownClass()<line_sep>post_delete.connect(zapier_subscription_post_delete sender=ZapierSubscription)<block_end><def_stmt>delete_sql_data self<block_start>delete_domain_sql_data_for_dump_load_test(self.domain_name)<block_end><def_stmt>tearDown self<block_start>self.delete_sql_data()<line_sep>super(BaseDumpLoadTest self).tearDown()<block_end><def_stmt>_dump_and_load self expected_dump_counts load_filter=<none> expected_load_counts=<none> dumper_fn=<none><block_start>expected_load_counts=expected_load_counts<or>expected_dump_counts<line_sep>expected_dump_counts.update(self.default_objects_counts)<line_sep>models=list(expected_dump_counts)<line_sep>self._check_signals_handle_raw(models)<line_sep>output_stream=StringIO()<if_stmt>dumper_fn<block_start>dumper_fn(output_stream)<block_end><else_stmt><block_start>SqlDataDumper(self.domain_name [] []).dump(output_stream)<block_end>self.delete_sql_data()<line_sep># make sure that there's no data left in the DB objects_remaining=list(get_objects_to_dump(self.domain_name [] []))<line_sep>object_classes=[obj.__class__.__name__<for>obj 
objects_remaining]<line_sep>counts=Counter(object_classes)<line_sep>self.assertEqual([] objects_remaining 'Not all data deleted: {}'.format(counts))<line_sep># Dump actual_model_counts,dump_lines=self._parse_dump_output(output_stream)<line_sep>expected_model_counts=_normalize_object_counter(expected_dump_counts)<line_sep>self.assertDictEqual(dict(expected_model_counts) dict(actual_model_counts))<line_sep># Load loader=SqlDataLoader(object_filter=load_filter)<line_sep>loaded_model_counts=loader.load_objects(dump_lines)<line_sep>normalized_expected_loaded_counts=_normalize_object_counter(expected_load_counts for_loaded=<true>)<line_sep>self.assertDictEqual(dict(normalized_expected_loaded_counts) dict(loaded_model_counts))<line_sep>self.assertEqual(sum(expected_load_counts.values()) sum(loaded_model_counts.values()))<line_sep><return>dump_lines<block_end><def_stmt>_parse_dump_output self output_stream<block_start>dump_output=output_stream.getvalue().split('\n')<line_sep>dump_lines=[line.strip()<for>line dump_output<if>line.strip()]<line_sep>actual_model_counts=Counter([json.loads(line)['model']<for>line dump_lines])<line_sep><return>actual_model_counts dump_lines<block_end><def_stmt>_check_signals_handle_raw self models<block_start>"""Ensure that any post_save signal handlers have been updated to handle 'raw' calls."""<line_sep>whitelist_receivers=['django_digest.models._post_save_persist_partial_digests']<for_stmt>model models<block_start><for_stmt>receiver post_save._live_receivers(model)<block_start>receiver_path=receiver.__module__+'.'+receiver.__name__<if_stmt>receiver_path<in>whitelist_receivers<block_start><continue><block_end>args=inspect.getargspec(receiver).args<line_sep>message='Signal handler "{}" for model "{}" missing raw arg'.format(receiver model)<line_sep>self.assertIn('raw' args message)<block_end><block_end><block_end><block_end>@nottest<def_stmt>delete_domain_sql_data_for_dump_load_test domain_name<block_start><for_stmt>model_class,builder get_model_iterator_builders_to_dump(domain_name [] [])<block_start><for_stmt>iterator builder.querysets()<block_start><with_stmt>transaction.atomic(using=iterator.db) constraint_checks_deferred(iterator.db)<block_start>collector=NestedObjects(using=iterator.db)<line_sep>collector.collect(iterator)<line_sep>collector.delete()<block_end><block_end><block_end><assert_stmt>[]<eq>list(get_objects_to_dump(domain_name [] [])) "Not all SQL objects deleted"<block_end>@sharded<class_stmt>TestSQLDumpLoadShardedModels(BaseDumpLoadTest)<block_start>maxDiff=<none><line_sep>@classmethod<def_stmt>setUpClass cls<block_start>super(TestSQLDumpLoadShardedModels cls).setUpClass()<line_sep>cls.factory=CaseFactory(domain=cls.domain_name)<line_sep>cls.form_accessors=FormAccessors(cls.domain_name)<line_sep>cls.case_accessors=CaseAccessors(cls.domain_name)<line_sep>cls.product=make_product(cls.domain_name 'A Product' 'prodcode_a')<line_sep>cls.default_objects_counts.update({SQLProduct:1})<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>FormProcessorTestUtils.delete_all_cases_forms_ledgers(cls.domain_name)<line_sep>super(TestSQLDumpLoadShardedModels cls).tearDownClass()<block_end><def_stmt>test_dump_load_form self<block_start>expected_object_counts=Counter({XFormInstanceSQL:2 BlobMeta:2})<line_sep>pre_forms=[create_form_for_test(self.domain_name) 
create_form_for_test(self.domain_name)]<line_sep>self._dump_and_load(expected_object_counts)<line_sep>form_ids=self.form_accessors.get_all_form_ids_in_domain('XFormInstance')<line_sep>self.assertEqual(set(form_ids) set(form.form_id<for>form pre_forms))<for_stmt>pre_form pre_forms<block_start>post_form=self.form_accessors.get_form(pre_form.form_id)<line_sep>self.assertDictEqual(pre_form.to_json() post_form.to_json())<block_end><block_end><def_stmt>test_sql_dump_load_case self<block_start>expected_object_counts=Counter({XFormInstanceSQL:2 BlobMeta:2 CommCareCaseSQL:2 CaseTransaction:3 CommCareCaseIndexSQL:1})<line_sep>pre_cases=self.factory.create_or_update_case(CaseStructure(attrs={'case_name':'child' 'update':{'age':3 'diabetic':<false>} 'create':<true>} indices=[CaseIndex(CaseStructure(attrs={'case_name':'parent' 'update':{'age':42} 'create':<true>})) ]))<line_sep>pre_cases[0]=self.factory.create_or_update_case(CaseStructure(case_id=pre_cases[0].case_id attrs={'external_id':'billie jean' 'update':{'name':'<NAME>'}}))[0]<line_sep>self._dump_and_load(expected_object_counts)<line_sep>case_ids=self.case_accessors.get_case_ids_in_domain()<line_sep>self.assertEqual(set(case_ids) set(case.case_id<for>case pre_cases))<for_stmt>pre_case pre_cases<block_start>post_case=self.case_accessors.get_case(pre_case.case_id)<line_sep>self.assertDictEqual(pre_case.to_json() post_case.to_json())<block_end><block_end><def_stmt>test_ledgers self<block_start>expected_object_counts=Counter({XFormInstanceSQL:3 BlobMeta:3 CommCareCaseSQL:1 CaseTransaction:3 LedgerValue:1 LedgerTransaction:2})<line_sep>case=self.factory.create_case()<line_sep>submit_case_blocks([get_single_balance_block(case.case_id self.product._id 10)] self.domain_name)<line_sep>submit_case_blocks([get_single_balance_block(case.case_id self.product._id 5)] self.domain_name)<line_sep>pre_ledger_values=LedgerAccessorSQL.get_ledger_values_for_case(case.case_id)<line_sep>pre_ledger_transactions=LedgerAccessorSQL.get_ledger_transactions_for_case(case.case_id)<line_sep>self.assertEqual(1 len(pre_ledger_values))<line_sep>self.assertEqual(2 len(pre_ledger_transactions))<line_sep>self._dump_and_load(expected_object_counts)<line_sep>post_ledger_values=LedgerAccessorSQL.get_ledger_values_for_case(case.case_id)<line_sep>post_ledger_transactions=LedgerAccessorSQL.get_ledger_transactions_for_case(case.case_id)<line_sep>self.assertEqual(1 len(post_ledger_values))<line_sep>self.assertEqual(2 len(post_ledger_transactions))<line_sep>self.assertEqual(pre_ledger_values[0].ledger_reference post_ledger_values[0].ledger_reference)<line_sep>self.assertDictEqual(pre_ledger_values[0].to_json() post_ledger_values[0].to_json())<line_sep>pre_ledger_transactions=sorted(pre_ledger_transactions key=<lambda>t:t.pk)<line_sep>post_ledger_transactions=sorted(post_ledger_transactions key=<lambda>t:t.pk)<for_stmt>pre,post zip(pre_ledger_transactions post_ledger_transactions)<block_start>self.assertEqual(str(pre) str(post))<block_end><block_end><block_end><class_stmt>TestSQLDumpLoad(BaseDumpLoadTest)<block_start><def_stmt>test_case_search_config self<block_start><import_from_stmt>corehq.apps.case_search.models CaseSearchConfig FuzzyProperties<line_sep>expected_object_counts=Counter({CaseSearchConfig:1 FuzzyProperties:2 })<line_sep>pre_config,created=CaseSearchConfig.objects.get_or_create(pk=self.domain_name)<line_sep>pre_config.enabled=<true><line_sep>pre_fuzzies=[FuzzyProperties(domain=self.domain case_type='dog' properties=['breed' 'color']) FuzzyProperties(domain=self.domain 
case_type='owner' properties=['name']) ]<for_stmt>fuzzy pre_fuzzies<block_start>fuzzy.save()<block_end>pre_config.fuzzy_properties.set(pre_fuzzies)<line_sep>pre_config.save()<line_sep>self._dump_and_load(expected_object_counts)<line_sep>post_config=CaseSearchConfig.objects.get(domain=self.domain_name)<line_sep>self.assertTrue(post_config.enabled)<line_sep>self.assertEqual(pre_config.fuzzy_properties post_config.fuzzy_properties)<line_sep>post_fuzzies=FuzzyProperties.objects.filter(domain=self.domain_name)<line_sep>self.assertEqual(set(f.case_type<for>f post_fuzzies) {'dog' 'owner'})<block_end><def_stmt>test_users self<block_start><import_from_stmt>corehq.apps.users.models CommCareUser<import_from_stmt>corehq.apps.users.models WebUser<import_from_stmt>django.contrib.auth.models User<line_sep>expected_object_counts=Counter({User:3})<line_sep>ccuser_1=CommCareUser.create(domain=self.domain_name username='user_1' password='<PASSWORD>' created_by=<none> created_via=<none> email='<EMAIL>' )<line_sep>ccuser_2=CommCareUser.create(domain=self.domain_name username='user_2' password='<PASSWORD>' created_by=<none> created_via=<none> email='<EMAIL>' )<line_sep>web_user=WebUser.create(domain=self.domain_name username='webuser_t1' password='<PASSWORD>' created_by=<none> created_via=<none> email='<EMAIL>' )<line_sep>self.addCleanup(ccuser_1.delete self.domain_name deleted_by=<none>)<line_sep>self.addCleanup(ccuser_2.delete self.domain_name deleted_by=<none>)<line_sep>self.addCleanup(web_user.delete self.domain_name deleted_by=<none>)<line_sep>self._dump_and_load(expected_object_counts)<block_end><def_stmt>test_dump_roles self<block_start><import_from_stmt>corehq.apps.users.models UserRole Permissions RoleAssignableBy RolePermission<line_sep>expected_object_counts=Counter({UserRole:2 RolePermission:11 RoleAssignableBy:1})<line_sep>role1=UserRole.create(self.domain_name 'role1')<line_sep>role2=UserRole.create(self.domain_name 'role1' permissions=Permissions(edit_web_users=<true>) assignable_by=[role1.id])<line_sep>self.addCleanup(role1.delete)<line_sep>self.addCleanup(role2.delete)<line_sep>self._dump_and_load(expected_object_counts)<line_sep>role1_loaded=UserRole.objects.get(id=role1.id)<line_sep>role2_loaded=UserRole.objects.get(id=role2.id)<line_sep>self.assertEqual(role1_loaded.permissions.to_list() Permissions().to_list())<line_sep>self.assertEqual(role1_loaded.assignable_by [])<line_sep>self.assertEqual(role2_loaded.permissions.to_list() Permissions(edit_web_users=<true>).to_list())<line_sep>self.assertEqual(role2_loaded.assignable_by [role1_loaded.get_id])<block_end><def_stmt>test_device_logs self<block_start><import_from_stmt>corehq.apps.receiverwrapper.util submit_form_locally<import_from_stmt>phonelog.models DeviceReportEntry ForceCloseEntry UserEntry UserErrorEntry<import_from_stmt>corehq.apps.users.models CommCareUser<import_from_stmt>django.contrib.auth.models User<line_sep>expected_object_counts=Counter({User:1 DeviceReportEntry:7 UserEntry:1 UserErrorEntry:2 ForceCloseEntry:1})<line_sep>user=CommCareUser.create(domain=self.domain_name username='user_1' password='<PASSWORD>' created_by=<none> created_via=<none> email='<EMAIL>' uuid='428d454aa9abc74e1964e16d3565d6b6'# match ID in devicelog.xml )<line_sep>self.addCleanup(user.delete self.domain_name deleted_by=<none>)<with_stmt>open('corehq/ex-submodules/couchforms/tests/data/devicelogs/devicelog.xml' 'rb')<as>f<block_start>xml=f.read()<block_end>submit_form_locally(xml 
self.domain_name)<line_sep>self._dump_and_load(expected_object_counts)<block_end><def_stmt>test_demo_user_restore self<block_start><import_from_stmt>corehq.apps.users.models CommCareUser<import_from_stmt>corehq.apps.ota.models DemoUserRestore<import_from_stmt>django.contrib.auth.models User<line_sep>expected_object_counts=Counter({User:1 DemoUserRestore:1})<line_sep>user_id=uuid.uuid4().hex<line_sep>user=CommCareUser.create(domain=self.domain_name username='user_1' password='<PASSWORD>' created_by=<none> created_via=<none> email='<EMAIL>' uuid=user_id)<line_sep>self.addCleanup(user.delete self.domain_name deleted_by=<none>)<line_sep>DemoUserRestore(demo_user_id=user_id restore_blob_id=uuid.uuid4().hex content_length=1027 restore_comment="Test migrate demo user restore").save()<line_sep>self._dump_and_load(expected_object_counts)<block_end><def_stmt>test_products self<block_start><import_from_stmt>corehq.apps.products.models SQLProduct<line_sep>expected_object_counts=Counter({SQLProduct:3})<line_sep>p1=SQLProduct.objects.create(domain=self.domain_name product_id='test1' name='test1')<line_sep>p2=SQLProduct.objects.create(domain=self.domain_name product_id='test2' name='test2')<line_sep>parchived=SQLProduct.objects.create(domain=self.domain_name product_id='test3' name='test3' is_archived=<true>)<line_sep>self._dump_and_load(expected_object_counts)<line_sep>self.assertEqual(2 SQLProduct.active_objects.filter(domain=self.domain_name).count())<line_sep>all_active=SQLProduct.active_objects.filter(domain=self.domain_name).all()<line_sep>self.assertTrue(p1<in>all_active)<line_sep>self.assertTrue(p2<in>all_active)<line_sep>self.assertTrue(parchived<not><in>all_active)<block_end><def_stmt>test_location_type self<block_start><import_from_stmt>corehq.apps.locations.models LocationType<import_from_stmt>corehq.apps.locations.tests.test_location_types make_loc_type<line_sep>expected_object_counts=Counter({LocationType:7})<line_sep>state=make_loc_type('state' domain=self.domain_name)<line_sep>district=make_loc_type('district' state domain=self.domain_name)<line_sep>section=make_loc_type('section' district domain=self.domain_name)<line_sep>block=make_loc_type('block' district domain=self.domain_name)<line_sep>center=make_loc_type('center' block domain=self.domain_name)<line_sep>county=make_loc_type('county' state domain=self.domain_name)<line_sep>city=make_loc_type('city' county domain=self.domain_name)<line_sep>self._dump_and_load(expected_object_counts)<line_sep>hierarchy=LocationType.objects.full_hierarchy(self.domain_name)<line_sep>desired_hierarchy={state.id:(state {district.id:(district {section.id:(section {}) block.id:(block {center.id:(center {}) }) } ) county.id:(county {city.id:(city {})} ) } ) }<line_sep>self.assertEqual(hierarchy desired_hierarchy)<block_end><def_stmt>test_location self<block_start><import_from_stmt>corehq.apps.locations.models LocationType SQLLocation<import_from_stmt>corehq.apps.locations.tests.util setup_locations_and_types<line_sep>expected_object_counts=Counter({LocationType:3 SQLLocation:11})<line_sep>location_type_names=['province' 'district' 'city']<line_sep>location_structure=[('Western Cape' [('Cape Winelands' [('Stellenbosch' []) ('Paarl' []) ]) ('Cape Town' [('Cape Town City' []) ])]) ('Gauteng' [('Ekurhuleni ' [('Alberton' []) ('Benoni' []) ('Springs' []) ]) ]) ]<line_sep>location_types,locations=setup_locations_and_types(self.domain_name location_type_names [] location_structure )<line_sep>self._dump_and_load(expected_object_counts)<line_sep>names=['Cape 
Winelands' 'Paarl' 'Cape Town']<line_sep>location_ids=[locations[name].location_id<for>name names]<line_sep>result=SQLLocation.objects.get_locations_and_children(location_ids)<line_sep>self.assertItemsEqual([loc.name<for>loc result] ['Cape Winelands' 'Stellenbosch' 'Paarl' 'Cape Town' 'Cape Town City'])<line_sep>result=SQLLocation.objects.get_locations_and_children([locations['Gauteng'].location_id])<line_sep>self.assertItemsEqual([loc.name<for>loc result] ['Gauteng' 'Ekurhuleni ' 'Alberton' 'Benoni' 'Springs'])<block_end><def_stmt>test_sms self<block_start><import_from_stmt>corehq.apps.sms.models PhoneNumber MessagingEvent MessagingSubEvent<line_sep>expected_object_counts=Counter({PhoneNumber:1 MessagingEvent:1 MessagingSubEvent:1})<line_sep>phone_number=PhoneNumber(domain=self.domain_name owner_doc_type='CommCareCase' owner_id='fake-owner-id1' phone_number='99912341234' backend_id=<none> ivr_backend_id=<none> verified=<true> is_two_way=<true> pending_verification=<false> contact_last_modified=datetime.utcnow())<line_sep>phone_number.save()<line_sep>event=MessagingEvent.objects.create(domain=self.domain_name date=datetime.utcnow() source=MessagingEvent.SOURCE_REMINDER content_type=MessagingEvent.CONTENT_SMS status=MessagingEvent.STATUS_COMPLETED)<line_sep>MessagingSubEvent.objects.create(parent=event date=datetime.utcnow() recipient_type=MessagingEvent.RECIPIENT_CASE content_type=MessagingEvent.CONTENT_SMS status=MessagingEvent.STATUS_COMPLETED)<line_sep>self._dump_and_load(expected_object_counts)<block_end><def_stmt>test_message_scheduling self<block_start>AlertScheduleInstance(schedule_instance_id=uuid.uuid4() domain=self.domain_name recipient_type='CommCareUser' recipient_id=uuid.uuid4().hex current_event_num=0 schedule_iteration_num=1 next_event_due=datetime(2017 3 1) active=<true> alert_schedule_id=uuid.uuid4() ).save()<line_sep>self._dump_and_load({AlertScheduleInstance:1})<block_end><def_stmt>test_mobile_backend self<block_start><import_from_stmt>corehq.apps.sms.models SQLMobileBackend SQLMobileBackendMapping <line_sep>domain_backend=SQLMobileBackend.objects.create(domain=self.domain_name name='test-domain-mobile-backend' display_name='Test Domain Mobile Backend' hq_api_id='TDMB' inbound_api_key='test-domain-mobile-backend-inbound-api-key' supported_countries=["*"] backend_type=SQLMobileBackend.SMS is_global=<false> )<line_sep>SQLMobileBackendMapping.objects.create(domain=self.domain_name backend=domain_backend backend_type=SQLMobileBackend.SMS prefix='123' )<line_sep>global_backend=SQLMobileBackend.objects.create(domain=<none> name='test-global-mobile-backend' display_name='Test Global Mobile Backend' hq_api_id='TGMB' inbound_api_key='test-global-mobile-backend-inbound-api-key' supported_countries=["*"] backend_type=SQLMobileBackend.SMS is_global=<true> )<line_sep>SQLMobileBackendMapping.objects.create(domain=self.domain_name backend=global_backend backend_type=SQLMobileBackend.SMS prefix='*' )<line_sep>self._dump_and_load({SQLMobileBackendMapping:1 SQLMobileBackend:1 })<line_sep>self.assertEqual(SQLMobileBackend.objects.first().domain self.domain_name)<line_sep>self.assertEqual(SQLMobileBackendMapping.objects.first().domain self.domain_name)<block_end><def_stmt>test_case_importer self<block_start><import_from_stmt>corehq.apps.case_importer.tracking.models CaseUploadFileMeta CaseUploadFormRecord CaseUploadRecord <line_sep>upload_file_meta=CaseUploadFileMeta.objects.create(identifier=uuid.uuid4().hex filename='picture.jpg' length=1024 
)<line_sep>case_upload_record=CaseUploadRecord.objects.create(domain=self.domain_name upload_id=uuid.uuid4() task_id=uuid.uuid4() couch_user_id=uuid.uuid4().hex case_type='person' upload_file_meta=upload_file_meta )<line_sep>CaseUploadFormRecord.objects.create(case_upload_record=case_upload_record form_id=uuid.uuid4().hex )<line_sep>self._dump_and_load(Counter({CaseUploadFileMeta:1 CaseUploadRecord:1 CaseUploadFormRecord:1 }))<block_end><def_stmt>test_transifex self<block_start><import_from_stmt>corehq.apps.translations.models TransifexProject TransifexOrganization<line_sep>org=TransifexOrganization.objects.create(slug='test' name='demo' api_token='<PASSWORD>')<line_sep>TransifexProject.objects.create(organization=org slug='testp' name='demop' domain=self.domain_name)<line_sep>TransifexProject.objects.create(organization=org slug='testp1' name='demop1' domain=self.domain_name)<line_sep>self._dump_and_load(Counter({TransifexOrganization:1 TransifexProject:2}))<block_end><def_stmt>test_filtered_dump_load self<block_start><import_from_stmt>corehq.apps.locations.tests.test_location_types make_loc_type<import_from_stmt>corehq.apps.products.models SQLProduct<import_from_stmt>corehq.apps.locations.models LocationType<line_sep>make_loc_type('state' domain=self.domain_name)<line_sep>SQLProduct.objects.create(domain=self.domain_name product_id='test1' name='test1')<line_sep>expected_object_counts=Counter({LocationType:1 SQLProduct:1})<line_sep>self._dump_and_load(expected_object_counts load_filter='sqlproduct' expected_load_counts=Counter({SQLProduct:1}))<line_sep>self.assertEqual(0 LocationType.objects.count())<block_end><def_stmt>test_sms_content self<block_start><import_from_stmt>corehq.messaging.scheduling.models AlertSchedule SMSContent AlertEvent<import_from_stmt>corehq.messaging.scheduling.scheduling_partitioned.dbaccessors delete_alert_schedule_instances_for_schedule<line_sep>schedule=AlertSchedule.create_simple_alert(self.domain SMSContent())<line_sep>schedule.set_custom_alert([(AlertEvent(minutes_to_wait=5) SMSContent()) (AlertEvent(minutes_to_wait=15) SMSContent()) ])<line_sep>self.addCleanup(<lambda>:delete_alert_schedule_instances_for_schedule(AlertScheduleInstance schedule.schedule_id))<line_sep>self._dump_and_load(Counter({AlertSchedule:1 AlertEvent:2 SMSContent:2}))<block_end><def_stmt>test_zapier_subscription self<block_start>ZapierSubscription.objects.create(domain=self.domain_name case_type='case_type' event_name=EventTypes.NEW_CASE url='example.com' user_id='user_id' )<line_sep>self._dump_and_load(Counter({ZapierSubscription:1}))<block_end><block_end>@mock.patch("corehq.apps.dump_reload.sql.load.ENQUEUE_TIMEOUT" 1)<class_stmt>TestSqlLoadWithError(BaseDumpLoadTest)<block_start><def_stmt>setUp self<block_start>self.products=[SQLProduct.objects.create(domain=self.domain_name product_id='test1' name='test1') SQLProduct.objects.create(domain=self.domain_name product_id='test2' name='test2') SQLProduct.objects.create(domain=self.domain_name product_id='test3' name='test3') ]<block_end><def_stmt>test_load_error_queue_full self<block_start>"""Blocks when sending 'test3'"""<line_sep>self._load_with_errors(chunk_size=1)<block_end><def_stmt>test_load_error_queue_full_on_terminate self<block_start>"""Blocks when sending ``None`` into the queue to 'terminate' it."""<line_sep>self._load_with_errors(chunk_size=2)<block_end><def_stmt>_load_with_errors self chunk_size<block_start>output_stream=StringIO()<line_sep>SqlDataDumper(self.domain_name [] 
[]).dump(output_stream)<line_sep>self.delete_sql_data()<line_sep># resave the product to force an error self.products[0].save()<line_sep>actual_model_counts,dump_lines=self._parse_dump_output(output_stream)<line_sep>self.assertEqual(actual_model_counts['products.sqlproduct'] 3)<line_sep>loader=SqlDataLoader()<with_stmt>self.assertRaises(IntegrityError) mock.patch("corehq.apps.dump_reload.sql.load.CHUNK_SIZE" chunk_size)# patch the chunk size so that the queue blocks <block_start>loader.load_objects(dump_lines)<block_end><block_end><block_end><class_stmt>DefaultDictWithKeyTests(SimpleTestCase)<block_start><def_stmt>test_intended_use_case self<block_start><def_stmt>enlist item<block_start><return>[item]<block_end>greasy_spoon=DefaultDictWithKey(enlist)<line_sep>self.assertEqual(greasy_spoon['spam'] ['spam'])<line_sep>greasy_spoon['spam'].append('spam')<line_sep>self.assertEqual(greasy_spoon['spam'] ['spam' 'spam'])<block_end><def_stmt>test_not_enough_params self<block_start><def_stmt>empty_list <block_start><return>[]<block_end>greasy_spoon=DefaultDictWithKey(empty_list)<with_stmt>self.assertRaisesRegex(TypeError r'empty_list\(\) takes 0 positional arguments but 1 was given')<block_start>greasy_spoon['spam']<block_end><block_end><def_stmt>test_too_many_params self<block_start><def_stmt>appender item1 item2<block_start><return>[item1 item2]<block_end>greasy_spoon=DefaultDictWithKey(appender)<with_stmt>self.assertRaisesRegex(TypeError r"appender\(\) missing 1 required positional argument: 'item2'")<block_start>greasy_spoon['spam']<block_end><block_end><def_stmt>test_no_factory self<block_start>greasy_spoon=DefaultDictWithKey()<with_stmt>self.assertRaisesRegex(TypeError "'NoneType' object is not callable")<block_start>greasy_spoon['spam']<block_end><block_end><block_end><def_stmt>_normalize_object_counter counter for_loaded=<false><block_start>"""Converts a <Model Class> keyed counter to an model label keyed counter"""<def_stmt>_model_class_to_label model_class<block_start>label='{}.{}'.format(model_class._meta.app_label model_class.__name__)<line_sep><return>label<if>for_loaded<else>label.lower()<block_end><return>Counter({_model_class_to_label(model_class):count<for>model_class,count counter.items()})<block_end>
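# Editor's sketch (assumption, not part of the test suite): FakeModel is a hypothetical
# stand-in that only provides the two attributes _normalize_object_counter reads,
# _meta.app_label and __name__; the real models are Django model classes.
class FakeModel(object):
    class _meta(object):
        app_label = 'products'

# Lower-cased labels are produced for dumped counts, original casing for loaded counts.
assert _normalize_object_counter(Counter({FakeModel: 2})) == Counter({'products.fakemodel': 2})
assert _normalize_object_counter(Counter({FakeModel: 2}), for_loaded=True) == Counter({'products.FakeModel': 2})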
<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_allclose<import_from_stmt>keras backend<as>K<import_from_stmt>keras activations<def_stmt>get_standard_values <block_start>''' These are just a set of floats used for testing the activation functions, and are useful in multiple tests. '''<line_sep><return>np.array([[0 0.1 0.5 0.9 1.0]] dtype=K.floatx())<block_end><def_stmt>test_softmax <block_start>''' Test using a reference implementation of softmax '''<def_stmt>softmax values<block_start>m=np.max(values)<line_sep>e=np.exp(values-m)<line_sep><return>e/np.sum(e)<block_end>x=K.placeholder(ndim=2)<line_sep>f=K.function([x] [activations.softmax(x)])<line_sep>test_values=get_standard_values()<line_sep>result=f([test_values])[0]<line_sep>expected=softmax(test_values)<line_sep>assert_allclose(result expected rtol=1e-05)<block_end><def_stmt>test_time_distributed_softmax <block_start>x=K.placeholder(shape=(1 1 5))<line_sep>f=K.function([x] [activations.softmax(x)])<line_sep>test_values=get_standard_values()<line_sep>test_values=np.reshape(test_values (1 1 np.size(test_values)))<line_sep>f([test_values])[0]<block_end><def_stmt>test_softplus <block_start>''' Test using a reference softplus implementation '''<def_stmt>softplus x<block_start><return>np.log(np.ones_like(x)+np.exp(x))<block_end>x=K.placeholder(ndim=2)<line_sep>f=K.function([x] [activations.softplus(x)])<line_sep>test_values=get_standard_values()<line_sep>result=f([test_values])[0]<line_sep>expected=softplus(test_values)<line_sep>assert_allclose(result expected rtol=1e-05)<block_end><def_stmt>test_softsign <block_start>''' Test using a reference softsign implementation '''<def_stmt>softsign x<block_start><return>np.divide(x np.ones_like(x)+np.absolute(x))<block_end>x=K.placeholder(ndim=2)<line_sep>f=K.function([x] [activations.softsign(x)])<line_sep>test_values=get_standard_values()<line_sep>result=f([test_values])[0]<line_sep>expected=softsign(test_values)<line_sep>assert_allclose(result expected rtol=1e-05)<block_end><def_stmt>test_sigmoid <block_start>''' Test using a numerically stable reference sigmoid implementation '''<def_stmt>ref_sigmoid x<block_start><if_stmt>x<ge>0<block_start><return>1/(1+np.exp(-x))<block_end><else_stmt><block_start>z=np.exp(x)<line_sep><return>z/(1+z)<block_end><block_end>sigmoid=np.vectorize(ref_sigmoid)<line_sep>x=K.placeholder(ndim=2)<line_sep>f=K.function([x] [activations.sigmoid(x)])<line_sep>test_values=get_standard_values()<line_sep>result=f([test_values])[0]<line_sep>expected=sigmoid(test_values)<line_sep>assert_allclose(result expected rtol=1e-05)<block_end><def_stmt>test_hard_sigmoid <block_start>''' Test using a reference hard sigmoid implementation '''<def_stmt>ref_hard_sigmoid x<block_start>''' Reference hard sigmoid with slope and shift values from theano, see https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py '''<line_sep>x=(x<times>0.2)+0.5<line_sep>z=0.0<if>x<le>0<else>(1.0<if>x<ge>1<else>x)<line_sep><return>z<block_end>hard_sigmoid=np.vectorize(ref_hard_sigmoid)<line_sep>x=K.placeholder(ndim=2)<line_sep>f=K.function([x] [activations.hard_sigmoid(x)])<line_sep>test_values=get_standard_values()<line_sep>result=f([test_values])[0]<line_sep>expected=hard_sigmoid(test_values)<line_sep>assert_allclose(result expected rtol=1e-05)<block_end><def_stmt>test_relu <block_start>''' Relu implementation doesn't depend on the value being a theano variable. Testing ints, floats and theano tensors. 
'''<line_sep>x=K.placeholder(ndim=2)<line_sep>f=K.function([x] [activations.relu(x)])<line_sep>test_values=get_standard_values()<line_sep>result=f([test_values])[0]<line_sep># because no negatives in test values assert_allclose(result test_values rtol=1e-05)<block_end><def_stmt>test_elu <block_start>x=K.placeholder(ndim=2)<line_sep>f=K.function([x] [activations.elu(x 0.5)])<line_sep>test_values=get_standard_values()<line_sep>result=f([test_values])[0]<line_sep># because no negatives in test values assert_allclose(result test_values rtol=1e-05)<line_sep>negative_values=np.array([[-1 -2]] dtype=K.floatx())<line_sep>result=f([negative_values])[0]<line_sep>true_result=(np.exp(negative_values)-1)/2<line_sep>assert_allclose(result true_result)<block_end><def_stmt>test_tanh <block_start>test_values=get_standard_values()<line_sep>x=K.placeholder(ndim=2)<line_sep>exp=activations.tanh(x)<line_sep>f=K.function([x] [exp])<line_sep>result=f([test_values])[0]<line_sep>expected=np.tanh(test_values)<line_sep>assert_allclose(result expected rtol=1e-05)<block_end><def_stmt>test_linear <block_start>''' This function does no input validation, it just returns the thing that was passed in. '''<line_sep>xs=[1 5 <true> <none> 'foo']<for_stmt>x xs<block_start><assert_stmt>(x<eq>activations.linear(x))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>pytest.main([__file__])<block_end>
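# Editor's sketch (assumption, not part of the Keras test suite): plain-numpy references
# for relu and elu that make the expectations above explicit. With the standard test
# values (all non-negative) relu returns its input unchanged, and for negative inputs
# elu with alpha=0.5 gives (exp(x) - 1) / 2, matching test_elu.
def ref_relu(x):
    return np.maximum(x, 0.0)

def ref_elu(x, alpha=1.0):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))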
<import_stmt>numpy<as>np<import_stmt>unittest<import_from_stmt>pydlm.modeler.trends trend<import_from_stmt>pydlm.modeler.seasonality seasonality<import_from_stmt>pydlm.modeler.builder builder<import_from_stmt>pydlm.base.kalmanFilter kalmanFilter<class_stmt>testKalmanFilter(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.kf1=kalmanFilter(discount=[1])<line_sep>self.kf0=kalmanFilter(discount=[1e-10])<line_sep>self.kf11=kalmanFilter(discount=[1 1])<line_sep>self.trend0=trend(degree=0 discount=1 w=1.0)<line_sep>self.trend0_90=trend(degree=0 discount=0.9 w=1.0)<line_sep>self.trend0_98=trend(degree=0 discount=0.98 w=1.0 name='a')<line_sep>self.trend1=trend(degree=1 discount=1 w=1.0)<block_end><def_stmt>testForwardFilter self<block_start>dlm=builder()<line_sep>dlm.add(self.trend0)<line_sep>dlm.initialize()<line_sep>self.kf1.predict(dlm.model)<line_sep>self.assertAlmostEqual(dlm.model.prediction.obs 0)<line_sep># the prior on the mean is zero, but observe 1, with # discount = 1, one should expect the filterd mean to be 0.5 self.kf1.forwardFilter(dlm.model 1)<line_sep>self.assertAlmostEqual(dlm.model.obs 0.5)<line_sep>self.assertAlmostEqual(dlm.model.prediction.obs 0)<line_sep>self.assertAlmostEqual(dlm.model.sysVar 0.375)<line_sep>self.kf1.predict(dlm.model)<line_sep>self.assertAlmostEqual(dlm.model.obs 0.5)<line_sep>self.assertAlmostEqual(dlm.model.prediction.obs 0.5)<line_sep>dlm.initialize()<line_sep>self.kf0.predict(dlm.model)<line_sep>self.assertAlmostEqual(dlm.model.prediction.obs 0)<line_sep># the prior on the mean is zero, but observe 1, with discount = 0 # one should expect the filtered mean close to 1 self.kf0.forwardFilter(dlm.model 1)<line_sep>self.assertAlmostEqual(dlm.model.obs[0 0] 1)<line_sep>self.assertAlmostEqual(dlm.model.prediction.obs[0 0] 0)<line_sep>self.assertAlmostEqual(dlm.model.sysVar[0 0] 0.5)<line_sep>self.kf0.predict(dlm.model)<line_sep>self.assertAlmostEqual(dlm.model.obs[0 0] 1)<line_sep>self.assertAlmostEqual(dlm.model.prediction.obs[0 0] 1)<block_end><def_stmt>testForwardFilterMultiDim self<block_start>dlm=builder()<line_sep>dlm.add(seasonality(period=2 discount=1 w=1.0))<line_sep>dlm.initialize()<line_sep>self.kf11.forwardFilter(dlm.model 1)<line_sep>self.assertAlmostEqual(dlm.model.state[0][0 0] 0.33333333333)<line_sep>self.assertAlmostEqual(dlm.model.state[1][0 0] -0.33333333333)<line_sep>self.kf11.forwardFilter(dlm.model -1)<line_sep>self.assertAlmostEqual(dlm.model.state[0][0 0] -0.5)<line_sep>self.assertAlmostEqual(dlm.model.state[1][0 0] 0.5)<block_end><def_stmt>testBackwardSmoother self<block_start>dlm=builder()<line_sep>dlm.add(self.trend0)<line_sep>dlm.initialize()<line_sep># with mean being 0 and observe 1 and 0 consectively, one shall # expect the smoothed mean at 1 will be 1/3, for discount = 1 self.kf1.forwardFilter(dlm.model 1)<line_sep>self.kf1.forwardFilter(dlm.model 0)<line_sep>self.kf1.backwardSmoother(dlm.model np.matrix([[0.5]]) np.matrix([[0.375]]))<line_sep>self.assertAlmostEqual(dlm.model.obs[0 0] 1.0/3)<line_sep>self.assertAlmostEqual(dlm.model.sysVar[0 0] 0.18518519)<block_end># second order trend with discount = 1. The smoothed result should be # equal to a direct fit on the three data points, 0, 1, -1. 
Thus, the # smoothed observation should be 0.0 <def_stmt>testBackwardSmootherMultiDim self<block_start>dlm=builder()<line_sep>dlm.add(self.trend1)<line_sep>dlm.initialize()<line_sep>self.kf11.forwardFilter(dlm.model 1)<line_sep>state1=dlm.model.state<line_sep>cov1=dlm.model.sysVar<line_sep>self.kf11.forwardFilter(dlm.model -1)<line_sep>self.kf11.backwardSmoother(dlm.model rawState=state1 rawSysVar=cov1)<line_sep>self.assertAlmostEqual(dlm.model.obs[0 0] 0.0)<block_end><def_stmt>testMissingData self<block_start>dlm=builder()<line_sep>dlm.add(self.trend0)<line_sep>dlm.initialize()<line_sep>self.kf0.forwardFilter(dlm.model 1)<line_sep>self.assertAlmostEqual(dlm.model.obs[0 0] 1.0)<line_sep>self.assertAlmostEqual(dlm.model.obsVar[0 0] 1.0)<line_sep>self.kf0.forwardFilter(dlm.model <none>)<line_sep>self.assertAlmostEqual(dlm.model.obs[0 0] 1.0)<line_sep>self.assertAlmostEqual(dlm.model.obsVar[0 0]/1e10 0.5)<line_sep>self.kf0.forwardFilter(dlm.model <none>)<line_sep>self.assertAlmostEqual(dlm.model.obs[0 0] 1.0)<line_sep>self.assertAlmostEqual(dlm.model.obsVar[0 0]/1e10 0.5)<line_sep>self.kf0.forwardFilter(dlm.model 0)<line_sep>self.assertAlmostEqual(dlm.model.obs[0 0] 0.0)<block_end><def_stmt>testMissingEvaluation self<block_start>dlm=builder()<line_sep>dlm.add(self.trend0)<line_sep>dlm.initialize()<line_sep>dlm.model.evaluation=np.matrix([[<none>]])<line_sep>self.kf1.forwardFilter(dlm.model 1.0 dealWithMissingEvaluation=<true>)<line_sep>self.assertAlmostEqual(dlm.model.obs 0.0)<line_sep>self.assertAlmostEqual(dlm.model.transition 1.0)<block_end><def_stmt>testEvolveMode self<block_start>dlm=builder()<line_sep>dlm.add(self.trend0_90)<line_sep>dlm.add(self.trend0_98)<line_sep>dlm.initialize()<line_sep>kf2=kalmanFilter(discount=[0.9 0.98] updateInnovation='component' index=dlm.componentIndex)<line_sep>kf2.forwardFilter(dlm.model 1.0)<line_sep>self.assertAlmostEqual(dlm.model.innovation[0 1] 0.0)<line_sep>self.assertAlmostEqual(dlm.model.innovation[1 0] 0.0)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
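# Editor's sketch (assumption: a simplified scalar discount-DLM update, not pydlm's
# internal implementation). It reproduces the filtered means asserted above: with prior
# mean 0, prior variance 1 and observation variance 1, discount=1 moves the mean halfway
# to the observation (0.5), while discount close to 0 moves it almost all the way (about 1).
# pydlm's sysVar bookkeeping (e.g. the 0.375 above) also tracks the unknown observational
# variance and is not reproduced here.
def discounted_mean_update(prior_mean, prior_var, obs, obs_var, discount):
    evolved_var = prior_var / discount              # discounting inflates state uncertainty
    gain = evolved_var / (evolved_var + obs_var)    # adaptive (Kalman) coefficient
    return prior_mean + gain * (obs - prior_mean)

assert abs(discounted_mean_update(0.0, 1.0, 1.0, 1.0, 1.0) - 0.5) < 1e-9
assert abs(discounted_mean_update(0.0, 1.0, 1.0, 1.0, 1e-10) - 1.0) < 1e-6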
<import_stmt>time<import_stmt>os<import_stmt>sys<import_stmt>shutil<import_stmt>json<import_stmt>argparse<import_from_stmt>zipfile ZipFile<import_from_stmt>contextlib contextmanager<import_from_stmt>datetime datetime<import_from_stmt>Tests.private_build.upload_packs_private download_and_extract_index update_index_with_priced_packs extract_packs_artifacts<import_from_stmt>Tests.Marketplace.marketplace_services init_storage_client<import_from_stmt>Tests.scripts.utils.log_util install_logging<import_from_stmt>Tests.scripts.utils logging_wrapper<as>logging<line_sep>MAX_SECONDS_TO_WAIT_FOR_LOCK=600<line_sep>LOCK_FILE_PATH='lock.txt'<line_sep>@contextmanager<def_stmt>lock_and_unlock_dummy_index public_storage_bucket dummy_index_lock_path<block_start><try_stmt><block_start>acquire_dummy_index_lock(public_storage_bucket dummy_index_lock_path)<line_sep><yield><block_end><except_stmt>Exception<block_start>logging.exception("Error in dummy index lock context manager.")<block_end><finally_stmt><block_start>release_dummy_index_lock(public_storage_bucket dummy_index_lock_path)<block_end><block_end><def_stmt>change_pack_price_to_zero path_to_pack_metadata<block_start><with_stmt>open(path_to_pack_metadata 'r')<as>pack_metadata_file<block_start>pack_metadata=json.load(pack_metadata_file)<block_end>pack_metadata['price']=0<with_stmt>open(path_to_pack_metadata 'w')<as>pack_metadata_file<block_start>json.dump(pack_metadata pack_metadata_file indent=4)<block_end><block_end><def_stmt>change_packs_price_to_zero public_index_folder_path<block_start>paths_to_packs_in_merged_index=[pack_dir.path<for>pack_dir os.scandir(public_index_folder_path)<if>pack_dir.is_dir()]<for_stmt>path_to_pack paths_to_packs_in_merged_index<block_start>path_to_pack_metadata=os.path.join(path_to_pack 'metadata.json')<line_sep>change_pack_price_to_zero(path_to_pack_metadata)<block_end><block_end><def_stmt>merge_private_index_into_public_index public_index_folder_path private_index_folder_path<block_start>packs_in_private_index=[pack_dir.name<for>pack_dir os.scandir(private_index_folder_path)<if>pack_dir.is_dir()]<for_stmt>pack_name packs_in_private_index<block_start>path_to_pack_in_private_index=os.path.join(private_index_folder_path pack_name)<line_sep>path_to_pack_in_public_index=os.path.join(public_index_folder_path pack_name)<line_sep>shutil.copy(path_to_pack_in_private_index path_to_pack_in_public_index)<block_end><block_end><def_stmt>upload_modified_index public_index_folder_path extract_destination_path public_ci_dummy_index_blob build_number private_packs<block_start>"""Upload updated index zip to cloud storage. Args: public_index_folder_path (str): public index folder full path. extract_destination_path (str): extract folder full path. public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob. build_number (str): circleCI build number, used as an index revision. private_packs (list): List of private packs and their price. 
"""<with_stmt>open(os.path.join(public_index_folder_path "index.json") "w+")<as>index_file<block_start><for_stmt>private_pack private_packs<block_start>private_pack['price']=0<block_end>index={'revision':build_number 'modified':datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ') 'packs':private_packs}<line_sep>json.dump(index index_file indent=4)<block_end>index_zip_name=os.path.basename(public_index_folder_path)<line_sep>index_zip_path=shutil.make_archive(base_name=public_index_folder_path format="zip" root_dir=extract_destination_path base_dir=index_zip_name)<try_stmt><block_start>public_ci_dummy_index_blob.reload()<line_sep>public_ci_dummy_index_blob.cache_control="no-cache,max-age=0"# disabling caching for index blob public_ci_dummy_index_blob.upload_from_filename(index_zip_path)<line_sep>logging.success("Finished uploading index.zip to storage.")<block_end><except_stmt>Exception<block_start>logging.exception("Failed in uploading index. Mismatch in index file generation.")<line_sep>sys.exit(1)<block_end><finally_stmt><block_start>shutil.rmtree(public_index_folder_path)<block_end><block_end><def_stmt>option_handler <block_start>"""Validates and parses script arguments. Returns: Namespace: Parsed arguments object. """<line_sep>parser=argparse.ArgumentParser(description="Store packs in cloud storage.")<line_sep># disable-secrets-detection-start parser.add_argument('-b' '--public_bucket_name' help="CI public bucket name" required=<true>)<line_sep>parser.add_argument('-pb' '--private_bucket_name' help="CI private bucket name" required=<true>)<line_sep>parser.add_argument('-s' '--service_account' help=("Path to gcloud service account, is for circleCI usage. "<concat>"For local development use your personal account and "<concat>"authenticate using Google Cloud SDK by running: "<concat>"`gcloud auth application-default login` and leave this parameter blank. "<concat>"For more information go to: "<concat>"https://googleapis.dev/python/google-api-core/latest/auth.html") required=<false>)<line_sep>parser.add_argument('-n' '--ci_build_number' help="CircleCi build number (will be used as hash revision at index file)" required=<true>)<line_sep>parser.add_argument('-e' '--extract_public_index_path' help="Full path of folder to extract the public index" required=<true>)<line_sep>parser.add_argument('-sb' '--storage_base_path' help="Storage base path of the directory to upload to." 
required=<false>)<line_sep>parser.add_argument('-p' '--pack_name' help="Modified pack to upload to gcs.")<line_sep>parser.add_argument('-a' '--artifacts_path' help="The full path of packs artifacts" required=<true>)<line_sep>parser.add_argument('-ea' '--extract_artifacts_path' help="Full path of folder to extract wanted packs" required=<true>)<line_sep>parser.add_argument('-di' '--dummy_index_dir_path' help="Full path to the dummy index in the private CI bucket" required=<true>)<line_sep># disable-secrets-detection-end <return>parser.parse_args()<block_end><def_stmt>is_dummy_index_locked public_storage_bucket dummy_index_lock_path<block_start>dummy_index_lock_blob=public_storage_bucket.blob(dummy_index_lock_path)<line_sep><return>dummy_index_lock_blob.exists()<block_end><def_stmt>lock_dummy_index public_storage_bucket dummy_index_lock_path<block_start>dummy_index_lock_blob=public_storage_bucket.blob(dummy_index_lock_path)<with_stmt>open(LOCK_FILE_PATH 'w')<as>lock_file<block_start>lock_file.write('locked')<block_end><with_stmt>open(LOCK_FILE_PATH 'rb')<as>lock_file<block_start>dummy_index_lock_blob.upload_from_file(lock_file)<block_end><block_end><def_stmt>acquire_dummy_index_lock public_storage_bucket dummy_index_lock_path<block_start>total_seconds_waited=0<while_stmt>is_dummy_index_locked(public_storage_bucket dummy_index_lock_path)<block_start><if_stmt>total_seconds_waited<ge>MAX_SECONDS_TO_WAIT_FOR_LOCK<block_start>logging.critical("Error: Failed too long to acquire lock, exceeded max wait time.")<line_sep>sys.exit(1)<block_end><if_stmt>total_seconds_waited%60<eq>0# Printing a message every minute to keep the machine from dying due to no output <block_start>logging.info("Waiting to acquire lock.")<block_end>total_seconds_waited<augadd>10<line_sep>time.sleep(10)<block_end>lock_dummy_index(public_storage_bucket dummy_index_lock_path)<block_end><def_stmt>release_dummy_index_lock public_storage_bucket dummy_index_lock_path<block_start>dummy_index_lock_blob=public_storage_bucket.blob(dummy_index_lock_path)<line_sep>dummy_index_lock_blob.delete()<line_sep>os.remove(LOCK_FILE_PATH)<block_end><def_stmt>add_private_packs_from_dummy_index private_packs dummy_index_blob<block_start>downloaded_dummy_index_path='current_dummy_index.zip'<line_sep>extracted_dummy_index_path='dummy_index'<line_sep>dummy_index_json_path=os.path.join(extracted_dummy_index_path 'index' 'index.json')<line_sep>dummy_index_blob.download_to_filename(downloaded_dummy_index_path)<line_sep>os.mkdir(extracted_dummy_index_path)<if_stmt>os.path.exists(downloaded_dummy_index_path)<block_start><with_stmt>ZipFile(downloaded_dummy_index_path 'r')<as>index_zip<block_start>index_zip.extractall(extracted_dummy_index_path)<block_end><block_end><with_stmt>open(dummy_index_json_path)<as>index_file<block_start>index_json=json.load(index_file)<line_sep>packs_from_dummy_index=index_json.get('packs' [])<for_stmt>pack private_packs<block_start>is_pack_in_dummy_index=any([pack['id']<eq>dummy_index_pack['id']<for>dummy_index_pack packs_from_dummy_index])<if_stmt><not>is_pack_in_dummy_index<block_start>packs_from_dummy_index.append(pack)<block_end><block_end><block_end>os.remove(downloaded_dummy_index_path)<line_sep>shutil.rmtree(extracted_dummy_index_path)<line_sep><return>packs_from_dummy_index<block_end><def_stmt>main <block_start>install_logging('prepare_public_index_for_private_testing.log' 
logger=logging)<line_sep>upload_config=option_handler()<line_sep>service_account=upload_config.service_account<line_sep>build_number=upload_config.ci_build_number<line_sep>public_bucket_name=upload_config.public_bucket_name<line_sep>private_bucket_name=upload_config.private_bucket_name<line_sep>storage_base_path=upload_config.storage_base_path<line_sep>extract_public_index_path=upload_config.extract_public_index_path<line_sep>changed_pack=upload_config.pack_name<line_sep>extract_destination_path=upload_config.extract_artifacts_path<line_sep>packs_artifacts_path=upload_config.artifacts_path<line_sep>dummy_index_dir_path=upload_config.dummy_index_dir_path<line_sep>dummy_index_path=os.path.join(dummy_index_dir_path 'index.zip')<line_sep>dummy_index_lock_path=os.path.join(dummy_index_dir_path 'lock.txt')<line_sep>storage_client=init_storage_client(service_account)<line_sep>public_storage_bucket=storage_client.bucket(public_bucket_name)<line_sep>private_storage_bucket=storage_client.bucket(private_bucket_name)<line_sep>dummy_index_blob=public_storage_bucket.blob(dummy_index_path)<with_stmt>lock_and_unlock_dummy_index(public_storage_bucket dummy_index_lock_path)<block_start>extract_packs_artifacts(packs_artifacts_path extract_destination_path)<line_sep>public_index_folder_path,public_index_blob,_=download_and_extract_index(public_storage_bucket extract_public_index_path storage_base_path)<line_sep># In order for the packs to be downloaded successfully, their price has to be 0 change_packs_price_to_zero(public_index_folder_path)<line_sep>private_packs,private_index_path,private_index_blob=update_index_with_priced_packs(private_storage_bucket extract_destination_path public_index_folder_path changed_pack <true> storage_base_path)<line_sep>private_packs=add_private_packs_from_dummy_index(private_packs dummy_index_blob)<line_sep>upload_modified_index(public_index_folder_path extract_public_index_path dummy_index_blob build_number private_packs)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
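# Editor's sketch (illustrative only; the pack entry below is a made-up placeholder):
# the index.json document written by upload_modified_index has this shape, with every
# private pack's price forced to 0 so the packs can be downloaded during testing.
def build_test_index(build_number, private_packs):
    for private_pack in private_packs:
        private_pack['price'] = 0
    return {
        'revision': build_number,
        'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
        'packs': private_packs,
    }

# Example: json.dumps(build_test_index('12345', [{'id': 'ExamplePack', 'price': 10}]), indent=4)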
# Copyright 2019 Intel Corporation. <import_stmt>logging<import_from_stmt>collections namedtuple<import_stmt>numpy<as>np<import_stmt>six<import_from_stmt>plaidml2 DType<import_from_stmt>plaidml2.core TensorShape Buffer<import_from_stmt>plaidml2.ffi ForeignObject ffi ffi_call lib<line_sep>logger=logging.getLogger(__name__)<def_stmt>__init <block_start>"""Docstring for function plaidml2.edsl.__init"""<line_sep>ffi_call(lib.plaidml_edsl_init)<block_end>ffi.init_once(__init 'plaidml_edsl_init')<class_stmt>LogicalShape(ForeignObject)<block_start>"""Docstring for class LogicalShape"""<line_sep>__ffi_del__=lib.plaidml_logical_shape_free<line_sep>__ffi_repr__=lib.plaidml_logical_shape_repr<def_stmt>__init__ self dtype=<none> dims=[] ptr=<none><block_start><if_stmt>ptr<block_start>ffi_obj=ptr<block_end><elif_stmt>dtype<is><not><none><block_start>raw_dims=ffi.new('int64_t[]' [0<if>x<is><none><else>x<for>x dims])<line_sep>ffi_obj=ffi_call(lib.plaidml_logical_shape_alloc dtype len(dims) raw_dims)<block_end><else_stmt><block_start><raise>ValueError('One of dtype= or ptr= must be specified.')<block_end>super(LogicalShape self).__init__(ffi_obj)<block_end>@property<def_stmt>dtype self<block_start><return>DType(ffi_call(lib.plaidml_logical_shape_get_dtype self.as_ptr()))<block_end>@property<def_stmt>ndims self<block_start><return>ffi_call(lib.plaidml_logical_shape_get_ndims self.as_ptr())<block_end>@property<def_stmt>int_dims self<block_start>"""Returns the dimensions of a LogicalShape as a list. Args: self (pointer): The object pointer for a LogicalShape Returns: list (int): Integer dimensions of the LogicalShape. """<line_sep><return>[ffi_call(lib.plaidml_logical_shape_get_dim_int self.as_ptr() i)<for>i range(self.ndims)]<block_end><def_stmt>into_TensorShape self<block_start><return>TensorShape(ptr=ffi_call(lib.plaidml_logical_shape_into_tensor_shape self.as_ptr()))<block_end><block_end>Constraint=namedtuple('Constraint' ['lhs' 'rhs'])<def_stmt>wrap_dim x<block_start><if_stmt>isinstance(x six.integer_types)<block_start><return>TensorDim(expr=ffi_call(lib.plaidml_dim_expr_int x))<block_end><return>x<block_end><def_stmt>dim_op op *args<block_start>args=[wrap_dim(x)<for>x args]<line_sep>raw_args=[x.as_ptr()<for>x args]<line_sep><return>ffi_call(lib.plaidml_dim_expr_op op len(args) raw_args)<block_end><class_stmt>TensorDim(ForeignObject)<block_start>"""Docstring for class TensorDim"""<line_sep>__ffi_del__=lib.plaidml_dim_expr_free<line_sep>__ffi_repr__=lib.plaidml_dim_expr_repr<def_stmt>__init__ self expr=<none><block_start><if_stmt>expr<is><none><block_start>expr=ffi_call(lib.plaidml_dim_expr_none)<block_end>super(TensorDim self).__init__(expr)<block_end><def_stmt>_bind self expr<block_start>self.take_ptr(expr)<block_end><def_stmt>__neg__ self<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_NEG self))<block_end><def_stmt>__add__ self other<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_ADD self other))<block_end><def_stmt>__radd__ self other<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_ADD other self))<block_end><def_stmt>__sub__ self other<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_SUB self other))<block_end><def_stmt>__rsub__ self other<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_SUB other self))<block_end><def_stmt>__mul__ self other<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_MUL self other))<block_end><def_stmt>__rmul__ self other<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_MUL other self))<block_end><def_stmt>__floordiv__ 
self other<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_DIV self other))<block_end><def_stmt>__rfloordiv__ self other<block_start><return>TensorDim(dim_op(lib.PLAIDML_INT_OP_DIV other self))<block_end><block_end><def_stmt>wrap_poly x<block_start><if_stmt>isinstance(x six.integer_types)<block_start><return>TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_literal x))<block_end><if_stmt>isinstance(x TensorDim)<block_start><return>TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_dim x.as_ptr()))<block_end><return>x<block_end><def_stmt>poly_op op *args<block_start>args=[wrap_poly(x)<for>x args]<line_sep>raw_args=[x.as_ptr()<for>x args]<line_sep><return>ffi_call(lib.plaidml_poly_expr_op op len(args) raw_args)<block_end><class_stmt>TensorIndex(ForeignObject)<block_start>"""Docstring for class TensorIndex"""<line_sep>__ffi_del__=lib.plaidml_poly_expr_free<line_sep>__ffi_repr__=lib.plaidml_poly_expr_repr<def_stmt>__init__ self expr=<none> name=''<block_start><if_stmt>expr<is><none><block_start>expr=ffi_call(lib.plaidml_poly_expr_index name.encode())<block_end>super(TensorIndex self).__init__(expr)<block_end><def_stmt>__lt__ self rhs<block_start><return>Constraint(self wrap_dim(rhs))<block_end><def_stmt>__neg__ self<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_NEG self))<block_end><def_stmt>__add__ self rhs<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_ADD self rhs))<block_end><def_stmt>__radd__ self lhs<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_ADD lhs self))<block_end><def_stmt>__sub__ self rhs<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_SUB self rhs))<block_end><def_stmt>__rsub__ self lhs<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_SUB lhs self))<block_end><def_stmt>__mul__ self rhs<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_MUL self rhs))<block_end><def_stmt>__rmul__ self lhs<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_MUL lhs self))<block_end><def_stmt>__floordiv__ self rhs<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_DIV self rhs))<block_end><def_stmt>__rfloordiv__ self lhs<block_start><return>TensorIndex(poly_op(lib.PLAIDML_INT_OP_DIV lhs self))<block_end><block_end><class_stmt>_IndexMap(ForeignObject)<block_start>__ffi_del__=lib.plaidml_expr_free<line_sep>__ffi_repr__=lib.plaidml_expr_repr<def_stmt>__init__ self ref key<block_start><if_stmt>isinstance(key tuple)<or>isinstance(key list)<block_start>idxs=key<block_end><else_stmt><block_start>idxs=[key]<block_end>idxs=[wrap_poly(x)<for>x idxs]<line_sep>raw_idxs=[x.as_ptr()<for>x idxs]<line_sep>expr=ffi_call(lib.plaidml_expr_index_map ref.as_ptr() len(idxs) raw_idxs)<line_sep>super(_IndexMap self).__init__(expr)<block_end><block_end><class_stmt>_SizeMap(ForeignObject)<block_start>__ffi_del__=lib.plaidml_expr_free<line_sep>__ffi_repr__=lib.plaidml_expr_repr<def_stmt>__init__ self dims<block_start>dims=[wrap_dim(x)<for>x dims]<line_sep>raw_dims=[x.as_ptr()<for>x dims]<line_sep>expr=ffi_call(lib.plaidml_expr_size_map len(dims) raw_dims)<line_sep>super(_SizeMap self).__init__(expr)<block_end><block_end><class_stmt>_Contraction(ForeignObject)<block_start>__ffi_del__=lib.plaidml_expr_free<line_sep>__ffi_repr__=lib.plaidml_expr_repr<def_stmt>__init__ self agg_op combo_op src_idxs sink_idxs sink_sizes name<block_start>src_idxs=[x.as_ptr()<for>x src_idxs]<line_sep>expr=ffi_call(lib.plaidml_expr_contraction agg_op combo_op sink_idxs.as_ptr() sink_sizes.as_ptr() len(src_idxs) src_idxs name.encode() )<line_sep>super(_Contraction 
self).__init__(expr)<block_end><block_end>_ContractionPart=namedtuple('_ContractionPart' ['op' 'args'])<class_stmt>IndexedTensor(object)<block_start>"""Docstring for class IndexedTensor"""<def_stmt>__init__ self impl tensor=<none><block_start>self._impl=impl<line_sep>self._tensor=tensor<block_end><def_stmt>__repr__ self<block_start><return>repr(self._impl)<block_end># Represents an aggregation_op of SUM in a contraction <def_stmt>__iadd__ self rhs<block_start><return>IndexedTensor(self._make_contraction(lib.PLAIDML_AGG_OP_SUM rhs))<block_end># Represents an aggregation_op of PROD in a contraction <def_stmt>__imul__ self rhs<block_start><return>IndexedTensor(self._make_contraction(lib.PLAIDML_AGG_OP_PROD rhs))<block_end># Represents an aggregation_op of MAX in a contraction <def_stmt>__ge__ self rhs<block_start>self._tensor._set_contraction(self._make_contraction(lib.PLAIDML_AGG_OP_MAX rhs))<block_end># Represents an aggregation_op of MIN in a contraction <def_stmt>__le__ self rhs<block_start>self._tensor._set_contraction(self._make_contraction(lib.PLAIDML_AGG_OP_MIN rhs))<block_end># Represents a combo_op of PLUS in a contraction <def_stmt>__add__ self rhs<block_start><return>IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_ADD (self rhs)))<block_end># Represents a combo_op of MULTIPLY in a contraction <def_stmt>__mul__ self rhs<block_start><return>IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_MUL (self rhs)))<block_end># Represents a combo_op of EQ in a contraction <def_stmt>__eq__ self rhs<block_start><return>IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_EQ (self rhs)))<block_end><def_stmt>_make_contraction self agg_op rhs# Extract combo_op and inputs <block_start><if_stmt>isinstance(rhs._impl _IndexMap)# Unary op <block_start>combo_op=lib.PLAIDML_COMBO_OP_NONE<line_sep>inputs=[rhs._impl]<block_end><elif_stmt>isinstance(rhs._impl _ContractionPart)# Binary/Ternary op <block_start>combo_op=rhs._impl.op<line_sep>inputs=[x._impl<for>x rhs._impl.args]<block_end><else_stmt><block_start><raise>ValueError('Invalid impl')<block_end><return>_Contraction(agg_op combo_op inputs self._impl _SizeMap(self._tensor._dims) self._tensor._name )<block_end><block_end><class_stmt>Tensor(ForeignObject)<block_start>"""Docstring for class Tensor"""<line_sep>__ffi_del__=lib.plaidml_expr_free<line_sep>__ffi_repr__=lib.plaidml_expr_repr<line_sep>_dims=<none><line_sep>_is_contraction=<false><def_stmt>__init__ self shape=<none> dims=<none> expr=<none> value=<none> name='' buffer=<none><block_start>self._name=name<line_sep>self._buffer=buffer<if_stmt>shape<block_start><if_stmt>buffer<is><none><block_start>raw_buffer=ffi.NULL<block_end><else_stmt><block_start>raw_buffer=buffer.as_ptr()<block_end>expr=ffi_call(lib.plaidml_expr_placeholder shape.as_ptr() raw_buffer name.encode())<block_end><elif_stmt>dims<is><not><none><block_start>self._dims=dims<line_sep>expr=<none><block_end><elif_stmt>value<is><not><none><block_start><if_stmt>isinstance(value six.integer_types)<block_start>expr=ffi_call(lib.plaidml_expr_int value)<block_end><elif_stmt>isinstance(value float)<block_start>expr=ffi_call(lib.plaidml_expr_float value)<block_end><else_stmt><block_start><raise>TypeError('Invalid type for value={}'.format(value))<block_end><block_end><elif_stmt>expr<is><none><block_start><raise>ValueError('One of dims=, shape=, or expr= must be specified.')<block_end>super(Tensor self).__init__(expr)<block_end><def_stmt>set_param_value self buffer# Changes the value of a parameter tensor (i.e. 
one explicitly set to a buffer value) # Illegal on other tensors <block_start>ffi_call(lib.plaidml_expr_param_reset self.__ffi_obj__ buffer.as_ptr())<block_end><def_stmt>__hash__ self<block_start><return>hash((self.as_ptr() self._dims self._is_contraction))<block_end><def_stmt>__getitem__ self key<block_start><return>IndexedTensor(_IndexMap(self key) tensor=self)<block_end><def_stmt>__setitem__ self key value<block_start><if_stmt>isinstance(value._impl _Contraction)# standard contraction <block_start>self._set_contraction(value._impl)<block_end><elif_stmt>isinstance(value Tensor)<block_start><pass><block_end><elif_stmt>isinstance(value._impl _IndexMap)# Unary ASSIGN contraction <block_start>self._set_contraction(_Contraction(lib.PLAIDML_AGG_OP_ASSIGN lib.PLAIDML_COMBO_OP_NONE [value._impl] _IndexMap(self key) _SizeMap(self._dims) self._name ))<block_end><elif_stmt>isinstance(value._impl _ContractionPart)# Binary or ternary ASSIGN contraction <block_start>self._set_contraction(_Contraction(lib.PLAIDML_AGG_OP_ASSIGN value._impl.op [x._impl<for>x value._impl.args] _IndexMap(self key) _SizeMap(self._dims) self._name ))<block_end><else_stmt><block_start><raise>ValueError('Invalid impl when assigning to a Tensor (Type: {})'.format(type(value._impl)))<block_end><block_end><def_stmt>_set_contraction self cion<block_start>self._is_contraction=<true><line_sep>self.take_ptr(cion)<block_end># Represents an eltwise negation <def_stmt>__neg__ self<block_start><return>call('neg' self)<block_end># Represents an eltwise bit_not <def_stmt>__invert__ self<block_start><return>call('bit_not' self)<block_end># Represents an eltwise addition <def_stmt>__add__ self rhs<block_start><return>call('add' self rhs)<block_end><def_stmt>__radd__ self lhs<block_start><return>call('add' lhs self)<block_end># Represents an eltwise subtraction <def_stmt>__sub__ self rhs<block_start><return>call('sub' self rhs)<block_end><def_stmt>__rsub__ self lhs<block_start><return>call('sub' lhs self)<block_end># Represents an eltwise multiplication <def_stmt>__mul__ self rhs<block_start><return>call('mul' self rhs)<block_end><def_stmt>__rmul__ self lhs<block_start><return>call('mul' lhs self)<block_end># Represents an eltwise division <def_stmt>__div__ self rhs<block_start><return>call('div' self rhs)<block_end><def_stmt>__rdiv__ self lhs<block_start><return>call('div' lhs self)<block_end># Represents an eltwise division <def_stmt>__truediv__ self rhs<block_start><return>call('div' self rhs)<block_end><def_stmt>__rtruediv__ self lhs<block_start><return>call('div' lhs self)<block_end># Represents an eltwise cmp_eq <def_stmt>__eq__ self rhs<block_start><return>call('cmp_eq' self rhs)<block_end># Represents an eltwise cmp_ne <def_stmt>__ne__ self rhs<block_start><return>call('cmp_ne' self rhs)<block_end># Represents an eltwise cmp_lt <def_stmt>__lt__ self rhs<block_start><return>call('cmp_lt' self rhs)<block_end># Represents an eltwise cmp_gt <def_stmt>__gt__ self rhs<block_start><return>call('cmp_gt' self rhs)<block_end># Represents an eltwise cmp_le <def_stmt>__le__ self rhs<block_start><return>call('cmp_le' self rhs)<block_end># Represents an eltwise cmp_ge <def_stmt>__ge__ self rhs<block_start><return>call('cmp_ge' self rhs)<block_end># Represents an eltwise bit_left <def_stmt>__lshift__ self rhs<block_start><return>call('bit_left' self rhs)<block_end><def_stmt>__rlshift__ self lhs<block_start><return>call('bit_left' lhs self)<block_end># Represents an eltwise bit_right <def_stmt>__rshift__ self rhs<block_start><return>call('bit_right' 
self rhs)<block_end><def_stmt>__rrshift__ self lhs<block_start><return>call('bit_right' lhs self)<block_end># Represents an eltwise bit_and <def_stmt>__and__ self rhs<block_start><return>call('bit_and' self rhs)<block_end><def_stmt>__rand__ self lhs<block_start><return>call('bit_and' lhs self)<block_end># Represents an eltwise bit_or <def_stmt>__or__ self rhs<block_start><return>call('bit_or' self rhs)<block_end><def_stmt>__ror__ self lhs<block_start><return>call('bit_or' lhs self)<block_end># Represents an eltwise bit_xor <def_stmt>__xor__ self rhs<block_start><return>call('bit_xor' self rhs)<block_end><def_stmt>__rxor__ self lhs<block_start><return>call('bit_xor' lhs self)<block_end># Enable no_reduce on a contraction <def_stmt>no_reduce self<block_start><if_stmt><not>self._is_contraction<block_start><raise>TypeError('no_reduce can only be specified on a contraction.')<block_end>ffi_call(lib.plaidml_expr_contraction_set_no_reduce self.as_ptr() <true>)<line_sep><return>self<block_end># Set use_default on a contraction <def_stmt>use_default self rhs<block_start><if_stmt><not>self._is_contraction<block_start><raise>TypeError('use_default can only be specified on a contraction.')<block_end>ffi_call(lib.plaidml_expr_contraction_set_use_default self.as_ptr() rhs.as_ptr())<line_sep><return>self<block_end><def_stmt>add_constraint self constraint<block_start>ffi_call(lib.plaidml_expr_contraction_add_constraint self.as_ptr() constraint.lhs.as_ptr() constraint.rhs.as_ptr() )<block_end><def_stmt>add_constraints self constraints<block_start><for_stmt>constraint constraints<block_start>self.add_constraint(constraint)<block_end><block_end># Return the tensor's shape @property<def_stmt>shape self<block_start><return>LogicalShape(ptr=ffi_call(lib.plaidml_expr_get_shape self.as_ptr()))<block_end># Verify that the specified dims match the dims of this tensor. 
<def_stmt>bind_dims self *dims<block_start>raw_dims=[x.as_ptr()<for>x dims]<line_sep>ffi_call(lib.plaidml_expr_bind_dims self.as_ptr() len(raw_dims) raw_dims)<block_end># bind a concrete shape to this tensor <def_stmt>bind self shape<block_start>ffi_call(lib.plaidml_expr_bind_shape self.as_ptr() shape.as_ptr())<block_end><block_end><class_stmt>TensorRef<block_start>"""Docstring for class TensorRef"""<def_stmt>__init__ self tensor<block_start>self.tensor=tensor<block_end><def_stmt>__hash__ self<block_start><return>hash(ffi_call(lib.plaidml_expr_ptr self.tensor.as_ptr()))<block_end><def_stmt>__eq__ self other<block_start><if_stmt>isinstance(other Tensor)<block_start><return>self.__hash__()<eq>TensorRef(other).__hash__()<block_end><return>self.__hash__()<eq>other.__hash__()<block_end><block_end><class_stmt>Value(ForeignObject)<block_start>"""Docstring for class Value"""<line_sep>__ffi_del__=lib.plaidml_value_free<line_sep>__ffi_repr__=lib.plaidml_value_repr<def_stmt>__init__ self value# logger.debug('Value({})'.format(value)) <block_start><if_stmt>isinstance(value np.ndarray)<block_start><if_stmt>value.ndim<eq>0<block_start>value=value.item()<block_end><else_stmt><block_start>value=value.tolist()<block_end><block_end><if_stmt>value<is><none><block_start>ffi_obj=ffi_call(lib.plaidml_value_none)<block_end><elif_stmt>isinstance(value (six.integer_types bool))<block_start>ffi_obj=ffi_call(lib.plaidml_value_int value)<block_end><elif_stmt>isinstance(value float)<block_start>ffi_obj=ffi_call(lib.plaidml_value_float value)<block_end><elif_stmt>isinstance(value TensorDim)<block_start>ffi_obj=ffi_call(lib.plaidml_value_dim value.as_ptr())<block_end><elif_stmt>isinstance(value Tensor)<block_start>ffi_obj=ffi_call(lib.plaidml_value_expr value.as_ptr())<block_end><elif_stmt>isinstance(value (list tuple))<block_start>self._elts=[Value(x)<for>x value]<line_sep>raw_elts=[x.as_ptr()<for>x self._elts]<line_sep>ffi_obj=ffi_call(lib.plaidml_value_tuple len(raw_elts) raw_elts)<block_end><elif_stmt>isinstance(value six.string_types)<block_start>ffi_obj=ffi_call(lib.plaidml_value_str value.encode('utf-8'))<block_end><elif_stmt>isinstance(value ffi.CData)<and>ffi.typeof(value)<is>ffi.typeof('plaidml_value*')<block_start>ffi_obj=value<block_end><else_stmt><block_start><raise>TypeError('Unsupported type {} for value={}'.format(type(value) value))<block_end>super(Value self).__init__(ffi_obj)<block_end><def_stmt>as_tensor self<block_start><return>Tensor(expr=ffi_call(lib.plaidml_value_expr_get self.as_ptr()))<block_end><block_end><def_stmt>TensorOutput *args<block_start><return>Tensor(dims=args)<block_end><def_stmt>TensorDims count<block_start><return>[TensorDim()<for>i range(count)]<block_end><def_stmt>TensorIndexes count<block_start><return>[TensorIndex()<for>i range(count)]<block_end><class_stmt>ProgramArgument<block_start>"""Docstring for class ProgramArgument"""<def_stmt>__init__ self arg<block_start>self.is_input=arg.is_input<line_sep>self.ref=TensorRef(Tensor(expr=ffi_call(lib.plaidml_expr_clone arg.tensor)))<line_sep>self.shape=LogicalShape(ptr=ffi_call(lib.plaidml_logical_shape_clone arg.shape))<if_stmt>arg.buffer<block_start>tensor_shape=self.shape.into_TensorShape()<line_sep>self.buffer=Buffer(tensor_shape ptr=ffi_call(lib.plaidml_buffer_clone arg.buffer))<block_end><else_stmt><block_start>self.buffer=<none><block_end><block_end><block_end><class_stmt>Program(ForeignObject)<block_start>"""Docstring for class 
Program"""<line_sep>__ffi_del__=lib.plaidml_program_free<line_sep>__ffi_repr__=lib.plaidml_program_repr<def_stmt>__init__ self name outputs updates=[]<block_start>raw_outputs=[x.as_ptr()<for>x outputs]<line_sep>dst_updates=[x[0].as_ptr()<for>x updates]<line_sep>src_updates=[x[1].as_ptr()<for>x updates]<line_sep>raw_args=ffi.new('plaidml_program_args**')<line_sep>ffi_obj=ffi_call(lib.plaidml_program_evaluate name.encode() len(raw_outputs) raw_outputs len(updates) src_updates dst_updates raw_args )<line_sep>self.args=[ProgramArgument(raw_args[0].args[i])<for>i range(raw_args[0].nargs)]<line_sep>ffi_call(lib.plaidml_program_args_free raw_args[0])<line_sep>super(Program self).__init__(ffi_obj)<block_end>@property<def_stmt>inputs self<block_start><return>[x<for>x self.args<if>x.is_input]<block_end>@property<def_stmt>outputs self<block_start><return>[x<for>x self.args<if><not>x.is_input]<block_end><block_end><def_stmt>wrap_tensor x<block_start><if_stmt>isinstance(x six.integer_types)<block_start><return>Tensor(expr=ffi_call(lib.plaidml_expr_int x))<block_end><if_stmt>np.issubdtype(type(x) np.integer)<block_start><return>Tensor(expr=ffi_call(lib.plaidml_expr_int x.item()))<block_end><if_stmt>isinstance(x float)<block_start><return>Tensor(expr=ffi_call(lib.plaidml_expr_float x))<block_end><if_stmt>isinstance(x TensorDim)<block_start><return>Tensor(expr=ffi_call(lib.plaidml_expr_dim x.as_ptr()))<block_end><if_stmt>isinstance(x Tensor)<block_start><return>x<block_end><raise>TypeError('Unexpected type for call argument: {}. fn: {}, args: {}, bad arg: {}'.format(type(x) fn args x))<block_end><def_stmt>call fn *args<block_start>args=[wrap_tensor(x)<for>x args]<line_sep>raw_args=[x.as_ptr()<for>x args]<line_sep><return>Tensor(expr=ffi_call(lib.plaidml_expr_call fn.encode() len(args) raw_args))<block_end><def_stmt>cast x dtype<block_start><return>Tensor(expr=ffi_call(lib.plaidml_expr_cast wrap_tensor(x).as_ptr() dtype))<block_end><def_stmt>as_bool x<block_start><return>cast(x DType.BOOLEAN)<block_end><def_stmt>as_float x bit_size<block_start>map={16:DType.FLOAT16 32:DType.FLOAT32 64:DType.FLOAT64 }<line_sep>dtype=map.get(bit_size)<if_stmt><not>dtype<block_start><raise>'Unsupport bit_size for as_float'<block_end><return>cast(x dtype)<block_end><def_stmt>as_int x bit_size<block_start>map={8:DType.INT8 16:DType.INT16 32:DType.INT32 64:DType.INT64 }<line_sep>dtype=map.get(bit_size)<if_stmt><not>dtype<block_start><raise>'Unsupport bit_size for as_int'<block_end><return>cast(x dtype)<block_end><def_stmt>as_uint x bit_size<block_start>map={8:DType.UINT8 16:DType.UINT16 32:DType.UINT32 64:DType.UINT64 }<line_sep>dtype=map.get(bit_size)<if_stmt><not>dtype<block_start><raise>'Unsupport bit_size for as_uint'<block_end><return>cast(x dtype)<block_end><def_stmt>ceil x<block_start><return>call('ceil' x)<block_end><def_stmt>cond lhs rhs true_case<block_start><return>IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_COND (lhs rhs true_case)))<block_end><def_stmt>cos x<block_start><return>call('cos' x)<block_end><def_stmt>exp x<block_start><return>call('exp' x)<block_end><def_stmt>floor x<block_start><return>call('floor' x)<block_end><def_stmt>gather x y<block_start><return>call('gather' x y)<block_end><def_stmt>gradients loss variables<block_start>wrts=[x.as_ptr()<for>x variables]<line_sep>raw_grads=ffi.new('plaidml_expr*[]' len(wrts))<line_sep>ffi_call(lib.plaidml_expr_gradient len(wrts) wrts loss.as_ptr() raw_grads )<line_sep><return>[Tensor(expr=x)<for>x raw_grads]<block_end><def_stmt>ident 
x<block_start><return>call('ident' x)<block_end><def_stmt>index x axis<block_start><return>call('index' x axis)<block_end><def_stmt>jacobian loss variables<block_start>wrts=[x.as_ptr()<for>x variables]<line_sep>raw_grads=ffi.new('plaidml_expr*[]' len(wrts))<line_sep>ffi_call(lib.plaidml_expr_jacobian len(wrts) wrts loss.as_ptr() raw_grads )<line_sep><return>[Tensor(expr=x)<for>x raw_grads]<block_end><def_stmt>log x<block_start><return>call('log' x)<block_end><def_stmt>max x y<block_start><return>call('max' x y)<block_end><def_stmt>min x y<block_start><return>call('min' x y)<block_end><def_stmt>pow x y<block_start><return>call('pow' x y)<block_end><def_stmt>prng state shape<block_start><return>call('prng' state *shape)<block_end><def_stmt>reshape x dims<block_start><return>call('reshape' x *dims)<block_end><def_stmt>round x<block_start><return>call('round' x)<block_end><def_stmt>scatter x y z<block_start><return>call('scatter' x y z)<block_end><def_stmt>select cond true_case false_case<block_start><return>call('cond' cond true_case false_case)<block_end><def_stmt>shape x<block_start><return>call('shape' x)<block_end><def_stmt>sin x<block_start><return>call('sin' x)<block_end><def_stmt>sqrt x<block_start><return>call('sqrt' x)<block_end><def_stmt>tan x<block_start><return>call('tan' x)<block_end><def_stmt>tanh x<block_start><return>call('tanh' x)<block_end>
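# Editor's sketch (assumption: building a Program from this requires the native plaidml2
# backend; shown only to illustrate the EDSL defined above). A matrix multiplication is
# written as a SUM contraction: indexing a Tensor yields an IndexedTensor, `*` combines
# the two operands, and `+=` selects the SUM aggregation over the unbound index k.
def matmul(A, B):
    I, J, K = TensorDims(3)
    i, j, k = TensorIndexes(3)
    A.bind_dims(I, K)
    B.bind_dims(K, J)
    C = TensorOutput(I, J)
    C[i, j] += A[i, k] * B[k, j]
    return C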
# -*- coding: utf-8 -*- # Copyright 2012 Viewfinder Inc. All Rights Reserved. """Apple Push Notification service utilities. Original copyright for this code: https://github.com/jayridge/apnstornado TokenToBinary(): converts a hex-encoded token into a binary value CreateMessage(): formats a binary APNs message from parameters ParseResponse(): parses APNs binary response for status & identifier ErrorStatusToString(): converts error status to error message """<line_sep>__author__='<EMAIL> (<NAME>)'<import_stmt>base64<import_stmt>json<import_stmt>struct<import_stmt>time<import_from_stmt>tornado escape<line_sep>_MAX_PAYLOAD_BYTES=256<line_sep>"""Maximum number of bytes in the APNS payload."""<line_sep>_ELLIPSIS_BYTES=escape.utf8(u'…')<line_sep>"""UTF-8 encoding of the Unicode ellipsis character."""<def_stmt>TokenToBinary token<block_start><return>base64.b64decode(token)<block_end><def_stmt>TokenFromBinary bin_token<block_start><return>base64.b64encode(bin_token)<block_end><def_stmt>CreateMessage token alert=<none> badge=<none> sound=<none> identifier=0 expiry=<none> extra=<none> allow_truncate=<true><block_start>token=TokenToBinary(token)<if_stmt>len(token)<ne>32<block_start><raise>ValueError u'Token must be a 32-byte binary string.'<block_end><if_stmt>(alert<is><not><none>)<and>(<not>isinstance(alert (basestring dict)))<block_start><raise>ValueError u'Alert message must be a string or a dictionary.'<block_end><if_stmt>expiry<is><none><block_start>expiry=long(time.time()+365<times>86400)<block_end># Start by determining the length of the UTF-8 encoded JSON with no alert text. This allows us to # determine how much space is left for the message. # 'content-available': 1 is necessary to trigger iOS 7's background download processing. aps={'alert':'' 'content-available':1}<if_stmt>badge<is><not><none><block_start>aps['badge']=badge<block_end><if_stmt>sound<is><not><none><block_start>aps['sound']=sound<block_end>data={'aps':aps}<if_stmt>extra<is><not><none><block_start>data.update(extra)<block_end># Create compact JSON representation with no extra space and no escaping of non-ascii chars (i.e. use # direct UTF-8 representation rather than "\u1234" escaping). This maximizes the amount of space that's # left for the alert text. encoded=escape.utf8(json.dumps(escape.recursive_unicode(data) separators=(',' ':') ensure_ascii=<false>))<line_sep>bytes_left=_MAX_PAYLOAD_BYTES-len(encoded)<if_stmt>allow_truncate<and>isinstance(alert basestring)<block_start>alert=_TruncateAlert(alert bytes_left)<block_end><elif_stmt>alert<and>len(escape.utf8(alert))<g>bytes_left<block_start><raise>ValueError u'max payload(%d) exceeded: %d'%(_MAX_PAYLOAD_BYTES len(escape.utf8(alert)))<block_end># Now re-encode including the alert text. 
aps['alert']=alert<line_sep>encoded=escape.utf8(json.dumps(escape.recursive_unicode(data) separators=(',' ':') ensure_ascii=<false>))<line_sep>length=len(encoded)<assert_stmt>length<le>_MAX_PAYLOAD_BYTES (encoded length)<line_sep><return>struct.pack('!bIIH32sH%(length)ds'%{'length':length} 1 identifier expiry 32 token length encoded)<block_end><def_stmt>ParseResponse bytes<block_start><if_stmt>len(bytes)<ne>6<block_start><raise>ValueError u'response must be a 6-byte binary string.'<block_end>command,status,identifier=struct.unpack_from('!bbI' bytes 0)<if_stmt>command<ne>8<block_start><raise>ValueError u'response command must equal 8.'<block_end><return>status identifier ErrorStatusToString(status)<block_end><def_stmt>ErrorStatusToString status<block_start><if_stmt>status<is>0<block_start><return>'No errors encountered'<block_end><elif_stmt>status<is>1<block_start><return>'Processing error'<block_end><elif_stmt>status<is>2<block_start><return>'Missing device token'<block_end><elif_stmt>status<is>3<block_start><return>'Missing topic'<block_end><elif_stmt>status<is>4<block_start><return>'Missing payload'<block_end><elif_stmt>status<is>5<block_start><return>'Invalid token size'<block_end><elif_stmt>status<is>6<block_start><return>'Invalid topic size'<block_end><elif_stmt>status<is>7<block_start><return>'Invalid payload size'<block_end><elif_stmt>status<is>8<block_start><return>'Invalid token'<block_end><elif_stmt>status<is>255<block_start><return>'None (unknown)'<block_end><else_stmt><block_start><return>''<block_end><block_end><def_stmt>_TruncateAlert alert max_bytes<block_start>"""Converts the alert text to UTF-8 encoded JSON format, which is how the alert will be stored in the APNS payload. If the number of resulting bytes exceeds "max_bytes", then truncates the alert text at a Unicode character boundary, taking care not to split JSON escape sequences. Returns the truncated UTF-8 encoded alert text, including a trailing ellipsis character. """<line_sep>alert_json=escape.utf8(json.dumps(escape.recursive_unicode(alert) ensure_ascii=<false>))<line_sep># Strip quotes added by JSON. alert_json=alert_json[1:-1]<line_sep># Check if alert fits with no truncation. <if_stmt>len(alert_json)<le>max_bytes<block_start><return>escape.utf8(alert)<block_end># Make room for an appended ellipsis. <assert_stmt>max_bytes<ge>len(_ELLIPSIS_BYTES) 'max_bytes must be at least %d'%len(_ELLIPSIS_BYTES)<line_sep>max_bytes<augsub>len(_ELLIPSIS_BYTES)<line_sep># Truncate the JSON UTF8 string at a Unicode character boundary. truncated=alert_json[:max_bytes].decode('utf-8' errors='ignore')<line_sep># If JSON escape sequences were split, then the truncated string may not be valid JSON. Keep # chopping trailing characters until the truncated string is valid JSON. It may take several # tries, such as in the case where a "\u1234" sequence has been split. <while_stmt><true><block_start><try_stmt><block_start>alert=json.loads(u'"%s"'%truncated)<line_sep><break><block_end><except_stmt>Exception<block_start>truncated=truncated[:-1]<block_end><block_end># Return the UTF-8 encoding of the alert with the ellipsis appended to it. <return>escape.utf8(alert)+_ELLIPSIS_BYTES<block_end>
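# Editor's sketch (illustrative; the status and identifier values are made up): the APNs
# error response parsed by ParseResponse is a 6-byte frame packed as '!bbI' with command
# byte 8, so a fake response can be round-tripped like this.
def _example_parse_response():
    fake_response = struct.pack('!bbI', 8, 7, 42)
    status, identifier, message = ParseResponse(fake_response)
    assert (status, identifier, message) == (7, 42, 'Invalid payload size')
    return status, identifier, message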
r"""Training and evaluating quantum kernels =========================================== .. meta:: :property="og:description": Kernels and alignment training with Pennylane. :property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png .. related:: tutorial_kernel_based_training Kernel-based training with scikit-learn tutorial_data_reuploading_classifier Classification with data reuploading *Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. Posted: 24 June 2021* Kernel methods are one of the cornerstones of classical machine learning. Here we are concerned with kernels that can be evaluated on quantum computers, *quantum kernels* for short. In this tutorial you will learn how to evaluate kernels, use them for classification and train them with gradient-based optimization, and all that using the functionality of PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__. The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own `QHack <https://qhack.ai/>`__ hackathon. What are kernel methods? ------------------------ To understand what a kernel method does, let's first revisit one of the simplest methods to assign binary labels to datapoints: linear classification. Imagine we want to discern two different classes of points that lie in different corners of the plane. A linear classifier corresponds to drawing a line and assigning different labels to the regions on opposing sides of the line: .. figure:: ../demonstrations/kernels_module/linear_classification.png :align: center :width: 30% We can mathematically formalize this by assigning the label :math:`y` via .. math:: y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b). The vector :math:`\boldsymbol{w}` points perpendicular to the line and thus determine its slope. The independent term :math:`b` specifies the position on the plane. In this form, linear classification can also be extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a line does not divide the entire space into two regions anymore. Instead one needs a *hyperplane*. It is immediately clear that this method is not very powerful, as datasets that are not separable by a hyperplane can't be classified without error. We can actually sneak around this limitation by performing a neat trick: if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our datapoints into a larger *feature space* and then perform linear classification there, we could actually realise non-linear classification in our original space! .. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png :align: center :width: 65% If we go back to the expression for our prediction and include the embedding, we get .. math:: y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b). We will forgo one tiny step, but it can be shown that for the purpose of optimal classification, we can choose the vector defining the decision boundary as a linear combination of the embedded datapoints :math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting this into the formula yields .. math:: y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right). This rewriting might not seem useful at first, but notice the above formula only contains inner products between vectors in the embedding space: .. 
math:: k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle. We call this function the *kernel*. It provides the advantage that we can often find an explicit formula for the kernel :math:`k` that makes it superfluous to actually perform the (potentially expensive) embedding :math:`\phi`. Consider for example the following embedding and the associated kernel: .. math:: \phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\ k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2. This means by just replacing the regular scalar product in our linear classification with the map :math:`k`, we can actually express much more intricate decision boundaries! This is very important, because in many interesting cases the embedding :math:`\phi` will be much costlier to compute than the kernel :math:`k`. In this demo, we will explore one particular kind of kernel that can be realized on near-term quantum computers, namely *Quantum Embedding Kernels (QEKs)*. These are kernels that arise from embedding data into the space of quantum states. We formalize this by considering a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps a datapoint :math:`\boldsymbol{x}` to the state .. math:: |\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle. The kernel value is then given by the *overlap* of the associated embedded quantum states .. math:: k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2. """<line_sep>############################################################################## # A toy problem # ------------- # In this demo, we will treat a toy problem that showcases the # inner workings of classification with quantum embedding kernels, # training variational embedding kernels and the available functionalities # to do both in PennyLane. We of course need to start with some imports: <import_from_stmt>pennylane numpy<as>np<import_stmt>matplotlib<as>mpl<line_sep>np.random.seed(1359)<line_sep>############################################################################## # And we proceed right away to create a dataset to work with, the # ``DoubleCake`` dataset. Firstly, we define two functions to enable us to # generate the data. # The details of these functions are not essential for understanding the demo, # so don't mind them if they are confusing. 
<def_stmt>_make_circular_data num_sectors<block_start>"""Generate datapoints arranged in an even circle."""<line_sep>center_indices=np.array(range(0 num_sectors))<line_sep>sector_angle=2<times>np.pi/num_sectors<line_sep>angles=(center_indices+0.5)<times>sector_angle<line_sep>x=0.7<times>np.cos(angles)<line_sep>y=0.7<times>np.sin(angles)<line_sep>labels=2<times>np.remainder(np.floor_divide(angles sector_angle) 2)-1<line_sep><return>x y labels<block_end><def_stmt>make_double_cake_data num_sectors<block_start>x1,y1,labels1=_make_circular_data(num_sectors)<line_sep>x2,y2,labels2=_make_circular_data(num_sectors)<line_sep># x and y coordinates of the datapoints x=np.hstack([x1 0.5<times>x2])<line_sep>y=np.hstack([y1 0.5<times>y2])<line_sep># Canonical form of dataset X=np.vstack([x y]).T<line_sep>labels=np.hstack([labels1 -1<times>labels2])<line_sep># Canonical form of labels Y=labels.astype(int)<line_sep><return>X Y<block_end>############################################################################## # Next, we define a function to help plot the ``DoubleCake`` data: <def_stmt>plot_double_cake_data X Y ax num_sectors=<none><block_start>"""Plot double cake data and corresponding sectors."""<line_sep>x,y=X.T<line_sep>cmap=mpl.colors.ListedColormap(["#FF0000" "#0000FF"])<line_sep>ax.scatter(x y c=Y cmap=cmap s=25 marker="s")<if_stmt>num_sectors<is><not><none><block_start>sector_angle=360/num_sectors<for_stmt>i range(num_sectors)<block_start>color=["#FF0000" "#0000FF"][(i%2)]<line_sep>other_color=["#FF0000" "#0000FF"][((i+1)%2)]<line_sep>ax.add_artist(mpl.patches.Wedge((0 0) 1 i<times>sector_angle (i+1)<times>sector_angle lw=0 color=color alpha=0.1 width=0.5 ))<line_sep>ax.add_artist(mpl.patches.Wedge((0 0) 0.5 i<times>sector_angle (i+1)<times>sector_angle lw=0 color=other_color alpha=0.1 ))<line_sep>ax.set_xlim(-1 1)<block_end><block_end>ax.set_ylim(-1 1)<line_sep>ax.set_aspect("equal")<line_sep>ax.axis("off")<line_sep><return>ax<block_end>############################################################################## # Let's now have a look at our dataset. In our example, we will work with # 3 sectors: <import_stmt>matplotlib.pyplot<as>plt<line_sep>num_sectors=3<line_sep>X,Y=make_double_cake_data(num_sectors)<line_sep>ax=plot_double_cake_data(X Y plt.gca() num_sectors=num_sectors)<line_sep>############################################################################## # Defining a Quantum Embedding Kernel # ----------------------------------- # PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__ # allows for a particularly simple # implementation of Quantum Embedding Kernels. The first ingredient we # need for this is an *ansatz*, which we will construct by repeating a # layer as building block. Let's start by defining this layer: <import_stmt>pennylane<as>qml<def_stmt>layer x params wires i0=0 inc=1<block_start>"""Building block of the embedding ansatz"""<line_sep>i=i0<for_stmt>j,wire enumerate(wires)<block_start>qml.Hadamard(wires=[wire])<line_sep>qml.RZ(x[i%len(x)] wires=[wire])<line_sep>i<augadd>inc<line_sep>qml.RY(params[0 j] wires=[wire])<block_end>qml.broadcast(unitary=qml.CRZ pattern="ring" wires=wires parameters=params[1])<block_end>############################################################################## # To construct the ansatz, this layer is repeated multiple times, reusing # the datapoint ``x`` but feeding different variational # parameters ``params`` into each of them. 
# Together, the datapoint and the variational parameters fully determine # the embedding ansatz :math:`U(\boldsymbol{x})`. # In order to construct the full kernel circuit, we also require its adjoint # :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``. <def_stmt>ansatz x params wires<block_start>"""The embedding ansatz"""<for_stmt>j,layer_params enumerate(params)<block_start>layer(x layer_params wires i0=j<times>len(wires))<block_end><block_end>adjoint_ansatz=qml.adjoint(ansatz)<def_stmt>random_params num_wires num_layers<block_start>"""Generate random variational parameters in the shape for the ansatz."""<line_sep><return>np.random.uniform(0 2<times>np.pi (num_layers 2 num_wires) requires_grad=<true>)<block_end>############################################################################## # Together with the ansatz we only need a device to run the quantum circuit on. # For the purpose of this tutorial we will use PennyLane's ``default.qubit`` # device with 5 wires in analytic mode. dev=qml.device("default.qubit" wires=5 shots=<none>)<line_sep>wires=dev.wires.tolist()<line_sep>############################################################################## # Let us now define the quantum circuit that realizes the kernel. We will compute # the overlap of the quantum states by first applying the embedding of the first # datapoint and then the adjoint of the embedding of the second datapoint. We # finally extract the probabilities of observing each basis state. @qml.qnode(dev)<def_stmt>kernel_circuit x1 x2 params<block_start>ansatz(x1 params wires=wires)<line_sep>adjoint_ansatz(x2 params wires=wires)<line_sep><return>qml.probs(wires=wires)<block_end>############################################################################## # The kernel function itself is now obtained by looking at the probability # of observing the all-zero state at the end of the kernel circuit -- because # of the ordering in ``qml.probs``, this is the first entry: <def_stmt>kernel x1 x2 params<block_start><return>kernel_circuit(x1 x2 params)[0]<block_end>############################################################################## # # .. note:: # An alternative way to set up the kernel circuit in PennyLane would be # to use the observable type # `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__. # This is shown in the # `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__, where you will also find more # background information on the kernel circuit structure itself. # # Before focusing on the kernel values we have to provide values for the # variational parameters. At this point we fix the number of layers in the # ansatz circuit to :math:`6`. init_params=random_params(num_wires=5 num_layers=6)<line_sep>############################################################################## # Now we can have a look at the kernel value between the first and the # second datapoint: kernel_value=kernel(X[0] X[1] init_params)<line_sep>print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")<line_sep>############################################################################## # The mutual kernel values between all elements of the dataset form the # *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix`` # method, which makes use of symmetry of the kernel, # :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`. 
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not # calculate the entries between the same datapoints, as we know them to be 1 # for our noiseless simulation. Overall this means that we compute # :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints. # To include the variational parameters, we construct a ``lambda`` function that # fixes them to the values we sampled above. init_kernel=<lambda>x1 x2:kernel(x1 x2 init_params)<line_sep>K_init=qml.kernels.square_kernel_matrix(X init_kernel assume_normalized_kernel=<true>)<with_stmt>np.printoptions(precision=3 suppress=<true>)<block_start>print(K_init)<block_end>############################################################################## # Using the Quantum Embedding Kernel for predictions # -------------------------------------------------- # The quantum kernel alone cannot be used to make predictions on a # dataset, because it is essentially just a tool to measure the similarity # between two datapoints. To perform an actual prediction we will make use # of scikit-learn's Support Vector Classifier (SVC). <import_from_stmt>sklearn.svm SVC<line_sep>############################################################################## # To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function # that takes two sets of datapoints and returns the associated kernel matrix. # We can make use of the function ``qml.kernels.kernel_matrix`` that provides # this functionality. It expects the kernel to not have additional parameters # besides the datapoints, which is why we again supply the variational # parameters via the ``lambda`` function from above. # Once we have this, we can let scikit-learn adjust the SVM from our Quantum # Embedding Kernel. # # .. note:: # This step does *not* modify the variational parameters in our circuit # ansatz. What it does is solve a different optimization task for the # :math:`\alpha` and :math:`b` vectors we introduced in the beginning. svm=SVC(kernel=<lambda>X1 X2:qml.kernels.kernel_matrix(X1 X2 init_kernel)).fit(X Y)<line_sep>############################################################################## # To see how well our classifier performs, we will measure what percentage # of the dataset it classifies correctly. <def_stmt>accuracy classifier X Y_target<block_start><return>1-np.count_nonzero(classifier.predict(X)-Y_target)/len(Y_target)<block_end>accuracy_init=accuracy(svm X Y)<line_sep>print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")<line_sep>############################################################################## # We are also interested in seeing what the decision boundaries in this # classification look like. This could help us spot overfitting issues # visually in more complex data sets. To this end we will introduce a # second helper method. 
<def_stmt>plot_decision_boundaries classifier ax N_gridpoints=14<block_start>_xx,_yy=np.meshgrid(np.linspace(-1 1 N_gridpoints) np.linspace(-1 1 N_gridpoints))<line_sep>_zz=np.zeros_like(_xx)<for_stmt>idx np.ndindex(*_xx.shape)<block_start>_zz[idx]=classifier.predict(np.array([_xx[idx] _yy[idx]])[np.newaxis :])<block_end>plot_data={"_xx":_xx "_yy":_yy "_zz":_zz}<line_sep>ax.contourf(_xx _yy _zz cmap=mpl.colors.ListedColormap(["#FF0000" "#0000FF"]) alpha=0.2 levels=[-1 0 1] )<line_sep>plot_double_cake_data(X Y ax)<line_sep><return>plot_data<block_end>############################################################################## # With that done, let's have a look at the decision boundaries for our # initial classifier: init_plot_data=plot_decision_boundaries(svm plt.gca())<line_sep>############################################################################## # We see the outer points in the dataset can be correctly classified, but # we still struggle with the inner circle. But remember we have a circuit # with many free parameters! It is reasonable to believe we can give # values to those variational parameters which improve the overall accuracy # of our SVC. # # Training the Quantum Embedding Kernel # ------------------------------------- # # To be able to train the Quantum Embedding Kernel we need some measure of # how well it fits the dataset in question. Performing an exhaustive # search in parameter space is not a good solution because it is very # resource intensive, and since the accuracy is a discrete quantity we # would not be able to detect small improvements. # # We can, however, resort to a more specialized measure, the # *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the # similarity predicted by the quantum kernel to the actual labels of the # training data. It is based on *kernel alignment*, a similiarity measure # between two kernels with given kernel matrices :math:`K_1` and # :math:`K_2`: # # .. math:: # \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}. # # .. note:: # Seen from a more theoretical side, :math:`\operatorname{KA}` # is nothing else than the cosine of the angle between the kernel # matrices :math:`K_1` and :math:`K_2` if we see them as vectors # in the space of matrices with the Hilbert-Schmidt (or # Frobenius) scalar product # :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This # reinforces the geometric picture of how this measure relates # to objects, namely two kernels, being aligned in a vector space. # # The training data enters the picture by defining an *ideal* kernel # function that expresses the original labelling in the vector # :math:`\boldsymbol{y}` by assigning to two datapoints the product # of the corresponding labels: # # .. math:: # k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j. # # The assigned kernel is thus :math:`+1` if both datapoints lie in the # same class and :math:`-1` otherwise and its kernel matrix is simply # given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`. # The kernel-target alignment is then defined as the kernel alignment # of the kernel matrix :math:`K` generated by the # quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`: # # .. 
math:: # \operatorname{KTA}_{\boldsymbol{y}}(K) # = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}} # = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N} # # where :math:`N` is the number of elements in :math:`\boldsymbol{y}`, # that is the number of datapoints in the dataset. # # In summary, the kernel-target alignment effectively captures how well # the kernel you chose reproduces the actual similarities of the data. It # does have one drawback, however: having a high kernel-target alignment # is only a necessary but not a sufficient condition for a good # performance of the kernel [#Alignment]_. This means having good alignment is # guaranteed for good performance, but optimal alignment will not always # bring optimal training accuracy with it. # # Let's now come back to the actual implementation. PennyLane's # ``kernels`` module allows you to easily evaluate the kernel # target alignment: kta_init=qml.kernels.target_alignment(X Y init_kernel assume_normalized_kernel=<true>)<line_sep>print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")<line_sep>############################################################################## # Now let's code up an optimization loop and improve the kernel-target alignment! # # We will make use of regular gradient descent optimization. To speed up # the optimization we will not use the entire training set to compute # :math:`\operatorname{KTA}` but rather # sample smaller subsets of the data at each step, we choose :math:`4` # datapoints at random. Remember that PennyLane's built-in optimizer works # to *minimize* the cost function that is given to it, which is why we # have to multiply the kernel target alignment by :math:`-1` to actually # *maximize* it in the process. # # .. note:: # Currently, the function ``qml.kernels.target_alignment`` is not # differentiable yet, making it unfit for gradient descent optimization. # We therefore first define a differentiable version of this function. <def_stmt>target_alignment X Y kernel assume_normalized_kernel=<false> rescale_class_labels=<true> <block_start>"""Kernel-target alignment between kernel and labels."""<line_sep>K=qml.kernels.square_kernel_matrix(X kernel assume_normalized_kernel=assume_normalized_kernel )<if_stmt>rescale_class_labels<block_start>nplus=np.count_nonzero(np.array(Y)<eq>1)<line_sep>nminus=len(Y)-nplus<line_sep>_Y=np.array([y/nplus<if>y<eq>1<else>y/nminus<for>y Y])<block_end><else_stmt><block_start>_Y=np.array(Y)<block_end>T=np.outer(_Y _Y)<line_sep>inner_product=np.sum(K<times>T)<line_sep>norm=np.sqrt(np.sum(K<times>K)<times>np.sum(T<times>T))<line_sep>inner_product=inner_product/norm<line_sep><return>inner_product<block_end>params=init_params<line_sep>opt=qml.GradientDescentOptimizer(0.2)<for_stmt>i range(500)# Choose subset of datapoints to compute the KTA on. <block_start>subset=np.random.choice(list(range(len(X))) 4)<line_sep># Define the cost function for optimization cost=<lambda>_params:-target_alignment(X[subset] Y[subset] <lambda>x1 x2:kernel(x1 x2 _params) assume_normalized_kernel=<true> )<line_sep># Optimization step params=opt.step(cost params)<line_sep># Report the alignment on the full dataset every 50 steps. 
<if_stmt>(i+1)%50<eq>0<block_start>current_alignment=target_alignment(X Y <lambda>x1 x2:kernel(x1 x2 params) assume_normalized_kernel=<true> )<line_sep>print(f"Step {i+1} - Alignment = {current_alignment:.3f}")<block_end><block_end>############################################################################## # We want to assess the impact of training the parameters of the quantum # kernel. Thus, let's build a second support vector classifier with the # trained kernel: # First create a kernel with the trained parameter baked into it. trained_kernel=<lambda>x1 x2:kernel(x1 x2 params)<line_sep># Second create a kernel matrix function using the trained kernel. trained_kernel_matrix=<lambda>X1 X2:qml.kernels.kernel_matrix(X1 X2 trained_kernel)<line_sep># Note that SVC expects the kernel argument to be a kernel matrix function. svm_trained=SVC(kernel=trained_kernel_matrix).fit(X Y)<line_sep>############################################################################## # We expect to see an accuracy improvement vs. the SVM with random # parameters: accuracy_trained=accuracy(svm_trained X Y)<line_sep>print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")<line_sep>############################################################################## # We have now achieved perfect classification! 🎆 # # Following on the results that SVM's have proven good generalisation # behavior, it will be interesting to inspect the decision boundaries of # our classifier: trained_plot_data=plot_decision_boundaries(svm_trained plt.gca())<line_sep>############################################################################## # Indeed, we see that now not only every data instance falls within the # correct class, but also that there are no strong artifacts that would make us # distrust the model. In this sense, our approach benefits from both: on # one hand it can adjust itself to the dataset, and on the other hand # is not expected to suffer from bad generalisation. # # References # ---------- # # .. [#Training_QEKs] # # <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, and <NAME>. # "Training Quantum Embedding Kernels on Near-Term Quantum Computers." # `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021. # # .. [#Alignment] # # <NAME>, <NAME>, and <NAME>. # "An overview of kernel alignment and its applications." # `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
"""TurboGears project related information"""<line_sep>version="2.4.3"<line_sep>description="Next generation TurboGears"<line_sep>long_description=""" TurboGears brings together a best of breed python tools to create a flexible, full featured, and easy to use web framework. TurboGears 2 provides an integrated and well tested set of tools for everything you need to build dynamic, database driven applications. It provides a full range of tools for front end javascript develeopment, back database development and everything in between: * dynamic javascript powered widgets (ToscaWidgets2) * automatic JSON generation from your controllers * powerful, designer friendly XHTML based templating * object or route based URL dispatching * powerful Object Relational Mappers (SQLAlchemy) The latest development version is available in the `TurboGears Git repositories`_. .. _TurboGears Git repositories: https://github.com/TurboGears """<line_sep>url="http://www.turbogears.org/"<line_sep>author="<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and the TurboGears community"<line_sep>email="<EMAIL>"<line_sep>copyright="""Copyright 2005-2020 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and contributors"""<line_sep>license="MIT"<line_sep>
<import_stmt>KratosMultiphysics<import_stmt>KratosMultiphysics.KratosUnittest<as>UnitTest<import_stmt>KratosMultiphysics.ChimeraApplication<import_from_stmt>KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis FluidChimeraAnalysis<class_stmt>ChimeraAnalysisBaseTest(UnitTest.TestCase)<block_start><def_stmt>setUp self# Set to true to get post-process files for the test <block_start>self.print_output=<false><block_end><def_stmt>_run_test self settings_file_name<block_start>model=KratosMultiphysics.Model()<with_stmt>open(settings_file_name 'r')<as>settings_file<block_start>settings=KratosMultiphysics.Parameters(settings_file.read())<block_end># to check the results: add output settings block if needed <if_stmt>self.print_output<block_start>settings.AddValue("output_processes" KratosMultiphysics.Parameters(r'''{ "vtk_output" : [{ "python_module" : "vtk_output_process", "kratos_module" : "KratosMultiphysics", "process_name" : "VtkOutputProcess", "help" : "This process writes postprocessing files for Paraview", "Parameters" : { "model_part_name" : "FluidModelPart.Parts_background_surface", "output_control_type" : "step", "output_frequency" : 1, "file_format" : "ascii", "output_precision" : 3, "output_sub_model_parts" : false, "write_deformed_configuration" : true, "folder_name" : "test_vtk_output", "save_output_files_in_folder" : true, "nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"], "nodal_data_value_variables" : [], "element_flags" : ["ACTIVE"], "nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"], "element_data_value_variables" : [], "condition_data_value_variables" : [] } },{ "python_module" : "vtk_output_process", "kratos_module" : "KratosMultiphysics", "process_name" : "VtkOutputProcess", "help" : "This process writes postprocessing files for Paraview", "Parameters" : { "model_part_name" : "FluidModelPart.Parts_patch_surface", "output_control_type" : "step", "output_frequency" : 1, "file_format" : "ascii", "output_precision" : 3, "output_sub_model_parts" : false, "write_deformed_configuration" : true, "folder_name" : "test_vtk_output", "save_output_files_in_folder" : true, "nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"], "nodal_data_value_variables" : [], "element_flags" : ["ACTIVE"], "nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"], "element_data_value_variables" : [], "condition_data_value_variables" : [] } }] }'''))<block_end>analysis=FluidChimeraAnalysis(model settings)<line_sep>analysis.Run()<block_end><block_end>
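# A hedged sketch (not part of the original file) of how a concrete test case
# might build on the base class above. The settings file name
# "test_chimera_monolithic/ProjectParameters.json" is an assumed placeholder;
# a real test would point at a parameters file that ships with the test suite.
class MonolithicChimeraFlowTest(ChimeraAnalysisBaseTest):
    def test_monolithic_flow(self):
        # Flip this to True to get the VTK output configured in _run_test.
        self.print_output = False
        self._run_test("test_chimera_monolithic/ProjectParameters.json")


if __name__ == '__main__':
    UnitTest.main()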
# Import the English language class (English) and create an nlp object <import_from_stmt>____ ____<line_sep>nlp=____<line_sep># Process the text doc=____("I like tree kangaroos and narwhals.")<line_sep># Select the first token first_token=doc[____]<line_sep># Print the first token's text print(first_token.____)<line_sep>
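# One possible completed version of the exercise above, kept as a hedged
# reference solution; it assumes the standard spaCy import path for the
# English language class.
from spacy.lang.en import English

nlp = English()

# Process the text
doc = nlp("I like tree kangaroos and narwhals.")

# Select the first token
first_token = doc[0]

# Print the first token's text
print(first_token.text)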
# Copyright 2021 Sony Corporation. # Copyright 2021 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <def_stmt>get_args batch_size=8 image_size=256 max_iter=100000<block_start>""" Get command line arguments. Arguments set the default values of command line arguments. """<import_stmt>argparse<import_stmt>os<line_sep>description="Example of Lightweight GAN."<line_sep>parser=argparse.ArgumentParser(description)<line_sep>parser.add_argument("-d" "--device-id" type=str default="0" help="Device id.")<line_sep>parser.add_argument("-c" "--context" type=str default="cudnn" help="Context.")<line_sep>parser.add_argument("--type-config" "-t" type=str default='float' help='Type of computation. e.g. "float", "half".')<line_sep>parser.add_argument("--img-path" type=str default="~/AnimalFace-dog" help="Image path.")<line_sep>parser.add_argument("--image-size" type=int default=image_size help="Image size.")<line_sep>parser.add_argument("--batch-size" "-b" type=int default=batch_size help="Batch size.")<line_sep>parser.add_argument("--max-iter" "-i" type=int default=max_iter help="Max iterations.")<line_sep>parser.add_argument("--save-interval" type=int default=50000 help="Interval for saving models.")<line_sep>parser.add_argument("--test-interval" type=int default=5000 help="Interval for testing models.")<line_sep>parser.add_argument("--latent" type=int default=256 help="Number of latent variables.")<line_sep>parser.add_argument("--monitor-path" type=str default="./result/tmp" help="Monitor path.")<line_sep>parser.add_argument("--model-load-path" type=str default="." help="Path to load parameters from")<line_sep>parser.add_argument("--train-samples" type=int default=-1 help="Number of data to be used. When -1 is set all data is used.")<line_sep>parser.add_argument("--lr" type=float default=2e-4 help="Learning rate")<line_sep>parser.add_argument("--aug-list" nargs="+" default=["lrflip" "translation" "color"])<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>save_args args mode="train"<block_start><import_from_stmt>nnabla logger<import_stmt>os<if_stmt><not>os.path.exists(args.monitor_path)<block_start>os.makedirs(args.monitor_path)<block_end>path="{}/Arguments-{}.txt".format(args.monitor_path mode)<line_sep>logger.info("Arguments are saved to {}.".format(path))<with_stmt>open(path "w")<as>fp<block_start><for_stmt>k,v sorted(vars(args).items())<block_start>logger.info("{}={}".format(k v))<line_sep>fp.write("{}={}\n".format(k v))<block_end><block_end><block_end>
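# A hedged sketch (not part of the original script) of how the two helpers
# above might be wired together in a training entry point; the overridden
# batch size and image size are arbitrary sample values.
if __name__ == "__main__":
    args = get_args(batch_size=16, image_size=128)
    save_args(args, mode="train")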
<import_stmt>functools<import_from_stmt>collections OrderedDict<import_from_stmt>typing Any Callable Dict List Mapping Sequence Tuple Union cast<import_stmt>torch<import_from_stmt>ignite.engine Engine EventEnum Events<import_from_stmt>ignite.handlers.timing Timer<class_stmt>BasicTimeProfiler<block_start>""" BasicTimeProfiler can be used to profile the handlers, events, data loading and data processing times. Examples: .. code-block:: python from ignite.handlers import BasicTimeProfiler trainer = Engine(train_updater) # Create an object of the profiler and attach an engine to it profiler = BasicTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. versionadded:: 0.4.6 """<line_sep>events_to_ignore=[Events.EXCEPTION_RAISED Events.TERMINATE Events.TERMINATE_SINGLE_EPOCH Events.DATALOADER_STOP_ITERATION ]<def_stmt>__init__ self<arrow><none><block_start>self._dataflow_timer=Timer()<line_sep>self._processing_timer=Timer()<line_sep>self._event_handlers_timer=Timer()<line_sep>self.dataflow_times=torch.zeros(1)<line_sep>self.processing_times=torch.zeros(1)<line_sep>self.event_handlers_times={}# type: Dict[EventEnum, torch.Tensor] self._events=[Events.EPOCH_STARTED Events.EPOCH_COMPLETED Events.ITERATION_STARTED Events.ITERATION_COMPLETED Events.GET_BATCH_STARTED Events.GET_BATCH_COMPLETED Events.COMPLETED ]<line_sep>self._fmethods=[self._as_first_epoch_started self._as_first_epoch_completed self._as_first_iter_started self._as_first_iter_completed self._as_first_get_batch_started self._as_first_get_batch_completed self._as_first_completed ]<line_sep>self._lmethods=[self._as_last_epoch_started self._as_last_epoch_completed self._as_last_iter_started self._as_last_iter_completed self._as_last_get_batch_started self._as_last_get_batch_completed self._as_last_completed ]<block_end><def_stmt>_reset self num_epochs:int total_num_iters:int<arrow><none><block_start>self.dataflow_times=torch.zeros(total_num_iters)<line_sep>self.processing_times=torch.zeros(total_num_iters)<line_sep>self.event_handlers_times={Events.STARTED:torch.zeros(1) Events.COMPLETED:torch.zeros(1) Events.EPOCH_STARTED:torch.zeros(num_epochs) Events.EPOCH_COMPLETED:torch.zeros(num_epochs) Events.ITERATION_STARTED:torch.zeros(total_num_iters) Events.ITERATION_COMPLETED:torch.zeros(total_num_iters) Events.GET_BATCH_COMPLETED:torch.zeros(total_num_iters) Events.GET_BATCH_STARTED:torch.zeros(total_num_iters) }<block_end><def_stmt>_as_first_started self engine:Engine<arrow><none><block_start><if_stmt>hasattr(engine.state.dataloader "__len__")<block_start>num_iters_per_epoch=len(engine.state.dataloader)# type: ignore[arg-type] <block_end><else_stmt><block_start><if_stmt>engine.state.epoch_length<is><none><block_start><raise>ValueError("As epoch_length is not set, we can not use BasicTimeProfiler in this case."<concat>"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this.")<block_end>num_iters_per_epoch=engine.state.epoch_length<block_end>self.max_epochs=cast(int engine.state.max_epochs)<line_sep>self.total_num_iters=self.max_epochs<times>num_iters_per_epoch<line_sep>self._reset(self.max_epochs self.total_num_iters)<line_sep>self.event_handlers_names={e:[h.__qualname__<if>hasattr(h "__qualname__")<else>h.__class__.__name__<for>(h _ _) engine._event_handlers[e]<if>"BasicTimeProfiler."<not><in>repr(h)# avoid adding internal 
handlers into output ]<for>e Events<if>e<not><in>self.events_to_ignore}<line_sep># Setup all other handlers: engine._event_handlers[Events.STARTED].append((self._as_last_started (engine ) {}))<for_stmt>e,m zip(self._events self._fmethods)<block_start>engine._event_handlers[e].insert(0 (m (engine ) {}))<block_end><for_stmt>e,m zip(self._events self._lmethods)<block_start>engine._event_handlers[e].append((m (engine ) {}))<block_end># Let's go self._event_handlers_timer.reset()<block_end><def_stmt>_as_last_started self engine:Engine<arrow><none><block_start>self.event_handlers_times[Events.STARTED][0]=self._event_handlers_timer.value()<block_end><def_stmt>_as_first_epoch_started self engine:Engine<arrow><none><block_start>self._event_handlers_timer.reset()<block_end><def_stmt>_as_last_epoch_started self engine:Engine<arrow><none><block_start>t=self._event_handlers_timer.value()<line_sep>e=engine.state.epoch-1<line_sep>self.event_handlers_times[Events.EPOCH_STARTED][e]=t<block_end><def_stmt>_as_first_get_batch_started self engine:Engine<arrow><none><block_start>self._event_handlers_timer.reset()<line_sep>self._dataflow_timer.reset()<block_end><def_stmt>_as_last_get_batch_started self engine:Engine<arrow><none><block_start>t=self._event_handlers_timer.value()<line_sep>i=engine.state.iteration-1<line_sep>self.event_handlers_times[Events.GET_BATCH_STARTED][i]=t<block_end><def_stmt>_as_first_get_batch_completed self engine:Engine<arrow><none><block_start>self._event_handlers_timer.reset()<block_end><def_stmt>_as_last_get_batch_completed self engine:Engine<arrow><none><block_start>t=self._event_handlers_timer.value()<line_sep>i=engine.state.iteration-1<line_sep>self.event_handlers_times[Events.GET_BATCH_COMPLETED][i]=t<line_sep>d=self._dataflow_timer.value()<line_sep>self.dataflow_times[i]=d<line_sep>self._dataflow_timer.reset()<block_end><def_stmt>_as_first_iter_started self engine:Engine<arrow><none><block_start>self._event_handlers_timer.reset()<block_end><def_stmt>_as_last_iter_started self engine:Engine<arrow><none><block_start>t=self._event_handlers_timer.value()<line_sep>i=engine.state.iteration-1<line_sep>self.event_handlers_times[Events.ITERATION_STARTED][i]=t<line_sep>self._processing_timer.reset()<block_end><def_stmt>_as_first_iter_completed self engine:Engine<arrow><none><block_start>t=self._processing_timer.value()<line_sep>i=engine.state.iteration-1<line_sep>self.processing_times[i]=t<line_sep>self._event_handlers_timer.reset()<block_end><def_stmt>_as_last_iter_completed self engine:Engine<arrow><none><block_start>t=self._event_handlers_timer.value()<line_sep>i=engine.state.iteration-1<line_sep>self.event_handlers_times[Events.ITERATION_COMPLETED][i]=t<block_end><def_stmt>_as_first_epoch_completed self engine:Engine<arrow><none><block_start>self._event_handlers_timer.reset()<block_end><def_stmt>_as_last_epoch_completed self engine:Engine<arrow><none><block_start>t=self._event_handlers_timer.value()<line_sep>e=engine.state.epoch-1<line_sep>self.event_handlers_times[Events.EPOCH_COMPLETED][e]=t<block_end><def_stmt>_as_first_completed self engine:Engine<arrow><none><block_start>self._event_handlers_timer.reset()<block_end><def_stmt>_as_last_completed self engine:Engine<arrow><none><block_start>self.event_handlers_times[Events.COMPLETED][0]=self._event_handlers_timer.value()<line_sep># Remove added handlers: engine.remove_event_handler(self._as_last_started Events.STARTED)<for_stmt>e,m zip(self._events self._fmethods)<block_start>engine.remove_event_handler(m e)<block_end><for_stmt>e,m 
zip(self._events self._lmethods)<block_start>engine.remove_event_handler(m e)<block_end><block_end><def_stmt>attach self engine:Engine<arrow><none><block_start>"""Attach BasicTimeProfiler to the given engine. Args: engine: the instance of Engine to attach """<if_stmt><not>isinstance(engine Engine)<block_start><raise>TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")<block_end><if_stmt><not>engine.has_event_handler(self._as_first_started)<block_start>engine._event_handlers[Events.STARTED].insert(0 (self._as_first_started (engine ) {}))<block_end><block_end>@staticmethod<def_stmt>_compute_basic_stats data:torch.Tensor<arrow>Dict[str Union[str float Tuple[Union[float] Union[float]]]]# compute on non-zero data: <block_start>data=data[data<g>0]<line_sep>out=[("total" torch.sum(data).item()<if>len(data)<g>0<else>"not yet triggered")]<line_sep># type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]] <if_stmt>len(data)<g>1<block_start>out<augadd>[("min/index" (torch.min(data).item() torch.argmin(data).item())) ("max/index" (torch.max(data).item() torch.argmax(data).item())) ("mean" torch.mean(data).item()) ("std" torch.std(data).item()) ]<block_end><return>OrderedDict(out)<block_end><def_stmt>get_results self<arrow>Dict[str Dict[str Any]]<block_start>""" Method to fetch the aggregated profiler results after the engine is run .. code-block:: python results = profiler.get_results() """<line_sep>total_eh_time=sum([(self.event_handlers_times[e]).sum()<for>e Events<if>e<not><in>self.events_to_ignore])<line_sep># type: Union[int, torch.Tensor] event_handlers_stats=dict([(str(e.name).replace("." "_") self._compute_basic_stats(self.event_handlers_times[e]))<for>e Events<if>e<not><in>self.events_to_ignore]+[("total_time" total_eh_time)]# type: ignore[list-item] )<line_sep><return>OrderedDict([("processing_stats" self._compute_basic_stats(self.processing_times)) ("dataflow_stats" self._compute_basic_stats(self.dataflow_times)) ("event_handlers_stats" event_handlers_stats) ("event_handlers_names" {str(e.name).replace("." "_")+"_names":v<for>e,v self.event_handlers_names.items()} ) ])<block_end><def_stmt>write_results self output_path:str<arrow><none><block_start>""" Method to store the unaggregated profiling results to a csv file Args: output_path: file output path containing a filename .. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. code-block:: text ----------------------------------------------------------------- epoch iteration processing_stats dataflow_stats Event_STARTED ... 
1.0 1.0 0.00003 0.252387 0.125676 1.0 2.0 0.00029 0.252342 0.125123 """<try_stmt><block_start><import_stmt>pandas<as>pd<block_end><except_stmt>ImportError<block_start><raise>RuntimeError("Need pandas to write results as files")<block_end>iters_per_epoch=self.total_num_iters<floordiv>self.max_epochs<line_sep>epochs=torch.arange(self.max_epochs dtype=torch.float32).repeat_interleave(iters_per_epoch)+1<line_sep>iterations=torch.arange(self.total_num_iters dtype=torch.float32)+1<line_sep>processing_stats=self.processing_times<line_sep>dataflow_stats=self.dataflow_times<line_sep>event_started=self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)<line_sep>event_completed=self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)<line_sep>event_epoch_started=self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)<line_sep>event_epoch_completed=self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)<line_sep>event_iter_started=self.event_handlers_times[Events.ITERATION_STARTED]<line_sep>event_iter_completed=self.event_handlers_times[Events.ITERATION_COMPLETED]<line_sep>event_batch_started=self.event_handlers_times[Events.GET_BATCH_STARTED]<line_sep>event_batch_completed=self.event_handlers_times[Events.GET_BATCH_COMPLETED]<line_sep>results_dump=torch.stack([epochs iterations processing_stats dataflow_stats event_started event_completed event_epoch_started event_epoch_completed event_iter_started event_iter_completed event_batch_started event_batch_completed ] dim=1 ).numpy()<line_sep>results_df=pd.DataFrame(data=results_dump columns=["epoch" "iteration" "processing_stats" "dataflow_stats" "Event_STARTED" "Event_COMPLETED" "Event_EPOCH_STARTED" "Event_EPOCH_COMPLETED" "Event_ITERATION_STARTED" "Event_ITERATION_COMPLETED" "Event_GET_BATCH_STARTED" "Event_GET_BATCH_COMPLETED" ] )<line_sep>results_df.to_csv(output_path index=<false>)<block_end>@staticmethod<def_stmt>print_results results:Dict<arrow>str<block_start>""" Method to print the aggregated results from the profiler Args: results: the aggregated results from the profiler .. code-block:: python profiler.print_results(results) Examples: .. 
code-block:: text ---------------------------------------------------- | Time profiling stats (in seconds): | ---------------------------------------------------- total | min/index | max/index | mean | std Processing function: 157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258 Dataflow: 6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693 Event handlers: 2.82721 - Events.STARTED: [] 0.00000 - Events.EPOCH_STARTED: [] 0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000 - Events.ITERATION_STARTED: ['PiecewiseLinear'] 0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001 - Events.ITERATION_COMPLETED: ['TerminateOnNan'] 0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003 - Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ] 2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790 - Events.COMPLETED: [] not yet triggered """<def_stmt>to_str v:Union[str tuple]<arrow>str<block_start><if_stmt>isinstance(v str)<block_start><return>v<block_end><elif_stmt>isinstance(v tuple)<block_start><return>f"{v[0]:.5f}/{v[1]}"<block_end><return>f"{v:.5f}"<block_end><def_stmt>odict_to_str d:Mapping<arrow>str<block_start>out=" | ".join([to_str(v)<for>v d.values()])<line_sep><return>out<block_end>others={k:odict_to_str(v)<if>isinstance(v OrderedDict)<else>v<for>k,v results["event_handlers_stats"].items()}<line_sep>others.update(results["event_handlers_names"])<line_sep>output_message=""" ---------------------------------------------------- | Time profiling stats (in seconds): | ---------------------------------------------------- total | min/index | max/index | mean | std Processing function: {processing_stats} Dataflow: {dataflow_stats} Event handlers: {total_time:.5f} - Events.STARTED: {STARTED_names} {STARTED} - Events.EPOCH_STARTED: {EPOCH_STARTED_names} {EPOCH_STARTED} - Events.ITERATION_STARTED: {ITERATION_STARTED_names} {ITERATION_STARTED} - Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names} {ITERATION_COMPLETED} - Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names} {EPOCH_COMPLETED} - Events.COMPLETED: {COMPLETED_names} {COMPLETED} """.format(processing_stats=odict_to_str(results["processing_stats"]) dataflow_stats=odict_to_str(results["dataflow_stats"]) **others )<line_sep>print(output_message)<line_sep><return>output_message<block_end><block_end><class_stmt>HandlersTimeProfiler<block_start>""" HandlersTimeProfiler can be used to profile the handlers, data loading and data processing times. Custom events are also profiled by this profiler Examples: .. code-block:: python from ignite.handlers import HandlersTimeProfiler trainer = Engine(train_updater) # Create an object of the profiler and attach an engine to it profiler = HandlersTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. 
versionadded:: 0.4.6 """<line_sep>EVENT_FILTER_THESHOLD_TIME=0.0001<def_stmt>__init__ self<arrow><none><block_start>self._dataflow_timer=Timer()<line_sep>self._processing_timer=Timer()<line_sep>self._event_handlers_timer=Timer()<line_sep>self.dataflow_times=[]# type: List[float] self.processing_times=[]# type: List[float] self.event_handlers_times={}<block_end># type: Dict[EventEnum, Dict[str, List[float]]] @staticmethod<def_stmt>_get_callable_name handler:Callable<arrow>str# get name of the callable handler <block_start><return>getattr(handler "__qualname__" handler.__class__.__name__)<block_end><def_stmt>_create_wrapped_handler self handler:Callable event:EventEnum<arrow>Callable<block_start>@functools.wraps(handler)<def_stmt>_timeit_handler *args:Any **kwargs:Any<arrow><none><block_start>self._event_handlers_timer.reset()<line_sep>handler(*args **kwargs)<line_sep>t=self._event_handlers_timer.value()<line_sep>hname=self._get_callable_name(handler)<line_sep># filter profiled time if the handler was attached to event with event filter <if_stmt><not>hasattr(handler "_parent")<or>t<ge>self.EVENT_FILTER_THESHOLD_TIME<block_start>self.event_handlers_times[event][hname].append(t)<block_end><block_end># required to revert back to original handler after profiling setattr(_timeit_handler "_profiler_original" handler)<line_sep><return>_timeit_handler<block_end><def_stmt>_timeit_processing self<arrow><none># handler used for profiling processing times <block_start>t=self._processing_timer.value()<line_sep>self.processing_times.append(t)<block_end><def_stmt>_timeit_dataflow self<arrow><none># handler used for profiling dataflow times <block_start>t=self._dataflow_timer.value()<line_sep>self.dataflow_times.append(t)<block_end><def_stmt>_reset self event_handlers_names:Mapping[EventEnum List[str]]<arrow><none># reset the variables used for profiling <block_start>self.dataflow_times=[]<line_sep>self.processing_times=[]<line_sep>self.event_handlers_times={e:{h:[]<for>h event_handlers_names[e]}<for>e event_handlers_names}<block_end>@staticmethod<def_stmt>_is_internal_handler handler:Callable<arrow>bool# checks whether the handler is internal <block_start><return>any(n<in>repr(handler)<for>n ["HandlersTimeProfiler." 
"Timer."])<block_end><def_stmt>_detach_profiler_handlers self engine:Engine<arrow><none># reverts handlers to original handlers <block_start><for_stmt>e engine._event_handlers<block_start><for_stmt>i,(func args kwargs) enumerate(engine._event_handlers[e])<block_start><if_stmt>hasattr(func "_profiler_original")<block_start>engine._event_handlers[e][i]=(func._profiler_original args kwargs)<block_end><block_end><block_end><block_end><def_stmt>_as_first_started self engine:Engine<arrow><none># wraps original handlers for profiling <block_start>self.event_handlers_names={e:[self._get_callable_name(h)<for>(h _ _) engine._event_handlers[e]<if><not>self._is_internal_handler(h)]<for>e engine._allowed_events}<line_sep>self._reset(self.event_handlers_names)<for_stmt>e engine._allowed_events<block_start><for_stmt>i,(func args kwargs) enumerate(engine._event_handlers[e])<block_start><if_stmt><not>self._is_internal_handler(func)<block_start>engine._event_handlers[e][i]=(self._create_wrapped_handler(func e) args kwargs)<block_end><block_end><block_end># processing timer engine.add_event_handler(Events.ITERATION_STARTED self._processing_timer.reset)<line_sep>engine._event_handlers[Events.ITERATION_COMPLETED].insert(0 (self._timeit_processing () {}))<line_sep># dataflow timer engine.add_event_handler(Events.GET_BATCH_STARTED self._dataflow_timer.reset)<line_sep>engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0 (self._timeit_dataflow () {}))<line_sep># revert back the wrapped handlers with original handlers at the end engine.add_event_handler(Events.COMPLETED self._detach_profiler_handlers)<block_end><def_stmt>attach self engine:Engine<arrow><none><block_start>"""Attach HandlersTimeProfiler to the given engine. Args: engine: the instance of Engine to attach """<if_stmt><not>isinstance(engine Engine)<block_start><raise>TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")<block_end><if_stmt><not>engine.has_event_handler(self._as_first_started)<block_start>engine._event_handlers[Events.STARTED].insert(0 (self._as_first_started (engine ) {}))<block_end><block_end><def_stmt>get_results self<arrow>List[List[Union[str float]]]<block_start>""" Method to fetch the aggregated profiler results after the engine is run .. 
code-block:: python results = profiler.get_results() """<line_sep>total_eh_time=sum([sum(self.event_handlers_times[e][h])<for>e self.event_handlers_times<for>h self.event_handlers_times[e]])<line_sep>total_eh_time=round(float(total_eh_time) 5)<def_stmt>compute_basic_stats times:Union[Sequence torch.Tensor]<arrow>List[Union[str float Tuple[Union[str float] Union[str float]]]]<block_start>data=torch.as_tensor(times dtype=torch.float32)<line_sep># compute on non-zero data: data=data[data<g>0]<line_sep>total=round(torch.sum(data).item() 5)<if>len(data)<g>0<else>"not triggered"# type: Union[str, float] min_index=("None" "None")# type: Tuple[Union[str, float], Union[str, float]] max_index=("None" "None")# type: Tuple[Union[str, float], Union[str, float]] mean="None"# type: Union[str, float] std="None"# type: Union[str, float] <if_stmt>len(data)<g>0<block_start>min_index=(round(torch.min(data).item() 5) torch.argmin(data).item())<line_sep>max_index=(round(torch.max(data).item() 5) torch.argmax(data).item())<line_sep>mean=round(torch.mean(data).item() 5)<if_stmt>len(data)<g>1<block_start>std=round(torch.std(data).item() 5)<block_end><block_end><return>[total min_index max_index mean std]<block_end>event_handler_stats=[[h getattr(e "name" str(e)) *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h] dtype=torch.float32)) ]<for>e self.event_handlers_times<for>h self.event_handlers_times[e]]<line_sep>event_handler_stats.append(["Total" "" total_eh_time "" "" "" ""])<line_sep>event_handler_stats.append(["Processing" "None" *compute_basic_stats(self.processing_times)])<line_sep>event_handler_stats.append(["Dataflow" "None" *compute_basic_stats(self.dataflow_times)])<line_sep><return>event_handler_stats<block_end><def_stmt>write_results self output_path:str<arrow><none><block_start>""" Method to store the unaggregated profiling results to a csv file Args: output_path: file output path containing a filename .. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. code-block:: text ----------------------------------------------------------------- # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ... 
1 0.00003 0.252387 0.125676 2 0.00029 0.252342 0.125123 """<try_stmt><block_start><import_stmt>pandas<as>pd<block_end><except_stmt>ImportError<block_start><raise>RuntimeError("Need pandas to write results as files")<block_end>processing_stats=torch.tensor(self.processing_times dtype=torch.float32)<line_sep>dataflow_stats=torch.tensor(self.dataflow_times dtype=torch.float32)<line_sep>cols=[processing_stats dataflow_stats]<line_sep>headers=["processing_stats" "dataflow_stats"]<for_stmt>e self.event_handlers_times<block_start><for_stmt>h self.event_handlers_times[e]<block_start>headers.append(f"{h} ({getattr(e 'name' str(e))})")<line_sep>cols.append(torch.tensor(self.event_handlers_times[e][h] dtype=torch.float32))<block_end><block_end># Determine maximum length max_len=max([x.numel()<for>x cols])<line_sep>count_col=torch.arange(max_len dtype=torch.float32)+1<line_sep>cols.insert(0 count_col)<line_sep>headers.insert(0 "#")<line_sep># pad all tensors to have same length cols=[torch.nn.functional.pad(x pad=(0 max_len-x.numel()) mode="constant" value=0)<for>x cols]<line_sep>results_dump=torch.stack(cols dim=1).numpy()<line_sep>results_df=pd.DataFrame(data=results_dump columns=headers)<line_sep>results_df.to_csv(output_path index=<false>)<block_end>@staticmethod<def_stmt>print_results results:List[List[Union[str float]]]<arrow><none><block_start>""" Method to print the aggregated results from the profiler Args: results: the aggregated results from the profiler .. code-block:: python profiler.print_results(results) Examples: .. code-block:: text ----------------------------------------- ----------------------- -------------- ... Handler Event Name Total(s) ----------------------------------------- ----------------------- -------------- run.<locals>.log_training_results EPOCH_COMPLETED 19.43245 run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271 run.<locals>.log_time EPOCH_COMPLETED 0.00049 run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106 run.<locals>.log_training_loss ITERATION_COMPLETED 0.059 run.<locals>.log_time COMPLETED not triggered ----------------------------------------- ----------------------- -------------- Total 22.04571 ----------------------------------------- ----------------------- -------------- Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0, mean: 0.00602s, std: 0.00034s] Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937, mean: 0.00866s, std: 0.00113s] """<line_sep># adopted implementation of torch.autograd.profiler.build_table handler_column_width=max([len(item[0])<for>item results])+4# type: ignore[arg-type] event_column_width=max([len(item[1])<for>item results])+4# type: ignore[arg-type] DEFAULT_COLUMN_WIDTH=14<line_sep>headers=["Handler" "Event Name" "Total(s)" "Min(s)/IDX" "Max(s)/IDX" "Mean(s)" "Std(s)" ]<line_sep># Have to use a list because nonlocal is Py3 only... 
SPACING_SIZE=2<line_sep>row_format_lst=[""]<line_sep>header_sep_lst=[""]<line_sep>line_length_lst=[-SPACING_SIZE]<def_stmt>add_column padding:int text_dir:str=">"<arrow><none><block_start>row_format_lst[0]<augadd>"{: "+text_dir+str(padding)+"}"+(" "<times>SPACING_SIZE)<line_sep>header_sep_lst[0]<augadd>"-"<times>padding+(" "<times>SPACING_SIZE)<line_sep>line_length_lst[0]<augadd>padding+SPACING_SIZE<block_end>add_column(handler_column_width text_dir="<")<line_sep>add_column(event_column_width text_dir="<")<for_stmt>_ headers[2:]<block_start>add_column(DEFAULT_COLUMN_WIDTH)<block_end>row_format=row_format_lst[0]<line_sep>header_sep=header_sep_lst[0]<line_sep>result=[]<def_stmt>append s:str<arrow><none><block_start>result.append(s)<line_sep>result.append("\n")<block_end>result.append("\n")<line_sep>append(header_sep)<line_sep>append(row_format.format(*headers))<line_sep>append(header_sep)<for_stmt>row results[:-3]# format min/idx and max/idx <block_start>row[3]="{}/{}".format(*row[3])# type: ignore[misc] row[4]="{}/{}".format(*row[4])# type: ignore[misc] append(row_format.format(*row))<block_end>append(header_sep)<line_sep># print total handlers time row append(row_format.format(*results[-3]))<line_sep>append(header_sep)<line_sep>summary_format="{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"<for_stmt>row results[-2:]<block_start>row[3]="{}s/{}".format(*row[3])# type: ignore[misc] row[4]="{}s/{}".format(*row[4])# type: ignore[misc] <del_stmt>row[1]<line_sep>append(summary_format.format(*row))<block_end>print("".join(result))<block_end><block_end>
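# A self-contained, hedged usage sketch (not part of the module): both profilers
# expose the same attach / get_results / print_results workflow, shown here on a
# toy Engine. The dummy dataset and update function are assumptions made purely
# for illustration.
if __name__ == "__main__":

    def _dummy_update(engine, batch):
        # Stand-in for a real training step.
        return batch * 2

    for profiler_cls in (BasicTimeProfiler, HandlersTimeProfiler):
        trainer = Engine(_dummy_update)
        profiler = profiler_cls()
        profiler.attach(trainer)
        trainer.run(data=list(range(8)), max_epochs=2)
        profiler.print_results(profiler.get_results())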
# # Copyright (c) 2020, Andrey "Limych" Khrolenok <<EMAIL>> # Creative Commons BY-NC-SA 4.0 International Public License # (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) # """ The Snowtire binary sensor. For more details about this platform, please refer to the documentation at https://github.com/Limych/ha-snowtire/ """<line_sep>
<import_from_future_stmt> division <import_from_stmt>pomegranate *<import_from_stmt>pomegranate.io DataGenerator<import_from_stmt>pomegranate.io DataFrameGenerator<import_from_stmt>nose.tools with_setup<import_from_stmt>nose.tools assert_almost_equal<import_from_stmt>nose.tools assert_equal<import_from_stmt>nose.tools assert_not_equal<import_from_stmt>nose.tools assert_less_equal<import_from_stmt>nose.tools assert_raises<import_from_stmt>nose.tools assert_true<import_from_stmt>numpy.testing assert_array_almost_equal<import_stmt>pandas<import_stmt>random<import_stmt>pickle<import_stmt>numpy<as>np<line_sep>nan=numpy.nan<def_stmt>setup_multivariate_gaussian <block_start>mu,cov=[0 0 0] numpy.eye(3)<line_sep>d1=MultivariateGaussianDistribution(mu cov)<line_sep>mu,cov=[2 2 2] numpy.eye(3)<line_sep>d2=MultivariateGaussianDistribution(mu cov)<line_sep><global>model<line_sep>model=BayesClassifier([d1 d2])<line_sep><global>X<line_sep>X=numpy.array([[0.3 0.5 0.1] [0.8 1.4 0.5] [1.4 2.6 1.8] [4.2 3.3 3.7] [2.6 3.6 3.3] [3.1 2.2 1.7] [1.8 2.2 1.8] [-1.2 -1.8 -1.5] [-1.8 0.3 0.5] [0.7 -1.3 -0.1]])<line_sep><global>y<line_sep>y=[0 0 0 1 1 1 1 0 0 0]<line_sep><global>X_nan<line_sep>X_nan=numpy.array([[0.3 nan 0.1] [nan 1.4 nan] [1.4 2.6 nan] [nan nan nan] [nan 3.6 3.3] [3.1 nan 1.7] [nan nan 1.8] [-1.2 -1.8 -1.5] [nan 0.3 0.5] [nan -1.3 nan]])<block_end><def_stmt>setup_multivariate_mixed <block_start>mu,cov=[0 0 0] numpy.eye(3)<line_sep>d1=MultivariateGaussianDistribution(mu cov)<line_sep>d21=ExponentialDistribution(5)<line_sep>d22=LogNormalDistribution(0.2 0.8)<line_sep>d23=PoissonDistribution(3)<line_sep>d2=IndependentComponentsDistribution([d21 d22 d23])<line_sep><global>model<line_sep>model=BayesClassifier([d1 d2])<line_sep><global>X<line_sep>X=numpy.array([[0.3 0.5 0.1] [0.8 1.4 0.5] [1.4 2.6 1.8] [4.2 3.3 3.7] [2.6 3.6 3.3] [3.1 2.2 1.7] [1.8 2.2 1.8] [1.2 1.8 1.5] [1.8 0.3 0.5] [0.7 1.3 0.1]])<line_sep><global>y<line_sep>y=[0 0 0 1 1 1 1 0 0 0]<line_sep><global>X_nan<line_sep>X_nan=numpy.array([[0.3 nan 0.1] [nan 1.4 nan] [1.4 2.6 nan] [nan nan nan] [nan 3.6 3.3] [3.1 nan 1.7] [nan nan 1.8] [1.2 1.8 1.5] [nan 0.3 0.5] [nan 1.3 nan]])<block_end><def_stmt>setup_hmm <block_start><global>model<line_sep><global>hmm1<line_sep><global>hmm2<line_sep><global>hmm3<line_sep>rigged=State(DiscreteDistribution({'H':0.8 'T':0.2}))<line_sep>unrigged=State(DiscreteDistribution({'H':0.5 'T':0.5}))<line_sep>hmm1=HiddenMarkovModel()<line_sep>hmm1.start=rigged<line_sep>hmm1.add_transition(rigged rigged 1)<line_sep>hmm1.bake()<line_sep>hmm2=HiddenMarkovModel()<line_sep>hmm2.start=unrigged<line_sep>hmm2.add_transition(unrigged unrigged 1)<line_sep>hmm2.bake()<line_sep>hmm3=HiddenMarkovModel()<line_sep>hmm3.add_transition(hmm3.start unrigged 0.5)<line_sep>hmm3.add_transition(hmm3.start rigged 0.5)<line_sep>hmm3.add_transition(rigged rigged 0.5)<line_sep>hmm3.add_transition(rigged unrigged 0.5)<line_sep>hmm3.add_transition(unrigged rigged 0.5)<line_sep>hmm3.add_transition(unrigged unrigged 0.5)<line_sep>hmm3.bake()<line_sep>model=BayesClassifier([hmm1 hmm2 hmm3])<block_end><def_stmt>setup_multivariate <block_start><pass><block_end><def_stmt>teardown <block_start><pass><block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_initialization <block_start>assert_equal(model.d 3)<line_sep>assert_equal(model.n 2)<line_sep>assert_equal(model.is_vl_ <false>)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_initialization 
<block_start>assert_equal(model.d 3)<line_sep>assert_equal(model.n 2)<line_sep>assert_equal(model.is_vl_ <false>)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_predict_log_proba <block_start>y_hat=model.predict_log_proba(X)<line_sep>y=[[-1.48842547e-02 -4.21488425e+00] [-4.37487950e-01 -1.03748795e+00] [-5.60369104e+00 -3.69104343e-03] [-1.64000001e+01 -7.54345812e-08] [-1.30000023e+01 -2.26032685e-06] [-8.00033541e+00 -3.35406373e-04] [-5.60369104e+00 -3.69104343e-03] [-3.05902274e-07 -1.50000003e+01] [-3.35406373e-04 -8.00033541e+00] [-6.11066022e-04 -7.40061107e+00]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_predict_log_proba <block_start>y_hat=model.predict_log_proba(X)<line_sep>y=[[-5.03107596e-01 -9.27980626e-01] [-1.86355320e-01 -1.77183117e+00] [-5.58542088e-01 -8.48731256e-01] [-7.67315597e-01 -6.24101927e-01] [-2.32860808e+00 -1.02510436e-01] [-3.06641866e-03 -5.78877778e+00] [-9.85292840e-02 -2.36626165e+00] [-2.61764180e-01 -1.46833995e+00] [-2.01640009e-03 -6.20744952e+00] [-1.47371167e-01 -1.98758175e+00]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_nan_predict_log_proba <block_start>y_hat=model.predict_log_proba(X_nan)<line_sep>y=[[-3.99533332e-02 -3.23995333e+00] [-1.17110067e+00 -3.71100666e-01] [-4.01814993e+00 -1.81499279e-02] [-6.93147181e-01 -6.93147181e-01] [-9.80005545e+00 -5.54500620e-05] [-5.60369104e+00 -3.69104343e-03] [-1.78390074e+00 -1.83900741e-01] [-3.05902274e-07 -1.50000003e+01] [-8.68361522e-02 -2.48683615e+00] [-1.00016521e-02 -4.61000165e+00]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_nan_predict_log_proba <block_start>y_hat=model.predict_log_proba(X_nan)<line_sep>y=[[-3.57980882e-01 -1.20093223e+00] [-1.20735130e+00 -3.55230506e-01] [-2.43174286e-01 -1.53310132e+00] [-6.93147181e-01 -6.93147181e-01] [-9.31781101e+00 -8.98143220e-05] [-6.29755079e-04 -7.37049444e+00] [-1.31307006e+00 -3.13332194e-01] [-2.61764180e-01 -1.46833995e+00] [-2.29725479e-01 -1.58353505e+00] [-1.17299253e+00 -3.70251760e-01]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_predict_log_proba_parallel <block_start>y_hat=model.predict_log_proba(X n_jobs=2)<line_sep>y=[[-1.48842547e-02 -4.21488425e+00] [-4.37487950e-01 -1.03748795e+00] [-5.60369104e+00 -3.69104343e-03] [-1.64000001e+01 -7.54345812e-08] [-1.30000023e+01 -2.26032685e-06] [-8.00033541e+00 -3.35406373e-04] [-5.60369104e+00 -3.69104343e-03] [-3.05902274e-07 -1.50000003e+01] [-3.35406373e-04 -8.00033541e+00] [-6.11066022e-04 -7.40061107e+00]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_predict_log_proba_parallel <block_start>y_hat=model.predict_log_proba(X n_jobs=2)<line_sep>y=[[-5.03107596e-01 -9.27980626e-01] [-1.86355320e-01 -1.77183117e+00] [-5.58542088e-01 -8.48731256e-01] [-7.67315597e-01 -6.24101927e-01] [-2.32860808e+00 -1.02510436e-01] [-3.06641866e-03 -5.78877778e+00] [-9.85292840e-02 -2.36626165e+00] [-2.61764180e-01 -1.46833995e+00] [-2.01640009e-03 -6.20744952e+00] [-1.47371167e-01 -1.98758175e+00]]<line_sep>assert_array_almost_equal(y 
y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_predict_proba <block_start>y_hat=model.predict_proba(X)<line_sep>y=[[9.85225968e-01 1.47740317e-02] [6.45656306e-01 3.54343694e-01] [3.68423990e-03 9.96315760e-01] [7.54345778e-08 9.99999925e-01] [2.26032430e-06 9.99997740e-01] [3.35350130e-04 9.99664650e-01] [3.68423990e-03 9.96315760e-01] [9.99999694e-01 3.05902227e-07] [9.99664650e-01 3.35350130e-04] [9.99389121e-01 6.10879359e-04]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_predict_proba <block_start>y_hat=model.predict_proba(X)<line_sep>y=[[0.60464873 0.39535127] [0.82997863 0.17002137] [0.57204244 0.42795756] [0.46425765 0.53574235] [0.09743127 0.90256873] [0.99693828 0.00306172] [0.90616916 0.09383084] [0.76969251 0.23030749] [0.99798563 0.00201437] [0.86297361 0.13702639]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_nan_predict_proba <block_start>y_hat=model.predict_proba(X_nan)<line_sep>y=[[9.60834277e-01 3.91657228e-02] [3.10025519e-01 6.89974481e-01] [1.79862100e-02 9.82013790e-01] [5.00000000e-01 5.00000000e-01] [5.54485247e-05 9.99944551e-01] [3.68423990e-03 9.96315760e-01] [1.67981615e-01 8.32018385e-01] [9.99999694e-01 3.05902227e-07] [9.16827304e-01 8.31726965e-02] [9.90048198e-01 9.95180187e-03]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_nan_predict_proba <block_start>y_hat=model.predict_proba(X_nan)<line_sep>y=[[6.99086440e-01 3.00913560e-01] [2.98988163e-01 7.01011837e-01] [7.84134838e-01 2.15865162e-01] [5.00000000e-01 5.00000000e-01] [8.98102888e-05 9.99910190e-01] [9.99370443e-01 6.29556825e-04] [2.68992964e-01 7.31007036e-01] [7.69692511e-01 2.30307489e-01] [7.94751748e-01 2.05248252e-01] [3.09439547e-01 6.90560453e-01]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_predict_proba_parallel <block_start>y_hat=model.predict_proba(X n_jobs=2)<line_sep>y=[[9.85225968e-01 1.47740317e-02] [6.45656306e-01 3.54343694e-01] [3.68423990e-03 9.96315760e-01] [7.54345778e-08 9.99999925e-01] [2.26032430e-06 9.99997740e-01] [3.35350130e-04 9.99664650e-01] [3.68423990e-03 9.96315760e-01] [9.99999694e-01 3.05902227e-07] [9.99664650e-01 3.35350130e-04] [9.99389121e-01 6.10879359e-04]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_predict_proba_parallel <block_start>y_hat=model.predict_proba(X n_jobs=2)<line_sep>y=[[0.60464873 0.39535127] [0.82997863 0.17002137] [0.57204244 0.42795756] [0.46425765 0.53574235] [0.09743127 0.90256873] [0.99693828 0.00306172] [0.90616916 0.09383084] [0.76969251 0.23030749] [0.99798563 0.00201437] [0.86297361 0.13702639]]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_predict <block_start>y_hat=model.predict(X)<line_sep>y=[0 0 1 1 1 1 1 0 0 0]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_predict <block_start>y_hat=model.predict(X)<line_sep>y=[0 0 0 1 1 0 0 0 0 0]<line_sep>assert_array_almost_equal(y 
y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_nan_predict <block_start>y_hat=model.predict(X_nan)<line_sep>y=[0 1 1 0 1 1 1 0 0 0]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_nan_predict <block_start>y_hat=model.predict(X_nan)<line_sep>y=[0 1 0 0 1 0 1 0 0 1]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_predict_parallel <block_start>y_hat=model.predict(X n_jobs=2)<line_sep>y=[0 0 1 1 1 1 1 0 0 0]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_predict_parallel <block_start>y_hat=model.predict(X n_jobs=2)<line_sep>y=[0 0 0 1 1 0 0 0 0 0]<line_sep>assert_array_almost_equal(y y_hat)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_fit_parallel <block_start>model.fit(X y n_jobs=2)<line_sep>mu1=model.distributions[0].parameters[0]<line_sep>cov1=model.distributions[0].parameters[1]<line_sep>mu1_t=[0.03333333 0.28333333 0.21666666]<line_sep>cov1_t=[[1.3088888 0.9272222 0.6227777] [0.9272222 2.2513888 1.3402777] [0.6227777 1.3402777 0.9547222]]<line_sep>mu2=model.distributions[1].parameters[0]<line_sep>cov2=model.distributions[1].parameters[1]<line_sep>mu2_t=[2.925 2.825 2.625]<line_sep>cov2_t=[[0.75687499 0.23687499 0.4793750] [0.23687499 0.40187499 0.5318749] [0.47937500 0.53187499 0.7868750]]<line_sep>assert_array_almost_equal(mu1 mu1_t)<line_sep>assert_array_almost_equal(cov1 cov1_t)<line_sep>assert_array_almost_equal(mu2 mu2_t)<line_sep>assert_array_almost_equal(cov2 cov2_t)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_fit_parallel <block_start>model.fit(X y n_jobs=2)<line_sep>mu1=model.distributions[0].parameters[0]<line_sep>cov1=model.distributions[0].parameters[1]<line_sep>mu1_t=[1.033333 1.3166667 0.75]<line_sep>cov1_t=[[0.242222 0.0594444 0.178333] [0.059444 0.5980555 0.414166] [0.178333 0.4141666 0.439166]]<line_sep>d21=model.distributions[1].distributions[0]<line_sep>d22=model.distributions[1].distributions[1]<line_sep>d23=model.distributions[1].distributions[2]<line_sep>assert_array_almost_equal(mu1 mu1_t)<line_sep>assert_array_almost_equal(cov1 cov1_t)<line_sep>assert_array_almost_equal(d21.parameters [0.34188034])<line_sep>assert_array_almost_equal(d22.parameters [1.01294275 0.22658346])<line_sep>assert_array_almost_equal(d23.parameters [2.625])<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_from_samples <block_start>model=BayesClassifier.from_samples(MultivariateGaussianDistribution X y)<line_sep>mu1=model.distributions[0].parameters[0]<line_sep>cov1=model.distributions[0].parameters[1]<line_sep>mu1_t=[0.03333333 0.2833333 0.21666666]<line_sep>cov1_t=[[1.308888888 0.9272222222 0.6227777777] [0.927222222 2.251388888 1.340277777] [0.622777777 1.340277777 0.9547222222]]<line_sep>mu2=model.distributions[1].parameters[0]<line_sep>cov2=model.distributions[1].parameters[1]<line_sep>mu2_t=[2.925 2.825 2.625]<line_sep>cov2_t=[[0.75687500 0.23687499 0.47937500] [0.23687499 0.40187499 0.53187499] [0.47937500 0.53187499 0.78687500]]<line_sep>assert_array_almost_equal(mu1 mu1_t)<line_sep>assert_array_almost_equal(cov1 cov1_t)<line_sep>assert_array_almost_equal(mu2 mu2_t)<line_sep>assert_array_almost_equal(cov2 
cov2_t)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_pickle <block_start>model2=pickle.loads(pickle.dumps(model))<line_sep>assert_true(isinstance(model2 BayesClassifier))<line_sep>assert_true(isinstance(model2.distributions[0] MultivariateGaussianDistribution))<line_sep>assert_true(isinstance(model2.distributions[1] MultivariateGaussianDistribution))<line_sep>assert_array_almost_equal(model.weights model2.weights)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_pickle <block_start>model2=pickle.loads(pickle.dumps(model))<line_sep>assert_true(isinstance(model2 BayesClassifier))<line_sep>assert_true(isinstance(model2.distributions[0] MultivariateGaussianDistribution))<line_sep>assert_true(isinstance(model2.distributions[1] IndependentComponentsDistribution))<line_sep>assert_array_almost_equal(model.weights model2.weights)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_to_json <block_start>model2=BayesClassifier.from_json(model.to_json())<line_sep>assert_true(isinstance(model2 BayesClassifier))<line_sep>assert_true(isinstance(model2.distributions[0] MultivariateGaussianDistribution))<line_sep>assert_true(isinstance(model2.distributions[1] MultivariateGaussianDistribution))<line_sep>assert_array_almost_equal(model.weights model2.weights)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_to_json <block_start>model2=BayesClassifier.from_json(model.to_json())<line_sep>assert_true(isinstance(model2 BayesClassifier))<line_sep>assert_true(isinstance(model2.distributions[0] MultivariateGaussianDistribution))<line_sep>assert_true(isinstance(model2.distributions[1] IndependentComponentsDistribution))<line_sep>assert_array_almost_equal(model.weights model2.weights)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_bc_multivariate_gaussian_robust_from_json <block_start>model2=from_json(model.to_json())<line_sep>assert_true(isinstance(model2 BayesClassifier))<line_sep>assert_true(isinstance(model2.distributions[0] MultivariateGaussianDistribution))<line_sep>assert_true(isinstance(model2.distributions[1] MultivariateGaussianDistribution))<line_sep>assert_array_almost_equal(model.weights model2.weights)<block_end>@with_setup(setup_multivariate_mixed teardown)<def_stmt>test_bc_multivariate_mixed_robust_from_json <block_start>model2=from_json(model.to_json())<line_sep>assert_true(isinstance(model2 BayesClassifier))<line_sep>assert_true(isinstance(model2.distributions[0] MultivariateGaussianDistribution))<line_sep>assert_true(isinstance(model2.distributions[1] IndependentComponentsDistribution))<line_sep>assert_array_almost_equal(model.weights model2.weights)<block_end>@with_setup(setup_hmm teardown)<def_stmt>test_model <block_start>assert_almost_equal(hmm1.log_probability(list('H')) -0.2231435513142097)<line_sep>assert_almost_equal(hmm1.log_probability(list('T')) -1.6094379124341003)<line_sep>assert_almost_equal(hmm1.log_probability(list('HHHH')) -0.8925742052568388)<line_sep>assert_almost_equal(hmm1.log_probability(list('THHH')) -2.2788685663767296)<line_sep>assert_almost_equal(hmm1.log_probability(list('TTTT')) -6.437751649736401)<line_sep>assert_almost_equal(hmm2.log_probability(list('H')) -0.6931471805599453)<line_sep>assert_almost_equal(hmm2.log_probability(list('T')) -0.6931471805599453)<line_sep>assert_almost_equal(hmm2.log_probability(list('HHHH')) 
-2.772588722239781)<line_sep>assert_almost_equal(hmm2.log_probability(list('THHH')) -2.772588722239781)<line_sep>assert_almost_equal(hmm2.log_probability(list('TTTT')) -2.772588722239781)<line_sep>assert_almost_equal(hmm3.log_probability(list('H')) -0.43078291609245417)<line_sep>assert_almost_equal(hmm3.log_probability(list('T')) -1.0498221244986776)<line_sep>assert_almost_equal(hmm3.log_probability(list('HHHH')) -1.7231316643698167)<line_sep>assert_almost_equal(hmm3.log_probability(list('THHH')) -2.3421708727760397)<line_sep>assert_almost_equal(hmm3.log_probability(list('TTTT')) -4.1992884979947105)<line_sep>assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')) -8.883630243546788)<line_sep>assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')) -7.645551826734343)<line_sep>assert_equal(model.d 1)<block_end>@with_setup(setup_hmm teardown)<def_stmt>test_hmm_log_proba <block_start>logs=model.predict_log_proba(np.array([list('H') list('THHH') list('TTTT') list('THTHTHTHTHTH') list('THTHHHHHTHTH')]))<line_sep>assert_almost_equal(logs[0][0] -0.89097292388986515)<line_sep>assert_almost_equal(logs[0][1] -1.3609765531356006)<line_sep>assert_almost_equal(logs[0][2] -1.0986122886681096)<line_sep>assert_almost_equal(logs[1][0] -0.93570553121744293)<line_sep>assert_almost_equal(logs[1][1] -1.429425687080494)<line_sep>assert_almost_equal(logs[1][2] -0.9990078376167526)<line_sep>assert_almost_equal(logs[2][0] -3.9007882563128864)<line_sep>assert_almost_equal(logs[2][1] -0.23562532881626597)<line_sep>assert_almost_equal(logs[2][2] -1.6623251045711958)<line_sep>assert_almost_equal(logs[3][0] -3.1703366478831185)<line_sep>assert_almost_equal(logs[3][1] -0.49261403211260379)<line_sep>assert_almost_equal(logs[3][2] -1.058478108940049)<line_sep>assert_almost_equal(logs[4][0] -1.3058441172130273)<line_sep>assert_almost_equal(logs[4][1] -1.4007102236822906)<line_sep>assert_almost_equal(logs[4][2] -0.7284958836972919)<block_end>@with_setup(setup_hmm teardown)<def_stmt>test_hmm_proba <block_start>probs=model.predict_proba(np.array([list('H') list('THHH') list('TTTT') list('THTHTHTHTHTH') list('THTHHHHHTHTH')]))<line_sep>assert_almost_equal(probs[0][0] 0.41025641025641024)<line_sep>assert_almost_equal(probs[0][1] 0.25641025641025639)<line_sep>assert_almost_equal(probs[0][2] 0.33333333333333331)<line_sep>assert_almost_equal(probs[1][0] 0.39230898163446098)<line_sep>assert_almost_equal(probs[1][1] 0.23944639992337707)<line_sep>assert_almost_equal(probs[1][2] 0.36824461844216183)<line_sep>assert_almost_equal(probs[2][0] 0.020225961918306088)<line_sep>assert_almost_equal(probs[2][1] 0.79007663743383105)<line_sep>assert_almost_equal(probs[2][2] 0.18969740064786292)<line_sep>assert_almost_equal(probs[3][0] 0.041989459861032523)<line_sep>assert_almost_equal(probs[3][1] 0.61102706038265642)<line_sep>assert_almost_equal(probs[3][2] 0.346983479756311)<line_sep>assert_almost_equal(probs[4][0] 0.27094373022369794)<line_sep>assert_almost_equal(probs[4][1] 0.24642188711704707)<line_sep>assert_almost_equal(probs[4][2] 0.48263438265925512)<block_end>@with_setup(setup_hmm teardown)<def_stmt>test_hmm_prediction <block_start>predicts=model.predict(np.array([list('H') list('THHH') list('TTTT') list('THTHTHTHTHTH') list('THTHHHHHTHTH')]))<line_sep>assert_equal(predicts[0] 0)<line_sep>assert_equal(predicts[1] 0)<line_sep>assert_equal(predicts[2] 1)<line_sep>assert_equal(predicts[3] 1)<line_sep>assert_equal(predicts[4] 2)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_io_log_probability 
<block_start>X2=DataGenerator(X)<line_sep>X3=DataFrameGenerator(pandas.DataFrame(X))<line_sep>logp1=model.log_probability(X)<line_sep>logp2=model.log_probability(X2)<line_sep>logp3=model.log_probability(X3)<line_sep>assert_array_almost_equal(logp1 logp2)<line_sep>assert_array_almost_equal(logp1 logp3)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_io_predict <block_start>X2=DataGenerator(X)<line_sep>X3=DataFrameGenerator(pandas.DataFrame(X))<line_sep>y_hat1=model.predict(X)<line_sep>y_hat2=model.predict(X2)<line_sep>y_hat3=model.predict(X3)<line_sep>assert_array_almost_equal(y_hat1 y_hat2)<line_sep>assert_array_almost_equal(y_hat1 y_hat3)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_io_predict_proba <block_start>X2=DataGenerator(X)<line_sep>X3=DataFrameGenerator(pandas.DataFrame(X))<line_sep>y_hat1=model.predict_proba(X)<line_sep>y_hat2=model.predict_proba(X2)<line_sep>y_hat3=model.predict_proba(X3)<line_sep>assert_array_almost_equal(y_hat1 y_hat2)<line_sep>assert_array_almost_equal(y_hat1 y_hat3)<block_end>@with_setup(setup_multivariate_gaussian teardown)<def_stmt>test_io_predict_log_proba <block_start>X2=DataGenerator(X)<line_sep>X3=DataFrameGenerator(pandas.DataFrame(X))<line_sep>y_hat1=model.predict_log_proba(X)<line_sep>y_hat2=model.predict_log_proba(X2)<line_sep>y_hat3=model.predict_log_proba(X3)<line_sep>assert_array_almost_equal(y_hat1 y_hat2)<line_sep>assert_array_almost_equal(y_hat1 y_hat3)<block_end><def_stmt>test_io_fit <block_start>X=numpy.random.randn(100 5)+0.5<line_sep>weights=numpy.abs(numpy.random.randn(100))<line_sep>y=numpy.random.randint(2 size=100)<line_sep>data_generator=DataGenerator(X weights y)<line_sep>mu1=numpy.array([0 0 0 0 0])<line_sep>mu2=numpy.array([1 1 1 1 1])<line_sep>cov=numpy.eye(5)<line_sep>d1=MultivariateGaussianDistribution(mu1 cov)<line_sep>d2=MultivariateGaussianDistribution(mu2 cov)<line_sep>bc1=BayesClassifier([d1 d2])<line_sep>bc1.fit(X y weights)<line_sep>d1=MultivariateGaussianDistribution(mu1 cov)<line_sep>d2=MultivariateGaussianDistribution(mu2 cov)<line_sep>bc2=BayesClassifier([d1 d2])<line_sep>bc2.fit(data_generator)<line_sep>logp1=bc1.log_probability(X)<line_sep>logp2=bc2.log_probability(X)<line_sep>assert_array_almost_equal(logp1 logp2)<block_end><def_stmt>test_io_from_samples <block_start>X=numpy.random.randn(100 5)+0.5<line_sep>weights=numpy.abs(numpy.random.randn(100))<line_sep>y=numpy.random.randint(2 size=100)<line_sep>data_generator=DataGenerator(X weights y)<line_sep>d=MultivariateGaussianDistribution<line_sep>bc1=BayesClassifier.from_samples(d X=X y=y weights=weights)<line_sep>bc2=BayesClassifier.from_samples(d X=data_generator)<line_sep>logp1=bc1.log_probability(X)<line_sep>logp2=bc2.log_probability(X)<line_sep>assert_array_almost_equal(logp1 logp2)<block_end>
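# --- Hedged usage sketch (not part of the original test suite) --------------
# The tests above exercise BayesClassifier both with explicitly constructed
# component distributions and via BayesClassifier.from_samples(). The snippet
# below is a minimal, self-contained walk through that same API, assuming the
# pomegranate version targeted by these tests; the demo arrays, variable
# names, and the random data used for from_samples() are made up for
# illustration only.
import numpy

from pomegranate import *  # same wildcard import used by the tests above

# Two well-separated Gaussian components, mirroring setup_multivariate_gaussian().
d1 = MultivariateGaussianDistribution([0, 0, 0], numpy.eye(3))
d2 = MultivariateGaussianDistribution([2, 2, 2], numpy.eye(3))
clf = BayesClassifier([d1, d2])

X_demo = numpy.array([[0.1, -0.2, 0.3],
                      [2.1, 1.9, 2.2]])
print(clf.predict(X_demo))          # each row should go to the nearer component: [0 1]
print(clf.predict_proba(X_demo))    # rows of per-class posterior probabilities
print(clf.predict_log_proba(X_demo))

# The classifier can also be fit directly from labelled data, as in the
# from_samples tests above; enough samples are needed to estimate a full
# 3x3 covariance matrix per class.
rng = numpy.random.RandomState(0)
X_fit = numpy.vstack([rng.randn(50, 3), rng.randn(50, 3) + 2])
y_fit = numpy.array([0] * 50 + [1] * 50)
clf2 = BayesClassifier.from_samples(MultivariateGaussianDistribution, X_fit, y_fit)
print(clf2.log_probability(X_demo))
# -----------------------------------------------------------------------------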
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_from_stmt>typing Tuple<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>ignite.engine Engine<import_from_stmt>monai.handlers SurfaceDistance<def_stmt>create_spherical_seg_3d radius:float=20.0 centre:Tuple[int int int]=(49 49 49) im_shape:Tuple[int int int]=(99 99 99)<arrow>np.ndarray<block_start>""" Return a 3D image with a sphere inside. Voxel values will be 1 inside the sphere, and 0 elsewhere. Args: radius: radius of sphere (in terms of number of voxels, can be partial) centre: location of sphere centre. im_shape: shape of image to create See also: :py:meth:`~create_test_image_3d` """<line_sep># Create image image=np.zeros(im_shape dtype=np.int32)<line_sep>spy,spx,spz=np.ogrid[-centre[0]:im_shape[0]-centre[0] -centre[1]:im_shape[1]-centre[1] -centre[2]:im_shape[2]-centre[2]]<line_sep>circle=(spx<times>spx+spy<times>spy+spz<times>spz)<le>radius<times>radius<line_sep>image[circle]=1<line_sep>image[~circle]=0<line_sep><return>image<block_end>sampler_sphere=torch.Tensor(create_spherical_seg_3d(radius=20 centre=(20 20 20))).unsqueeze(0).unsqueeze(0)<line_sep># test input a list of channel-first tensor sampler_sphere_gt=[torch.Tensor(create_spherical_seg_3d(radius=20 centre=(10 20 20))).unsqueeze(0)]<line_sep>sampler_sphere_zeros=torch.zeros_like(sampler_sphere)<line_sep>TEST_SAMPLE_1=[sampler_sphere sampler_sphere_gt]<line_sep>TEST_SAMPLE_2=[sampler_sphere_gt sampler_sphere_gt]<line_sep>TEST_SAMPLE_3=[sampler_sphere_zeros sampler_sphere_gt]<line_sep>TEST_SAMPLE_4=[sampler_sphere_zeros sampler_sphere_zeros]<class_stmt>TestHandlerSurfaceDistance(unittest.TestCase)# TODO test multi node Surface Distance <block_start><def_stmt>test_compute self<block_start>sur_metric=SurfaceDistance(include_background=<true>)<def_stmt>_val_func engine batch<block_start><pass><block_end>engine=Engine(_val_func)<line_sep>sur_metric.attach(engine "surface_distance")<line_sep>y_pred,y=TEST_SAMPLE_1<line_sep>sur_metric.update([y_pred y])<line_sep>self.assertAlmostEqual(sur_metric.compute() 4.17133 places=4)<line_sep>y_pred,y=TEST_SAMPLE_2<line_sep>sur_metric.update([y_pred y])<line_sep>self.assertAlmostEqual(sur_metric.compute() 2.08566 places=4)<line_sep>y_pred,y=TEST_SAMPLE_3<line_sep>sur_metric.update([y_pred y])<line_sep>self.assertAlmostEqual(sur_metric.compute() float("inf"))<line_sep>y_pred,y=TEST_SAMPLE_4<line_sep>sur_metric.update([y_pred y])<line_sep>self.assertAlmostEqual(sur_metric.compute() float("inf"))<block_end><def_stmt>test_shape_mismatch self<block_start>sur_metric=SurfaceDistance(include_background=<true>)<with_stmt>self.assertRaises((AssertionError ValueError))<block_start>y_pred=TEST_SAMPLE_1[0]<line_sep>y=torch.ones((1 1 10 10 10))<line_sep>sur_metric.update([y_pred y])<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
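# --- Hedged usage sketch (not part of the original test file) ---------------
# The test above drives the SurfaceDistance handler by hand: attach it to a
# dummy ignite Engine, push (y_pred, y) pairs through update(), then read the
# aggregated value with compute(). The function below repeats that flow with
# two synthetic spheres built by the create_spherical_seg_3d helper defined in
# the test module above; the 2-voxel offset between the centres is arbitrary
# and the printed value is only indicative.
import torch
from ignite.engine import Engine
from monai.handlers import SurfaceDistance


def _demo_surface_distance():
    # batch-first prediction tensor of shape (1, 1, 99, 99, 99)
    pred = torch.Tensor(
        create_spherical_seg_3d(radius=20, centre=(20, 20, 20))
    ).unsqueeze(0).unsqueeze(0)
    # ground truth given as a list of channel-first tensors, as in TEST_SAMPLE_1
    gt = [torch.Tensor(
        create_spherical_seg_3d(radius=20, centre=(22, 20, 20))
    ).unsqueeze(0)]

    metric = SurfaceDistance(include_background=True)

    def _val_func(engine, batch):
        pass

    engine = Engine(_val_func)
    metric.attach(engine, "surface_distance")
    metric.update([pred, gt])
    print("average surface distance:", metric.compute())
# -----------------------------------------------------------------------------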
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Specify and constraints to determine which targets are observable for an observer. """<import_from_future_stmt> absolute_import division print_function unicode_literals <line_sep># Standard library <import_from_stmt>abc ABCMeta abstractmethod<import_stmt>datetime<import_stmt>time<import_stmt>warnings<line_sep># Third-party <import_from_stmt>astropy.time Time<import_stmt>astropy.units<as>u<import_from_stmt>astropy.coordinates get_body get_sun get_moon Galactic SkyCoord<import_from_stmt>astropy table<import_stmt>numpy<as>np<import_from_stmt>numpy.lib.stride_tricks as_strided<line_sep># Package <import_from_stmt>.moon moon_illumination<import_from_stmt>.utils time_grid_from_range<import_from_stmt>.target get_skycoord<line_sep>__all__=["AltitudeConstraint" "AirmassConstraint" "AtNightConstraint" "is_observable" "is_always_observable" "time_grid_from_range" "GalacticLatitudeConstraint" "SunSeparationConstraint" "MoonSeparationConstraint" "MoonIlluminationConstraint" "LocalTimeConstraint" "PrimaryEclipseConstraint" "SecondaryEclipseConstraint" "Constraint" "TimeConstraint" "observability_table" "months_observable" "max_best_rescale" "min_best_rescale" "PhaseConstraint" "is_event_observable"]<line_sep>_current_year=time.localtime().tm_year# needed for backward compatibility _current_year_time_range=Time(# needed for backward compatibility [str(_current_year)+'-01-01' str(_current_year)+'-12-31'])<def_stmt>_make_cache_key times targets<block_start>""" Make a unique key to reference this combination of ``times`` and ``targets``. Often, we wish to store expensive calculations for a combination of ``targets`` and ``times`` in a cache on an ``observer``` object. This routine will provide an appropriate, hashable, key to store these calculations in a dictionary. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. targets : `~astropy.coordinates.SkyCoord` Target or list of targets. Returns ------- cache_key : tuple A hashable tuple for use as a cache key """<line_sep># make a tuple from times <try_stmt><block_start>timekey=tuple(times.jd)+times.shape<block_end><except_stmt>BaseException# must be scalar <block_start>timekey=(times.jd )<block_end># make hashable thing from targets coords <try_stmt><block_start><if_stmt>hasattr(targets 'frame')# treat as a SkyCoord object. Accessing the longitude # attribute of the frame data should be unique and is # quicker than accessing the ra attribute. <block_start>targkey=tuple(targets.frame.data.lon.value.ravel())+targets.shape<block_end><else_stmt># assume targets is a string. <block_start>targkey=(targets )<block_end><block_end><except_stmt>BaseException<block_start>targkey=(targets.frame.data.lon )<block_end><return>timekey+targkey<block_end><def_stmt>_get_altaz times observer targets force_zero_pressure=<false><block_start>""" Calculate alt/az for ``target`` at times linearly spaced between the two times in ``time_range`` with grid spacing ``time_resolution`` for ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets. observer : `~astroplan.Observer` The observer who has constraints ``constraints``. force_zero_pressure : bool Forcefully use 0 pressure. Returns ------- altaz_dict : dict Dictionary containing two key-value pairs. 
(1) 'times' contains the times for the alt/az computations, (2) 'altaz' contains the corresponding alt/az coordinates at those times. """<if_stmt><not>hasattr(observer '_altaz_cache')<block_start>observer._altaz_cache={}<block_end># convert times, targets to tuple for hashing aakey=_make_cache_key(times targets)<if_stmt>aakey<not><in>observer._altaz_cache<block_start><try_stmt><block_start><if_stmt>force_zero_pressure<block_start>observer_old_pressure=observer.pressure<line_sep>observer.pressure=0<block_end>altaz=observer.altaz(times targets grid_times_targets=<false>)<line_sep>observer._altaz_cache[aakey]=dict(times=times altaz=altaz)<block_end><finally_stmt><block_start><if_stmt>force_zero_pressure<block_start>observer.pressure=observer_old_pressure<block_end><block_end><block_end><return>observer._altaz_cache[aakey]<block_end><def_stmt>_get_moon_data times observer force_zero_pressure=<false><block_start>""" Calculate moon altitude az and illumination for an array of times for ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. observer : `~astroplan.Observer` The observer who has constraints ``constraints``. force_zero_pressure : bool Forcefully use 0 pressure. Returns ------- moon_dict : dict Dictionary containing three key-value pairs. (1) 'times' contains the times for the computations, (2) 'altaz' contains the corresponding alt/az coordinates at those times and (3) contains the moon illumination for those times. """<if_stmt><not>hasattr(observer '_moon_cache')<block_start>observer._moon_cache={}<block_end># convert times to tuple for hashing aakey=_make_cache_key(times 'moon')<if_stmt>aakey<not><in>observer._moon_cache<block_start><try_stmt><block_start><if_stmt>force_zero_pressure<block_start>observer_old_pressure=observer.pressure<line_sep>observer.pressure=0<block_end>altaz=observer.moon_altaz(times)<line_sep>illumination=np.array(moon_illumination(times))<line_sep>observer._moon_cache[aakey]=dict(times=times illum=illumination altaz=altaz)<block_end><finally_stmt><block_start><if_stmt>force_zero_pressure<block_start>observer.pressure=observer_old_pressure<block_end><block_end><block_end><return>observer._moon_cache[aakey]<block_end><def_stmt>_get_meridian_transit_times times observer targets<block_start>""" Calculate next meridian transit for an array of times for ``targets`` and ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets Returns ------- time_dict : dict Dictionary containing a key-value pair. 'times' contains the meridian_transit times. """<if_stmt><not>hasattr(observer '_meridian_transit_cache')<block_start>observer._meridian_transit_cache={}<block_end># convert times to tuple for hashing aakey=_make_cache_key(times targets)<if_stmt>aakey<not><in>observer._meridian_transit_cache<block_start>meridian_transit_times=observer.target_meridian_transit_time(times targets)<line_sep>observer._meridian_transit_cache[aakey]=dict(times=meridian_transit_times)<block_end><return>observer._meridian_transit_cache[aakey]<block_end>@abstractmethod<class_stmt>Constraint(object)<block_start>""" Abstract class for objects defining observational constraints. 
"""<line_sep>__metaclass__=ABCMeta<def_stmt>__call__ self observer targets times=<none> time_range=<none> time_grid_resolution=0.5<times>u.hour grid_times_targets=<false><block_start>""" Compute the constraint for this class Parameters ---------- observer : `~astroplan.Observer` the observation location from which to apply the constraints targets : sequence of `~astroplan.Target` The targets on which to apply the constraints. times : `~astropy.time.Time` The times to compute the constraint. WHAT HAPPENS WHEN BOTH TIMES AND TIME_RANGE ARE SET? time_range : `~astropy.time.Time` (length = 2) Lower and upper bounds on time sequence. time_grid_resolution : `~astropy.units.quantity` Time-grid spacing grid_times_targets : bool if True, grids the constraint result with targets along the first index and times along the second. Otherwise, we rely on broadcasting the shapes together using standard numpy rules. Returns ------- constraint_result : 1D or 2D array of float or bool The constraints. If 2D with targets along the first index and times along the second. """<if_stmt>times<is><none><and>time_range<is><not><none><block_start>times=time_grid_from_range(time_range time_resolution=time_grid_resolution)<block_end><if_stmt>grid_times_targets<block_start>targets=get_skycoord(targets)<line_sep># TODO: these broadcasting operations are relatively slow # but there is potential for huge speedup if the end user # disables gridding and re-shapes the coords themselves # prior to evaluating multiple constraints. <if_stmt>targets.isscalar# ensure we have a (1, 1) shape coord <block_start>targets=SkyCoord(np.tile(targets 1))[: np.newaxis]<block_end><else_stmt><block_start>targets=targets[<ellipsis> np.newaxis]<block_end><block_end>times,targets=observer._preprocess_inputs(times targets grid_times_targets=<false>)<line_sep>result=self.compute_constraint(times observer targets)<line_sep># make sure the output has the same shape as would result from # broadcasting times and targets against each other <if_stmt>targets<is><not><none># broadcasting times v targets is slow due to # complex nature of these objects. We make # to simple numpy arrays of the same shape and # broadcast these to find the correct shape <block_start>shp1,shp2=times.shape targets.shape<line_sep>x=np.array([1])<line_sep>a=as_strided(x shape=shp1 strides=[0]<times>len(shp1))<line_sep>b=as_strided(x shape=shp2 strides=[0]<times>len(shp2))<line_sep>output_shape=np.broadcast(a b).shape<if_stmt>output_shape<ne>np.array(result).shape<block_start>result=np.broadcast_to(result output_shape)<block_end><block_end><return>result<block_end>@abstractmethod<def_stmt>compute_constraint self times observer targets<block_start>""" Actually do the real work of computing the constraint. Subclasses override this. Parameters ---------- times : `~astropy.time.Time` The times to compute the constraint observer : `~astroplan.Observer` the observaton location from which to apply the constraints targets : sequence of `~astroplan.Target` The targets on which to apply the constraints. Returns ------- constraint_result : 2D array of float or bool The constraints, with targets along the first index and times along the second. """<line_sep># Should be implemented on each subclass of Constraint <raise>NotImplementedError<block_end><block_end><class_stmt>AltitudeConstraint(Constraint)<block_start>""" Constrain the altitude of the target. .. 
note:: This can misbehave if you try to constrain negative altitudes, as the `~astropy.coordinates.AltAz` frame tends to mishandle negative altitudes. Parameters ---------- min : `~astropy.units.Quantity` or `None` Minimum altitude of the target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` Maximum altitude of the target (inclusive). `None` indicates no limit. boolean_constraint : bool If True, the constraint is treated as a boolean (True for within the limits and False for outside). If False, the constraint returns a float on [0, 1], where 0 is the min altitude and 1 is the max. """<def_stmt>__init__ self min=<none> max=<none> boolean_constraint=<true><block_start><if_stmt>min<is><none><block_start>self.min=-90<times>u.deg<block_end><else_stmt><block_start>self.min=min<block_end><if_stmt>max<is><none><block_start>self.max=90<times>u.deg<block_end><else_stmt><block_start>self.max=max<block_end>self.boolean_constraint=boolean_constraint<block_end><def_stmt>compute_constraint self times observer targets<block_start>cached_altaz=_get_altaz(times observer targets)<line_sep>alt=cached_altaz['altaz'].alt<if_stmt>self.boolean_constraint<block_start>lowermask=self.min<le>alt<line_sep>uppermask=alt<le>self.max<line_sep><return>lowermask&uppermask<block_end><else_stmt><block_start><return>max_best_rescale(alt self.min self.max)<block_end><block_end><block_end><class_stmt>AirmassConstraint(AltitudeConstraint)<block_start>""" Constrain the airmass of a target. In the current implementation the airmass is approximated by the secant of the zenith angle. .. note:: The ``max`` and ``min`` arguments appear in the order (max, min) in this initializer to support the common case for users who care about the upper limit on the airmass (``max``) and not the lower limit. Parameters ---------- max : float or `None` Maximum airmass of the target. `None` indicates no limit. min : float or `None` Minimum airmass of the target. `None` indicates no limit. boolean_constraint : bool If True, the constraint is treated as a boolean (True for within the limits and False for outside). If False, the constraint returns a float on [0, 1]. Examples -------- To create a constraint that requires the airmass be "better than 2", i.e. at a higher altitude than airmass=2:: AirmassConstraint(2) """<def_stmt>__init__ self max=<none> min=1 boolean_constraint=<true><block_start>self.min=min<line_sep>self.max=max<line_sep>self.boolean_constraint=boolean_constraint<block_end><def_stmt>compute_constraint self times observer targets<block_start>cached_altaz=_get_altaz(times observer targets)<line_sep>secz=cached_altaz['altaz'].secz.value<if_stmt>self.boolean_constraint<block_start><if_stmt>self.min<is><none><and>self.max<is><not><none><block_start>mask=secz<le>self.max<block_end><elif_stmt>self.max<is><none><and>self.min<is><not><none><block_start>mask=self.min<le>secz<block_end><elif_stmt>self.min<is><not><none><and>self.max<is><not><none><block_start>mask=(self.min<le>secz)&(secz<le>self.max)<block_end><else_stmt><block_start><raise>ValueError("No max and/or min specified in "<concat>"AirmassConstraint.")<block_end><return>mask<block_end><else_stmt><block_start><if_stmt>self.max<is><none><block_start><raise>ValueError("Cannot have a float AirmassConstraint if max is None.")<block_end><else_stmt><block_start>mx=self.max<block_end>mi=1<if>self.min<is><none><else>self.min<line_sep># values below 1 should be disregarded <return>min_best_rescale(secz mi mx less_than_min=0)<block_end><block_end><block_end><class_stmt>AtNightConstraint(Constraint)<block_start>""" Constrain the Sun to be below ``horizon``.
"""<line_sep>@u.quantity_input(horizon=u.deg)<def_stmt>__init__ self max_solar_altitude=0<times>u.deg force_pressure_zero=<true><block_start>""" Parameters ---------- max_solar_altitude : `~astropy.units.Quantity` The altitude of the sun below which it is considered to be "night" (inclusive). force_pressure_zero : bool (optional) Force the pressure to zero for solar altitude calculations. This avoids errors in the altitude of the Sun that can occur when the Sun is below the horizon and the corrections for atmospheric refraction return nonsense values. """<line_sep>self.max_solar_altitude=max_solar_altitude<line_sep>self.force_pressure_zero=force_pressure_zero<block_end>@classmethod<def_stmt>twilight_civil cls **kwargs<block_start>""" Consider nighttime as time between civil twilights (-6 degrees). """<line_sep><return>cls(max_solar_altitude=-6<times>u.deg **kwargs)<block_end>@classmethod<def_stmt>twilight_nautical cls **kwargs<block_start>""" Consider nighttime as time between nautical twilights (-12 degrees). """<line_sep><return>cls(max_solar_altitude=-12<times>u.deg **kwargs)<block_end>@classmethod<def_stmt>twilight_astronomical cls **kwargs<block_start>""" Consider nighttime as time between astronomical twilights (-18 degrees). """<line_sep><return>cls(max_solar_altitude=-18<times>u.deg **kwargs)<block_end><def_stmt>_get_solar_altitudes self times observer targets<block_start><if_stmt><not>hasattr(observer '_altaz_cache')<block_start>observer._altaz_cache={}<block_end>aakey=_make_cache_key(times 'sun')<if_stmt>aakey<not><in>observer._altaz_cache<block_start><try_stmt><block_start><if_stmt>self.force_pressure_zero<block_start>observer_old_pressure=observer.pressure<line_sep>observer.pressure=0<block_end># find solar altitude at these times altaz=observer.altaz(times get_sun(times))<line_sep>altitude=altaz.alt<line_sep># cache the altitude observer._altaz_cache[aakey]=dict(times=times altitude=altitude)<block_end><finally_stmt><block_start><if_stmt>self.force_pressure_zero<block_start>observer.pressure=observer_old_pressure<block_end><block_end><block_end><else_stmt><block_start>altitude=observer._altaz_cache[aakey]['altitude']<block_end><return>altitude<block_end><def_stmt>compute_constraint self times observer targets<block_start>solar_altitude=self._get_solar_altitudes(times observer targets)<line_sep>mask=solar_altitude<le>self.max_solar_altitude<line_sep><return>mask<block_end><block_end><class_stmt>GalacticLatitudeConstraint(Constraint)<block_start>""" Constrain the distance between the Galactic plane and some targets. """<def_stmt>__init__ self min=<none> max=<none><block_start>""" Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude of target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude of target (inclusive). `None` indicates no limit. 
"""<line_sep>self.min=min<line_sep>self.max=max<block_end><def_stmt>compute_constraint self times observer targets<block_start>separation=abs(targets.transform_to(Galactic).b)<if_stmt>self.min<is><none><and>self.max<is><not><none><block_start>mask=self.max<ge>separation<block_end><elif_stmt>self.max<is><none><and>self.min<is><not><none><block_start>mask=self.min<le>separation<block_end><elif_stmt>self.min<is><not><none><and>self.max<is><not><none><block_start>mask=((self.min<le>separation)&(separation<le>self.max))<block_end><else_stmt><block_start><raise>ValueError("No max and/or min specified in "<concat>"GalacticLatitudeConstraint.")<block_end><return>mask<block_end><block_end><class_stmt>SunSeparationConstraint(Constraint)<block_start>""" Constrain the distance between the Sun and some targets. """<def_stmt>__init__ self min=<none> max=<none><block_start>""" Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between Sun and target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between Sun and target (inclusive). `None` indicates no limit. """<line_sep>self.min=min<line_sep>self.max=max<block_end><def_stmt>compute_constraint self times observer targets# use get_body rather than get sun here, since # it returns the Sun's coordinates in an observer # centred frame, so the separation is as-seen # by the observer. # 'get_sun' returns ICRS coords. <block_start>sun=get_body('sun' times location=observer.location)<line_sep>solar_separation=sun.separation(targets)<if_stmt>self.min<is><none><and>self.max<is><not><none><block_start>mask=self.max<ge>solar_separation<block_end><elif_stmt>self.max<is><none><and>self.min<is><not><none><block_start>mask=self.min<le>solar_separation<block_end><elif_stmt>self.min<is><not><none><and>self.max<is><not><none><block_start>mask=((self.min<le>solar_separation)&(solar_separation<le>self.max))<block_end><else_stmt><block_start><raise>ValueError("No max and/or min specified in "<concat>"SunSeparationConstraint.")<block_end><return>mask<block_end><block_end><class_stmt>MoonSeparationConstraint(Constraint)<block_start>""" Constrain the distance between the Earth's moon and some targets. """<def_stmt>__init__ self min=<none> max=<none> ephemeris=<none><block_start>""" Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between moon and target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between moon and target (inclusive). `None` indicates no limit. ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). """<line_sep>self.min=min<line_sep>self.max=max<line_sep>self.ephemeris=ephemeris<block_end><def_stmt>compute_constraint self times observer targets# removed the location argument here, which causes small <1 deg # innacuracies, but it is needed until astropy PR #5897 is released # which should be astropy 1.3.2 <block_start>moon=get_moon(times ephemeris=self.ephemeris)<line_sep># note to future editors - the order matters here # moon.separation(targets) is NOT the same as targets.separation(moon) # the former calculates the separation in the frame of the moon coord # which is GCRS, and that is what we want. 
moon_separation=moon.separation(targets)<if_stmt>self.min<is><none><and>self.max<is><not><none><block_start>mask=self.max<ge>moon_separation<block_end><elif_stmt>self.max<is><none><and>self.min<is><not><none><block_start>mask=self.min<le>moon_separation<block_end><elif_stmt>self.min<is><not><none><and>self.max<is><not><none><block_start>mask=((self.min<le>moon_separation)&(moon_separation<le>self.max))<block_end><else_stmt><block_start><raise>ValueError("No max and/or min specified in "<concat>"MoonSeparationConstraint.")<block_end><return>mask<block_end><block_end><class_stmt>MoonIlluminationConstraint(Constraint)<block_start>""" Constrain the fractional illumination of the Earth's moon. Constraint is also satisfied if the Moon has set. """<def_stmt>__init__ self min=<none> max=<none> ephemeris=<none><block_start>""" Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. ephemeris : str, optional Ephemeris to use. If not given, use the one set with `~astropy.coordinates.solar_system_ephemeris` (which is set to 'builtin' by default). """<line_sep>self.min=min<line_sep>self.max=max<line_sep>self.ephemeris=ephemeris<block_end>@classmethod<def_stmt>dark cls min=<none> max=0.25 **kwargs<block_start>""" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of no minimum and a maximum of 0.25 Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. """<line_sep><return>cls(min max **kwargs)<block_end>@classmethod<def_stmt>grey cls min=0.25 max=0.65 **kwargs<block_start>""" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.25 and a maximum of 0.65 Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. """<line_sep><return>cls(min max **kwargs)<block_end>@classmethod<def_stmt>bright cls min=0.65 max=<none> **kwargs<block_start>""" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.65 and no maximum Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. """<line_sep><return>cls(min max **kwargs)<block_end><def_stmt>compute_constraint self times observer targets# first is the moon up? 
<block_start>cached_moon=_get_moon_data(times observer)<line_sep>moon_alt=cached_moon['altaz'].alt<line_sep>moon_down_mask=moon_alt<l>0<line_sep>moon_up_mask=moon_alt<ge>0<line_sep>illumination=cached_moon['illum']<if_stmt>self.min<is><none><and>self.max<is><not><none><block_start>mask=(self.max<ge>illumination)|moon_down_mask<block_end><elif_stmt>self.max<is><none><and>self.min<is><not><none><block_start>mask=(self.min<le>illumination)&moon_up_mask<block_end><elif_stmt>self.min<is><not><none><and>self.max<is><not><none><block_start>mask=((self.min<le>illumination)&(illumination<le>self.max))&moon_up_mask<block_end><else_stmt><block_start><raise>ValueError("No max and/or min specified in "<concat>"MoonSeparationConstraint.")<block_end><return>mask<block_end><block_end><class_stmt>LocalTimeConstraint(Constraint)<block_start>""" Constrain the observable hours. """<def_stmt>__init__ self min=<none> max=<none><block_start>""" Parameters ---------- min : `~datetime.time` Earliest local time (inclusive). `None` indicates no limit. max : `~datetime.time` Latest local time (inclusive). `None` indicates no limit. Examples -------- Constrain the observations to targets that are observable between 23:50 and 04:08 local time: >>> from astroplan import Observer >>> from astroplan.constraints import LocalTimeConstraint >>> import datetime as dt >>> subaru = Observer.at_site("Subaru", timezone="US/Hawaii") >>> # bound times between 23:50 and 04:08 local Hawaiian time >>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8)) """<line_sep>self.min=min<line_sep>self.max=max<if_stmt>self.min<is><none><and>self.max<is><none><block_start><raise>ValueError("You must at least supply either a minimum or a maximum time.")<block_end><if_stmt>self.min<is><not><none><block_start><if_stmt><not>isinstance(self.min datetime.time)<block_start><raise>TypeError("Time limits must be specified as datetime.time objects.")<block_end><block_end><if_stmt>self.max<is><not><none><block_start><if_stmt><not>isinstance(self.max datetime.time)<block_start><raise>TypeError("Time limits must be specified as datetime.time objects.")<block_end><block_end><block_end><def_stmt>compute_constraint self times observer targets<block_start>timezone=<none><line_sep># get timezone from time objects, or from observer <if_stmt>self.min<is><not><none><block_start>timezone=self.min.tzinfo<block_end><elif_stmt>self.max<is><not><none><block_start>timezone=self.max.tzinfo<block_end><if_stmt>timezone<is><none><block_start>timezone=observer.timezone<block_end><if_stmt>self.min<is><not><none><block_start>min_time=self.min<block_end><else_stmt><block_start>min_time=self.min=datetime.time(0 0 0)<block_end><if_stmt>self.max<is><not><none><block_start>max_time=self.max<block_end><else_stmt><block_start>max_time=datetime.time(23 59 59)<block_end># If time limits occur on same day: <if_stmt>min_time<l>max_time<block_start><try_stmt><block_start>mask=np.array([min_time<le>t.time()<le>max_time<for>t times.datetime])<block_end><except_stmt>BaseException# use np.bool so shape queries don't cause problems <block_start>mask=np.bool_(min_time<le>times.datetime.time()<le>max_time)<block_end><block_end># If time boundaries straddle midnight: <else_stmt><block_start><try_stmt><block_start>mask=np.array([(t.time()<ge>min_time)<or>(t.time()<le>max_time)<for>t 
times.datetime])<block_end><except_stmt>BaseException<block_start>mask=np.bool_((times.datetime.time()<ge>min_time)<or>(times.datetime.time()<le>max_time))<block_end><block_end><return>mask<block_end><block_end><class_stmt>TimeConstraint(Constraint)<block_start>"""Constrain the observing time to be within certain time limits. An example use case for this class would be to associate an acceptable time range with a specific observing block. This can be useful if not all observing blocks are valid over the time limits used in calls to `is_observable` or `is_always_observable`. """<def_stmt>__init__ self min=<none> max=<none><block_start>""" Parameters ---------- min : `~astropy.time.Time` Earliest time (inclusive). `None` indicates no limit. max : `~astropy.time.Time` Latest time (inclusive). `None` indicates no limit. Examples -------- Constrain the observations to targets that are observable between 2016-03-28 and 2016-03-30: >>> from astroplan import Observer >>> from astropy.time import Time >>> subaru = Observer.at_site("Subaru") >>> t1 = Time("2016-03-28T12:00:00") >>> t2 = Time("2016-03-30T12:00:00") >>> constraint = TimeConstraint(t1,t2) """<line_sep>self.min=min<line_sep>self.max=max<if_stmt>self.min<is><none><and>self.max<is><none><block_start><raise>ValueError("You must at least supply either a minimum or a "<concat>"maximum time.")<block_end><if_stmt>self.min<is><not><none><block_start><if_stmt><not>isinstance(self.min Time)<block_start><raise>TypeError("Time limits must be specified as "<concat>"astropy.time.Time objects.")<block_end><block_end><if_stmt>self.max<is><not><none><block_start><if_stmt><not>isinstance(self.max Time)<block_start><raise>TypeError("Time limits must be specified as "<concat>"astropy.time.Time objects.")<block_end><block_end><block_end><def_stmt>compute_constraint self times observer targets<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter('ignore')<line_sep>min_time=Time("1950-01-01T00:00:00")<if>self.min<is><none><else>self.min<line_sep>max_time=Time("2120-01-01T00:00:00")<if>self.max<is><none><else>self.max<block_end>mask=np.logical_and(times<g>min_time times<l>max_time)<line_sep><return>mask<block_end><block_end><class_stmt>PrimaryEclipseConstraint(Constraint)<block_start>""" Constrain observations to times during primary eclipse. """<def_stmt>__init__ self eclipsing_system<block_start>""" Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be in primary eclipse. """<line_sep>self.eclipsing_system=eclipsing_system<block_end><def_stmt>compute_constraint self times observer=<none> targets=<none><block_start>mask=self.eclipsing_system.in_primary_eclipse(times)<line_sep><return>mask<block_end><block_end><class_stmt>SecondaryEclipseConstraint(Constraint)<block_start>""" Constrain observations to times during secondary eclipse. """<def_stmt>__init__ self eclipsing_system<block_start>""" Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be in secondary eclipse. """<line_sep>self.eclipsing_system=eclipsing_system<block_end><def_stmt>compute_constraint self times observer=<none> targets=<none><block_start>mask=self.eclipsing_system.in_secondary_eclipse(times)<line_sep><return>mask<block_end><block_end><class_stmt>PhaseConstraint(Constraint)<block_start>""" Constrain observations to times in some range of phases for a periodic event (e.g.~transiting exoplanets, eclipsing binaries). 
"""<def_stmt>__init__ self periodic_event min=<none> max=<none><block_start>""" Parameters ---------- periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass System on which to compute the phase. For example, the system could be an eclipsing or non-eclipsing binary, or exoplanet system. min : float (optional) Minimum phase (inclusive) on interval [0, 1). Default is zero. max : float (optional) Maximum phase (inclusive) on interval [0, 1). Default is one. Examples -------- To constrain observations on orbital phases between 0.4 and 0.6, >>> from astroplan import PeriodicEvent >>> from astropy.time import Time >>> import astropy.units as u >>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint = PhaseConstraint(binary, min=0.4, max=0.6) The minimum and maximum phase must be described on the interval [0, 1). To constrain observations on orbital phases between 0.6 and 1.2, for example, you should subtract one from the second number: >>> constraint = PhaseConstraint(binary, min=0.6, max=0.2) """<line_sep>self.periodic_event=periodic_event<if_stmt>(min<l>0)<or>(min<g>1)<or>(max<l>0)<or>(max<g>1)<block_start><raise>ValueError('The minimum of the PhaseConstraint must be within'<concat>' the interval [0, 1).')<block_end>self.min=min<if>min<is><not><none><else>0.0<line_sep>self.max=max<if>max<is><not><none><else>1.0<block_end><def_stmt>compute_constraint self times observer=<none> targets=<none><block_start>phase=self.periodic_event.phase(times)<line_sep>mask=np.where(self.max<g>self.min (phase<ge>self.min)&(phase<le>self.max) (phase<ge>self.min)|(phase<le>self.max))<line_sep><return>mask<block_end><block_end><def_stmt>is_always_observable constraints observer targets times=<none> time_range=<none> time_grid_resolution=0.5<times>u.hour<block_start>""" A function to determine whether ``targets`` are always observable throughout ``time_range`` given constraints in the ``constraints_list`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- ever_observable : list List of booleans of same length as ``targets`` for whether or not each target is observable in the time range given the constraints. 
"""<if_stmt><not>hasattr(constraints '__len__')<block_start>constraints=[constraints]<block_end>applied_constraints=[constraint(observer targets times=times time_range=time_range time_grid_resolution=time_grid_resolution grid_times_targets=<true>)<for>constraint constraints]<line_sep>constraint_arr=np.logical_and.reduce(applied_constraints)<line_sep><return>np.all(constraint_arr axis=1)<block_end><def_stmt>is_observable constraints observer targets times=<none> time_range=<none> time_grid_resolution=0.5<times>u.hour<block_start>""" Determines if the ``targets`` are observable during ``time_range`` given constraints in ``constraints_list`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- ever_observable : list List of booleans of same length as ``targets`` for whether or not each target is ever observable in the time range given the constraints. """<if_stmt><not>hasattr(constraints '__len__')<block_start>constraints=[constraints]<block_end>applied_constraints=[constraint(observer targets times=times time_range=time_range time_grid_resolution=time_grid_resolution grid_times_targets=<true>)<for>constraint constraints]<line_sep>constraint_arr=np.logical_and.reduce(applied_constraints)<line_sep><return>np.any(constraint_arr axis=1)<block_end><def_stmt>is_event_observable constraints observer target times=<none> times_ingress_egress=<none><block_start>""" Determines if the ``target`` is observable at each time in ``times``, given constraints in ``constraints`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times : `~astropy.time.Time` (optional) Array of mid-event times on which to test the constraints times_ingress_egress : `~astropy.time.Time` (optional) Array of ingress and egress times for ``N`` events, with shape (``N``, 2). Returns ------- event_observable : `~numpy.ndarray` Array of booleans of same length as ``times`` for whether or not the target is ever observable at each time, given the constraints. 
"""<if_stmt><not>hasattr(constraints '__len__')<block_start>constraints=[constraints]<block_end><if_stmt>times<is><not><none><block_start>applied_constraints=[constraint(observer target times=times grid_times_targets=<true>)<for>constraint constraints]<line_sep>constraint_arr=np.logical_and.reduce(applied_constraints)<block_end><else_stmt><block_start>times_ing=times_ingress_egress[: 0]<line_sep>times_egr=times_ingress_egress[: 1]<line_sep>applied_constraints_ing=[constraint(observer target times=times_ing grid_times_targets=<true>)<for>constraint constraints]<line_sep>applied_constraints_egr=[constraint(observer target times=times_egr grid_times_targets=<true>)<for>constraint constraints]<line_sep>constraint_arr=np.logical_and(np.logical_and.reduce(applied_constraints_ing) np.logical_and.reduce(applied_constraints_egr))<block_end><return>constraint_arr<block_end><def_stmt>months_observable constraints observer targets time_range=_current_year_time_range time_grid_resolution=0.5<times>u.hour<block_start>""" Determines which month the specified ``targets`` are observable for a specific ``observer``, given the supplied ``constraints``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence If ``time_range`` is not specified, defaults to current year (localtime) time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- observable_months : list List of sets of unique integers representing each month that a target is observable, one set per target. These integers are 1-based so that January maps to 1, February maps to 2, etc. """<line_sep># TODO: This method could be sped up a lot by dropping to the trigonometric # altitude calculations. <if_stmt><not>hasattr(constraints '__len__')<block_start>constraints=[constraints]<block_end>times=time_grid_from_range(time_range time_grid_resolution)<line_sep># TODO: This method could be sped up a lot by dropping to the trigonometric # altitude calculations. applied_constraints=[constraint(observer targets times=times grid_times_targets=<true>)<for>constraint constraints]<line_sep>constraint_arr=np.logical_and.reduce(applied_constraints)<line_sep>months_observable=[]<for_stmt>target,observable zip(targets constraint_arr)<block_start>s=set([t.datetime.month<for>t times[observable]])<line_sep>months_observable.append(s)<block_end><return>months_observable<block_end><def_stmt>observability_table constraints observer targets times=<none> time_range=<none> time_grid_resolution=0.5<times>u.hour<block_start>""" Creates a table with information about observability for all the ``targets`` over the requested ``time_range``, given the constraints in ``constraints_list`` for ``observer``. 
Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. If a single (scalar) time, the table will be for a 24 hour period centered on that time. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- observability_table : `~astropy.table.Table` A Table containing the observability information for each of the ``targets``. The table contains four columns with information about the target and it's observability: ``'target name'``, ``'ever observable'``, ``'always observable'``, and ``'fraction of time observable'``. The column ``'time observable'`` will also be present if the ``time_range`` is given as a scalar. It also contains metadata entries ``'times'`` (with an array of all the times), ``'observer'`` (the `~astroplan.Observer` object), and ``'constraints'`` (containing the supplied ``constraints``). """<if_stmt><not>hasattr(constraints '__len__')<block_start>constraints=[constraints]<block_end>is_24hr_table=<false><if_stmt>hasattr(time_range 'isscalar')<and>time_range.isscalar<block_start>time_range=(time_range-12<times>u.hour time_range+12<times>u.hour)<line_sep>is_24hr_table=<true><block_end>applied_constraints=[constraint(observer targets times=times time_range=time_range time_grid_resolution=time_grid_resolution grid_times_targets=<true>)<for>constraint constraints]<line_sep>constraint_arr=np.logical_and.reduce(applied_constraints)<line_sep>colnames=['target name' 'ever observable' 'always observable' 'fraction of time observable']<line_sep>target_names=[target.name<for>target targets]<line_sep>ever_obs=np.any(constraint_arr axis=1)<line_sep>always_obs=np.all(constraint_arr axis=1)<line_sep>frac_obs=np.sum(constraint_arr axis=1)/constraint_arr.shape[1]<line_sep>tab=table.Table(names=colnames data=[target_names ever_obs always_obs frac_obs])<if_stmt>times<is><none><and>time_range<is><not><none><block_start>times=time_grid_from_range(time_range time_resolution=time_grid_resolution)<block_end><if_stmt>is_24hr_table<block_start>tab['time observable']=tab['fraction of time observable']<times>24<times>u.hour<block_end>tab.meta['times']=times.datetime<line_sep>tab.meta['observer']=observer<line_sep>tab.meta['constraints']=constraints<line_sep><return>tab<block_end><def_stmt>min_best_rescale vals min_val max_val less_than_min=1<block_start>""" rescales an input array ``vals`` to be a score (between zero and one), where the ``min_val`` goes to one, and the ``max_val`` goes to zero. Parameters ---------- vals : array-like the values that need to be rescaled to be between 0 and 1 min_val : float worst acceptable value (rescales to 0) max_val : float best value cared about (rescales to 1) less_than_min : 0 or 1 what is returned for ``vals`` below ``min_val``. 
(in some cases anything less than ``min_val`` should also return one, in some cases it should return zero) Returns ------- array of floats between 0 and 1 inclusive rescaled so that ``vals`` equal to ``max_val`` equal 0 and those equal to ``min_val`` equal 1 Examples -------- rescale airmasses to between 0 and 1, with the best (1) and worst (2.25). All values outside the range should return 0. >>> from astroplan.constraints import min_best_rescale >>> import numpy as np >>> airmasses = np.array([1, 1.5, 2, 3, 0]) >>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0) # doctest: +FLOAT_CMP array([ 1. , 0.6, 0.2, 0. , 0. ]) """<line_sep>rescaled=(vals-max_val)/(min_val-max_val)<line_sep>below=vals<l>min_val<line_sep>above=vals<g>max_val<line_sep>rescaled[below]=less_than_min<line_sep>rescaled[above]=0<line_sep><return>rescaled<block_end><def_stmt>max_best_rescale vals min_val max_val greater_than_max=1<block_start>""" rescales an input array ``vals`` to be a score (between zero and one), where the ``max_val`` goes to one, and the ``min_val`` goes to zero. Parameters ---------- vals : array-like the values that need to be rescaled to be between 0 and 1 min_val : float worst acceptable value (rescales to 0) max_val : float best value cared about (rescales to 1) greater_than_max : 0 or 1 what is returned for ``vals`` above ``max_val``. (in some cases anything higher than ``max_val`` should also return one, in some cases it should return zero) Returns ------- array of floats between 0 and 1 inclusive rescaled so that ``vals`` equal to ``min_val`` equal 0 and those equal to ``max_val`` equal 1 Examples -------- rescale an array of altitudes to be between 0 and 1, with the best (60) going to 1 and worst (35) going to 0. For values outside the range, the rescale should return 0 below 35 and 1 above 60. >>> from astroplan.constraints import max_best_rescale >>> import numpy as np >>> altitudes = np.array([20, 30, 40, 45, 55, 70]) >>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP array([ 0. , 0. , 0.2, 0.4, 0.8, 1. ]) """<line_sep>rescaled=(vals-min_val)/(max_val-min_val)<line_sep>below=vals<l>min_val<line_sep>above=vals<g>max_val<line_sep>rescaled[below]=0<line_sep>rescaled[above]=greater_than_max<line_sep><return>rescaled<block_end>
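# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example of how the helpers above fit together: build a list
# of constraints, then ask for an observability summary. It assumes the
# AltitudeConstraint, AirmassConstraint and AtNightConstraint classes defined
# earlier in this module, an internet connection for FixedTarget.from_name /
# Observer.at_site, and an arbitrary observing window; adjust these to your setup.
import astropy.units as u
from astropy.time import Time
from astroplan import Observer, FixedTarget
from astroplan.constraints import (AltitudeConstraint, AirmassConstraint,
                                   AtNightConstraint, observability_table)

subaru = Observer.at_site("Subaru")
targets = [FixedTarget.from_name("Vega"), FixedTarget.from_name("Sirius")]
constraints = [AltitudeConstraint(min=30 * u.deg),      # reject low-elevation times
               AirmassConstraint(max=2),                # reject high-airmass times
               AtNightConstraint.twilight_astronomical()]
time_range = Time(["2016-03-28 12:00", "2016-03-29 12:00"])

# One row per target: ever/always observable plus the fraction of time observable.
print(observability_table(constraints, subaru, targets, time_range=time_range))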
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ test pretrained models """<import_from_future_stmt> print_function<import_stmt>mxnet<as>mx<import_from_stmt>common find_mxnet modelzoo<import_from_stmt>score score<line_sep>VAL_DATA='data/val-5k-256.rec'<def_stmt>download_data <block_start><return>mx.test_utils.download('http://data.mxnet.io/data/val-5k-256.rec' VAL_DATA)<block_end><def_stmt>test_imagenet1k_resnet **kwargs<block_start>models=['imagenet1k-resnet-50' 'imagenet1k-resnet-152']<line_sep>accs=[.77 .78]<for_stmt>(m g) zip(models accs)<block_start>acc=mx.metric.create('acc')<line_sep>(speed )=score(model=m data_val=VAL_DATA rgb_mean='0,0,0' metrics=acc **kwargs)<line_sep>r=acc.get()[1]<line_sep>print('Tested %s, acc = %f, speed = %f img/sec'%(m r speed))<assert_stmt>r<g>g<and>r<l>g+.1<block_end><block_end><def_stmt>test_imagenet1k_inception_bn **kwargs<block_start>acc=mx.metric.create('acc')<line_sep>m='imagenet1k-inception-bn'<line_sep>g=0.75<line_sep>(speed )=score(model=m data_val=VAL_DATA rgb_mean='123.68,116.779,103.939' metrics=acc **kwargs)<line_sep>r=acc.get()[1]<line_sep>print('Tested %s acc = %f, speed = %f img/sec'%(m r speed))<assert_stmt>r<g>g<and>r<l>g+.1<block_end><if_stmt>__name__<eq>'__main__'<block_start>gpus=mx.test_utils.list_gpus()<assert_stmt>len(gpus)<g>0<line_sep>batch_size=16<times>len(gpus)<line_sep>gpus=','.join([str(i)<for>i gpus])<line_sep>kwargs={'gpus':gpus 'batch_size':batch_size 'max_num_examples':500}<line_sep>download_data()<line_sep>test_imagenet1k_resnet(**kwargs)<line_sep>test_imagenet1k_inception_bn(**kwargs)<block_end>
"Actions for compiling resx files"<line_sep>load("@io_bazel_rules_dotnet//dotnet/private:providers.bzl" "DotnetResourceInfo" )<def_stmt>_make_runner_arglist dotnet source output resgen<block_start>args=dotnet.actions.args()<if_stmt>type(source)<eq>"Target"<block_start>args.add_all(source.files)<block_end><else_stmt><block_start>args.add(source)<block_end>args.add(output)<line_sep><return>args<block_end><def_stmt>emit_resx_core dotnet name="" src=<none> identifier=<none> out=<none> customresgen=<none><block_start>"""The function adds an action that compiles a single .resx file into .resources file. Returns [DotnetResourceInfo](api.md#dotnetresourceinfo). Args: dotnet: [DotnetContextInfo](api.md#dotnetcontextinfo). name: name of the file to generate. src: The .resx source file that is transformed into .resources file. Only `.resx` files are permitted. identifier: The logical name for the resource; the name that is used to load the resource. The default is the basename of the file name (no subfolder). out: An alternative name of the output file (if name should not be used). customresgen: custom resgen program to use. Returns: DotnetResourceInfo: [DotnetResourceInfo](api.md#dotnetresourceinfo). """<if_stmt>name<eq>""<and>out<eq><none><block_start>fail("either name or out must be set")<block_end><if_stmt><not>out<block_start>result=dotnet.actions.declare_file(name+".resources")<block_end><else_stmt><block_start>result=dotnet.actions.declare_file(out)<block_end>args=_make_runner_arglist(dotnet src result customresgen.files_to_run.executable.path)<line_sep># We use the command to extrace shell path and force runfiles creation resolve=dotnet._ctx.resolve_tools(tools=[customresgen])<line_sep>inputs=src.files.to_list()<if>type(src)<eq>"Target"<else>[src]<line_sep>dotnet.actions.run(inputs=inputs+resolve[0].to_list() tools=customresgen.default_runfiles.files outputs=[result] executable=customresgen.files_to_run arguments=[args] env={"RUNFILES_MANIFEST_FILE":customresgen.files_to_run.runfiles_manifest.path} mnemonic="CoreResxCompile" input_manifests=resolve[1] progress_message=("Compiling resoources"+dotnet.label.package+":"+dotnet.label.name) )<line_sep><return>DotnetResourceInfo(name=name result=result identifier=identifier )<block_end>
# Owner(s): ["oncall: jit"] <import_stmt>torch<import_stmt>os<import_stmt>sys<import_from_stmt>torch.testing._internal.jit_utils JitTestCase<line_sep># Make the helper files in test/ importable pytorch_test_dir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))<line_sep>sys.path.append(pytorch_test_dir)<if_stmt>__name__<eq>'__main__'<block_start><raise>RuntimeError("This test file is not meant to be run directly, use:\n\n"<concat>"\tpython test/test_jit.py TESTNAME\n\n"<concat>"instead.")<block_end><class_stmt>TestModules(JitTestCase)<block_start><def_stmt>test_script_module_with_constants_list self<block_start>""" Test that a module that has __constants__ set to something that is not a set can be scripted. """<line_sep># torch.nn.Linear has a __constants__ attribute defined # and intialized to a list. <class_stmt>Net(torch.nn.Linear)<block_start>x:torch.jit.Final[int]<def_stmt>__init__ self<block_start>super().__init__(5 10)<line_sep>self.x=0<block_end><block_end>self.checkModule(Net() (torch.randn(5) ))<block_end><block_end>
""" Grains for Cisco NX-OS minions .. versionadded:: 2016.11.0 For documentation on setting up the nxos proxy minion look in the documentation for :mod:`salt.proxy.nxos<salt.proxy.nxos>`. """<import_stmt>logging<import_stmt>salt.utils.nxos<import_stmt>salt.utils.platform<import_from_stmt>salt.exceptions NxosClientError<line_sep>log=logging.getLogger(__name__)<line_sep>__proxyenabled__=["nxos"]<line_sep>__virtualname__="nxos"<def_stmt>__virtual__ <block_start><try_stmt><block_start>salt.utils.nxos.version_info()<block_end><except_stmt>NxosClientError<as>err<block_start><return><false> err<block_end><return>__virtualname__<block_end><def_stmt>system_information proxy=<none><block_start><if_stmt>salt.utils.platform.is_proxy()<block_start><if_stmt>proxy<is><none><block_start><return>{}<block_end><if_stmt>proxy["nxos.initialized"]()<is><false><block_start><return>{}<block_end><return>{"nxos":proxy["nxos.grains"]()}<block_end><else_stmt><block_start>data=salt.utils.nxos.version_info()<line_sep><return>salt.utils.nxos.system_info(data)<block_end><block_end>
<import_stmt>pymsteams<import_stmt>logging<import_from_stmt>oncall.constants TEAMS_SUPPORT<class_stmt>teams_messenger(object)<block_start>supports=frozenset([TEAMS_SUPPORT])<def_stmt>__init__ self config<block_start>self.webhook=config['webhook']<block_end><def_stmt>send self message<block_start>heading=message.get("subject")<line_sep>final_message="User: "+message.get("user")+" Message: "+message.get("body")<try_stmt><block_start>myTeamsMessage=pymsteams.connectorcard(self.webhook)<line_sep>myTeamsMessage.title(str(heading))<line_sep>myTeamsMessage.text(str(final_message))<line_sep>myTeamsMessage.send()<block_end><except_stmt><block_start>logging.info("An issue occurred while sending the message to the Teams messenger")<block_end><block_end><block_end>
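# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example of driving the messenger directly. The import path,
# webhook URL and message fields below are placeholders/assumptions; in a real
# Oncall deployment the framework builds the config and the message dict itself.
from oncall.messengers.teams_messenger import teams_messenger  # import path assumed

config = {"webhook": "https://example.webhook.office.com/webhookb2/placeholder"}
messenger = teams_messenger(config)
messenger.send({
    "subject": "On-call handoff",
    "user": "jdoe",
    "body": "Your on-call shift starts at 08:00 UTC.",
})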
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Sparse Fine-tuning the library models for question answering. """<line_sep># You can also adapt this script on your own question answering task. Pointers for this are left as comments. <import_from_stmt>nn_pruning.sparse_trainer SparseTrainer<import_from_stmt>.qa_train QATrainer<line_sep># SparseTrainer should appear first in the base classes, as its functions must override QATrainer and its base classes (Trainer) <class_stmt>QASparseTrainer(SparseTrainer QATrainer)<block_start><def_stmt>__init__ self sparse_args *args **kwargs<block_start>QATrainer.__init__(self *args **kwargs)<line_sep>SparseTrainer.__init__(self sparse_args)<block_end><block_end>
# Barcode Example # # This example shows off how easy it is to detect bar codes using the # OpenMV Cam M7. Barcode detection does not work on the M4 Camera. <import_stmt>sensor image time math<line_sep>sensor.reset()<line_sep>sensor.set_pixformat(sensor.GRAYSCALE)<line_sep>sensor.set_framesize(sensor.VGA)# High Res! sensor.set_windowing((640 80))# V Res of 80 == less work (40 for 2X the speed). sensor.skip_frames(time=2000)<line_sep>sensor.set_auto_gain(<false>)# must turn this off to prevent image washout... sensor.set_auto_whitebal(<false>)# must turn this off to prevent image washout... clock=time.clock()<line_sep># Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's # OV7725 camera module. Barcode detection will also work in RGB565 mode but at # a lower resolution. That said, barcode detection requires a higher resolution # to work well so it should always be run at 640x480 in grayscale... <def_stmt>barcode_name code<block_start><if_stmt>(code.type()<eq>image.EAN2)<block_start><return>"EAN2"<block_end><if_stmt>(code.type()<eq>image.EAN5)<block_start><return>"EAN5"<block_end><if_stmt>(code.type()<eq>image.EAN8)<block_start><return>"EAN8"<block_end><if_stmt>(code.type()<eq>image.UPCE)<block_start><return>"UPCE"<block_end><if_stmt>(code.type()<eq>image.ISBN10)<block_start><return>"ISBN10"<block_end><if_stmt>(code.type()<eq>image.UPCA)<block_start><return>"UPCA"<block_end><if_stmt>(code.type()<eq>image.EAN13)<block_start><return>"EAN13"<block_end><if_stmt>(code.type()<eq>image.ISBN13)<block_start><return>"ISBN13"<block_end><if_stmt>(code.type()<eq>image.I25)<block_start><return>"I25"<block_end><if_stmt>(code.type()<eq>image.DATABAR)<block_start><return>"DATABAR"<block_end><if_stmt>(code.type()<eq>image.DATABAR_EXP)<block_start><return>"DATABAR_EXP"<block_end><if_stmt>(code.type()<eq>image.CODABAR)<block_start><return>"CODABAR"<block_end><if_stmt>(code.type()<eq>image.CODE39)<block_start><return>"CODE39"<block_end><if_stmt>(code.type()<eq>image.PDF417)<block_start><return>"PDF417"<block_end><if_stmt>(code.type()<eq>image.CODE93)<block_start><return>"CODE93"<block_end><if_stmt>(code.type()<eq>image.CODE128)<block_start><return>"CODE128"<block_end><block_end><while_stmt>(<true>)<block_start>clock.tick()<line_sep>img=sensor.snapshot()<line_sep>codes=img.find_barcodes()<for_stmt>code codes<block_start>img.draw_rectangle(code.rect())<line_sep>print_args=(barcode_name(code) code.payload() (180<times>code.rotation())/math.pi code.quality() clock.fps())<line_sep>print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f"%print_args)<block_end><if_stmt><not>codes<block_start>print("FPS %f"%clock.fps())<block_end><block_end>
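# --- Alternative sketch (not part of the original example) --------------------
# The long if-chain in barcode_name() above can be collapsed into a lookup
# table; this is only a suggested refactor, using the same image.* constants.
import image  # OpenMV 'image' module, already imported in the script above

_BARCODE_NAMES = {
    image.EAN2: "EAN2", image.EAN5: "EAN5", image.EAN8: "EAN8",
    image.UPCE: "UPCE", image.ISBN10: "ISBN10", image.UPCA: "UPCA",
    image.EAN13: "EAN13", image.ISBN13: "ISBN13", image.I25: "I25",
    image.DATABAR: "DATABAR", image.DATABAR_EXP: "DATABAR_EXP",
    image.CODABAR: "CODABAR", image.CODE39: "CODE39", image.PDF417: "PDF417",
    image.CODE93: "CODE93", image.CODE128: "CODE128",
}

def barcode_name(code):
    # Fall back to "UNKNOWN" for any type not listed above.
    return _BARCODE_NAMES.get(code.type(), "UNKNOWN")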
""" Custom management command to rebuild thumbnail images - May be required after importing a new dataset, for example """<import_stmt>os<import_stmt>logging<import_from_stmt>PIL UnidentifiedImageError<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>django.conf settings<import_from_stmt>django.db.utils OperationalError ProgrammingError<import_from_stmt>company.models Company<import_from_stmt>part.models Part<line_sep>logger=logging.getLogger("inventree-thumbnails")<class_stmt>Command(BaseCommand)<block_start>""" Rebuild all thumbnail images """<def_stmt>rebuild_thumbnail self model<block_start>""" Rebuild the thumbnail specified by the "image" field of the provided model """<if_stmt><not>model.image<block_start><return><block_end>img=model.image<line_sep>url=img.thumbnail.name<line_sep>loc=os.path.join(settings.MEDIA_ROOT url)<if_stmt><not>os.path.exists(loc)<block_start>logger.info(f"Generating thumbnail image for '{img}'")<try_stmt><block_start>model.image.render_variations(replace=<false>)<block_end><except_stmt>FileNotFoundError<block_start>logger.error(f"ERROR: Image file '{img}' is missing")<block_end><except_stmt>UnidentifiedImageError<block_start>logger.error(f"ERROR: Image file '{img}' is not a valid image")<block_end><block_end><block_end><def_stmt>handle self *args **kwargs<block_start>logger.setLevel(logging.INFO)<line_sep>logger.info("Rebuilding Part thumbnails")<for_stmt>part Part.objects.exclude(image=<none>)<block_start><try_stmt><block_start>self.rebuild_thumbnail(part)<block_end><except_stmt>(OperationalError ProgrammingError)<block_start>logger.error("ERROR: Database read error.")<line_sep><break><block_end><block_end>logger.info("Rebuilding Company thumbnails")<for_stmt>company Company.objects.exclude(image=<none>)<block_start><try_stmt><block_start>self.rebuild_thumbnail(company)<block_end><except_stmt>(OperationalError ProgrammingError)<block_start>logger.error("ERROR: abase read error.")<line_sep><break><block_end><block_end><block_end><block_end>
<import_from_stmt>glue.core.data_factories.helpers has_extension<import_from_stmt>glue.config data_factory<line_sep>__all__=['tabular_data']<line_sep>@data_factory(label="ASCII Table" identifier=has_extension('csv txt tsv tbl dat '<concat>'csv.gz txt.gz tbl.bz '<concat>'dat.gz') priority=1)<def_stmt>tabular_data path **kwargs<block_start><import_from_stmt>glue.core.data_factories.astropy_table astropy_tabular_data<import_from_stmt>glue.core.data_factories.pandas pandas_read_table<for_stmt>fac [astropy_tabular_data pandas_read_table]<block_start><try_stmt><block_start><return>fac(path **kwargs)<block_end><except_stmt>Exception<block_start><pass><block_end><block_end><else_stmt><block_start><raise>IOError("Could not parse file: %s"%path)<block_end><block_end>
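# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example: the factory above is normally selected
# automatically by glue's load_data() based on the file extension.
# 'catalog.csv' is a hypothetical file path.
from glue.core.data_factories import load_data

data = load_data("catalog.csv")   # dispatches to tabular_data for .csv files
print(data.components)            # column components parsed from the table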
# -*- coding: UTF-8 -*- <import_from_stmt>PySide2.QtWidgets QWidget QPushButton QVBoxLayout<import_from_stmt>PySide2.QtCore Signal<import_from_stmt>moduels.component.NormalValue 常量<import_from_stmt>moduels.component.SponsorDialog SponsorDialog<import_stmt>os webbrowser<class_stmt>Tab_Help(QWidget)<block_start>状态栏消息=Signal(str int)<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.initElement()# initialize the widgets first self.initSlots()# then connect the widgets to their signal slots self.initLayout()# then lay them out self.initValue()<block_end># finally set the widgets' values <def_stmt>initElement self<block_start>self.打开帮助按钮=QPushButton(self.tr('打开帮助文档'))<line_sep>self.ffmpegMannualNoteButton=QPushButton(self.tr('查看作者的 FFmpeg 笔记'))<line_sep>self.openVideoHelpButtone=QPushButton(self.tr('查看视频教程'))<line_sep>self.openGiteePage=QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本'))<line_sep>self.openGithubPage=QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本'))<line_sep>self.linkToDiscussPage=QPushButton(self.tr('加入 QQ 群'))<line_sep>self.tipButton=QPushButton(self.tr('打赏作者'))<line_sep>self.masterLayout=QVBoxLayout()<block_end><def_stmt>initSlots self<block_start>self.打开帮助按钮.clicked.connect(self.openHelpDocument)<line_sep>self.ffmpegMannualNoteButton.clicked.connect(<lambda>:webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489')))<line_sep>self.openVideoHelpButtone.clicked.connect(<lambda>:webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/')))<line_sep>self.openGiteePage.clicked.connect(<lambda>:webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases')))<line_sep>self.openGithubPage.clicked.connect(<lambda>:webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases')))<line_sep>self.linkToDiscussPage.clicked.connect(<lambda>:webbrowser.open(self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi')))<line_sep>self.tipButton.clicked.connect(<lambda>:SponsorDialog(self))<block_end><def_stmt>initLayout self<block_start>self.setLayout(self.masterLayout)<line_sep># self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone)<line_sep>self.masterLayout.addWidget(self.openGiteePage)<line_sep>self.masterLayout.addWidget(self.openGithubPage)<line_sep>self.masterLayout.addWidget(self.linkToDiscussPage)<line_sep>self.masterLayout.addWidget(self.tipButton)<block_end><def_stmt>initValue self<block_start>self.打开帮助按钮.setMaximumHeight(100)<line_sep>self.ffmpegMannualNoteButton.setMaximumHeight(100)<line_sep>self.openVideoHelpButtone.setMaximumHeight(100)<line_sep>self.openGiteePage.setMaximumHeight(100)<line_sep>self.openGithubPage.setMaximumHeight(100)<line_sep>self.linkToDiscussPage.setMaximumHeight(100)<line_sep>self.tipButton.setMaximumHeight(100)<block_end><def_stmt>openHelpDocument self<block_start><try_stmt><block_start><if_stmt>常量.系统平台<eq>'Darwin'<block_start><import_stmt>shlex<line_sep>os.system("open "+shlex.quote(self.tr("./misc/Docs/README_zh.html")))<block_end><elif_stmt>常量.系统平台<eq>'Windows'<block_start>os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html')))<block_end><block_end><except_stmt><block_start>print('未能打开帮助文档')<block_end><block_end><block_end>
<def_stmt>test_xrange judge_command<block_start>judge_command("XRANGE somestream - +" {"command":"XRANGE" "key":"somestream" "stream_id":["-" "+"]} )<line_sep>judge_command("XRANGE somestream 1526985054069 1526985055069" {"command":"XRANGE" "key":"somestream" "stream_id":["1526985054069" "1526985055069"] } )<line_sep>judge_command("XRANGE somestream 1526985054069 1526985055069-10" {"command":"XRANGE" "key":"somestream" "stream_id":["1526985054069" "1526985055069-10"] } )<line_sep>judge_command("XRANGE somestream 1526985054069 1526985055069-10 count 10" {"command":"XRANGE" "key":"somestream" "stream_id":["1526985054069" "1526985055069-10"] "count_const":"count" "count":"10" } )<block_end><def_stmt>test_xgroup_create judge_command<block_start>judge_command("XGROUP CREATE mykey mygroup 123" {"command":"XGROUP" "stream_create":"CREATE" "key":"mykey" "group":"mygroup" "stream_id":"123" } )<line_sep>judge_command("XGROUP CREATE mykey mygroup $" {"command":"XGROUP" "stream_create":"CREATE" "key":"mykey" "group":"mygroup" "stream_id":"$" } )<line_sep># short of a parameter judge_command("XGROUP CREATE mykey mygroup" <none>)<line_sep>judge_command("XGROUP CREATE mykey" <none>)<block_end><def_stmt>test_xgroup_setid judge_command<block_start>judge_command("XGROUP SETID mykey mygroup 123" {"command":"XGROUP" "stream_setid":"SETID" "key":"mykey" "group":"mygroup" "stream_id":"123" } )<line_sep>judge_command("XGROUP SETID mykey mygroup $" {"command":"XGROUP" "stream_setid":"SETID" "key":"mykey" "group":"mygroup" "stream_id":"$" } )<line_sep># two subcommand together shouldn't match judge_command("XGROUP CREATE mykey mygroup 123 SETID mykey mygroup $" <none>)<block_end><def_stmt>test_xgroup_destroy judge_command<block_start>judge_command("XGROUP destroy mykey mygroup" {"command":"XGROUP" "stream_destroy":"destroy" "key":"mykey" "group":"mygroup" } )<line_sep>judge_command("XGROUP destroy mykey" <none>)<line_sep>judge_command("XGROUP DESTROY mykey mygroup $" <none>)<block_end><def_stmt>test_xgroup_delconsumer judge_command<block_start>judge_command("XGROUP delconsumer mykey mygroup myconsumer" {"command":"XGROUP" "stream_delconsumer":"delconsumer" "key":"mykey" "group":"mygroup" "consumer":"myconsumer" } )<line_sep>judge_command("XGROUP delconsumer mykey mygroup $" {"command":"XGROUP" "stream_delconsumer":"delconsumer" "key":"mykey" "group":"mygroup" "consumer":"$" } )<line_sep>judge_command("XGROUP delconsumer mykey mygroup" <none>)<block_end><def_stmt>test_xgroup_stream judge_command<block_start>judge_command("XACK mystream group1 123123" {"command":"XACK" "key":"mystream" "group":"group1" "stream_id":"123123" } )<line_sep>judge_command("XACK mystream group1 123123 111" {"command":"XACK" "key":"mystream" "group":"group1" "stream_id":"111"} )<block_end><def_stmt>test_xinfo judge_command<block_start>judge_command("XINFO consumers mystream mygroup" {"command":"XINFO" "stream_consumers":"consumers" "key":"mystream" "group":"mygroup" } )<line_sep>judge_command("XINFO GROUPS mystream" {"command":"XINFO" "stream_groups":"GROUPS" "key":"mystream"} )<line_sep>judge_command("XINFO STREAM mystream" {"command":"XINFO" "stream":"STREAM" "key":"mystream"} )<line_sep>judge_command("XINFO HELP" {"command":"XINFO" "help":"HELP"})<line_sep>judge_command("XINFO consumers mystream mygroup GROUPS mystream" <none>)<line_sep>judge_command("XINFO groups mystream mygroup" <none>)<block_end><def_stmt>test_xinfo_with_full judge_command<block_start>judge_command("XINFO STREAM mystream FULL" {"command":"XINFO" "stream":"STREAM" 
"key":"mystream" "full_const":"FULL" } )<line_sep>judge_command("XINFO STREAM mystream FULL count 10" {"command":"XINFO" "stream":"STREAM" "key":"mystream" "full_const":"FULL" "count_const":"count" "count":"10" } )<block_end><def_stmt>test_xpending judge_command<block_start>judge_command("XPENDING mystream group55" {"command":"XPENDING" "key":"mystream" "group":"group55"} )<line_sep>judge_command("XPENDING mystream group55 myconsumer" {"command":"XPENDING" "key":"mystream" "group":"group55" "consumer":"myconsumer" } )<line_sep>judge_command("XPENDING mystream group55 - + 10" {"command":"XPENDING" "key":"mystream" "group":"group55" "stream_id":["-" "+"] "count":"10" } )<line_sep>judge_command("XPENDING mystream group55 - + 10 myconsumer" {"command":"XPENDING" "key":"mystream" "group":"group55" "stream_id":["-" "+"] "count":"10" "consumer":"myconsumer" } )<line_sep>judge_command("XPENDING mystream group55 - + " <none>)<block_end><def_stmt>test_xadd judge_command<block_start>judge_command("xadd mystream MAXLEN ~ 1000 * key value" {"command":"xadd" "key":"mystream" "maxlen":"MAXLEN" "approximately":"~" "count":"1000" "sfield":"key" "svalue":"value" "stream_id":"*" } )<line_sep># test for MAXLEN option judge_command("xadd mystream MAXLEN 1000 * key value" {"command":"xadd" "key":"mystream" "maxlen":"MAXLEN" "count":"1000" "sfield":"key" "svalue":"value" "stream_id":"*" } )<line_sep>judge_command("xadd mystream * key value" {"command":"xadd" "key":"mystream" "sfield":"key" "svalue":"value" "stream_id":"*" } )<line_sep># spcify stream id judge_command("xadd mystream 123-123 key value" {"command":"xadd" "key":"mystream" "sfield":"key" "svalue":"value" "stream_id":"123-123" } )<line_sep>judge_command("xadd mystream 123-123 key value foo bar hello world" {"command":"xadd" "key":"mystream" "sfield":"hello" "svalue":"world" "stream_id":"123-123" } )<block_end><def_stmt>test_xtrim judge_command<block_start>judge_command(" XTRIM mystream MAXLEN 2" {"command":"XTRIM" "key":"mystream" "maxlen":"MAXLEN" "count":"2"} )<line_sep>judge_command(" XTRIM mystream MAXLEN ~ 2" {"command":"XTRIM" "key":"mystream" "maxlen":"MAXLEN" "count":"2" "approximately":"~" } )<line_sep>judge_command(" XTRIM mystream" <none>)<block_end><def_stmt>test_xdel judge_command<block_start>judge_command("XDEL mystream 1581165000000 1549611229000 1581060831000" {"command":"XDEL" "key":"mystream" "stream_id":"1581060831000"} )<line_sep>judge_command("XDEL mystream 1581165000000" {"command":"XDEL" "key":"mystream" "stream_id":"1581165000000"} )<block_end><def_stmt>test_xclaim judge_command<block_start>judge_command("XCLAIM mystream mygroup Alice 3600000 1526569498055-0" {"command":"XCLAIM" "key":"mystream" "group":"mygroup" "consumer":"Alice" "millisecond":"3600000" "stream_id":"1526569498055-0" } )<line_sep>judge_command("XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123 456 789" {"command":"XCLAIM" "key":"mystream" "group":"mygroup" "consumer":"Alice" "millisecond":"3600000" "stream_id":"789" } )<line_sep>judge_command("XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300" {"command":"XCLAIM" "key":"mystream" "group":"mygroup" "consumer":"Alice" "millisecond":["3600000" "300"] "stream_id":"1526569498055-0" "idel":"IDEL" } )<line_sep>judge_command("XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount 7" {"command":"XCLAIM" "key":"mystream" "group":"mygroup" "consumer":"Alice" "millisecond":"3600000" "stream_id":"1526569498055-0" "retrycount":"retrycount" "count":"7" } )<line_sep>judge_command("XCLAIM 
mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789" {"command":"XCLAIM" "key":"mystream" "group":"mygroup" "consumer":"Alice" "millisecond":"3600000" "stream_id":"1526569498055-0" "time":"TIME" "timestamp":"123456789" } )<line_sep>judge_command("XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE" {"command":"XCLAIM" "key":"mystream" "group":"mygroup" "consumer":"Alice" "millisecond":"3600000" "stream_id":"1526569498055-0" "force":"FORCE" } )<line_sep>judge_command("XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID" {"command":"XCLAIM" "key":"mystream" "group":"mygroup" "consumer":"Alice" "millisecond":"3600000" "stream_id":"1526569498055-0" "justid":"JUSTID" } )<block_end><def_stmt>test_xread judge_command<block_start>judge_command("XREAD COUNT 2 STREAMS mystream writers 0-0 0-0" {"command":"XREAD" "count_const":"COUNT" "count":"2" "streams":"STREAMS" # FIXME current grammar can't support multiple tokens # so the ids will be recongized to keys. "keys":"mystream writers 0-0" "stream_id":"0-0" } )<line_sep>judge_command("XREAD COUNT 2 BLOCK 1000 STREAMS mystream writers 0-0 0-0" {"command":"XREAD" "count_const":"COUNT" "count":"2" "streams":"STREAMS" "keys":"mystream writers 0-0" "block":"BLOCK" "millisecond":"1000" "stream_id":"0-0" } )<block_end><def_stmt>test_xreadgroup judge_command<block_start>judge_command("XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK 100 NOACK STREAMS key1 1 key2 2" {"command":"XREADGROUP" "stream_group":"GROUP" "group":"mygroup1" "consumer":"Bob" "count_const":"COUNT" "count":"1" "block":"BLOCK" "millisecond":"100" "noack":"NOACK" "streams":"STREAMS" "keys":"key1 1 key2" "stream_id":"2" } )<line_sep>judge_command("XREADGROUP GROUP mygroup1 Bob STREAMS key1 1 key2 2" {"command":"XREADGROUP" "stream_group":"GROUP" "group":"mygroup1" "consumer":"Bob" "streams":"STREAMS" "keys":"key1 1 key2" "stream_id":"2" } )<line_sep>judge_command("XREADGROUP GROUP group consumer" <none>)<block_end>
<import_stmt>logging<import_stmt>os<import_stmt>pickle<import_stmt>sys<import_stmt>threading<import_stmt>time<import_from_stmt>typing List<import_from_stmt>Giveme5W1H.extractor.root path<import_from_stmt>Giveme5W1H.extractor.tools.util bytes_2_human_readable<class_stmt>KeyValueCache(object)<block_start><def_stmt>__init__ self cache_path<block_start>""" :param cache_path: path to cache, must be relative to the root.py file """<line_sep>self.log=logging.getLogger('GiveMe5W')<line_sep># resolve the path relative to root.py self._cache_path=path(cache_path)<line_sep># add a meaningful extension self._cache_path=self._cache_path+'.prickle'<line_sep>self._cache={}<if_stmt>cache_path<and>os.path.isfile(self._cache_path)<and>os.path.getsize(self._cache_path)<g>0# reload the cache object from disk, if any <block_start><with_stmt>open(self._cache_path 'rb')<as>ff<block_start>self._cache=pickle.load(ff)<line_sep>self.log.debug('KeyValueCache: '+self._cache_path+' restored')<line_sep>self.log_stats()<block_end><block_end><else_stmt><block_start>self._cache={}<block_end>self._lock=threading.Lock()<block_end><def_stmt>log_stats self# size does not include child objects <block_start>self.log.info(self._cache_path+' entries: '+str(len(self._cache))+' size: '+bytes_2_human_readable(sys.getsizeof(self._cache)))<block_end><def_stmt>persist self<block_start><with_stmt>open(self._cache_path 'wb')<as>f<block_start>pickle.dump(self._cache f pickle.HIGHEST_PROTOCOL)<block_end><block_end><def_stmt>cache self key:str value:object<block_start>""" None values are considered invalid results (ToughRequest produces None on exceptions); set -1 if you want to store "No distance". :param key: :param value: :return: """<line_sep>self._lock.acquire()<if_stmt>value<is><not><none><block_start>self._cache[key]=self._pack(value)<line_sep>self.log.debug(self._cache_path+' CACHED: '+str(key)+': '+str(value))<line_sep>self.persist()<block_end>self._lock.release()<block_end><def_stmt>get self key<block_start>""" Read cache entries :param key: :return: """<line_sep>self._lock.acquire()<line_sep>result=<none><line_sep>value=self._cache.get(key)<if_stmt>value<is><not><none><block_start>self.log.debug(self._cache_path+' LOADED: '+str(key)+': '+str(value))<line_sep>result=self._unpack(value)<block_end>self._lock.release()<line_sep><return>result<block_end><def_stmt>get_complex self list_of_keys:List[str]<block_start>""" Read complex cache entries """<line_sep><return>self.get(self._get_id(list_of_keys))<block_end><def_stmt>cache_complex self list_of_keys:List[str] value<block_start>""" Helper to cache values under multiple (string) keys. The keys are sorted before concatenation, so the key order does not matter. """<line_sep>self.cache(self._get_id(list_of_keys) value)<block_end><def_stmt>_get_id self list_of_keys:List[str]<block_start>""" sorts list_of_keys, concatenates with # for readability :param list_of_keys: :return: """<line_sep>list_of_keys=sorted(list_of_keys)<line_sep><return>"#".join(list_of_keys)<block_end><def_stmt>_pack self value<block_start>""" the cache tracks the age of an entry, which may be helpful in the future :param value: :return: """<line_sep><return>[value str(time.time())]<block_end><def_stmt>_unpack self value<block_start>""" removes the timestamp around the cached value, if any :param value: :return: """<line_sep># there are some old entries without timestamp <if_stmt>isinstance(value str)<or>isinstance(value int)<block_start><return>value<block_end><return>value[0]<block_end><block_end>
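# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example of the cache API defined above. The cache path is a
# hypothetical value; per the constructor it must be relative to the package's
# root.py, and a '<path>.prickle' pickle file is created for persistence.
cache = KeyValueCache('cache/geocoder_distances')   # hypothetical relative path

cache.cache('Berlin', (52.52, 13.405))              # simple key -> value
print(cache.get('Berlin'))                          # -> (52.52, 13.405)

# Complex keys are sorted before being joined, so the key order does not matter.
cache.cache_complex(['Berlin', 'Hamburg'], 255.0)
print(cache.get_complex(['Hamburg', 'Berlin']))     # -> 255.0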
"""Trainining script for seq2seq text-to-speech synthesis model. usage: train.py [options] options: --data-root=<dir> Directory contains preprocessed features. --checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints]. --hparams=<parmas> Hyper parameters [default: ]. --checkpoint=<path> Restore model from checkpoint path if given. --checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path. --checkpoint-postnet=<path> Restore postnet model from checkpoint path. --train-seq2seq-only Train only seq2seq model. --train-postnet-only Train only postnet model. --restore-parts=<path> Restore part of the model. --log-event-path=<name> Log event path. --reset-optimizer Reset optimizer. --load-embedding=<path> Load embedding from checkpoint. --speaker-id=<N> Use specific speaker of data in case for multi-speaker datasets. -h, --help Show this help message and exit """<import_from_stmt>docopt docopt<import_stmt>sys<import_from_stmt>os.path dirname join<import_from_stmt>tqdm tqdm trange<import_from_stmt>datetime datetime<line_sep># The deepvoice3 model <import_from_stmt>dv3.deepvoice3_pytorch frontend builder<import_stmt>dv3.audio<import_stmt>dv3.lrschedule<import_stmt>torch<import_from_stmt>torch.utils data<as>data_utils<import_from_stmt>torch.autograd Variable<import_from_stmt>torch nn<import_from_stmt>torch optim<import_stmt>torch.backends.cudnn<as>cudnn<import_from_stmt>torch.utils data<as>data_utils<import_from_stmt>torch.utils.data.sampler Sampler<import_stmt>numpy<as>np<import_from_stmt>numba jit<import_from_stmt>nnmnkwii.datasets FileSourceDataset FileDataSource<import_from_stmt>os.path join expanduser<import_stmt>random<import_stmt>librosa.display<import_from_stmt>matplotlib pyplot<as>plt<import_stmt>sys<import_stmt>os<import_from_stmt>tensorboardX SummaryWriter<import_from_stmt>matplotlib cm<import_from_stmt>warnings warn<import_from_stmt>dv3.hparams hparams hparams_debug_string<line_sep>fs=hparams.sample_rate<line_sep>global_step=0<line_sep>global_epoch=0<line_sep>use_cuda=torch.cuda.is_available()<if_stmt>use_cuda<block_start>cudnn.benchmark=<false><block_end>_frontend=<none># to be set later <def_stmt>_pad seq max_len constant_values=0<block_start><return>np.pad(seq (0 max_len-len(seq)) mode='constant' constant_values=constant_values)<block_end><def_stmt>_pad_2d x max_len b_pad=0<block_start>x=np.pad(x [(b_pad max_len-len(x)-b_pad) (0 0)] mode="constant" constant_values=0)<line_sep><return>x<block_end><def_stmt>plot_alignment alignment path info=<none><block_start>fig,ax=plt.subplots()<line_sep>im=ax.imshow(alignment aspect='auto' origin='lower' interpolation='none')<line_sep>fig.colorbar(im ax=ax)<line_sep>xlabel='Decoder timestep'<if_stmt>info<is><not><none><block_start>xlabel<augadd>'\n\n'+info<block_end>plt.xlabel(xlabel)<line_sep>plt.ylabel('Encoder timestep')<line_sep>plt.tight_layout()<line_sep>plt.savefig(path format='png')<line_sep>plt.close()<block_end><class_stmt>TextDataSource(FileDataSource)<block_start><def_stmt>__init__ self data_root speaker_id=<none><block_start>self.data_root=data_root<line_sep>self.speaker_ids=<none><line_sep>self.multi_speaker=<false><line_sep># If not None, filter by speaker_id self.speaker_id=speaker_id<block_end><def_stmt>collect_files self<block_start>meta=join(self.data_root "train.txt")<with_stmt>open(meta 
"rb")<as>f<block_start>lines=f.readlines()<block_end>l=lines[0].decode("utf-8").split("|")<assert_stmt>len(l)<eq>4<or>len(l)<eq>5<line_sep>self.multi_speaker=len(l)<eq>5<line_sep>texts=list(map(<lambda>l:l.decode("utf-8").split("|")[3] lines))<if_stmt>self.multi_speaker<block_start>speaker_ids=list(map(<lambda>l:int(l.decode("utf-8").split("|")[-1]) lines))<line_sep># Filter by speaker_id # using multi-speaker dataset as a single speaker dataset <if_stmt>self.speaker_id<is><not><none><block_start>indices=np.array(speaker_ids)<eq>self.speaker_id<line_sep>texts=list(np.array(texts)[indices])<line_sep>self.multi_speaker=<false><line_sep><return>texts<block_end><return>texts speaker_ids<block_end><else_stmt><block_start><return>texts<block_end><block_end><def_stmt>collect_features self *args<block_start><if_stmt>self.multi_speaker<block_start>text,speaker_id=args<block_end><else_stmt><block_start>text=args[0]<block_end>seq=_frontend.text_to_sequence(text p=hparams.replace_pronunciation_prob)<if_stmt>self.multi_speaker<block_start><return>np.asarray(seq dtype=np.int32) int(speaker_id)<block_end><else_stmt><block_start><return>np.asarray(seq dtype=np.int32)<block_end><block_end><block_end><class_stmt>_NPYDataSource(FileDataSource)<block_start><def_stmt>__init__ self data_root col speaker_id=<none><block_start>self.data_root=data_root<line_sep>self.col=col<line_sep>self.frame_lengths=[]<line_sep>self.speaker_id=speaker_id<block_end><def_stmt>collect_files self<block_start>meta=join(self.data_root "train.txt")<with_stmt>open(meta "rb")<as>f<block_start>lines=f.readlines()<block_end>l=lines[0].decode("utf-8").split("|")<assert_stmt>len(l)<eq>4<or>len(l)<eq>5<line_sep>multi_speaker=len(l)<eq>5<line_sep>self.frame_lengths=list(map(<lambda>l:int(l.decode("utf-8").split("|")[2]) lines))<line_sep>paths=list(map(<lambda>l:l.decode("utf-8").split("|")[self.col] lines))<line_sep>paths=list(map(<lambda>f:join(self.data_root f) paths))<if_stmt>multi_speaker<and>self.speaker_id<is><not><none><block_start>speaker_ids=list(map(<lambda>l:int(l.decode("utf-8").split("|")[-1]) lines))<line_sep># Filter by speaker_id # using multi-speaker dataset as a single speaker dataset indices=np.array(speaker_ids)<eq>self.speaker_id<line_sep>paths=list(np.array(paths)[indices])<line_sep>self.frame_lengths=list(np.array(self.frame_lengths)[indices])<line_sep># aha, need to cast numpy.int64 to int self.frame_lengths=list(map(int self.frame_lengths))<block_end><return>paths<block_end><def_stmt>collect_features self path<block_start><return>np.load(path)<block_end><block_end><class_stmt>MelSpecDataSource(_NPYDataSource)<block_start><def_stmt>__init__ self data_root speaker_id=<none><block_start>super(MelSpecDataSource self).__init__(data_root 1 speaker_id)<block_end><block_end><class_stmt>LinearSpecDataSource(_NPYDataSource)<block_start><def_stmt>__init__ self data_root speaker_id=<none><block_start>super(LinearSpecDataSource self).__init__(data_root 0 speaker_id)<block_end><block_end><class_stmt>PartialyRandomizedSimilarTimeLengthSampler(Sampler)<block_start>"""Partially randmoized sampler 1. Sort by lengths 2. Pick a small patch and randomize it 3. 
Permutate mini-batchs """<def_stmt>__init__ self lengths batch_size=16 batch_group_size=<none> permutate=<true><block_start>self.lengths,self.sorted_indices=torch.sort(torch.LongTensor(lengths))<line_sep>self.batch_size=batch_size<if_stmt>batch_group_size<is><none><block_start>batch_group_size=min(batch_size<times>32 len(self.lengths))<if_stmt>batch_group_size%batch_size<ne>0<block_start>batch_group_size<augsub>batch_group_size%batch_size<block_end><block_end>self.batch_group_size=batch_group_size<assert_stmt>batch_group_size%batch_size<eq>0<line_sep>self.permutate=permutate<block_end><def_stmt>__iter__ self<block_start>indices=self.sorted_indices.clone()<line_sep>batch_group_size=self.batch_group_size<line_sep>s,e=0 0<for_stmt>i range(len(indices)<floordiv>batch_group_size)<block_start>s=i<times>batch_group_size<line_sep>e=s+batch_group_size<line_sep>random.shuffle(indices[s:e])<block_end># Permutate batches <if_stmt>self.permutate<block_start>perm=np.arange(len(indices[:e])<floordiv>self.batch_size)<line_sep>random.shuffle(perm)<line_sep>indices[:e]=indices[:e].view(-1 self.batch_size)[perm :].view(-1)<block_end># Handle last elements s<augadd>batch_group_size<if_stmt>s<l>len(indices)<block_start>random.shuffle(indices[s:])<block_end><return>iter(indices)<block_end><def_stmt>__len__ self<block_start><return>len(self.sorted_indices)<block_end><block_end><class_stmt>PyTorchDataset(object)<block_start><def_stmt>__init__ self X Mel Y<block_start>self.X=X<line_sep>self.Mel=Mel<line_sep>self.Y=Y<line_sep># alias self.multi_speaker=X.file_data_source.multi_speaker<block_end><def_stmt>__getitem__ self idx<block_start><if_stmt>self.multi_speaker<block_start>text,speaker_id=self.X[idx]<line_sep><return>text self.Mel[idx] self.Y[idx] speaker_id<block_end><else_stmt><block_start><return>self.X[idx] self.Mel[idx] self.Y[idx]<block_end><block_end><def_stmt>__len__ self<block_start><return>len(self.X)<block_end><block_end><def_stmt>sequence_mask sequence_length max_len=<none><block_start><if_stmt>max_len<is><none><block_start>max_len=sequence_length.data.max()<block_end>batch_size=sequence_length.size(0)<line_sep>seq_range=torch.arange(0 max_len).long()<line_sep>seq_range_expand=seq_range.unsqueeze(0).expand(batch_size max_len)<line_sep>seq_range_expand=Variable(seq_range_expand)<if_stmt>sequence_length.is_cuda<block_start>seq_range_expand=seq_range_expand.cuda()<block_end>seq_length_expand=sequence_length.unsqueeze(1).expand_as(seq_range_expand)<line_sep><return>(seq_range_expand<l>seq_length_expand).float()<block_end><class_stmt>MaskedL1Loss(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(MaskedL1Loss self).__init__()<line_sep>self.criterion=nn.L1Loss(size_average=<false>)<block_end><def_stmt>forward self input target lengths=<none> mask=<none> max_len=<none><block_start><if_stmt>lengths<is><none><and>mask<is><none><block_start><raise>RuntimeError("Should provide either lengths or mask")<block_end># (B, T, 1) <if_stmt>mask<is><none><block_start>mask=sequence_mask(lengths max_len).unsqueeze(-1)<block_end># (B, T, D) mask_=mask.expand_as(input)<line_sep>loss=self.criterion(input<times>mask_ target<times>mask_)<line_sep><return>loss/mask_.sum()<block_end><block_end><def_stmt>collate_fn batch<block_start>"""Create batch"""<line_sep>r=hparams.outputs_per_step<line_sep>downsample_step=hparams.downsample_step<line_sep>multi_speaker=len(batch[0])<eq>4<line_sep># Lengths input_lengths=[len(x[0])<for>x batch]<line_sep>max_input_len=max(input_lengths)<line_sep>target_lengths=[len(x[1])<for>x 
batch]<line_sep>max_target_len=max(target_lengths)<if_stmt>max_target_len%r<ne>0<block_start>max_target_len<augadd>r-max_target_len%r<assert_stmt>max_target_len%r<eq>0<block_end><if_stmt>max_target_len%downsample_step<ne>0<block_start>max_target_len<augadd>downsample_step-max_target_len%downsample_step<assert_stmt>max_target_len%downsample_step<eq>0<block_end># Set 0 for zero beginning padding # imitates initial decoder states b_pad=r<line_sep>max_target_len<augadd>b_pad<times>downsample_step<line_sep>a=np.array([_pad(x[0] max_input_len)<for>x batch] dtype=np.int)<line_sep>x_batch=torch.LongTensor(a)<line_sep>input_lengths=torch.LongTensor(input_lengths)<line_sep>target_lengths=torch.LongTensor(target_lengths)<line_sep>b=np.array([_pad_2d(x[1] max_target_len b_pad=b_pad)<for>x batch] dtype=np.float32)<line_sep>mel_batch=torch.FloatTensor(b)<line_sep>c=np.array([_pad_2d(x[2] max_target_len b_pad=b_pad)<for>x batch] dtype=np.float32)<line_sep>y_batch=torch.FloatTensor(c)<line_sep># text positions text_positions=np.array([_pad(np.arange(1 len(x[0])+1) max_input_len)<for>x batch] dtype=np.int)<line_sep>text_positions=torch.LongTensor(text_positions)<line_sep>max_decoder_target_len=max_target_len<floordiv>r<floordiv>downsample_step<line_sep># frame positions s,e=1 max_decoder_target_len+1<line_sep># if b_pad > 0: # s, e = s - 1, e - 1 frame_positions=torch.arange(s e).long().unsqueeze(0).expand(len(batch) max_decoder_target_len)<line_sep># done flags done=np.array([_pad(np.zeros(len(x[1])<floordiv>r<floordiv>downsample_step-1) max_decoder_target_len constant_values=1)<for>x batch])<line_sep>done=torch.FloatTensor(done).unsqueeze(-1)<if_stmt>multi_speaker<block_start>speaker_ids=torch.LongTensor([x[3]<for>x batch])<block_end><else_stmt><block_start>speaker_ids=<none><block_end><return>x_batch input_lengths mel_batch y_batch (text_positions frame_positions) done target_lengths speaker_ids<block_end><def_stmt>time_string <block_start><return>datetime.now().strftime('%Y-%m-%d %H:%M')<block_end><def_stmt>save_alignment path attn<block_start>plot_alignment(attn.T path info="{}, {}, step={}".format(hparams.builder time_string() global_step))<block_end><def_stmt>prepare_spec_image spectrogram# [0, 1] <block_start>spectrogram=(spectrogram-np.min(spectrogram))/(np.max(spectrogram)-np.min(spectrogram))<line_sep>spectrogram=np.flip(spectrogram axis=1)# flip against freq axis <return>np.uint8(cm.magma(spectrogram.T)<times>255)<block_end><def_stmt>eval_model global_step writer model checkpoint_dir ismultispeaker# harded coded <block_start>texts=["Scientists at the CERN laboratory say they have discovered a new particle." "There's a way to measure the acute emotional intelligence that has never gone out of style." "President Trump met with other leaders at the Group of 20 conference." "Generative adversarial network or variational auto-encoder." "Please call Stella." "Some have accepted this as a miracle without any physical explanation." 
]<import_stmt>dv3.synthesis<line_sep>synthesis._frontend=_frontend<line_sep>eval_output_dir=join(checkpoint_dir "eval")<line_sep>os.makedirs(eval_output_dir exist_ok=<true>)<line_sep># hard coded speaker_ids=[0 1 10]<if>ismultispeaker<else>[<none>]<for_stmt>speaker_id speaker_ids<block_start>speaker_str="multispeaker{}".format(speaker_id)<if>speaker_id<is><not><none><else>"single"<for_stmt>idx,text enumerate(texts)<block_start>signal,alignment,_,mel=synthesis.tts(model text p=0 speaker_id=speaker_id fast=<false>)<line_sep>signal<augdiv>np.max(np.abs(signal))<line_sep># Alignment path=join(eval_output_dir "step{:09d}_text{}_{}_alignment.png".format(global_step idx speaker_str))<line_sep>save_alignment(path alignment)<line_sep>tag="eval_averaged_alignment_{}_{}".format(idx speaker_str)<line_sep>writer.add_image(tag np.uint8(cm.viridis(np.flip(alignment 1).T)<times>255) global_step)<line_sep># Mel writer.add_image("(Eval) Predicted mel spectrogram text{}_{}".format(idx speaker_str) prepare_spec_image(mel) global_step)<line_sep># Audio path=join(eval_output_dir "step{:09d}_text{}_{}_predicted.wav".format(global_step idx speaker_str))<line_sep>dv3.audio.save_wav(signal path)<try_stmt><block_start>writer.add_audio("(Eval) Predicted audio signal {}_{}".format(idx speaker_str) signal global_step sample_rate=fs)<block_end><except_stmt>Exception<as>e<block_start>warn(str(e))<line_sep><pass><block_end><block_end><block_end><block_end><def_stmt>save_states global_step writer mel_outputs linear_outputs attn mel y input_lengths checkpoint_dir=<none><block_start>print("Save intermediate states at step {}".format(global_step))<line_sep># idx = np.random.randint(0, len(input_lengths)) idx=min(1 len(input_lengths)-1)<line_sep>input_length=input_lengths[idx]<line_sep># Alignment # Multi-hop attention <if_stmt>attn<is><not><none><and>attn.dim()<eq>4<block_start><for_stmt>i,alignment enumerate(attn)<block_start>alignment=alignment[idx].cpu().data.numpy()<line_sep>tag="alignment_layer{}".format(i+1)<line_sep>writer.add_image(tag np.uint8(cm.viridis(np.flip(alignment 1).T)<times>255) global_step)<line_sep># save files as well for now alignment_dir=join(checkpoint_dir "alignment_layer{}".format(i+1))<line_sep>os.makedirs(alignment_dir exist_ok=<true>)<line_sep>path=join(alignment_dir "step{:09d}_layer_{}_alignment.png".format(global_step i+1))<line_sep>save_alignment(path alignment)<block_end># Save averaged alignment alignment_dir=join(checkpoint_dir "alignment_ave")<line_sep>os.makedirs(alignment_dir exist_ok=<true>)<line_sep>path=join(alignment_dir "step{:09d}_alignment.png".format(global_step))<line_sep>alignment=attn.mean(0)[idx].cpu().data.numpy()<line_sep>save_alignment(path alignment)<line_sep>tag="averaged_alignment"<line_sep>writer.add_image(tag np.uint8(cm.viridis(np.flip(alignment 1).T)<times>255) global_step)<block_end># Predicted mel spectrogram <if_stmt>mel_outputs<is><not><none><block_start>mel_output=mel_outputs[idx].cpu().data.numpy()<line_sep>mel_output=prepare_spec_image(dv3.audio._denormalize(mel_output))<line_sep>writer.add_image("Predicted mel spectrogram" mel_output global_step)<block_end># Predicted spectrogram <if_stmt>linear_outputs<is><not><none><block_start>linear_output=linear_outputs[idx].cpu().data.numpy()<line_sep>spectrogram=prepare_spec_image(dv3.audio._denormalize(linear_output))<line_sep>writer.add_image("Predicted linear spectrogram" spectrogram global_step)<line_sep># Predicted audio signal 
signal=dv3.audio.inv_spectrogram(linear_output.T)<line_sep>signal<augdiv>np.max(np.abs(signal))<line_sep>path=join(checkpoint_dir "step{:09d}_predicted.wav".format(global_step))<try_stmt><block_start>writer.add_audio("Predicted audio signal" signal global_step sample_rate=fs)<block_end><except_stmt>Exception<as>e<block_start>warn(str(e))<line_sep><pass><block_end>dv3.audio.save_wav(signal path)<block_end># Target mel spectrogram <if_stmt>mel_outputs<is><not><none><block_start>mel_output=mel[idx].cpu().data.numpy()<line_sep>mel_output=prepare_spec_image(dv3.audio._denormalize(mel_output))<line_sep>writer.add_image("Target mel spectrogram" mel_output global_step)<block_end># Target spectrogram <if_stmt>linear_outputs<is><not><none><block_start>linear_output=y[idx].cpu().data.numpy()<line_sep>spectrogram=prepare_spec_image(dv3.audio._denormalize(linear_output))<line_sep>writer.add_image("Target linear spectrogram" spectrogram global_step)<block_end><block_end><def_stmt>logit x eps=1e-8<block_start><return>torch.log(x+eps)-torch.log(1-x+eps)<block_end><def_stmt>masked_mean y mask# (B, T, D) <block_start>mask_=mask.expand_as(y)<line_sep><return>(y<times>mask_).sum()/mask_.sum()<block_end><def_stmt>spec_loss y_hat y mask priority_bin=<none> priority_w=0<block_start>masked_l1=MaskedL1Loss()<line_sep>l1=nn.L1Loss()<line_sep>w=hparams.masked_loss_weight<line_sep># L1 loss <if_stmt>w<g>0<block_start><assert_stmt>mask<is><not><none><line_sep>l1_loss=w<times>masked_l1(y_hat y mask=mask)+(1-w)<times>l1(y_hat y)<block_end><else_stmt><block_start><assert_stmt>mask<is><none><line_sep>l1_loss=l1(y_hat y)<block_end># Priority L1 loss <if_stmt>priority_bin<is><not><none><and>priority_w<g>0<block_start><if_stmt>w<g>0<block_start>priority_loss=w<times>masked_l1(y_hat[: : :priority_bin] y[: : :priority_bin] mask=mask)+(1-w)<times>l1(y_hat[: : :priority_bin] y[: : :priority_bin])<block_end><else_stmt><block_start>priority_loss=l1(y_hat[: : :priority_bin] y[: : :priority_bin])<block_end>l1_loss=(1-priority_w)<times>l1_loss+priority_w<times>priority_loss<block_end># Binary divergence loss <if_stmt>hparams.binary_divergence_weight<le>0<block_start>binary_div=Variable(y.data.new(1).zero_())<block_end><else_stmt><block_start>y_hat_logits=logit(y_hat)<line_sep>z=-y<times>y_hat_logits+torch.log(1+torch.exp(y_hat_logits))<if_stmt>w<g>0<block_start>binary_div=w<times>masked_mean(z mask)+(1-w)<times>z.mean()<block_end><else_stmt><block_start>binary_div=z.mean()<block_end><block_end><return>l1_loss binary_div<block_end>@jit(nopython=<true>)<def_stmt>guided_attention N max_N T max_T g<block_start>W=np.zeros((max_N max_T) dtype=np.float32)<for_stmt>n range(N)<block_start><for_stmt>t range(T)<block_start>W[n t]=1-np.exp(-(n/N-t/T)<power>2/(2<times>g<times>g))<block_end><block_end><return>W<block_end><def_stmt>guided_attentions input_lengths target_lengths max_target_len g=0.2<block_start>B=len(input_lengths)<line_sep>max_input_len=input_lengths.max()<line_sep>W=np.zeros((B max_target_len max_input_len) dtype=np.float32)<for_stmt>b range(B)<block_start>W[b]=guided_attention(input_lengths[b] max_input_len target_lengths[b] max_target_len g).T<block_end><return>W<block_end><def_stmt>train model data_loader optimizer writer init_lr=0.002 checkpoint_dir=<none> checkpoint_interval=<none> nepochs=<none> clip_thresh=1.0 train_seq2seq=<true> 
train_postnet=<true><block_start><if_stmt>use_cuda<block_start>model=model.cuda()<block_end>linear_dim=model.linear_dim<line_sep>r=hparams.outputs_per_step<line_sep>downsample_step=hparams.downsample_step<line_sep>current_lr=init_lr<line_sep>binary_criterion=nn.BCELoss()<assert_stmt>train_seq2seq<or>train_postnet<line_sep><global>global_step global_epoch<while_stmt>global_epoch<l>nepochs<block_start>running_loss=0.<for_stmt>step,(x input_lengths mel y positions done target_lengths speaker_ids) tqdm(enumerate(data_loader))<block_start>model.train()<line_sep>ismultispeaker=speaker_ids<is><not><none><line_sep># Learning rate schedule <if_stmt>hparams.lr_schedule<is><not><none><block_start>lr_schedule_f=getattr(dv3.lrschedule hparams.lr_schedule)<line_sep>current_lr=lr_schedule_f(init_lr global_step **hparams.lr_schedule_kwargs)<for_stmt>param_group optimizer.param_groups<block_start>param_group['lr']=current_lr<block_end><block_end>optimizer.zero_grad()<line_sep># Used for Position encoding text_positions,frame_positions=positions<line_sep># Downsample mel spectrogram <if_stmt>downsample_step<g>1<block_start>mel=mel[: 0::downsample_step :].contiguous()<block_end># Lengths input_lengths=input_lengths.long().numpy()<line_sep>decoder_lengths=target_lengths.long().numpy()<floordiv>r<floordiv>downsample_step<line_sep># Feed data x,mel,y=Variable(x) Variable(mel) Variable(y)<line_sep>text_positions=Variable(text_positions)<line_sep>frame_positions=Variable(frame_positions)<line_sep>done=Variable(done)<line_sep>target_lengths=Variable(target_lengths)<line_sep>speaker_ids=Variable(speaker_ids)<if>ismultispeaker<else><none><if_stmt>use_cuda<block_start><if_stmt>train_seq2seq<block_start>x=x.cuda()<line_sep>text_positions=text_positions.cuda()<line_sep>frame_positions=frame_positions.cuda()<block_end><if_stmt>train_postnet<block_start>y=y.cuda()<block_end>mel=mel.cuda()<line_sep>done,target_lengths=done.cuda() target_lengths.cuda()<line_sep>speaker_ids=speaker_ids.cuda()<if>ismultispeaker<else><none><block_end># Create mask if we use masked loss <if_stmt>hparams.masked_loss_weight<g>0# decoder output domain mask <block_start>decoder_target_mask=sequence_mask(target_lengths/(r<times>downsample_step) max_len=mel.size(1)).unsqueeze(-1)<if_stmt>downsample_step<g>1# spectrogram-domain mask <block_start>target_mask=sequence_mask(target_lengths max_len=y.size(1)).unsqueeze(-1)<block_end><else_stmt><block_start>target_mask=decoder_target_mask<block_end># shift mask decoder_target_mask=decoder_target_mask[: r: :]<line_sep>target_mask=target_mask[: r: :]<block_end><else_stmt><block_start>decoder_target_mask,target_mask=<none> <none><block_end># Apply model <if_stmt>train_seq2seq<and>train_postnet<block_start>mel_outputs,linear_outputs,attn,done_hat=model(x mel speaker_ids=speaker_ids text_positions=text_positions frame_positions=frame_positions input_lengths=input_lengths)<block_end><elif_stmt>train_seq2seq<block_start><assert_stmt>speaker_ids<is><none><line_sep>mel_outputs,attn,done_hat,_=model.seq2seq(x mel text_positions=text_positions frame_positions=frame_positions input_lengths=input_lengths)<line_sep># reshape mel_outputs=mel_outputs.view(len(mel) -1 mel.size(-1))<line_sep>linear_outputs=<none><block_end><elif_stmt>train_postnet<block_start><assert_stmt>speaker_ids<is><none><line_sep>linear_outputs=model.postnet(mel)<line_sep>mel_outputs,attn,done_hat=<none> <none> <none><block_end># Losses w=hparams.binary_divergence_weight<line_sep># mel: 
<if_stmt>train_seq2seq<block_start>mel_l1_loss,mel_binary_div=spec_loss(mel_outputs[: :-r :] mel[: r: :] decoder_target_mask)<line_sep>mel_loss=(1-w)<times>mel_l1_loss+w<times>mel_binary_div<block_end># done: <if_stmt>train_seq2seq<block_start>done_loss=binary_criterion(done_hat done)<block_end># linear: <if_stmt>train_postnet<block_start>n_priority_freq=int(hparams.priority_freq/(fs<times>0.5)<times>linear_dim)<line_sep>linear_l1_loss,linear_binary_div=spec_loss(linear_outputs[: :-r :] y[: r: :] target_mask priority_bin=n_priority_freq priority_w=hparams.priority_freq_weight)<line_sep>linear_loss=(1-w)<times>linear_l1_loss+w<times>linear_binary_div<block_end># Combine losses <if_stmt>train_seq2seq<and>train_postnet<block_start>loss=mel_loss+linear_loss+done_loss<block_end><elif_stmt>train_seq2seq<block_start>loss=mel_loss+done_loss<block_end><elif_stmt>train_postnet<block_start>loss=linear_loss<block_end># attention <if_stmt>train_seq2seq<and>hparams.use_guided_attention<block_start>soft_mask=guided_attentions(input_lengths decoder_lengths attn.size(-2) g=hparams.guided_attention_sigma)<line_sep>soft_mask=Variable(torch.from_numpy(soft_mask))<line_sep>soft_mask=soft_mask.cuda()<if>use_cuda<else>soft_mask<line_sep>attn_loss=(attn<times>soft_mask).mean()<line_sep>loss<augadd>attn_loss<block_end><if_stmt>global_step<g>0<and>global_step%checkpoint_interval<eq>0<block_start>save_states(global_step writer mel_outputs linear_outputs attn mel y input_lengths checkpoint_dir)<line_sep>save_checkpoint(model optimizer global_step checkpoint_dir global_epoch train_seq2seq train_postnet)<block_end><if_stmt>global_step<g>0<and>global_step%hparams.eval_interval<eq>0<block_start>eval_model(global_step writer model checkpoint_dir ismultispeaker)<block_end># Update loss.backward()<if_stmt>clip_thresh<g>0<block_start>grad_norm=torch.nn.utils.clip_grad_norm(model.get_trainable_parameters() clip_thresh)<block_end>optimizer.step()<line_sep># Logs writer.add_scalar("loss" float(loss.data[0]) global_step)<if_stmt>train_seq2seq<block_start>writer.add_scalar("done_loss" float(done_loss.data[0]) global_step)<line_sep>writer.add_scalar("mel loss" float(mel_loss.data[0]) global_step)<line_sep>writer.add_scalar("mel_l1_loss" float(mel_l1_loss.data[0]) global_step)<line_sep>writer.add_scalar("mel_binary_div_loss" float(mel_binary_div.data[0]) global_step)<block_end><if_stmt>train_postnet<block_start>writer.add_scalar("linear_loss" float(linear_loss.data[0]) global_step)<line_sep>writer.add_scalar("linear_l1_loss" float(linear_l1_loss.data[0]) global_step)<line_sep>writer.add_scalar("linear_binary_div_loss" float(linear_binary_div.data[0]) global_step)<block_end><if_stmt>train_seq2seq<and>hparams.use_guided_attention<block_start>writer.add_scalar("attn_loss" float(attn_loss.data[0]) global_step)<block_end><if_stmt>clip_thresh<g>0<block_start>writer.add_scalar("gradient norm" grad_norm global_step)<block_end>writer.add_scalar("learning rate" current_lr global_step)<line_sep>global_step<augadd>1<line_sep>running_loss<augadd>loss.data[0]<block_end>averaged_loss=running_loss/(len(data_loader))<line_sep>writer.add_scalar("loss (per epoch)" averaged_loss global_epoch)<line_sep>print("Loss: {}".format(running_loss/(len(data_loader))))<line_sep>global_epoch<augadd>1<block_end><block_end><def_stmt>save_checkpoint model optimizer step checkpoint_dir epoch train_seq2seq 
train_postnet<block_start><if_stmt>train_seq2seq<and>train_postnet<block_start>suffix=""<line_sep>m=model<block_end><elif_stmt>train_seq2seq<block_start>suffix="_seq2seq"<line_sep>m=model.seq2seq<block_end><elif_stmt>train_postnet<block_start>suffix="_postnet"<line_sep>m=model.postnet<block_end>checkpoint_path=join(checkpoint_dir "checkpoint_step{:09d}{}.pth".format(global_step suffix))<line_sep>optimizer_state=optimizer.state_dict()<if>hparams.save_optimizer_state<else><none><line_sep>torch.save({"state_dict":m.state_dict() "optimizer":optimizer_state "global_step":step "global_epoch":epoch } checkpoint_path)<line_sep>print("Saved checkpoint:" checkpoint_path)<block_end><def_stmt>build_model <block_start>model=getattr(builder hparams.builder)(n_speakers=hparams.n_speakers speaker_embed_dim=hparams.speaker_embed_dim n_vocab=_frontend.n_vocab embed_dim=hparams.text_embed_dim mel_dim=hparams.num_mels linear_dim=hparams.fft_size<floordiv>2+1 r=hparams.outputs_per_step downsample_step=hparams.downsample_step padding_idx=hparams.padding_idx dropout=hparams.dropout kernel_size=hparams.kernel_size encoder_channels=hparams.encoder_channels decoder_channels=hparams.decoder_channels converter_channels=hparams.converter_channels use_memory_mask=hparams.use_memory_mask trainable_positional_encodings=hparams.trainable_positional_encodings force_monotonic_attention=hparams.force_monotonic_attention use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input max_positions=hparams.max_positions speaker_embedding_weight_std=hparams.speaker_embedding_weight_std freeze_embedding=hparams.freeze_embedding window_ahead=hparams.window_ahead window_backward=hparams.window_backward key_projection=hparams.key_projection value_projection=hparams.value_projection )<line_sep><return>model<block_end><def_stmt>load_checkpoint path model optimizer reset_optimizer<block_start><global>global_step<line_sep><global>global_epoch<line_sep>print("Load checkpoint from: {}".format(path))<line_sep>checkpoint=torch.load(path)<line_sep>model.load_state_dict(checkpoint["state_dict"])<if_stmt><not>reset_optimizer<block_start>optimizer_state=checkpoint["optimizer"]<if_stmt>optimizer_state<is><not><none><block_start>print("Load optimizer state from {}".format(path))<line_sep>optimizer.load_state_dict(checkpoint["optimizer"])<block_end><block_end>global_step=checkpoint["global_step"]<line_sep>global_epoch=checkpoint["global_epoch"]<line_sep><return>model<block_end><def_stmt>_load_embedding path model<block_start>state=torch.load(path)["state_dict"]<line_sep>key="seq2seq.encoder.embed_tokens.weight"<line_sep>model.seq2seq.encoder.embed_tokens.weight.data=state[key]<block_end># https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 <def_stmt>restore_parts path model<block_start>print("Restore part of the model from: {}".format(path))<line_sep>state=torch.load(path)["state_dict"]<line_sep>model_dict=model.state_dict()<line_sep>valid_state_dict={k:v<for>k,v state.items()<if>k<in>model_dict}<line_sep>model_dict.update(valid_state_dict)<line_sep>model.load_state_dict(model_dict)<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=docopt(__doc__)<line_sep>print("Command line args:\n" 
args)<line_sep>checkpoint_dir=args["--checkpoint-dir"]<line_sep>checkpoint_path=args["--checkpoint"]<line_sep>checkpoint_seq2seq_path=args["--checkpoint-seq2seq"]<line_sep>checkpoint_postnet_path=args["--checkpoint-postnet"]<line_sep>load_embedding=args["--load-embedding"]<line_sep>checkpoint_restore_parts=args["--restore-parts"]<line_sep>speaker_id=args["--speaker-id"]<line_sep>speaker_id=int(speaker_id)<if>speaker_id<is><not><none><else><none><line_sep>data_root=args["--data-root"]<if_stmt>data_root<is><none><block_start>data_root=join(dirname(__file__) "data" "ljspeech")<block_end>log_event_path=args["--log-event-path"]<line_sep>reset_optimizer=args["--reset-optimizer"]<line_sep># Which model to be trained train_seq2seq=args["--train-seq2seq-only"]<line_sep>train_postnet=args["--train-postnet-only"]<line_sep># train both if not specified <if_stmt><not>train_seq2seq<and><not>train_postnet<block_start>print("Training whole model")<line_sep>train_seq2seq,train_postnet=<true> <true><block_end><if_stmt>train_seq2seq<block_start>print("Training seq2seq model")<block_end><elif_stmt>train_postnet<block_start>print("Training postnet model")<block_end><else_stmt><block_start><assert_stmt><false> "must be specified wrong args"<block_end># Override hyper parameters hparams.parse(args["--hparams"])<line_sep>print(hparams_debug_string())<assert_stmt>hparams.name<eq>"deepvoice3"<line_sep># Presets <if_stmt>hparams.preset<is><not><none><and>hparams.preset<ne>""<block_start>preset=hparams.presets[hparams.preset]<import_stmt>json<line_sep>hparams.parse_json(json.dumps(preset))<line_sep>print("Override hyper parameters with preset \"{}\": {}".format(hparams.preset json.dumps(preset indent=4)))<block_end>_frontend=getattr(frontend hparams.frontend)<line_sep>os.makedirs(checkpoint_dir exist_ok=<true>)<line_sep># Input dataset definitions X=FileSourceDataset(TextDataSource(data_root speaker_id))<line_sep>Mel=FileSourceDataset(MelSpecDataSource(data_root speaker_id))<line_sep>Y=FileSourceDataset(LinearSpecDataSource(data_root speaker_id))<line_sep># Prepare sampler frame_lengths=Mel.file_data_source.frame_lengths<line_sep>sampler=PartialyRandomizedSimilarTimeLengthSampler(frame_lengths batch_size=hparams.batch_size)<line_sep># Dataset and Dataloader setup dataset=PyTorchDataset(X Mel Y)<line_sep>data_loader=data_utils.DataLoader(dataset batch_size=hparams.batch_size num_workers=hparams.num_workers sampler=sampler collate_fn=collate_fn pin_memory=hparams.pin_memory)<line_sep>print("dataloader_prepared")<line_sep># Model model=build_model()<if_stmt>use_cuda<block_start>model=model.cuda()<block_end>optimizer=optim.Adam(model.get_trainable_parameters() lr=hparams.initial_learning_rate betas=(hparams.adam_beta1 hparams.adam_beta2) eps=hparams.adam_eps weight_decay=hparams.weight_decay)<if_stmt>checkpoint_restore_parts<is><not><none><block_start>restore_parts(checkpoint_restore_parts model)<block_end># Load checkpoints <if_stmt>checkpoint_postnet_path<is><not><none><block_start>load_checkpoint(checkpoint_postnet_path model.postnet optimizer reset_optimizer)<block_end><if_stmt>checkpoint_seq2seq_path<is><not><none><block_start>load_checkpoint(checkpoint_seq2seq_path model.seq2seq optimizer reset_optimizer)<block_end><if_stmt>checkpoint_path<is><not><none><block_start>load_checkpoint(checkpoint_path model optimizer reset_optimizer)<block_end># Load embedding <if_stmt>load_embedding<is><not><none><block_start>print("Loading embedding from {}".format(load_embedding))<line_sep>_load_embedding(load_embedding 
model)<block_end># Setup summary writer for tensorboard <if_stmt>log_event_path<is><none><block_start>log_event_path="log/run-test"+str(datetime.now()).replace(" " "_")<block_end>print("Log event path: {}".format(log_event_path))<line_sep>writer=SummaryWriter(log_dir=log_event_path)<line_sep># Train! <try_stmt><block_start>train(model data_loader optimizer writer init_lr=hparams.initial_learning_rate checkpoint_dir=checkpoint_dir checkpoint_interval=hparams.checkpoint_interval nepochs=hparams.nepochs clip_thresh=hparams.clip_thresh train_seq2seq=train_seq2seq train_postnet=train_postnet)<block_end><except_stmt>KeyboardInterrupt<block_start>save_checkpoint(model optimizer global_step checkpoint_dir global_epoch train_seq2seq train_postnet)<block_end>print("Finished")<line_sep>sys.exit(0)<block_end>
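# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the training script above, written as plain
# Python for readability): the guided-attention penalty used in train() builds
# a soft mask W[n, t] = 1 - exp(-(n/N - t/T)^2 / (2 g^2)) via guided_attention().
# The mask is ~0 near the encoder/decoder diagonal and approaches 1 away from
# it, so the added term (attn * soft_mask).mean() discourages non-diagonal
# alignments. The toy sizes below are made up purely for illustration.
import numpy as np

def toy_guided_attention(N, T, g=0.2):
    # Vectorised version of the same formula as guided_attention() above,
    # restricted to the filled N x T block (no max_N/max_T zero padding).
    n = np.arange(N)[:, None] / N
    t = np.arange(T)[None, :] / T
    return 1.0 - np.exp(-(n - t) ** 2 / (2.0 * g * g))

W = toy_guided_attention(N=6, T=8)
print(np.round(W, 2))   # near zero along the diagonal, close to 1 in the corners
print(W.shape)          # (6, 8): encoder positions x decoder positions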
# deltat.py time difference calculation for sensor fusion # Released under the MIT License (MIT) # Copyright (c) 2018 <NAME> # Provides TimeDiff function and DeltaT class. # The following notes cover special cases. Where the device performing fusion # is linked to the IMU and is running MicroPython no special treatment is # needed. # The special cases are: # 1. Device connected to the IMU is linked to a separate platform doing fusion. # 2. Either or both are not running MicroPython. # If the device providing the vectors is not running on MicroPython the user # must supply timestamps and a function capable of differencing these. The # function is passed to the Fusion constructor and the timestamp is provided # along with the vector, being the time when the vector was acquired. # If the device providing the vectors is running MicroPython but fusion is # being performed on a device which is not, the user must provide their own # implementation of ticks_diff which accounts for MicroPython rollover and # must supply the returned ticks_us() values as a timestamp. # Under MicroPython TimeDiff(start, end) uses time.ticks_diff. # A DeltaT instance, called with function call syntax, returns a time # difference from the previous call as a float value. Units seconds. # If running under MicroPython and no time differencing function is supplied # to the Fusion constructor it uses time.ticks_us as its time source and a # default timediff function using time.ticks_diff() with a division by 1e6. # If time differencing function is supplied a timestamp must be passsed as an # arg to instance calls of Fusion.update() or Fusion.update_nomag(). In the # async version the user supplied read_coro() must return a timestamp with the # vector. # On 1st pass dt evidently can't be computed. A notional value of 100μs is # returned. The Madgwick algorithm takes seconds to stabilise. <try_stmt><block_start><import_stmt>utime<as>time<block_end><except_stmt>ImportError<block_start><import_stmt>time<block_end>is_micropython=hasattr(time 'ticks_diff')<class_stmt>DeltaT()<block_start><def_stmt>__init__ self timediff<block_start><if_stmt>timediff<is><none><block_start>self.expect_ts=<false><if_stmt>is_micropython<block_start>self.timediff=<lambda>start end:time.ticks_diff(start end)/1000000<block_end><else_stmt><block_start><raise>ValueError('You must define a timediff function')<block_end><block_end><else_stmt><block_start>self.expect_ts=<true><line_sep>self.timediff=timediff<block_end>self.start_time=<none><block_end><def_stmt>__call__ self ts<block_start><if_stmt>self.expect_ts<block_start><if_stmt>ts<is><none><block_start><raise>ValueError('Timestamp expected but not supplied.')<block_end><block_end><else_stmt><block_start><if_stmt>is_micropython<block_start>ts=time.ticks_us()<block_end><else_stmt><block_start><raise>RuntimeError('Not MicroPython: provide timestamps and a timediff function')<block_end><block_end># ts is now valid <if_stmt>self.start_time<is><none># 1st call: self.start_time is invalid <block_start>self.start_time=ts<line_sep><return>0.0001<block_end># 100μs notional delay. 1st reading is invalid in any case dt=self.timediff(ts self.start_time)<line_sep>self.start_time=ts<line_sep><return>dt<block_end><block_end>
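# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of deltat.py): running DeltaT on a
# platform that is NOT MicroPython, as described in the notes above. The caller
# supplies its own timestamps plus a differencing function that returns
# seconds; DeltaT calls timediff(ts, start_time), i.e. (newer, older). The
# helper name and the use of time.monotonic() are illustrative choices only.
import time

def monotonic_diff(newer, older):
    # Timestamps are time.monotonic() floats, so the difference is already in seconds.
    return newer - older

deltat = DeltaT(monotonic_diff)   # assumes the DeltaT class above is in scope

for i in range(3):
    ts = time.monotonic()         # timestamp taken when the sensor vector was read
    dt = deltat(ts)               # the first call returns the notional 100 microsecond value
    print('dt = {:.6f} s'.format(dt))
    time.sleep(0.01)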
<import_from_stmt>fuzzconfig FuzzConfig<import_stmt>interconnect<import_stmt>nets<import_stmt>pytrellis<import_stmt>re<line_sep>jobs=[{"pos":[(47 0) (48 0) (49 0)] "cfg":FuzzConfig(job="PIOROUTEL" family="ECP5" device="LFE5U-45F" ncl="pioroute.ncl" tiles=["MIB_R47C0:PICL0" "MIB_R48C0:PICL1" "MIB_R49C0:PICL2"])} {"pos":[(47 90) (48 90) (49 90)] "cfg":FuzzConfig(job="PIOROUTER" family="ECP5" device="LFE5U-45F" ncl="pioroute.ncl" tiles=["MIB_R47C90:PICR0" "MIB_R48C90:PICR1" "MIB_R49C90:PICR2"])} {"pos":[(0 22) (1 23) (0 22) (1 23)] "cfg":FuzzConfig(job="PIOROUTET" family="ECP5" device="LFE5U-45F" ncl="pioroute.ncl" tiles=["MIB_R0C22:PIOT0" "MIB_R0C23:PIOT1" "MIB_R1C22:PICT0" "MIB_R1C23:PICT1"])} {"pos":[(71 11) (71 12) (70 11) (70 12)] "cfg":FuzzConfig(job="PIOROUTET" family="ECP5" device="LFE5U-45F" ncl="pioroute.ncl" tiles=["MIB_R71C11:PICB0" "MIB_R71C12:PICB1"])} {"pos":[(71 18) (70 18)] "cfg":FuzzConfig(job="PIOROUTESB" family="ECP5" device="LFE5U-45F" ncl="pioroute_spicb.ncl" tiles=["MIB_R71C18:SPICB0"])} ]<def_stmt>main <block_start>pytrellis.load_database("../../../database")<for_stmt>job jobs<block_start>cfg=job["cfg"]<line_sep>cfg.setup()<def_stmt>nn_filter net netnames<block_start><return><not>nets.is_cib(net)<block_end>orig_tiles=cfg.tiles<for_stmt>pos job["pos"]# Put fixed connections in the most appropriate tile <block_start>target_tile=<none><for_stmt>tile orig_tiles<block_start><if_stmt>"R{}C{}".format(pos[0] pos[1])<in>tile<block_start>target_tile=tile<line_sep><break><block_end><block_end><if_stmt>target_tile<is><not><none><block_start>cfg.tiles=[target_tile]+[_<for>_ orig_tiles<if>_<ne>target_tile]<block_end><else_stmt><block_start>cfg.tiles=orig_tiles<block_end>interconnect.fuzz_interconnect(config=cfg location=pos netname_predicate=nn_filter netname_filter_union=<false> func_cib=<true>)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
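# ---------------------------------------------------------------------------
# Illustrative sketch (standalone, not part of the fuzzer): what the tile
# re-ordering in main() does. The tile whose name contains "R{row}C{col}" for
# the current position is moved to the front of cfg.tiles so fixed connections
# are attributed to it, while the remaining tiles keep their original order.
# The tile names are copied from the first job above; the position is arbitrary.
orig_tiles = ["MIB_R47C0:PICL0", "MIB_R48C0:PICL1", "MIB_R49C0:PICL2"]
pos = (48, 0)

target_tile = None
for tile in orig_tiles:
    if "R{}C{}".format(pos[0], pos[1]) in tile:
        target_tile = tile
        break

tiles = [target_tile] + [t for t in orig_tiles if t != target_tile]
print(tiles)   # ['MIB_R48C0:PICL1', 'MIB_R47C0:PICL0', 'MIB_R49C0:PICL2']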
""" The foo integration instruments the bar and baz features of the foo library. Enabling ~~~~~~~~ The foo integration is enabled automatically when using :ref:`ddtrace-run <ddtracerun>` or :ref:`patch_all() <patch_all>`. Or use :ref:`patch() <patch>` to manually enable the integration:: from ddtrace import patch patch(foo=True) Global Configuration ~~~~~~~~~~~~~~~~~~~~ .. py:data:: ddtrace.config.foo["service"] The service name reported by default for foo instances. This option can also be set with the ``DD_FOO_SERVICE`` environment variable. Default: ``"foo"`` Instance Configuration ~~~~~~~~~~~~~~~~~~~~~~ To configure the foo integration on an per-instance basis use the ``Pin`` API:: import foo from ddtrace import Pin myfoo = foo.Foo() Pin.override(myfoo, service="myfoo") """<import_from_stmt>...internal.utils.importlib require_modules<line_sep>required_modules=["foo"]<with_stmt>require_modules(required_modules)<as>missing_modules<block_start><if_stmt><not>missing_modules<block_start><import_from_stmt>.patch patch<import_from_stmt>.patch unpatch<line_sep>__all__=["patch" "unpatch"]<block_end><block_end>
<import_stmt>lldb<import_from_stmt>lldbsuite.test.decorators *<import_stmt>lldbsuite.test.lldbtest<as>lldbtest<import_stmt>lldbsuite.test.lldbutil<as>lldbutil<import_stmt>os<import_stmt>unittest2<class_stmt>TestSwiftOptimizedBoundGenericEnum(lldbtest.TestBase)<block_start>mydir=lldbtest.TestBase.compute_mydir(__file__)<line_sep>@swiftTest<def_stmt>test self<block_start>"""Test the bound generic enum types in "optimized" code."""<line_sep>self.build()<line_sep>target,process,thread,bkpt=lldbutil.run_to_source_breakpoint(self 'break one' lldb.SBFileSpec('main.swift'))<line_sep>bkpt_two=target.BreakpointCreateBySourceRegex('break two' lldb.SBFileSpec('main.swift'))<line_sep>self.assertGreater(bkpt_two.GetNumLocations() 0)<line_sep>var_self=self.frame().FindVariable("self")<line_sep># FIXME, this fails with a data extractor error. lldbutil.check_variable(self var_self <false> value=<none>)<line_sep>lldbutil.continue_to_breakpoint(process bkpt_two)<line_sep>var_self=self.frame().FindVariable("self")<line_sep>lldbutil.check_variable(self var_self <true> value="success")<block_end><block_end>
# ------------------------------------------------------------------------------------------ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. # ------------------------------------------------------------------------------------------ # These tiling implementations are adapted from PANDA Kaggle solutions, for example: # https://github.com/kentaroy47/Kaggle-PANDA-1st-place-solution/blob/master/src/data_process/a00_save_tiles.py <import_from_stmt>typing Any Optional Tuple<import_stmt>numpy<as>np<def_stmt>get_1d_padding length:int tile_size:int<arrow>Tuple[int int]<block_start>"""Computes symmetric padding for `length` to be divisible by `tile_size`."""<line_sep>pad=(tile_size-length%tile_size)%tile_size<line_sep><return>(pad<floordiv>2 pad-pad<floordiv>2)<block_end><def_stmt>pad_for_tiling_2d array:np.ndarray tile_size:int channels_first:Optional[bool]=<true> **pad_kwargs:Any<arrow>Tuple[np.ndarray np.ndarray]<block_start>"""Symmetrically pads a 2D `array` such that both dimensions are divisible by `tile_size`. :param array: 2D image array. :param tile_size: Width/height of each tile in pixels. :param channels_first: Whether `array` is in CHW (`True`, default) or HWC (`False`) layout. :param pad_kwargs: Keyword arguments to be passed to `np.pad()` (e.g. `constant_values=0`). :return: A tuple containing: - `padded_array`: Resulting array, in the same CHW/HWC layout as the input. - `offset`: XY offset introduced by the padding. Add this to coordinates relative to the original array to obtain indices for the padded array. """<line_sep>height,width=array.shape[1:]<if>channels_first<else>array.shape[:-1]<line_sep>padding_h=get_1d_padding(height tile_size)<line_sep>padding_w=get_1d_padding(width tile_size)<line_sep>padding=[padding_h padding_w]<line_sep>channels_axis=0<if>channels_first<else>2<line_sep>padding.insert(channels_axis (0 0))# zero padding on channels axis padded_array=np.pad(array padding **pad_kwargs)<line_sep>offset=(padding_w[0] padding_h[0])<line_sep><return>padded_array np.array(offset)<block_end><def_stmt>tile_array_2d array:np.ndarray tile_size:int channels_first:Optional[bool]=<true> **pad_kwargs:Any<arrow>Tuple[np.ndarray np.ndarray]<block_start>"""Split an image array into square non-overlapping tiles. The array will be padded symmetrically if its dimensions are not exact multiples of `tile_size`. :param array: Image array. :param tile_size: Width/height of each tile in pixels. :param pad_kwargs: Keyword arguments to be passed to `np.pad()` (e.g. `constant_values=0`). :param channels_first: Whether `array` is in CHW (`True`, default) or HWC (`False`) layout. :return: A tuple containing: - `tiles`: A batch of tiles in NCHW layout. - `coords`: XY coordinates of each tile, in the same order. 
"""<line_sep>padded_array,(offset_w offset_h)=pad_for_tiling_2d(array tile_size channels_first **pad_kwargs)<if_stmt>channels_first<block_start>channels,height,width=padded_array.shape<block_end><else_stmt><block_start>height,width,channels=padded_array.shape<block_end>n_tiles_h=height<floordiv>tile_size<line_sep>n_tiles_w=width<floordiv>tile_size<if_stmt>channels_first<block_start>intermediate_shape=(channels n_tiles_h tile_size n_tiles_w tile_size)<line_sep>axis_order=(1 3 0 2 4)# (n_tiles_h, n_tiles_w, channels, tile_size, tile_size) output_shape=(n_tiles_h<times>n_tiles_w channels tile_size tile_size)<block_end><else_stmt><block_start>intermediate_shape=(n_tiles_h tile_size n_tiles_w tile_size channels)<line_sep>axis_order=(0 2 1 3 4)# (n_tiles_h, n_tiles_w, tile_size, tile_size, channels) output_shape=(n_tiles_h<times>n_tiles_w tile_size tile_size channels)<block_end>tiles=padded_array.reshape(intermediate_shape)# Split width and height axes tiles=tiles.transpose(axis_order)<line_sep>tiles=tiles.reshape(output_shape)# Flatten tile batch dimension # Compute top-left coordinates of every tile, relative to the original array's origin coords_h=tile_size<times>np.arange(n_tiles_h)-offset_h<line_sep>coords_w=tile_size<times>np.arange(n_tiles_w)-offset_w<line_sep># Shape: (n_tiles_h * n_tiles_w, 2) coords=np.stack(np.meshgrid(coords_w coords_h) axis=-1).reshape(-1 2)<line_sep><return>tiles coords<block_end><def_stmt>assemble_tiles_2d tiles:np.ndarray coords:np.ndarray fill_value:Optional[float]=np.nan channels_first:Optional[bool]=<true><arrow>Tuple[np.ndarray np.ndarray]<block_start>"""Assembles a 2D array from sequences of tiles and coordinates. :param tiles: Stack of tiles with batch dimension first. :param coords: XY tile coordinates, assumed to be spaced by multiples of `tile_size` (shape: [N, 2]). :param tile_size: Size of each tile; must be >0. :param fill_value: Value to assign to empty elements (default: `NaN`). :param channels_first: Whether each tile is in CHW (`True`, default) or HWC (`False`) layout. :return: A tuple containing: - `array`: The reassembled 2D array with the smallest dimensions to contain all given tiles. - `offset`: The lowest XY coordinates. - `offset`: XY offset introduced by the assembly. Add this to tile coordinates to obtain indices for the assembled array. """<if_stmt>coords.shape[0]<ne>tiles.shape[0]<block_start><raise>ValueError(f"Tile coordinates and values must have the same length, "<concat>f"got {coords.shape[0]} and {tiles.shape[0]}")<block_end><if_stmt>channels_first<block_start>n_tiles,channels,tile_size,_=tiles.shape<block_end><else_stmt><block_start>n_tiles,tile_size,_,channels=tiles.shape<block_end>tile_xs,tile_ys=coords.T<line_sep>x_min,x_max=min(tile_xs) max(tile_xs+tile_size)<line_sep>y_min,y_max=min(tile_ys) max(tile_ys+tile_size)<line_sep>width=x_max-x_min<line_sep>height=y_max-y_min<line_sep>output_shape=(channels height width)<if>channels_first<else>(height width channels)<line_sep>array=np.full(output_shape fill_value)<line_sep>offset=np.array([-x_min -y_min])<for_stmt>idx range(n_tiles)<block_start>row=coords[idx 1]+offset[1]<line_sep>col=coords[idx 0]+offset[0]<if_stmt>channels_first<block_start>array[: row:row+tile_size col:col+tile_size]=tiles[idx]<block_end><else_stmt><block_start>array[row:row+tile_size col:col+tile_size :]=tiles[idx]<block_end><block_end><return>array offset<block_end>
# -*- coding:utf-8 -*- <import_from_stmt>uiObject uiObject<line_sep># main入口 <if_stmt>__name__<eq>'__main__'<block_start>ui=uiObject()<line_sep>ui.ui_process()<block_end>
<import_from_stmt>mahjong.hand_calculating.hand HandCalculator<import_from_stmt>mahjong.meld Meld<import_from_stmt>mahjong.hand_calculating.hand_config HandConfig OptionalRules<import_from_stmt>mahjong.shanten Shanten<import_from_stmt>mahjong.tile TilesConverter<line_sep>calculator=HandCalculator()<line_sep># useful helper <def_stmt>print_hand_result hand_result<block_start>print(hand_result.han hand_result.fu)<line_sep>print(hand_result.cost['main'])<line_sep>print(hand_result.yaku)<for_stmt>fu_item hand_result.fu_details<block_start>print(fu_item)<block_end>print('')<block_end>#################################################################### # Tanyao hand by ron # #################################################################### # we had to use all 14 tiles in that array tiles=TilesConverter.string_to_136_array(man='22444' pin='333567' sou='444')<line_sep>win_tile=TilesConverter.string_to_136_array(sou='4')[0]<line_sep>result=calculator.estimate_hand_value(tiles win_tile)<line_sep>print_hand_result(result)<line_sep>#################################################################### # Tanyao hand by tsumo # #################################################################### result=calculator.estimate_hand_value(tiles win_tile config=HandConfig(is_tsumo=<true>))<line_sep>print_hand_result(result)<line_sep>#################################################################### # Add open set to hand # #################################################################### melds=[Meld(meld_type=Meld.PON tiles=TilesConverter.string_to_136_array(man='444'))]<line_sep>result=calculator.estimate_hand_value(tiles win_tile melds=melds config=HandConfig(options=OptionalRules(has_open_tanyao=<true>)))<line_sep>print_hand_result(result)<line_sep>#################################################################### # Shanten calculation # #################################################################### shanten=Shanten()<line_sep>tiles=TilesConverter.string_to_34_array(man='13569' pin='123459' sou='443')<line_sep>result=shanten.calculate_shanten(tiles)<line_sep>print(result)<line_sep>#################################################################### # Kazoe as a sanbaiman # #################################################################### tiles=TilesConverter.string_to_136_array(man='22244466677788')<line_sep>win_tile=TilesConverter.string_to_136_array(man='7')[0]<line_sep>melds=[Meld(Meld.KAN TilesConverter.string_to_136_array(man='2222') <false>)]<line_sep>dora_indicators=[TilesConverter.string_to_136_array(man='1')[0] TilesConverter.string_to_136_array(man='1')[0] TilesConverter.string_to_136_array(man='1')[0] TilesConverter.string_to_136_array(man='1')[0] ]<line_sep>config=HandConfig(is_riichi=<true> options=OptionalRules(kazoe=HandConfig.KAZOE_SANBAIMAN))<line_sep>result=calculator.estimate_hand_value(tiles win_tile melds dora_indicators config)<line_sep>print_hand_result(result)<line_sep>#################################################################### # Change the cost of yaku # #################################################################### config=HandConfig(is_renhou=<true>)<line_sep># renhou as an yakuman - old style config.yaku.renhou.han_closed=13<line_sep>tiles=TilesConverter.string_to_136_array(man='22444' pin='333567' sou='444')<line_sep>win_tile=TilesConverter.string_to_136_array(sou='4')[0]<line_sep>result=calculator.estimate_hand_value(tiles win_tile config=config)<line_sep>print_hand_result(result)<line_sep>
<import_from_future_stmt> division print_function<line_sep>__author__='saeedamen'# <NAME> / <EMAIL> # # Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro # # See the License for the specific language governing permissions and limitations under the License. # ## Web server components <import_stmt>dash_core_components<as>dcc<import_stmt>dash_html_components<as>html<import_stmt>base64<import_stmt>os<line_sep>## Date/time components <import_stmt>pandas<as>pd<import_stmt>datetime<import_from_stmt>datetime timedelta<import_from_stmt>collections OrderedDict<import_from_stmt>pandas.tseries.offsets *<import_from_stmt>tcapy.vis.layoutdash LayoutDash<line_sep>######################################################################################################################## <class_stmt>LayoutDashImplGen(LayoutDash)<block_start>"""This implements the LayoutDash abstract class, to create the web based GUI for the tcapy application. It creates two web pages - detailed_page - for doing detailed tcapy analysis for a specific currency pair - aggregated_page - for more aggregated style analysis across multiple currency pairs and over multiple time periods """<def_stmt>__init__ self app=<none> constants=<none> url_prefix=''<block_start>super(LayoutDashImplGen self).__init__(app=app constants=constants url_prefix=url_prefix)<line_sep>available_dates=pd.date_range(datetime.datetime.today().date()-timedelta(days=self._constants.gui_lookback_window) datetime.datetime.today().date() freq=BDay())<line_sep>times=pd.date_range("0:00" "23:59" freq="15min")<line_sep>### create the possible values for drop down boxes on both pages # Reverse date list (for both detailed and aggregated pages) self.available_dates=[x.date()<for>x available_dates[::-1]]<line_sep># For detailed page only self.available_times=[t.strftime("%H:%M")<for>t times]<line_sep>self.available_tickers=self._constants.available_tickers_dictionary['All']<line_sep>self.available_venues=self._constants.available_venues_dictionary['All']<line_sep>self.available_brokers=self._constants.available_brokers_dictionary['All']<line_sep>self.available_algos=self._constants.available_algos_dictionary['All']<line_sep>self.available_market_data=self._constants.available_market_data<line_sep>self.available_order_plot_lines=['candlestick' 'mid' 'bid' 'ask' 'arrival' 'twap' 'vwap' 'buy trade' 'sell trade']<line_sep>self.available_execution_plot_lines=['candlestick' 'mid' 'bid' 'ask' 'buy trade' 'sell trade']<line_sep>self.available_slippage_bounds=['0.25' '0.5' '1.0' '1.25' '1.5' '2.0' 'bid/ask']<line_sep># For aggregated page only self.available_grouped_tickers=self._flatten_dictionary(self._constants.available_tickers_dictionary)<line_sep>self.available_grouped_venues=self._flatten_dictionary(self._constants.available_venues_dictionary)<line_sep>self.available_grouped_brokers=self._flatten_dictionary(self._constants.available_brokers_dictionary)<line_sep>self.available_grouped_algos=self._flatten_dictionary(self._constants.available_algos_dictionary)<line_sep>self.available_event_types=self._constants.available_event_types<line_sep>self.available_metrics=self._constants.available_metrics<line_sep>self.available_reload=['no' 'yes']<line_sep>self.available_visualization=['yes' 'no']<line_sep>self.construct_layout()<block_end><def_stmt>_flatten_dictionary self dictionary<block_start>available=dictionary['All']<line_sep>available_groups=self._util_func.dict_key_list(dictionary.keys())<line_sep><return>self.flatten_list_of_strings([available_groups 
available])<block_end><def_stmt>construct_layout self<block_start>self.page_content=html.Div([dcc.Location(id='url' refresh=<false>) html.Div(id='page-content')])<line_sep>link_bar_dict={'Detailed':'detailed' 'Aggregated':'aggregated' 'Compliance':'compliance'}<line_sep>trade_outliers_cols=['Date' 'ticker' 'side' 'notional cur' 'benchmark' 'exec not' 'exec not in rep cur' 'slippage']<line_sep>broker_cols=['Date' 'by broker notional (rep cur)']<line_sep># Main page for detailed analysing of (eg. over the course of a few days) self.pages['detailed']=html.Div([self._sc.header_bar('FX: Detailed - Trader Analysis' img='logo.png') self._sc.link_bar(link_bar_dict) self._sc.width_row_cell(html.B("Status: ok" id='detailed-status') margin_left=5) self._sc.horizontal_bar() # Dropdown selection boxes html.Div([self._sc.drop_down(caption='Start Date' id={'start-date-val':self.available_dates 'start-time-val':self.available_times} prefix_id='detailed') self._sc.drop_down(caption='Finish Date' id=OrderedDict([('finish-date-val' self.available_dates) ('finish-time-val' self.available_times)]) prefix_id='detailed') self._sc.drop_down(caption='Ticker' id='ticker-val' prefix_id='detailed' drop_down_values=self.available_tickers) self._sc.drop_down(caption='Broker' id='broker-val' prefix_id='detailed' drop_down_values=self.available_grouped_brokers) self._sc.drop_down(caption='Algo' id='algo-val' prefix_id='detailed' drop_down_values=self.available_grouped_algos) self._sc.drop_down(caption='Venue' id='venue-val' prefix_id='detailed' drop_down_values=self.available_grouped_venues) self._sc.drop_down(caption='Market Data' id='market-data-val' prefix_id='detailed' drop_down_values=self.available_market_data) self._sc.drop_down(caption='Metric' id='metric-val' prefix_id='detailed' drop_down_values=self.available_metrics)]) self._sc.horizontal_bar() self._sc.button(caption='Calculate' id='calculation-button' prefix_id='detailed') # self.button(caption = 'Print PDF', id = 'detailed-print-pdf-button', className = 'no-print'), # Orders self._sc.horizontal_bar() self._sc.plot(caption='Orders: Timeline' id='order-candle-timeline-plot' prefix_id='detailed' element_add=self._sc.timeline_dropdown('detailed-order-candle-timeline-plot' self.available_order_plot_lines) downloadplot_caption='Download CSV' downloadplot_tag='order-candle-timeline-download-link' download_file='download_order_candle_timeline' height=500) self._sc.plot(caption='Orders: Markout' id='order-markout-plot' prefix_id='detailed' height=500) self._sc.plot(caption='Orders: Histogram vs PDF fit' id='order-dist-plot' prefix_id='detailed' height=500) # Execution trades self._sc.horizontal_bar() self._sc.plot(caption='Executions: Timeline' id='execution-candle-timeline-plot' prefix_id='detailed' element_add=self._sc.timeline_dropdown('detailed-execution-candle-timeline-plot' self.available_execution_plot_lines) downloadplot_caption='Download CSV' downloadplot_tag='execution-candle-timeline-download-link' download_file='download_execution_candle_timeline.csv' height=500) self._sc.plot(caption='Executions: Markout' id='execution-markout-plot' prefix_id='detailed' height=500) self._sc.plot(caption='Executions: Histogram vs PDF fit' id='execution-dist-plot' prefix_id='detailed' height=500) # Detailed tcapy markout table for executions html.Div([html.H3('Executions: Markout Table') html.Div(id='detailed-execution-table')] style={'width':'1000px' 'display':'inline-block' 'marginBottom':5 'marginTop':5 'marginLeft':5 'marginRight':5}) ] style={'width':'1000px' 
'marginRight':'auto' 'marginLeft':'auto'})<line_sep>################################################################################################################ # Secondary page for analysing aggregated statistics over long periods of time, eg. who is the best broker? self.pages['aggregated']=html.Div([self._sc.header_bar('FX: Aggregated - Trader Analysis' img='logo.png') self._sc.link_bar(link_bar_dict) self._sc.width_row_cell(html.B("Status: ok" id='aggregated-status') margin_left=5) self._sc.horizontal_bar() # dropdown selection boxes html.Div([self._sc.drop_down(caption='Start Date' id='start-date-val' prefix_id='aggregated' drop_down_values=self.available_dates) self._sc.drop_down(caption='Finish Date' id='finish-date-val' prefix_id='aggregated' drop_down_values=self.available_dates) self._sc.drop_down(caption='Ticker' id='ticker-val' prefix_id='aggregated' drop_down_values=self.available_grouped_tickers multiselect=<true>) self._sc.drop_down(caption='Broker' id='broker-val' prefix_id='aggregated' drop_down_values=self.available_grouped_brokers multiselect=<true>) self._sc.drop_down(caption='Algo' id='algo-val' prefix_id='aggregated' drop_down_values=self.available_grouped_algos multiselect=<true>) self._sc.drop_down(caption='Venue' id='venue-val' prefix_id='aggregated' drop_down_values=self.available_grouped_venues multiselect=<true>) self._sc.drop_down(caption='Reload' id='reload-val' prefix_id='aggregated' drop_down_values=self.available_reload) self._sc.drop_down(caption='Market Data' id='market-data-val' prefix_id='aggregated' drop_down_values=self.available_market_data) self._sc.drop_down(caption='Event Type' id='event-type-val' prefix_id='aggregated' drop_down_values=self.available_event_types) self._sc.drop_down(caption='Metric' id='metric-val' prefix_id='aggregated' drop_down_values=self.available_metrics) ]) self._sc.horizontal_bar() self._sc.button(caption='Calculate' id='calculation-button' prefix_id='aggregated') # , msg_id='aggregated-status'), self._sc.horizontal_bar() # self.date_picker_range(caption='Start/Finish Dates', id='aggregated-date-val', offset=[-7,-1]), self._sc.plot(caption='Aggregated Trader: Summary' id=['execution-by-ticker-bar-plot' 'execution-by-venue-bar-plot'] prefix_id='aggregated' height=500) self._sc.horizontal_bar() self._sc.plot(caption='Aggregated Trader: Timeline' id='execution-by-ticker-timeline-plot' prefix_id='aggregated' height=500) self._sc.horizontal_bar() self._sc.plot(caption='Aggregated Trader: PDF fit ('+self._constants.reporting_currency+' notional)' id=['execution-by-ticker-dist-plot' 'execution-by-venue-dist-plot'] prefix_id='aggregated' height=500) self._sc.horizontal_bar()] style={'width':'1000px' 'marginRight':'auto' 'marginLeft':'auto'})<line_sep>################################################################################################################ self.pages['compliance']=html.Div([self._sc.header_bar('FX: Compliance Analysis' img='logo.png') self._sc.link_bar(link_bar_dict) self._sc.width_row_cell(html.B("Status: ok" id='compliance-status') margin_left=5) self._sc.horizontal_bar() # Dropdown selection boxes html.Div([self._sc.drop_down(caption='Start Date' id='start-date-val' prefix_id='compliance' drop_down_values=self.available_dates) self._sc.drop_down(caption='Finish Date' id='finish-date-val' prefix_id='compliance' drop_down_values=self.available_dates) self._sc.drop_down(caption='Ticker' id='ticker-val' prefix_id='compliance' drop_down_values=self.available_grouped_tickers multiselect=<true>) 
self._sc.drop_down(caption='Broker' id='broker-val' prefix_id='compliance' drop_down_values=self.available_grouped_brokers multiselect=<true>) self._sc.drop_down(caption='Algo' id='algo-val' prefix_id='compliance' drop_down_values=self.available_grouped_algos multiselect=<true>) self._sc.drop_down(caption='Venue' id='venue-val' prefix_id='compliance' drop_down_values=self.available_grouped_venues multiselect=<true>) self._sc.drop_down(caption='Reload' id='reload-val' prefix_id='compliance' drop_down_values=self.available_reload) self._sc.drop_down(caption='Market Data' id='market-data-val' prefix_id='compliance' drop_down_values=self.available_market_data) self._sc.drop_down(caption='Filter by Time' id='filter-time-of-day-val' prefix_id='compliance' drop_down_values=self.available_reload) self._sc.drop_down(caption='Start Time of Day' id='start-time-of-day-val' prefix_id='compliance' drop_down_values=self.available_times) self._sc.drop_down(caption='Finish Time of Day' id='finish-time-of-day-val' prefix_id='compliance' drop_down_values=self.available_times) self._sc.drop_down(caption='Slippage to Mid (bp)' id='slippage-bounds-val' prefix_id='compliance' drop_down_values=self.available_slippage_bounds) self._sc.drop_down(caption='Visualization' id='visualization-val' prefix_id='compliance' drop_down_values=self.available_visualization)]) self._sc.horizontal_bar() html.Div([self._sc.button(caption='Calculate' id='calculation-button' prefix_id='compliance') # self.date_picker(caption='Start Date', id='start-date-dtpicker', prefix_id='compliance'), # self.date_picker(caption='Finish Date', id='finish-date-dtpicker', prefix_id='compliance'), ]) self._sc.horizontal_bar() self._sc.table(caption='Compliance: Trade Outliers' id='execution-by-anomalous-table' prefix_id='compliance' columns=trade_outliers_cols downloadplot_caption='Trade outliers CSV' downloadplot_tag='execution-by-anomalous-download-link' download_file='download_execution_by_anomalous.csv') self._sc.table(caption='Compliance: Totals by Broker' id='summary-by-broker-table' prefix_id='compliance' columns=broker_cols downloadplot_caption='Download broker CSV' downloadplot_tag='summary-by-broker-download-link' download_file='download_broker.csv') self._sc.horizontal_bar()] style={'width':'1000px' 'marginRight':'auto' 'marginLeft':'auto'})<line_sep># ID flags self.id_flags={# Detailed trader page # 'timeline_trade_orders' : {'client-orders': 'order', 'executions': 'trade'}, # 'markout_trade_orders' : {'client-orders': 'order_df', 'executions': 'trade_df'}, 'detailed_candle_timeline_trade_order':{'execution':'sparse_market_trade_df' 'order':'sparse_market_order_df'} 'detailed_markout_trade_order':{'execution':'trade_df' 'order':'order_df'} 'detailed_table_trade_order':{'execution':'table_trade_df_markout_by_all'} 'detailed_dist_trade_order':{'execution':'dist_trade_df_by/pdf/side' 'order':'dist_order_df_by/pdf/side'} 'detailed_download_link_trade_order':{'execution-candle-timeline':'sparse_market_trade_df' 'order-candle-timeline':'sparse_market_order_df'} # Aggregated trader page 'aggregated_bar_trade_order':{'execution-by-ticker':'bar_trade_df_by/mean/ticker' 'execution-by-venue':'bar_trade_df_by/mean/venue'} 'aggregated_timeline_trade_order':{'execution-by-ticker':'timeline_trade_df_by/mean_date/ticker' 'execution-by-venue':'timeline_trade_df_by/mean_date/venue'} 'aggregated_dist_trade_order':{'execution-by-ticker':'dist_trade_df_by/pdf/ticker' 'execution-by-venue':'dist_trade_df_by/pdf/venue'} # Compliance page 
'compliance_metric_table_trade_order':{'execution-by-anomalous':'table_trade_df_slippage_by_worst_all' 'summary-by-broker':'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'} 'compliance_download_link_trade_order':{'execution-by-anomalous':'table_trade_df_slippage_by_worst_all' 'summary-by-broker':'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'} }<block_end><block_end>
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A wrapper for subprocess to make calling shell commands easier."""<import_stmt>codecs<import_stmt>logging<import_stmt>os<import_stmt>pipes<import_stmt>select<import_stmt>signal<import_stmt>string<import_stmt>subprocess<import_stmt>sys<import_stmt>time<line_sep>CATAPULT_ROOT_PATH=os.path.abspath(os.path.join(os.path.dirname(__file__) '..' '..' '..'))<line_sep>SIX_PATH=os.path.join(CATAPULT_ROOT_PATH 'third_party' 'six')<if_stmt>SIX_PATH<not><in>sys.path<block_start>sys.path.append(SIX_PATH)<block_end><import_stmt>six<import_from_stmt>devil base_error<line_sep>logger=logging.getLogger(__name__)<line_sep>_SafeShellChars=frozenset(string.ascii_letters+string.digits+'@%_-+=:,./')<line_sep># Cache the string-escape codec to ensure subprocess can find it # later. Return value doesn't matter. <if_stmt>six.PY2<block_start>codecs.lookup('string-escape')<block_end><def_stmt>SingleQuote s<block_start>"""Return an shell-escaped version of the string using single quotes. Reliably quote a string which may contain unsafe characters (e.g. space, quote, or other special characters such as '$'). The returned value can be used in a shell command line as one token that gets to be interpreted literally. Args: s: The string to quote. Return: The string quoted using single quotes. """<line_sep><return>pipes.quote(s)<block_end><def_stmt>DoubleQuote s<block_start>"""Return an shell-escaped version of the string using double quotes. Reliably quote a string which may contain unsafe characters (e.g. space or quote characters), while retaining some shell features such as variable interpolation. The returned value can be used in a shell command line as one token that gets to be further interpreted by the shell. The set of characters that retain their special meaning may depend on the shell implementation. This set usually includes: '$', '`', '\', '!', '*', and '@'. Args: s: The string to quote. Return: The string quoted using double quotes. """<if_stmt><not>s<block_start><return>'""'<block_end><elif_stmt>all(c<in>_SafeShellChars<for>c s)<block_start><return>s<block_end><else_stmt><block_start><return>'"'+s.replace('"' '\\"')+'"'<block_end><block_end><def_stmt>ShrinkToSnippet cmd_parts var_name var_value<block_start>"""Constructs a shell snippet for a command using a variable to shrink it. Takes into account all quoting that needs to happen. Args: cmd_parts: A list of command arguments. var_name: The variable that holds var_value. var_value: The string to replace in cmd_parts with $var_name Returns: A shell snippet that does not include setting the variable. """<def_stmt>shrink value<block_start>parts=(x<and>SingleQuote(x)<for>x value.split(var_value))<line_sep>with_substitutions=('"$%s"'%var_name).join(parts)<line_sep><return>with_substitutions<or>"''"<block_end><return>' '.join(shrink(part)<for>part cmd_parts)<block_end><def_stmt>Popen args stdin=<none> stdout=<none> stderr=<none> shell=<none> cwd=<none> env=<none># preexec_fn isn't supported on windows. 
# pylint: disable=unexpected-keyword-arg <block_start><if_stmt>sys.platform<eq>'win32'<block_start>close_fds=(stdin<is><none><and>stdout<is><none><and>stderr<is><none>)<line_sep>preexec_fn=<none><block_end><else_stmt><block_start>close_fds=<true><line_sep>preexec_fn=<lambda>:signal.signal(signal.SIGPIPE signal.SIG_DFL)<block_end><if_stmt>six.PY2<block_start><return>subprocess.Popen(args=args cwd=cwd stdin=stdin stdout=stdout stderr=stderr shell=shell close_fds=close_fds env=env preexec_fn=preexec_fn)<block_end><else_stmt># opens stdout in text mode, so that caller side always get 'str', # and there will be no type mismatch error. # Ignore any decoding error, so that caller will not crash due to # uncaught exception. Decoding errors are unavoidable, as we # do not know the encoding of the output, and in some output there # will be multiple encodings (e.g. adb logcat) <block_start><return>subprocess.Popen(args=args cwd=cwd stdin=stdin stdout=stdout stderr=stderr shell=shell close_fds=close_fds env=env preexec_fn=preexec_fn universal_newlines=<true> encoding='utf-8' errors='ignore')<block_end><block_end><def_stmt>Call args stdout=<none> stderr=<none> shell=<none> cwd=<none> env=<none><block_start>pipe=Popen(args stdout=stdout stderr=stderr shell=shell cwd=cwd env=env)<line_sep>pipe.communicate()<line_sep><return>pipe.wait()<block_end><def_stmt>RunCmd args cwd=<none><block_start>"""Opens a subprocess to execute a program and returns its return value. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. Returns: Return code from the command execution. """<line_sep>logger.debug(str(args)+' '+(cwd<or>''))<line_sep><return>Call(args cwd=cwd)<block_end><def_stmt>GetCmdOutput args cwd=<none> shell=<false> env=<none><block_start>"""Open a subprocess to execute a program and returns its output. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. env: If not None, a mapping that defines environment variables for the subprocess. Returns: Captures and returns the command's stdout. Prints the command's stderr to logger (which defaults to stdout). """<line_sep>(_ output)=GetCmdStatusAndOutput(args cwd shell env)<line_sep><return>output<block_end><def_stmt>_ValidateAndLogCommand args cwd shell<block_start><if_stmt>isinstance(args six.string_types)<block_start><if_stmt><not>shell<block_start><raise>Exception('string args must be run with shell=True')<block_end><block_end><else_stmt><block_start><if_stmt>shell<block_start><raise>Exception('array args must be run with shell=False')<block_end>args=' '.join(SingleQuote(str(c))<for>c args)<block_end><if_stmt>cwd<is><none><block_start>cwd=''<block_end><else_stmt><block_start>cwd=':'+cwd<block_end>logger.debug('[host]%s> %s' cwd args)<line_sep><return>args<block_end><def_stmt>GetCmdStatusAndOutput args cwd=<none> shell=<false> env=<none> merge_stderr=<false><block_start>"""Executes a subprocess and returns its exit code and output. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. 
cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. env: If not None, a mapping that defines environment variables for the subprocess. merge_stderr: If True, captures stderr as part of stdout. Returns: The 2-tuple (exit code, stdout). """<line_sep>status,stdout,stderr=GetCmdStatusOutputAndError(args cwd=cwd shell=shell env=env merge_stderr=merge_stderr)<if_stmt>stderr<block_start>logger.critical('STDERR: %s' stderr)<block_end>logger.debug('STDOUT: %s%s' stdout[:4096].rstrip() '<truncated>'<if>len(stdout)<g>4096<else>'')<line_sep><return>(status stdout)<block_end><def_stmt>StartCmd args cwd=<none> shell=<false> env=<none><block_start>"""Starts a subprocess and returns a handle to the process. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. env: If not None, a mapping that defines environment variables for the subprocess. Returns: A process handle from subprocess.Popen. """<line_sep>_ValidateAndLogCommand(args cwd shell)<line_sep><return>Popen(args stdout=subprocess.PIPE stderr=subprocess.PIPE shell=shell cwd=cwd env=env)<block_end><def_stmt>GetCmdStatusOutputAndError args cwd=<none> shell=<false> env=<none> merge_stderr=<false><block_start>"""Executes a subprocess and returns its exit code, output, and errors. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. env: If not None, a mapping that defines environment variables for the subprocess. merge_stderr: If True, captures stderr as part of stdout. Returns: The 3-tuple (exit code, stdout, stderr). """<line_sep>_ValidateAndLogCommand(args cwd shell)<line_sep>stderr=subprocess.STDOUT<if>merge_stderr<else>subprocess.PIPE<line_sep>pipe=Popen(args stdout=subprocess.PIPE stderr=stderr shell=shell cwd=cwd env=env)<line_sep>stdout,stderr=pipe.communicate()<line_sep><return>(pipe.returncode stdout stderr)<block_end><class_stmt>TimeoutError(base_error.BaseError)<block_start>"""Module-specific timeout exception."""<def_stmt>__init__ self output=<none><block_start>super(TimeoutError self).__init__('Timeout')<line_sep>self._output=output<block_end>@property<def_stmt>output self<block_start><return>self._output<block_end><block_end><def_stmt>_read_and_decode fd buffer_size<block_start>data=os.read(fd buffer_size)<if_stmt>data<and>six.PY3<block_start>data=data.decode('utf-8' errors='ignore')<block_end><return>data<block_end><def_stmt>_IterProcessStdoutFcntl process iter_timeout=<none> timeout=<none> buffer_size=4096 poll_interval=1<block_start>"""An fcntl-based implementation of _IterProcessStdout."""<line_sep># pylint: disable=too-many-nested-blocks <import_stmt>fcntl<try_stmt># Enable non-blocking reads from the child's stdout. 
<block_start>child_fd=process.stdout.fileno()<line_sep>fl=fcntl.fcntl(child_fd fcntl.F_GETFL)<line_sep>fcntl.fcntl(child_fd fcntl.F_SETFL fl|os.O_NONBLOCK)<line_sep>end_time=(time.time()+timeout)<if>timeout<else><none><line_sep>iter_end_time=(time.time()+iter_timeout)<if>iter_timeout<else><none><while_stmt><true><block_start><if_stmt>end_time<and>time.time()<g>end_time<block_start><raise>TimeoutError()<block_end><if_stmt>iter_end_time<and>time.time()<g>iter_end_time<block_start><yield><none><line_sep>iter_end_time=time.time()+iter_timeout<block_end><if_stmt>iter_end_time<block_start>iter_aware_poll_interval=min(poll_interval max(0 iter_end_time-time.time()))<block_end><else_stmt><block_start>iter_aware_poll_interval=poll_interval<block_end>read_fds,_,_=select.select([child_fd] [] [] iter_aware_poll_interval)<if_stmt>child_fd<in>read_fds<block_start>data=_read_and_decode(child_fd buffer_size)<if_stmt><not>data<block_start><break><block_end><yield>data<block_end><if_stmt>process.poll()<is><not><none># If process is closed, keep checking for output data (because of timing # issues). <block_start><while_stmt><true><block_start>read_fds,_,_=select.select([child_fd] [] [] iter_aware_poll_interval)<if_stmt>child_fd<in>read_fds<block_start>data=_read_and_decode(child_fd buffer_size)<if_stmt>data<block_start><yield>data<line_sep><continue><block_end><block_end><break><block_end><break><block_end><block_end><block_end><finally_stmt><block_start><try_stmt><block_start><if_stmt>process.returncode<is><none># Make sure the process doesn't stick around if we fail with an # exception. <block_start>process.kill()<block_end><block_end><except_stmt>OSError<block_start><pass><block_end>process.wait()<block_end><block_end><def_stmt>_IterProcessStdoutQueue process iter_timeout=<none> timeout=<none> buffer_size=4096 poll_interval=1<block_start>"""A Queue.Queue-based implementation of _IterProcessStdout. TODO(jbudorick): Evaluate whether this is a suitable replacement for _IterProcessStdoutFcntl on all platforms. """<line_sep># pylint: disable=unused-argument <if_stmt>six.PY3<block_start><import_stmt>queue<block_end><else_stmt><block_start><import_stmt>Queue<as>queue<block_end><import_stmt>threading<line_sep>stdout_queue=queue.Queue()<def_stmt>read_process_stdout # TODO(jbudorick): Pick an appropriate read size here. <block_start><while_stmt><true><block_start><try_stmt><block_start>output_chunk=_read_and_decode(process.stdout.fileno() buffer_size)<block_end><except_stmt>IOError<block_start><break><block_end>stdout_queue.put(output_chunk <true>)<if_stmt><not>output_chunk<and>process.poll()<is><not><none><block_start><break><block_end><block_end><block_end>reader_thread=threading.Thread(target=read_process_stdout)<line_sep>reader_thread.start()<line_sep>end_time=(time.time()+timeout)<if>timeout<else><none><try_stmt><block_start><while_stmt><true><block_start><if_stmt>end_time<and>time.time()<g>end_time<block_start><raise>TimeoutError()<block_end><try_stmt><block_start>s=stdout_queue.get(<true> iter_timeout)<if_stmt><not>s<block_start><break><block_end><yield>s<block_end><except_stmt>queue.Empty<block_start><yield><none><block_end><block_end><block_end><finally_stmt><block_start><try_stmt><block_start><if_stmt>process.returncode<is><none># Make sure the process doesn't stick around if we fail with an # exception. 
<block_start>process.kill()<block_end><block_end><except_stmt>OSError<block_start><pass><block_end>process.wait()<line_sep>reader_thread.join()<block_end><block_end>_IterProcessStdout=(_IterProcessStdoutQueue<if>sys.platform<eq>'win32'<else>_IterProcessStdoutFcntl)<line_sep>"""Iterate over a process's stdout. This is intentionally not public. Args: process: The process in question. iter_timeout: An optional length of time, in seconds, to wait in between each iteration. If no output is received in the given time, this generator will yield None. timeout: An optional length of time, in seconds, during which the process must finish. If it fails to do so, a TimeoutError will be raised. buffer_size: The maximum number of bytes to read (and thus yield) at once. poll_interval: The length of time to wait in calls to `select.select`. If iter_timeout is set, the remaining length of time in the iteration may take precedence. Raises: TimeoutError: if timeout is set and the process does not complete. Yields: basestrings of data or None. """<def_stmt>GetCmdStatusAndOutputWithTimeout args timeout cwd=<none> shell=<false> logfile=<none> env=<none><block_start>"""Executes a subprocess with a timeout. Args: args: List of arguments to the program, the program to execute is the first element. timeout: the timeout in seconds or None to wait forever. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. logfile: Optional file-like object that will receive output from the command as it is running. env: If not None, a mapping that defines environment variables for the subprocess. Returns: The 2-tuple (exit code, output). Raises: TimeoutError on timeout. """<line_sep>_ValidateAndLogCommand(args cwd shell)<line_sep>output=six.StringIO()<line_sep>process=Popen(args cwd=cwd shell=shell stdout=subprocess.PIPE stderr=subprocess.STDOUT env=env)<try_stmt><block_start><for_stmt>data _IterProcessStdout(process timeout=timeout)<block_start><if_stmt>logfile<block_start>logfile.write(data)<block_end>output.write(data)<block_end><block_end><except_stmt>TimeoutError<block_start><raise>TimeoutError(output.getvalue())<block_end>str_output=output.getvalue()<line_sep>logger.debug('STDOUT+STDERR: %s%s' str_output[:4096].rstrip() '<truncated>'<if>len(str_output)<g>4096<else>'')<line_sep><return>process.returncode str_output<block_end><def_stmt>IterCmdOutputLines args iter_timeout=<none> timeout=<none> cwd=<none> shell=<false> env=<none> check_status=<true><block_start>"""Executes a subprocess and continuously yields lines from its output. Args: args: List of arguments to the program, the program to execute is the first element. iter_timeout: Timeout for each iteration, in seconds. timeout: Timeout for the entire command, in seconds. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. env: If not None, a mapping that defines environment variables for the subprocess. check_status: A boolean indicating whether to check the exit status of the process after all output has been read. Yields: The output of the subprocess, line by line. Raises: CalledProcessError if check_status is True and the process exited with a non-zero exit status. 
"""<line_sep>cmd=_ValidateAndLogCommand(args cwd shell)<line_sep>process=Popen(args cwd=cwd shell=shell env=env stdout=subprocess.PIPE stderr=subprocess.STDOUT)<line_sep><return>_IterCmdOutputLines(process cmd iter_timeout=iter_timeout timeout=timeout check_status=check_status)<block_end><def_stmt>_IterCmdOutputLines process cmd iter_timeout=<none> timeout=<none> check_status=<true><block_start>buffer_output=''<line_sep>iter_end=<none><line_sep>cur_iter_timeout=<none><if_stmt>iter_timeout<block_start>iter_end=time.time()+iter_timeout<line_sep>cur_iter_timeout=iter_timeout<block_end><for_stmt>data _IterProcessStdout(process iter_timeout=cur_iter_timeout timeout=timeout)<block_start><if_stmt>iter_timeout# Check whether the current iteration has timed out. <block_start>cur_iter_timeout=iter_end-time.time()<if_stmt>data<is><none><or>cur_iter_timeout<l>0<block_start><yield><none><line_sep>iter_end=time.time()+iter_timeout<line_sep><continue><block_end><block_end><else_stmt><block_start><assert_stmt>data<is><not><none> ('Iteration received no data despite no iter_timeout being set. '<concat>'cmd: %s'%cmd)<block_end># Construct lines to yield from raw data. buffer_output<augadd>data<line_sep>has_incomplete_line=buffer_output[-1]<not><in>'\r\n'<line_sep>lines=buffer_output.splitlines()<line_sep>buffer_output=lines.pop()<if>has_incomplete_line<else>''<for_stmt>line lines<block_start><yield>line<if_stmt>iter_timeout<block_start>iter_end=time.time()+iter_timeout<block_end><block_end><block_end><if_stmt>buffer_output<block_start><yield>buffer_output<block_end><if_stmt>check_status<and>process.returncode<block_start><raise>subprocess.CalledProcessError(process.returncode cmd)<block_end><block_end>
<import_from_future_stmt> absolute_import division print_function<import_stmt>argparse<import_stmt>importlib<import_stmt>itertools<import_stmt>time<import_from_stmt>multiprocessing Pool<import_stmt>numpy<as>np<import_stmt>os<import_stmt>pdb<import_stmt>pickle<import_stmt>subprocess<import_stmt>sys<import_stmt>tensorflow<as>tf<import_stmt>tensorflow.contrib.slim<as>slim<import_stmt>threading<import_stmt>init_paths<import_from_stmt>models.sample_models *<line_sep>target_tasks="autoencoder colorization curvature denoise edge2d edge3d ego_motion fix_pose impainting_whole jigsaw keypoint2d keypoint3d non_fixated_pose point_match reshade rgb2depth rgb2mist rgb2sfnorm room_layout segment25d segment2d vanishing_point_well_defined segmentsemantic_rb class_selected class_1000"<line_sep>list_of_tasks=target_tasks.split(" ")<line_sep>ON_TEST_SET=<true><line_sep>IN_TRAIN_MODE=<false><line_sep>parser=argparse.ArgumentParser(description='Viz Single Task')<line_sep>parser.add_argument('--idx' dest='idx' help='Task to run' type=int)<line_sep>parser.add_argument('--hs' dest='hs' help='Hidden size to use' type=int)<line_sep>parser.add_argument('--n-parallel' dest='n_parallel' help='Number of models to run in parallel' type=int)<line_sep>parser.set_defaults(n_parallel=1)<line_sep>tf.logging.set_verbosity(tf.logging.ERROR)<line_sep>ipython_std_out=sys.stdout<line_sep># Disable <def_stmt>blockPrint <block_start>sys.stdout=open(os.devnull 'w')<block_end># Restore <def_stmt>enablePrint <block_start>sys.stdout=ipython_std_out<block_end># Force Print <def_stmt>forcePrint str<block_start>enablePrint()<line_sep>print(str)<line_sep>sys.stdout.flush()<line_sep>blockPrint()<block_end><def_stmt>remove_dups seq<block_start>seen=set()<line_sep>seen_add=seen.add<line_sep><return>[x<for>x seq<if><not>(x<in>seen<or>seen_add(x))]<block_end>pairs=list(itertools.product(list_of_tasks list_of_tasks))<line_sep>args=parser.parse_args()<line_sep>idx_to_run=args.idx<if_stmt>idx_to_run<eq>-1<block_start>pairs_to_run=pairs<block_end><else_stmt><block_start>pairs_to_run=pairs[idx_to_run:idx_to_run+1]<block_end><def_stmt>run_to_task task_to<block_start><import_stmt>general_utils<import_from_stmt>general_utils RuntimeDeterminedEnviromentVars<import_stmt>models.architectures<as>architectures<import_from_stmt>data.load_ops resize_rescale_image<import_stmt>utils<import_from_stmt>data.task_data_loading load_and_specify_preprocessors_for_representation_extraction<import_stmt>lib.data.load_ops<as>load_ops<line_sep>tf.logging.set_verbosity(tf.logging.ERROR)<line_sep>all_outputs={}<line_sep>pickle_dir='viz_output_single_task.pkl'<import_stmt>os<if_stmt>os.path.isfile(pickle_dir)<block_start><with_stmt>open(pickle_dir 'rb')<as>fp<block_start>all_outputs=pickle.load(fp)<block_end><block_end><for_stmt>task list_of_tasks<block_start><if_stmt>task<in>all_outputs<block_start>print("{} already exists....\n\n\n".format(task))<line_sep><continue><block_end>print("Doing {task}".format(task=task))<line_sep>general_utils=importlib.reload(general_utils)<line_sep>tf.reset_default_graph()<line_sep>training_runners={'sess':tf.InteractiveSession() 'coord':tf.train.Coordinator()}<line_sep># task = '{f}__{t}__{hs}'.format(f=task_from, t=task_to, hs=args.hs) CONFIG_DIR='/home/ubuntu/task-taxonomy-331b/experiments/final/{TASK}'.format(TASK=task)<line_sep>############## Load Configs ############## cfg=utils.load_config(CONFIG_DIR 
nopause=<true>)<line_sep>RuntimeDeterminedEnviromentVars.register_dict(cfg)<line_sep>split_file=cfg['test_filenames']<if>ON_TEST_SET<else>cfg['val_filenames']<line_sep>cfg['train_filenames']=split_file<line_sep>cfg['val_filenames']=split_file<line_sep>cfg['test_filenames']=split_file<line_sep>cfg['num_epochs']=1<line_sep>cfg['randomize']=<false><line_sep>root_dir=cfg['root_dir']<line_sep>cfg['num_read_threads']=1<line_sep>print(cfg['log_root'])<if_stmt>task<eq>'jigsaw'<block_start><continue><block_end>cfg['model_path']=os.path.join(cfg['log_root'] task 'model.permanent-ckpt')<line_sep>print(cfg['model_path'])<if_stmt>cfg['model_path']<is><none><block_start><continue><block_end>############## Set Up Inputs ############## # tf.logging.set_verbosity( tf.logging.INFO ) inputs=utils.setup_input(cfg is_training=ON_TEST_SET use_filename_queue=<false>)# is_training determines whether to use train/validaiton RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs cfg)<line_sep>RuntimeDeterminedEnviromentVars.populate_registered_variables()<line_sep>start_time=time.time()<line_sep># utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False ) ############## Set Up Model ############## model=utils.setup_model(inputs cfg is_training=IN_TRAIN_MODE)<line_sep>m=model['model']<line_sep>model['saver_op'].restore(training_runners['sess'] cfg['model_path'])<line_sep>############## Start dataloading workers ############## data_prefetch_init_fn=utils.get_data_prefetch_threads_init_fn(inputs cfg is_training=ON_TEST_SET use_filename_queue=<false>)<line_sep>prefetch_threads=threading.Thread(target=data_prefetch_init_fn args=(training_runners['sess'] training_runners['coord']))<line_sep>prefetch_threads.start()<line_sep>############## Run First Batch ############## <if_stmt><not>hasattr(m 'masks')<block_start>(input_batch target_batch data_idx predicted loss )=training_runners['sess'].run([m.input_images m.targets model['data_idxs'] m.decoder_output m.total_loss])<line_sep>mask_batch=1.<block_end><else_stmt><block_start>(input_batch target_batch mask_batch data_idx predicted loss )=training_runners['sess'].run([m.input_images m.targets m.masks model['data_idxs'] m.decoder_output m.total_loss])<block_end><if_stmt>task<eq>'segment2d'<or>task<eq>'segment25d'<block_start><import_from_stmt>sklearn.decomposition PCA<line_sep>x=np.zeros((32 256 256 3) dtype='float')<for_stmt>i range(predicted.shape[0])<block_start>embedding_flattened=np.squeeze(predicted[i]).reshape((-1 64))<line_sep>pca=PCA(n_components=3)<line_sep>pca.fit(embedding_flattened)<line_sep>lower_dim=pca.transform(embedding_flattened).reshape((256 256 -1))<line_sep>lower_dim=(lower_dim-lower_dim.min())/(lower_dim.max()-lower_dim.min())<line_sep>x[i]=lower_dim<block_end>predicted=x<block_end>############## Clean Up ############## training_runners['coord'].request_stop()<line_sep>training_runners['coord'].join()<line_sep># if os.path.isfile(pickle_dir): # with open(pickle_dir, 'rb') as fp: # all_outputs = pickle.load(fp) ############## Store to dict ############## to_store={'input':input_batch 'target':target_batch 'mask':mask_batch 'data_idx':data_idx 'output':predicted}<line_sep>all_outputs[task]=to_store<line_sep>print("Done: {}".format(task))<line_sep># os.system("sudo cp {d} /home/ubuntu/s3/model_log".format(d=pickle_dir)) ############## Reset graph and paths ############## 
tf.reset_default_graph()<line_sep>training_runners['sess'].close()<try_stmt><block_start><del_stmt>sys.modules['config']<block_end><except_stmt><block_start><pass><block_end>sys.path=remove_dups(sys.path)<line_sep>print("FINISHED: {}\n\n\n\n\n\n".format(task))<line_sep>pickle_dir='viz_output_single_task.pkl'<with_stmt>open(pickle_dir 'wb')<as>fp<block_start>pickle.dump(all_outputs fp)<block_end><try_stmt><block_start>subprocess.call("aws s3 cp {} s3://task-preprocessing-512-oregon/visualizations/".format(pickle_dir) shell=<true>)<block_end><except_stmt><block_start>subprocess.call("sudo cp {} /home/ubuntu/s3/visualizations/".format(pickle_dir) shell=<true>)<block_end><block_end><return><block_end><if_stmt>__name__<eq>'__main__'<block_start>run_to_task(<none>)<line_sep># with Pool(args.n_parallel) as p: # p.map(run_to_task, list_of_tasks) <block_end>
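# Added usage note (not from the original script): given the flags registered on
# the argument parser at the top of this file, one plausible invocation is
#   python viz_single_task.py --idx -1 --hs 256 --n-parallel 1
# The script file name and the numeric values are assumptions; only the flag
# names (--idx, --hs, --n-parallel) come from the parser definition above.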
# Copyright 2018 Google LLC # Copyright 2018-present Open Networking Foundation # SPDX-License-Identifier: Apache-2.0 """A portable build system for Stratum P4 switch stack. To use this, load() this file in a BUILD file, specifying the symbols needed. The public symbols are the macros: decorate(path) sc_cc_lib Declare a portable Library. sc_proto_lib Declare a portable .proto Library. sc_cc_bin Declare a portable Binary. sc_package Declare a portable tarball package. and the variables/lists: ALL_ARCHES All known arches. EMBEDDED_ARCHES All embedded arches. EMBEDDED_PPC Name of PowerPC arch - "ppc". EMBEDDED_X86 Name of "x86" arch. HOST_ARCH Name of default "host" arch. HOST_ARCHES All host arches. STRATUM_INTERNAL For declaring Stratum internal visibility. The macros are like cc_library(), proto_library(), and cc_binary(), but with different options and some restrictions. The key difference: you can supply lists of architectures for which they should be compiled - defaults to all if left unstated. Internally, libraries and binaries are generated for every listed architecture. The names are decorated to keep them different and allow all to be generated and addressed independently. This aspect of the system is suboptimal - something along the lines of augmenting context with a user defined configuration fragment would be a much cleaner solution. Currently supported architectures: ppc x86 """<line_sep>load("//tools/build_defs/label:def.bzl" "parse_label")<line_sep>load("//devtools/build_cleaner/skylark:build_defs.bzl" "register_extension_info" )<line_sep>load("@rules_proto//proto:defs.bzl" "proto_library")<line_sep>load("@rules_cc//cc:defs.bzl" "cc_binary" "cc_library" "cc_test")<line_sep># Generic path & label helpers. ============================================ <def_stmt>_normpath path<block_start>"""Normalize a path. Normalizes a path by removing unnecessary path-up segments and its corresponding directories. Providing own implementation because import os is not allowed in build defs. For example ../../dir/to/deeply/nested/path/../../../other/path will become ../../dir/to/other/path Args: path: A valid absolute or relative path to normalize. Returns: A path equivalent to the input path with minimal use of path-up segments. Invalid input paths will stay invalid. """<line_sep>sep="/"<line_sep>level=0<line_sep>result=[]<for_stmt>d path.split(sep)<block_start><if_stmt>d<in>("" ".")<block_start><if_stmt>result<block_start><continue><block_end><block_end><elif_stmt>d<eq>".."<block_start><if_stmt>level<g>0<block_start>result.pop()<line_sep>level<augadd>-1<line_sep><continue><block_end><block_end><else_stmt><block_start>level<augadd>1<block_end>result.append(d)<block_end><return>sep.join(result)<block_end># Adds a suffix to a label, expanding implicit targets if needed. <def_stmt>decorate label suffix<block_start><if_stmt>label.endswith(":")# .../bar: -> .../bar <block_start>label=label[:-1]<block_end><if_stmt>":"<in>label# .../bar:bat -> .../bar:bat_suffix <block_start><return>"%s_%s"%(label suffix)<block_end><elif_stmt>label.startswith("//")# //foo/bar -> //foo/bar:bar_suffix <block_start><return>"%s:%s_%s"%(label label.split("/")[-1] suffix)<block_end><else_stmt># bar -> bar_suffix <block_start><return>"%s_%s"%(label suffix)<block_end><block_end># Creates a relative filename from a label, replacing "//" and ":". 
<def_stmt>_make_filename label<block_start><if_stmt>label.startswith("//")# //foo/bar:bat/baz -> google3_foo/bar/bat/baz <block_start><return>label.replace("//" "google3/").replace(":" "/")<block_end><elif_stmt>label.startswith(":")# :bat/baz -> bat/baz <block_start><return>label[1:]<block_end><else_stmt># bat/baz -> bat/baz <block_start><return>label<block_end><block_end># Adds dquotes around a string. <def_stmt>dquote s<block_start><return>'"'+s+'"'<block_end># Adds squotes around a string. <def_stmt>squote s<block_start><return>"'"+s+"'"<block_end># Emulate Python 2.5+ str(startswith([prefix ...]) <def_stmt>starts_with s prefix_list<block_start><for_stmt>prefix prefix_list<block_start><if_stmt>s.startswith(prefix)<block_start><return>prefix<block_end><block_end><return><none><block_end><def_stmt>sc_platform_select host=<none> ppc=<none> x86=<none> default=<none><block_start>"""Public macro to alter blaze rules based on the platform architecture. Generates a blaze select(...) statement that can be used in most contexts to alter a blaze rule based on the target platform architecture. If no selection is provided for a given platform, {default} is used instead. A specific value or default must be provided for every target platform. Args: host: The value to use for host builds. ppc: The value to use for ppc builds. x86: The value to use for x86 builds. default: The value to use for any of {host,ppc,x86} that isn't specified. Returns: The requested selector. """<if_stmt>default<eq><none><and>(host<eq><none><or>ppc<eq><none><or>x86<eq><none>)<block_start>fail("Missing a select value for at least one platform in "+"sc_platform_select. Please add.")<block_end>config_label_prefix="//stratum:stratum_"<line_sep><return>select({"//conditions:default":(host<or>default) config_label_prefix+"ppc":(ppc<or>default) config_label_prefix+"x86":(x86<or>default) })<block_end># Generates an sc_platform_select based on a textual list of arches. <def_stmt>sc_platform_filter value default arches<block_start><return>sc_platform_select(host=value<if>"host"<in>arches<else>default ppc=value<if>"ppc"<in>arches<else>default x86=value<if>"x86"<in>arches<else>default )<block_end><def_stmt>sc_platform_alias name host=<none> ppc=<none> x86=<none> default=<none> visibility=<none><block_start>"""Public macro to create an alias that changes based on target arch. Generates a blaze alias that will select the appropriate target. If no selection is provided for a given platform and no default is set, a dummy default target is used instead. Args: name: The name of the alias target. host: The result of the alias for host builds. ppc: The result of the alias for ppc builds. x86: The result of the alias for x86 builds. default: The result of the alias for any of {host,ppc,x86} that isn't specified. visibility: The visibility of the alias target. """<line_sep>native.alias(name=name actual=sc_platform_select(default=default<or>"//stratum/portage:dummy" host=host ppc=ppc x86=x86 ) visibility=visibility )<block_end># Embedded build definitions. ============================================== EMBEDDED_PPC="ppc"<line_sep>EMBEDDED_X86="x86"<line_sep>EMBEDDED_ARCHES=[EMBEDDED_PPC EMBEDDED_X86 ]<line_sep>HOST_ARCH="host"<line_sep>HOST_ARCHES=[HOST_ARCH]<line_sep>ALL_ARCHES=EMBEDDED_ARCHES+HOST_ARCHES<line_sep># Identify Stratum platform arch for .pb.h shims and other portability hacks. 
_ARCH_DEFINES=sc_platform_select(default=["STRATUM_ARCH_HOST"] ppc=["STRATUM_ARCH_PPC"] x86=["STRATUM_ARCH_X86"] )<line_sep>STRATUM_INTERNAL=["//stratum:__subpackages__" ]<line_sep># # Build options for all embedded architectures # # Set _TRACE_SRCS to show sources in embedded sc_cc_lib compile steps. # This is more general than it may seem: genrule doesn't have hdrs or deps # attributes, so all embedded dependencies appear as a `src'. # TODO(unknown): if useful again then inject from cmdline else kill feature. _TRACE_SRCS=<false><line_sep># Used for all gcc invocations. _EMBEDDED_FLAGS=["-O0" # Don't use this for program-sizing build #-- "-Os", # Use this for program-sizing build "-g" # Don't use this for program-sizing build "-Wall" "-Werror" # Warn lots, and force fixing warnings. "-no-canonical-prefixes" # Don't mangle paths and confuse blaze. "-fno-builtin-malloc" # We'll use tcmalloc "-fno-builtin-calloc" "-fno-builtin-realloc" "-fno-builtin-free" "-D__STDC_FORMAT_MACROS=1" # TODO(unknown): Figure out how we can use $(CC_FLAGS) instead of this. "-D__GOOGLE_STL_LEGACY_COMPATIBILITY" ]<line_sep># Used for C and C++ compiler invocations. _EMBEDDED_CFLAGS=["-I$(GENDIR)" ]<line_sep># Used for C++ compiler invocations. _EMBEDDED_CXXFLAGS=["-std=gnu++11" # Allow C++11 features _and_ GNU extensions. ]<line_sep># Used for linking binaries. _EMBEDDED_LDFLAGS=[# "-static", # Use this for program-sizing build # "-Wl,--gc-sections,--no-wchar-size-warning", # Use this for program-sizing build ]<line_sep># PPC ====================================================================== _PPC_GRTE="//unsupported_toolchains/crosstoolng_powerpc32_8540/sysroot"<line_sep># X86 ====================================================================== _X86_GRTE="//grte/v4_x86/release/usr/grte/v4"<line_sep># Portability definitions =================================================== <def_stmt>sc_cc_test name size=<none> srcs=<none> deps=<none> data=<none> defines=<none> copts=<none> linkopts=<none> visibility=<none><block_start>"""Creates a cc_test rule that interacts safely with Stratum builds. Generates a cc_test rule that doesn't break the build when an embedded arch is selected. During embedded builds this target will generate a dummy binary and will not attempt to build any dependencies. Args: name: Analogous to cc_test name argument. size: Analogous to cc_test size argument. srcs: Analogous to cc_test srcs argument. deps: Analogous to cc_test deps argument. data: Analogous to cc_test data argument. defines: Analogous to cc_test defines argument. copts: Analogous to cc_test copts argument. linkopts: Analogous to cc_test linkopts argument. visibility: Analogous to cc_test visibility argument. """<line_sep>cc_test(name=name size=size<or>"small" srcs=sc_platform_select(host=srcs<or>[] default=[]) deps=sc_platform_select(host=deps<or>[] default=["//stratum/portage:dummy_with_main"] ) data=data<or>[] defines=defines copts=copts linkopts=linkopts visibility=visibility )<block_end>register_extension_info(extension_name="sc_cc_test" label_regex_for_dep="{extension_name}" )<def_stmt>sc_cc_lib name deps=<none> srcs=<none> hdrs=<none> arches=<none> copts=<none> defines=<none> includes=<none> include_prefix=<none> strip_include_prefix=<none> data=<none> testonly=<none> textual_hdrs=<none> visibility=<none> xdeps=<none><block_start>"""Creates rules for the given portable library and arches. Args: name: Analogous to cc_library name argument. deps: Analogous to cc_library deps argument. 
srcs: Analogous to cc_library srcs argument. hdrs: Analogous to cc_library hdrs argument. arches: List of architectures to generate this way. copts: Analogous to cc_library copts argument. defines: Symbols added as "-D" compilation options. includes: Paths to add as "-I" compilation options. include_prefix: Analogous to cc_library include_prefix argument. strip_include_prefix: Analogous to cc_library strip_include_prefix argument. data: Files to provide as data at runtime (host builds only). testonly: Standard blaze testonly parameter. textual_hdrs: Analogous to cc_library. visibility: Standard blaze visibility parameter. xdeps: External (file) dependencies of this library - no decorations assumed, used and exported as header, not for flags, libs, etc. """<line_sep>alwayslink=0<line_sep>deps=depset(deps<or>[])<line_sep>srcs=depset(srcs<or>[])<line_sep>hdrs=depset(hdrs<or>[])<line_sep>xdeps=depset(xdeps<or>[])<line_sep>copts=depset(copts<or>[])<line_sep>includes=depset(includes<or>[])<line_sep>data=depset(data<or>[])<line_sep>textual_hdrs=depset(textual_hdrs<or>[])<if_stmt>srcs<block_start><if_stmt>[s<for>s srcs.to_list()<if><not>s.endswith(".h")]<block_start>alwayslink=1<block_end><block_end><if_stmt><not>arches<block_start>arches=ALL_ARCHES<block_end>defs_plus=(defines<or>[])+_ARCH_DEFINES<line_sep>textual_plus=textual_hdrs|depset(deps.to_list())<line_sep>cc_library(name=name deps=sc_platform_filter(deps [] arches) srcs=sc_platform_filter(srcs [] arches) hdrs=sc_platform_filter(hdrs [] arches) alwayslink=alwayslink copts=sc_platform_filter(copts [] arches) defines=defs_plus includes=sc_platform_filter(includes [] arches) include_prefix=include_prefix strip_include_prefix=strip_include_prefix testonly=testonly textual_hdrs=sc_platform_filter(textual_plus|xdeps [] arches ) data=sc_platform_filter(data [] arches) visibility=visibility )<block_end>register_extension_info(extension_name="sc_cc_lib" label_regex_for_dep="{extension_name}" )<def_stmt>sc_cc_bin name deps=<none> srcs=<none> arches=<none> copts=<none> defines=<none> includes=<none> testonly=<none> visibility=<none><block_start>"""Creates rules for the given portable binary and arches. Args: name: Analogous to cc_binary name argument. deps: Analogous to cc_binary deps argument. srcs: Analogous to cc_binary srcs argument. arches: List of architectures to generate this way. copts: Analogous to cc_binary copts argument. defines: Symbols added as "-D" compilation options. includes: Paths to add as "-I" compilation options. testonly: Standard blaze testonly parameter. visibility: Standard blaze visibility parameter. 
"""<line_sep>deps=depset(deps<or>[])<line_sep>srcs=depset(srcs<or>[])<if_stmt><not>arches<block_start>arches=ALL_ARCHES<block_end>defs_plus=(defines<or>[])+_ARCH_DEFINES<line_sep>cc_binary(name=name deps=sc_platform_filter(deps ["//stratum/portage:dummy_with_main"] arches ) srcs=sc_platform_filter(srcs [] arches) copts=copts defines=defs_plus includes=includes linkopts=["-ldl" "-lutil"] testonly=testonly visibility=visibility )<block_end>register_extension_info(extension_name="sc_cc_bin" label_regex_for_dep="{extension_name}" )<line_sep># Protobuf ================================================================= _SC_GRPC_DEPS=["//sandblaze/prebuilt/grpc" "//sandblaze/prebuilt/grpc:grpc++_codegen_base" "//sandblaze/prebuilt/grpc:grpc++_codegen_proto_lib" ]<line_sep>_PROTOC="@com_google_protobuf//:protobuf:protoc"<line_sep>_PROTOBUF="@com_google_protobuf//:protobuf"<line_sep>_SC_GRPC_PLUGIN="//sandblaze/prebuilt/protobuf:grpc_cpp_plugin"<line_sep>_GRPC_PLUGIN="//grpc:grpc_cpp_plugin"<def_stmt>_loc target<block_start>"""Return target location for constructing commands. Args: target: Blaze target name available to this build. Returns: $(location target) """<line_sep><return>"$(location %s)"%target<block_end><def_stmt>_gen_proto_lib name srcs hdrs deps arch visibility testonly proto_include grpc_shim_rule<block_start>"""Creates rules and filegroups for embedded protobuf library. For every given ${src}.proto, generate: :${src}_${arch}.pb rule to run protoc ${src}.proto => ${src}.${arch}.pb.{h,cc} :${src}_${arch}.grpc.pb rule to run protoc w/ erpc plugin: ${src}.proto => ${src}.${arch}.grpc.pb.{h,cc} :${src}_${arch}_proto_rollup collects include options for protoc: ${src}_${arch}_proto_rollup.flags Feed each set into sc_cc_lib to wrap them them up into a usable library; note that ${src}_${arch}_erpc_proto depends on ${src}_${arch}_proto. Args: name: Base name for this library. srcs: List of proto files hdrs: More files to build into this library, but also exported for dependent rules to utilize. deps: List of deps for this library arch: Which architecture to build this library for. visibility: Standard blaze visibility parameter, passed through to subsequent rules. testonly: Standard blaze testonly parameter. proto_include: Include path for generated sc_cc_libs. grpc_shim_rule: If needed, the name of the grpc shim for this proto lib. """<line_sep>bash_vars=["g3=$${PWD}"]<line_sep># TODO(unknown): Switch protobuf to using the proto_include mechanism protoc_label=_PROTOC<line_sep>protobuf_label=_PROTOBUF<line_sep>protobuf_hdrs="%s:well_known_types_srcs"%protobuf_label<line_sep>protobuf_srcs=[protobuf_hdrs]<line_sep>protobuf_include="$${g3}/protobuf/src"<if_stmt>arch<in>EMBEDDED_ARCHES<block_start>grpc_plugin=_SC_GRPC_PLUGIN<block_end><else_stmt><block_start>grpc_plugin=_GRPC_PLUGIN<block_end>protoc_deps=[]<for_stmt>dep deps<block_start><if_stmt>dep.endswith("_proto")<block_start>protoc_deps.append("%s_%s_headers"%(dep arch))<block_end><block_end>name_arch=decorate(name arch)<line_sep># We use this filegroup to accumulate the set of .proto files needed to # compile this proto. 
native.filegroup(name=decorate(name_arch "headers") srcs=hdrs+protoc_deps visibility=visibility )<line_sep>my_proto_rollup=decorate(name_arch "proto_rollup.flags")<line_sep>protoc_srcs_set=(srcs+hdrs+protoc_deps+protobuf_srcs+[my_proto_rollup])<line_sep>gen_srcs=[]<line_sep>gen_hdrs=[]<line_sep>grpc_gen_hdrs=[]<line_sep>grpc_gen_srcs=[]<line_sep>tools=[protoc_label]<line_sep>grpc_tools=[protoc_label grpc_plugin]<line_sep>protoc="$${g3}/%s"%_loc(protoc_label)<line_sep>grpc_plugin="$${g3}/%s"%_loc(grpc_plugin)<line_sep>cpp_out="$${g3}/$(GENDIR)/%s/%s"%(native.package_name() arch)<line_sep>accum_flags=[]<line_sep>full_proto_include=<none><if_stmt>proto_include<eq>"."<block_start>full_proto_include=native.package_name()<block_end><elif_stmt>proto_include<block_start>full_proto_include="%s/%s"%(native.package_name() proto_include)<block_end><if_stmt>full_proto_include<block_start>temp_prefix="%s/%s"%(cpp_out native.package_name()[len(full_proto_include):])<line_sep># We do a bit of extra work with these include flags to avoid generating # warnings. accum_flags.append("$$(if [[ -e $(GENDIR)/%s ]]; then echo -IG3LOC/$(GENDIR)/%s; fi)"%(full_proto_include full_proto_include) )<line_sep>accum_flags.append("$$(if [[ -e %s ]]; then echo -IG3LOC/%s; fi)"%(full_proto_include full_proto_include) )<block_end><else_stmt><block_start>temp_prefix="%s/%s"%(cpp_out native.package_name())<block_end>proto_rollups=[decorate(decorate(dep arch) "proto_rollup.flags")<for>dep deps<if>dep.endswith("_proto")]<line_sep>proto_rollup_cmds=["printf '%%s\n' %s"%flag<for>flag accum_flags]<line_sep>proto_rollup_cmds.append("cat $(SRCS)")<line_sep>proto_rollup_cmd="{ %s; } | sort -u -o $(@)"%"; ".join(proto_rollup_cmds)<line_sep>native.genrule(name=decorate(name_arch "proto_rollup") srcs=proto_rollups outs=[my_proto_rollup] cmd=proto_rollup_cmd visibility=visibility testonly=testonly )<for_stmt>src srcs+hdrs<block_start><if_stmt>src.endswith(".proto")<block_start>src_stem=src[0:-6]<line_sep>src_arch="%s_%s"%(src_stem arch)<line_sep>temp_stem="%s/%s"%(temp_prefix src_stem)<line_sep>gen_stem="%s.%s"%(src_stem arch)<line_sep># We can't use $${PWD} until this step, because our rollup command # might be generated on another forge server. proto_path_cmds=["rollup=$$(sed \"s,G3LOC,$${PWD},g\" %s)"%_loc(my_proto_rollup)]<line_sep>proto_rollup_flags=["$${rollup}"]<if_stmt>proto_include# We'll be cd-ing to another directory before protoc, so # adjust our .proto path accordingly. <block_start>proto_src_loc="%s/%s"%(native.package_name() src)<if_stmt>proto_src_loc.startswith(full_proto_include+"/")<block_start>proto_src_loc=proto_src_loc[len(full_proto_include)+1:]<block_end><else_stmt><block_start>print("Invalid proto include '%s' doesn't match src %s"%(full_proto_include proto_src_loc))<block_end># By cd-ing to another directory, we force protoc to produce # different symbols. Careful, our proto might be in GENDIR! 
proto_path_cmds.append("; ".join(["if [[ -e %s ]]"%("%s/%s"%(full_proto_include proto_src_loc)) "then cd %s"%full_proto_include "else cd $(GENDIR)/%s"%full_proto_include "fi" ]))<line_sep>gendir_include=["-I$${g3}/$(GENDIR)" "-I$${g3}" "-I."]<block_end><else_stmt><block_start>proto_src_loc="%s/%s"%(native.package_name() src)<line_sep>proto_path_cmds.append("[[ -e %s ]] || cd $(GENDIR)"%proto_src_loc)<line_sep>gendir_include=["-I$(GENDIR)" "-I."]<block_end># Generate messages gen_pb_h=gen_stem+".pb.h"<line_sep>gen_pb_cc=gen_stem+".pb.cc"<line_sep>gen_hdrs.append(gen_pb_h)<line_sep>gen_srcs.append(gen_pb_cc)<line_sep>cmds=bash_vars+["mkdir -p %s"%temp_prefix ]+proto_path_cmds+[" ".join([protoc]+gendir_include+proto_rollup_flags+["-I%s"%protobuf_include "--cpp_out=%s"%cpp_out proto_src_loc ]) "cd $${g3}" "cp %s.pb.h %s"%(temp_stem _loc(gen_pb_h)) "cp %s.pb.cc %s"%(temp_stem _loc(gen_pb_cc)) ]<line_sep>pb_outs=[gen_pb_h gen_pb_cc]<line_sep>native.genrule(name=src_arch+".pb" srcs=protoc_srcs_set outs=pb_outs tools=tools cmd=" && ".join(cmds) heuristic_label_expansion=0 visibility=visibility )<line_sep># Generate GRPC <if_stmt>grpc_shim_rule<block_start>gen_grpc_pb_h=gen_stem+".grpc.pb.h"<line_sep>gen_grpc_pb_cc=gen_stem+".grpc.pb.cc"<line_sep>grpc_gen_hdrs.append(gen_grpc_pb_h)<line_sep>grpc_gen_srcs.append(gen_grpc_pb_cc)<line_sep>cmds=bash_vars+["mkdir -p %s"%temp_prefix ]+proto_path_cmds+[" ".join([protoc "--plugin=protoc-gen-grpc-cpp=%s"%grpc_plugin ]+gendir_include+proto_rollup_flags+["-I%s"%protobuf_include "--grpc-cpp_out=%s"%cpp_out proto_src_loc ]) "cd $${g3}" "cp %s.grpc.pb.h %s"%(temp_stem _loc(gen_grpc_pb_h)) "cp %s.grpc.pb.cc %s"%(temp_stem _loc(gen_grpc_pb_cc)) ]<line_sep>grpc_pb_outs=[gen_grpc_pb_h gen_grpc_pb_cc]<line_sep>native.genrule(name=src_arch+".grpc.pb" srcs=protoc_srcs_set outs=grpc_pb_outs tools=grpc_tools cmd=" && ".join(cmds) heuristic_label_expansion=0 visibility=visibility )<block_end><block_end><block_end>dep_set=depset(deps)|[protobuf_label]<line_sep>includes=[]<if_stmt>proto_include<block_start>includes=[proto_include]<block_end># Note: Public sc_proto_lib invokes this once per (listed) arch; # which then calls sc_cc_lib with same name for each arch; # multiple such calls are OK as long as the arches are disjoint. sc_cc_lib(name=decorate(name arch) deps=dep_set srcs=gen_srcs hdrs=hdrs+gen_hdrs arches=[arch] copts=[] includes=includes testonly=testonly textual_hdrs=gen_hdrs visibility=visibility )<if_stmt>grpc_shim_rule<block_start>grpc_name=name[:-6]+"_grpc_proto"<line_sep>grpc_dep_set=dep_set|[name]|_SC_GRPC_DEPS<line_sep>grpc_gen_hdrs_plus=grpc_gen_hdrs+gen_hdrs<line_sep>sc_cc_lib(name=decorate(grpc_name arch) deps=grpc_dep_set srcs=grpc_gen_srcs hdrs=hdrs+grpc_gen_hdrs_plus+[grpc_shim_rule] arches=[arch] copts=[] includes=includes testonly=testonly textual_hdrs=grpc_gen_hdrs_plus visibility=visibility )<block_end><block_end><def_stmt>_gen_proto_shims name pb_modifier srcs arches visibility<block_start>"""Macro to build .pb.h multi-arch master switch for sc_proto_lib. For each src path.proto, generates path.pb.h consisting of: #ifdef logic to select path.${arch}.pb.h Also generates an alias that will select the appropriate proto target based on the currently selected platform architecture. Args: name: Base name for this library. pb_modifier: protoc plugin-dependent file extension (e.g.: .pb) srcs: List of proto files. arches: List of arches this shim should support. visibility: The blaze visibility of the generated alias. 
Returns: Name of shim rule for use in follow-on hdrs and/or src lists. """<line_sep>outs=[]<line_sep>cmds=[]<line_sep>hdr_ext=pb_modifier+".h"<for_stmt>src srcs<block_start>pkg,filename=parse_label(src)<if_stmt><not>filename.endswith(".proto")<block_start><continue><block_end>hdr_stem=filename[0:-6]<line_sep>new_hdr_name=hdr_stem+hdr_ext<line_sep>outs.append(new_hdr_name)<line_sep># Generate lines for shim switch file. # Lines expand inside squotes, so quote accordingly. include_fmt="#include "+dquote(pkg+"/"+hdr_stem+".%s"+hdr_ext)<line_sep>lines=["#if defined(STRATUM_ARCH_%s)"%"PPC" include_fmt%"ppc" "#elif defined(STRATUM_ARCH_%s)"%"X86" include_fmt%"x86" "#elif defined(STRATUM_ARCH_%s)"%"HOST" include_fmt%"host" "#else" "#error Unknown STRATUM_ARCH" "#endif" ]<line_sep>gen_cmds=[("printf '%%s\\n' '%s'"%line)<for>line lines]<line_sep>new_hdr_loc="$(location %s)"%new_hdr_name<line_sep>cmds.append("{ %s; } > %s"%(" && ".join(gen_cmds) new_hdr_loc))<block_end>shim_rule=decorate(name "shims")<line_sep>native.genrule(name=shim_rule srcs=srcs outs=outs cmd=" && ".join(cmds)<or>"true" )<line_sep>sc_platform_alias(name=name host=decorate(name "host")<if>"host"<in>arches<else><none> ppc=decorate(name "ppc")<if>"ppc"<in>arches<else><none> x86=decorate(name "x86")<if>"x86"<in>arches<else><none> visibility=visibility )<line_sep><return>shim_rule<block_end><def_stmt>_gen_py_proto_lib name srcs deps visibility testonly<block_start>"""Creates a py_proto_library from the given srcs. There's no clean way to make python protos work with sc_proto_lib's proto_include field, so we keep this simple. For library "name", generates: * ${name}_default_pb, a regular proto library. * ${name}_py, a py_proto_library based on ${name}_default_pb. Args: name: Standard blaze name argument. srcs: Standard blaze srcs argument. deps: Standard blaze deps argument. visibility: Standard blaze visibility argument. testonly: Standard blaze testonly argument. """<line_sep>regular_proto_name=decorate(name "default_pb")<line_sep>py_name=decorate(name "py")<line_sep>proto_library(name=regular_proto_name srcs=srcs deps=[decorate(dep "default_pb")<for>dep deps] visibility=visibility testonly=testonly )<line_sep>native.py_proto_library(name=py_name api_version=2 deps=[regular_proto_name] visibility=visibility testonly=testonly )<block_end># TODO(unknown): Add support for depending on normal proto_library rules. <def_stmt>sc_proto_lib name=<none> srcs=[] hdrs=[] deps=[] arches=[] visibility=<none> testonly=<none> proto_include=<none> python_support=<false> services=[]<block_start>"""Public macro to build multi-arch library from Message protobuf(s). For library "name", generates: * ${name}_shim aka .pb.h master switch - see _gen_proto_shims, above. * ${name}_${arch}_pb protobuf compile rules - one for each arch. * sc_cc_lib(name) with those as input. * ${name}_py a py_proto_library version of this library. Only generated if python_support == True. Args: name: Base name for this library. srcs: List of .proto files - private to this library. hdrs: As above, but also exported for dependent rules to utilize. deps: List of deps for this library arches: Which architectures to build this library for, None => ALL. visibility: Standard blaze visibility parameter, passed through to subsequent rules. testonly: Standard blaze testonly parameter. proto_include: Path to add to include path. This will affect the symbols generated by protoc, as well as the include paths used for both sc_cc_lib and sc_proto_lib rules that depend on this rule. 
Typically "." python_support: Defaults to False. If True, generate a python proto library from this rule. Any sc_proto_lib with python support may only depend on sc_proto_libs that also have python support, and may not use the proto_include field in this rule. services: List of services to enable {"grpc", "rpc"}; Only "grpc" is supported. So "rpc" and "grpc" are equivalent. """<if_stmt><not>arches<block_start><if_stmt>testonly<block_start>arches=HOST_ARCHES<block_end><else_stmt><block_start>arches=ALL_ARCHES<block_end><block_end>service_enable={"grpc":0 }<for_stmt>service services<or>[]<block_start><if_stmt>service<eq>"grpc"<block_start>service_enable["grpc"]=1<block_end><elif_stmt>service<eq>"rpc"<block_start>service_enable["grpc"]=1<block_end><else_stmt><block_start>fail("service='%s' not in (grpc, rpc)"%service)<block_end><block_end>deps=depset(deps<or>[])<line_sep>shim_rule=_gen_proto_shims(name=name pb_modifier=".pb" srcs=srcs+hdrs arches=arches visibility=visibility )<line_sep>grpc_shim_rule=<none><if_stmt>(service_enable["grpc"])<block_start>grpc_shim_rule=_gen_proto_shims(name=decorate(name[:-6] "grpc_proto") pb_modifier=".grpc.pb" srcs=srcs+hdrs arches=arches visibility=visibility )<block_end><for_stmt>arch arches<block_start>_gen_proto_lib(name=name srcs=srcs hdrs=[shim_rule]+hdrs deps=deps arch=arch visibility=visibility testonly=testonly proto_include=proto_include grpc_shim_rule=grpc_shim_rule )<block_end><if_stmt>python_support<block_start><if_stmt>proto_include<block_start>fail("Cannot use proto_include on an sc_proto_lib with python support.")<block_end>_gen_py_proto_lib(name=name srcs=depset(srcs+hdrs) deps=deps visibility=visibility testonly=testonly )<block_end><block_end>register_extension_info(extension_name="sc_proto_lib" label_regex_for_dep="{extension_name}" )<def_stmt>sc_package name=<none> bins=<none> data=<none> deps=<none> arches=<none> visibility=<none><block_start>"""Public macro to package binaries and data for deployment. For package "name", generates: * ${name}_${arch}_bin and ${name}_${arch}_data filesets containing respectively all of the binaries and all of the data needed for this package and all dependency packages. * ${name}_${arch} fileset containing the corresponding bin and data filesets, mapped to bin/ and share/ respectively. * ${name}_${arch}_tarball rule builds that .tar.gz package. Args: name: Base name for this package. bins: List of sc_cc_bin rules to be packaged. data: List of files (and file producing rules) to be packaged. deps: List of other sc_packages to add to this package. arches: Which architectures to build this library for, None => EMBEDDED_ARCHES (HOST_ARCHES not generally supported). visibility: Standard blaze visibility parameter, passed through to all filesets. """<line_sep>bins=depset(bins<or>[])<line_sep>data=depset(data<or>[])<line_sep>deps=depset(deps<or>[])<if_stmt><not>arches<block_start>arches=EMBEDDED_ARCHES<block_end>fileset_name=decorate(name "fs")<for_stmt>extension,inputs [("bin" ["%s.stripped"%b<for>b bins.to_list()]) ("data" data) ]<block_start>native.Fileset(name=decorate(fileset_name extension) out=decorate(name extension) entries=[native.FilesetEntry(files=inputs ) ]+[native.FilesetEntry(srcdir=decorate(dep extension))<for>dep deps.to_list()] visibility=visibility )<block_end># Add any platform specific files to the final tarball. platform_entries=sc_platform_select(# We use a different ppc toolchain for Stratum. # This means that we must provide portable shared libs for our ppc # executables. 
ppc=[native.FilesetEntry(srcdir="%s:BUILD"%_PPC_GRTE files=[":libs"] destdir="lib/stratum" symlinks="dereference" )] default=[] )<line_sep>native.Fileset(name=fileset_name out=name entries=[native.FilesetEntry(srcdir=decorate(name "bin") destdir="bin" ) native.FilesetEntry(srcdir=decorate(name "data") destdir="share" ) ]+platform_entries visibility=visibility )<line_sep>outs=["%s.tar.gz"%name]<line_sep># Copy our files into a temporary directory and make any necessary changes # before tarballing. cmds=["TEMP_DIR=$(@D)/stratum_packaging_temp" "mkdir $${TEMP_DIR}" "cp -r %s $${TEMP_DIR}/tarball"%_loc(fileset_name) "if [[ -e $${TEMP_DIR}/tarball/bin ]]" "then for f in $${TEMP_DIR}/tarball/bin/*.stripped" " do mv $${f} $${f%.stripped}" # rename not available. "done" "fi" "tar czf %s -h -C $${TEMP_DIR}/tarball ."%_loc(name+".tar.gz") "rm -rf $${TEMP_DIR}" ]<line_sep>native.genrule(name=decorate(name "tarball") srcs=[":%s"%fileset_name] outs=outs cmd="; ".join(cmds) visibility=visibility )<block_end>
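# Added illustrative sketch (not part of the original .bzl file): a hedged
# example of how the public macros above might be invoked from a BUILD file.
# Every target and file name below ("hal", "hal.proto", ...), as well as the
# load() path, is invented for illustration; only the macro names and their
# attributes come from the definitions above. It is kept in comments because
# these macros are meant to be called from a BUILD file, not from this .bzl.
#
# load("//stratum/portage:build_defs.bzl",
#      "sc_cc_lib", "sc_cc_bin", "sc_proto_lib", "sc_package", "EMBEDDED_ARCHES")
#
# sc_proto_lib(
#     name = "hal_proto",
#     hdrs = ["hal.proto"],
#     arches = EMBEDDED_ARCHES,
#     services = ["grpc"],   # also generates per-arch hal_grpc_proto libraries
# )
#
# sc_cc_lib(
#     name = "hal",
#     srcs = ["hal.cc"],
#     hdrs = ["hal.h"],
#     deps = [":hal_proto"],
# )
#
# sc_cc_bin(
#     name = "hal_main",
#     srcs = ["hal_main.cc"],
#     deps = [":hal"],
# )
#
# sc_package(
#     name = "hal_pkg",
#     bins = [":hal_main"],
# )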
<import_stmt>sys<import_from_stmt>PyQt5 QtCore QtGui QtWidgets<class_stmt>Demo(QtWidgets.QWidget)<block_start><def_stmt>__init__ self<block_start>super(Demo self).__init__()<line_sep>self.button=QtWidgets.QPushButton()<line_sep>self.label=QtWidgets.QLabel(alignment=QtCore.Qt.AlignCenter)<line_sep>self.combo=QtWidgets.QComboBox(self)<line_sep>self.combo.currentIndexChanged.connect(self.change_func)<line_sep>self.trans=QtCore.QTranslator(self)<line_sep>self.v_layout=QtWidgets.QVBoxLayout(self)<line_sep>self.v_layout.addWidget(self.combo)<line_sep>self.v_layout.addWidget(self.button)<line_sep>self.v_layout.addWidget(self.label)<line_sep>options=([('English' '') ('français' 'eng-fr') ('中文' 'eng-chs') ])<for_stmt>i,(text lang) enumerate(options)<block_start>self.combo.addItem(text)<line_sep>self.combo.setItemData(i lang)<block_end>self.retranslateUi()<block_end>@QtCore.pyqtSlot(int)<def_stmt>change_func self index<block_start>data=self.combo.itemData(index)<if_stmt>data<block_start>self.trans.load(data)<line_sep>QtWidgets.QApplication.instance().installTranslator(self.trans)<block_end><else_stmt><block_start>QtWidgets.QApplication.instance().removeTranslator(self.trans)<block_end><block_end><def_stmt>changeEvent self event<block_start><if_stmt>event.type()<eq>QtCore.QEvent.LanguageChange<block_start>self.retranslateUi()<block_end>super(Demo self).changeEvent(event)<block_end><def_stmt>retranslateUi self<block_start>self.button.setText(QtWidgets.QApplication.translate('Demo' 'Start'))<line_sep>self.label.setText(QtWidgets.QApplication.translate('Demo' 'Hello, World'))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app=QtWidgets.QApplication(sys.argv)<line_sep>demo=Demo()<line_sep>demo.show()<line_sep>sys.exit(app.exec_())<block_end>
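# Added note (not part of the original demo): QTranslator.load('eng-fr') /
# load('eng-chs') above looks for compiled catalogs such as eng-fr.qm (by
# default relative to the working directory). Assuming the standard PyQt5 /
# Qt Linguist tooling is installed, one conventional way to produce them is:
#   pylupdate5 demo.py -ts eng-fr.ts    # extract translatable strings
#   (translate eng-fr.ts in Qt Linguist)
#   lrelease eng-fr.ts                  # compile to eng-fr.qm
# The file name demo.py is an assumption; the catalog base names mirror the
# itemData values set on the combo box above.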
#Author <NAME> print("Hello World")<line_sep>hello_list=["Hello World"]<line_sep>print(hello_list[0])<for_stmt>i hello_list<block_start>print(i)<block_end>
<import_stmt>numpy<as>np<import_stmt>copy<import_stmt>combo.misc<import_stmt>cPickle<as>pickle<import_from_stmt>results history<import_from_stmt>.. utility<import_from_stmt>...variable variable<import_from_stmt>..call_simulator call_simulator<import_from_stmt>... predictor<import_from_stmt>...gp predictor<as>gp_predictor<import_from_stmt>...blm predictor<as>blm_predictor<import_stmt>combo.search.score<line_sep>MAX_SEACH=int(20000)<class_stmt>policy<block_start><def_stmt>__init__ self test_X config=<none><block_start>self.predictor=<none><line_sep>self.training=variable()<line_sep>self.test=self._set_test(test_X)<line_sep>self.actions=np.arange(0 self.test.X.shape[0])<line_sep>self.history=history()<line_sep>self.config=self._set_config(config)<block_end><def_stmt>set_seed self seed<block_start>self.seed=seed<line_sep>np.random.seed(self.seed)<block_end><def_stmt>delete_actions self index actions=<none><block_start>actions=self._set_unchosed_actions(actions)<line_sep><return>np.delete(actions index)<block_end><def_stmt>write self action t X=<none><block_start><if_stmt>X<is><none><block_start>X=self.test.X[action :]<line_sep>Z=self.test.Z[action :]<if>self.test.Z<is><not><none><else><none><block_end><else_stmt><block_start>Z=self.predictor.get_basis(X)<if>self.predictor<is><not><none><else><none><block_end>self.new_data=variable(X t Z)<line_sep>self.history.write(t action)<line_sep>self.training.add(X=X t=t Z=Z)<block_end><def_stmt>random_search self max_num_probes num_search_each_probe=1 simulator=<none> is_disp=<true><block_start>N=int(num_search_each_probe)<if_stmt>int(max_num_probes)<times>N<g>len(self.actions)<block_start><raise>ValueError('max_num_probes * num_search_each_probe must \ be smaller than the length of candidates')<block_end><if_stmt>is_disp<block_start>utility.show_interactive_mode(simulator self.history)<block_end><for_stmt>n xrange(0 max_num_probes)<block_start><if_stmt>is_disp<and>N<g>1<block_start>utility.show_start_message_multi_search(self.history.num_runs)<block_end>action=self.get_random_action(N)<if_stmt>simulator<is><none><block_start><return>action<block_end>t,X=call_simulator(simulator action)<line_sep>self.write(action t X)<if_stmt>is_disp<block_start>utility.show_search_results(self.history N)<block_end><block_end><return>copy.deepcopy(self.history)<block_end><def_stmt>bayes_search self training=<none> max_num_probes=<none> num_search_each_probe=1 predictor=<none> is_disp=<true> simulator=<none> score='TS' interval=0 num_rand_basis=0<block_start><if_stmt>max_num_probes<is><none><block_start>max_num_probes=1<line_sep>simulator=<none><block_end>is_rand_expans=<false><if>num_rand_basis<eq>0<else><true><line_sep>self.training=self._set_training(training)<if_stmt>predictor<is><none><block_start>self.predictor=self._init_predictor(is_rand_expans)<block_end><else_stmt><block_start>self.predictor=predictor<block_end>N=int(num_search_each_probe)<for_stmt>n xrange(max_num_probes)<block_start><if_stmt>utility.is_learning(n interval)<block_start>self.predictor.fit(self.training num_rand_basis)<line_sep>self.test.Z=self.predictor.get_basis(self.test.X)<line_sep>self.training.Z=self.predictor.get_basis(self.training.X)<line_sep>self.predictor.prepare(self.training)<block_end><else_stmt><block_start><try_stmt><block_start>self.predictor.update(self.training self.new_data)<block_end><except_stmt><block_start>self.predictor.prepare(self.training)<block_end><block_end><if_stmt>num_search_each_probe<ne>1<block_start>utility.show_start_message_multi_search(self.history.num_runs 
score)<block_end>K=self.config.search.multi_probe_num_sampling<line_sep>alpha=self.config.search.alpha<line_sep>action=self.get_actions(score N K alpha)<if_stmt>simulator<is><none><block_start><return>action<block_end>t,X=call_simulator(simulator action)<line_sep>self.write(action t X)<if_stmt>is_disp<block_start>utility.show_search_results(self.history N)<block_end><block_end><return>copy.deepcopy(self.history)<block_end><def_stmt>get_score self mode predictor=<none> training=<none> alpha=1<block_start>self._set_training(training)<line_sep>self._set_predictor(predictor)<line_sep>actions=self.actions<line_sep>test=self.test.get_subset(actions)<if_stmt>mode<eq>'EI'<block_start>f=combo.search.score.EI(predictor training test)<block_end><elif_stmt>mode<eq>'PI'<block_start>f=combo.search.score.PI(predictor training test)<block_end><elif_stmt>mode<eq>'TS'<block_start>f=combo.search.score.TS(predictor training test alpha)<block_end><else_stmt><block_start><raise>NotImplementedError('mode must be EI, PI or TS.')<block_end><return>f<block_end><def_stmt>get_marginal_score self mode chosed_actions N alpha<block_start>f=np.zeros((N len(self.actions)))<line_sep>new_test=self.test.get_subset(chosed_actions)<line_sep>virtual_t=self.predictor.get_predict_samples(self.training new_test N)<for_stmt>n xrange(N)<block_start>predictor=copy.deepcopy(self.predictor)<line_sep>train=copy.deepcopy(self.training)<line_sep>virtual_train=new_test<line_sep>virtual_train.t=virtual_t[n :]<if_stmt>virtual_train.Z<is><none><block_start>train.add(virtual_train.X virtual_train.t)<block_end><else_stmt><block_start>train.add(virtual_train.X virtual_train.t virtual_train.Z)<block_end><try_stmt><block_start>predictor.update(train virtual_train)<block_end><except_stmt><block_start>predictor.prepare(train)<block_end>f[n :]=self.get_score(mode predictor train)<block_end><return>f<block_end><def_stmt>get_actions self mode N K alpha<block_start>f=self.get_score(mode self.predictor self.training alpha)<line_sep>temp=np.argmax(f)<line_sep>action=self.actions[temp]<line_sep>self.actions=self.delete_actions(temp)<line_sep>chosed_actions=np.zeros(N dtype=int)<line_sep>chosed_actions[0]=action<for_stmt>n xrange(1 N)<block_start>f=self.get_marginal_score(mode chosed_actions[0:n] K alpha)<line_sep>temp=np.argmax(np.mean(f 0))<line_sep>chosed_actions[n]=self.actions[temp]<line_sep>self.actions=self.delete_actions(temp)<block_end><return>chosed_actions<block_end><def_stmt>get_random_action self N<block_start>random_index=np.random.permutation(xrange(self.actions.shape[0]))<line_sep>index=random_index[0:N]<line_sep>action=self.actions[index]<line_sep>self.actions=self.delete_actions(index)<line_sep><return>action<block_end><def_stmt>load self file_history file_training=<none> file_predictor=<none><block_start>self.history.load(file_history)<if_stmt>file_training<is><none><block_start>N=self.history.total_num_search<line_sep>X=self.test.X[self.history.chosed_actions[0:N] :]<line_sep>t=self.history.fx[0:N]<line_sep>self.training=variable(X=X t=t)<block_end><else_stmt><block_start>self.training=variable()<line_sep>self.training.load(file_training)<block_end><if_stmt>file_predictor<is><not><none><block_start><with_stmt>open(file_predictor)<as>f<block_start>self.predictor=pickle.load(f)<block_end><block_end><block_end><def_stmt>export_predictor self<block_start><return>self.predictor<block_end><def_stmt>export_training self<block_start><return>self.training<block_end><def_stmt>export_history 
self<block_start><return>self.history<block_end><def_stmt>_set_predictor self predictor=<none><block_start><if_stmt>predictor<is><none><block_start>predictor=self.predictor<block_end><return>predictor<block_end><def_stmt>_init_predictor self is_rand_expans predictor=<none><block_start>self.predictor=self._set_predictor(predictor)<if_stmt>self.predictor<is><none><block_start><if_stmt>is_rand_expans<block_start>self.predictor=blm_predictor(self.config)<block_end><else_stmt><block_start>self.predictor=gp_predictor(self.config)<block_end><block_end><return>self.predictor<block_end><def_stmt>_set_training self training=<none><block_start><if_stmt>training<is><none><block_start>training=self.training<block_end><return>training<block_end><def_stmt>_set_unchosed_actions self actions=<none><block_start><if_stmt>actions<is><none><block_start>actions=self.actions<block_end><return>actions<block_end><def_stmt>_set_test self test_X<block_start><if_stmt>isinstance(test_X np.ndarray)<block_start>test=variable(X=test_X)<block_end><elif_stmt>isinstance(test_X variable)<block_start>test=test_X<block_end><else_stmt><block_start><raise>TypeError('The type of test_X must \ take ndarray or combo.variable')<block_end><return>test<block_end><def_stmt>_set_config self config=<none><block_start><if_stmt>config<is><none><block_start>config=combo.misc.set_config()<block_end><return>config<block_end><block_end>
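# Added illustrative sketch (not from the original module): a minimal driver
# for the policy class above, in the same Python 2 style as the module. The
# candidate matrix, the toy simulator, and every numeric setting below are
# invented for illustration; it also assumes (as in the COMBO examples) that a
# simulator may be any callable mapping an array of action indices to their
# objective values.
def _example_search():
    test_X = np.random.rand(1000, 3)  # 1000 candidates described by 3 features

    def toy_simulator(actions):
        # Toy objective to maximize: negative distance to an arbitrary point.
        return -np.linalg.norm(test_X[actions, :] - 0.5, axis=1)

    searcher = policy(test_X=test_X)
    searcher.set_seed(0)
    searcher.random_search(max_num_probes=20, simulator=toy_simulator)
    res = searcher.bayes_search(max_num_probes=80, simulator=toy_simulator,
                                score='TS', interval=20, num_rand_basis=5000)
    # history attributes used by policy.load() above: fx, total_num_search
    return res.fx[0:res.total_num_search].max()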
<def_stmt>_disable_linecache <block_start><import_stmt>linecache<def_stmt>fake_getline *args **kwargs<block_start><return>""<block_end>linecache.orig_getline=linecache.getline<line_sep>linecache.getline=fake_getline<block_end>_disable_linecache()<line_sep>
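# Added note (not part of the original snippet): once this module is imported,
# anything that formats tracebacks through linecache (for example
# traceback.print_exc) will still show file names and line numbers but no
# source text, because linecache.getline now always returns ''. The original
# implementation stays reachable as linecache.orig_getline.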
""" Module docstring """<def_stmt>_impl _ctx<block_start>""" Function docstring """<line_sep><pass><block_end>some_rule=rule(attrs={"attr1":attr.int(default=2 mandatory=<false> ) "attr2":5 } implementation=_impl )<line_sep>