from time import sleep
from tensorboardX import SummaryWriter

with SummaryWriter(logdir='runs/purge') as w:
    for i in range(100):
        w.add_scalar('purgetest', i, i)

sleep(1.0)

with SummaryWriter(logdir='runs/purge', purge_step=42) as w:  # events 42~99 are removed (inclusive)
    for i in range(42, 100):
        w.add_scalar('purgetest', 42, i)
import time

print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
from __future__ import print_function, division
import warnings


def flip_sublat(opstr, indx, lat=0):
    sign = 1
    opstr = [str(s) for s in opstr]
    for s, i, j in zip(opstr, indx, range(len(indx))):
        if (i % 2) == (lat % 2):
            if s in ['z', 'y']:
                sign *= -1
            elif s == "+":
                opstr[j] = '-'
            elif s == "-":
                opstr[j] = '+'
    return sign, "".join(opstr)


def check_T(sort_opstr, operator_list, L, a):
    missing_ops = []
    for i in range(0, L // a, 1):
        for op in operator_list:
            opstr = str(op[0])
            indx = list(op[1])
            for j, ind in enumerate(indx):
                indx[j] = (ind + i * a) % L
            new_op = list(op)
            new_op[1] = indx
            new_op = sort_opstr(new_op)
            if not (new_op in operator_list):
                missing_ops.append(new_op)
    return missing_ops


def check_Z(sort_opstr, operator_list):
    missing_ops = []
    odd_ops = []
    for op in operator_list:
        opstr = str(op[0])
        indx = list(op[1])
        if opstr.count("|") == 1:
            i = opstr.index("|")
        else:
            i = len(opstr)
        z_count = opstr[:i].count("z")
        y_count = opstr[:i].count("y")
        if ((y_count + z_count) % 2) != 0:
            odd_ops.append(op)
        new_op = list(op)
        new_op[0] = new_op[0][:i].replace("+", "#").replace("-", "+").replace("#", "-") + op[0][i:]
        new_op = sort_opstr(new_op)
        if not (new_op in operator_list):
            missing_ops.append(new_op)
    return odd_ops, missing_ops


def check_P(sort_opstr, operator_list, L):
    missing_ops = []
    for op in operator_list:
        indx = list(op[1])
        for j, ind in enumerate(indx):
            indx[j] = (L - 1 - ind) % L
        new_op = list(op)
        new_op[1] = indx
        new_op = sort_opstr(new_op)
        if not (new_op in operator_list):
            missing_ops.append(new_op)
    return missing_ops


def check_PZ(sort_opstr, operator_list, L):
    missing_ops = []
    for op in operator_list:
        opstr = str(op[0])
        indx = list(op[1])
        if opstr.count("|") == 1:
            i = opstr.index("|")
        else:
            i = len(opstr)
        for j, ind in enumerate(indx):
            indx[j] = (L - 1 - ind) % L
        sign = (-1) ** (opstr[:i].count('z') + opstr.count('y'))
        new_op = list(op)
        new_op[0] = new_op[0][:i].replace("+", "#").replace("-", "+").replace("#", "-") + op[0][i:]
        new_op[1] = indx
        new_op[2] *= sign
        new_op = sort_opstr(new_op)
        if not (new_op in operator_list):
            missing_ops.append(new_op)
    return missing_ops


def check_ZA(sort_opstr, operator_list):
    missing_ops = []
    odd_ops = []
    for op in operator_list:
        opstr = str(op[0])
        indx = list(op[1])
        if opstr.count("|") == 1:
            i = opstr.index("|")
        else:
            i = len(opstr)
        sign, new_opstr = flip_sublat(opstr[:i], indx[:i], lat=0)
        if sign == -1:
            odd_ops.append(op)
        new_op = list(op)
        new_op[0] = new_opstr + opstr[i:]
        new_op = sort_opstr(new_op)
        if not (new_op in operator_list):
            missing_ops.append(new_op)
    return odd_ops, missing_ops


def check_ZB(sort_opstr, operator_list):
    missing_ops = []
    odd_ops = []
    for op in operator_list:
        opstr = str(op[0])
        indx = list(op[1])
        if opstr.count("|") == 1:
            i = opstr.index("|")
        else:
            i = len(opstr)
        sign, new_opstr = flip_sublat(opstr[:i], indx[:i], lat=1)
        if sign == -1:
            odd_ops.append(op)
        new_op = list(op)
        new_op[0] = new_opstr + opstr[i:]
        new_op = sort_opstr(new_op)
        if not (new_op in operator_list):
            missing_ops.append(new_op)
    return odd_ops, missing_ops
from .gen.project_memberships import _ProjectMemberships


class ProjectMemberships(_ProjectMemberships):
    """Project Memberships resource"""

    def find_by_project(self, project, params={}, **options):
        """Returns the compact project membership records for the project.

        Parameters
        ----------
        project : {Gid} The project for which to fetch memberships.
        [params] : {Object} Parameters for the request
          - [user] : {String} If present, the user to filter the memberships to.
        """
        path = "/projects/%s/project_memberships" % (project)
        return self.client.get_collection(path, params, **options)

    def find_by_id(self, project_membership, params={}, **options):
        """Returns the project membership record.

        Parameters
        ----------
        project_membership : {Gid} Globally unique identifier for the project membership.
        [params] : {Object} Parameters for the request
        """
        path = "/project_memberships/%s" % (project_membership)
        return self.client.get(path, params, **options)
import logging
import math
import os
import pickle
import re
from typing import Optional

from .core import Namespace as ns, json_dump, json_load
from .process import profile

log = logging.getLogger(__name__)


def _import_data_libraries():
    try:
        import numpy as np
    except ImportError:
        np = None
    try:
        import pandas as pd
    except ImportError:
        pd = None
    try:
        import scipy.sparse as sp
    except ImportError:
        sp = None
    return np, pd, sp


ser_config = ns(
    # the serializer to use when there's no specific serializer available.
    # mainly intended to serialize simple data structures like lists.
    # allowed=['pickle', 'json']
    fallback_serializer='json',
    # if numpy can use pickle to serialize ndarrays.
    numpy_allow_pickle=True,
    # format used to serialize pandas dataframes/series between processes.
    # allowed=['pickle', 'parquet', 'hdf', 'json']
    pandas_serializer='parquet',
    # the compression format used when serializing pandas dataframes/series.
    # allowed=[None, 'infer', 'bz2', 'gzip']
    # 'infer' (= None) is the fastest but applies no compression,
    # 'gzip' gives fast writes and reads with good compression,
    # 'bz2' looks like the best compression/time ratio (faster write, sometimes slightly slower read).
    pandas_compression='infer',
    # the compression format used when serializing pandas dataframes/series to parquet.
    # allowed=[None, 'snappy', 'gzip', 'brotli']
    pandas_parquet_compression=None,
    # if sparse matrices should be compressed during serialization.
    sparse_matrix_compression=True,
    # if sparse matrices should be deserialized to some specific format:
    # allowed=[None, 'array', 'dense']
    # None (no change), 'array' (numpy), 'dense' (dense matrix).
    sparse_matrix_deserialized_format=None,
    # if sparse dataframes should be deserialized to some specific format:
    # allowed=[None, 'array', 'dense']
    # None (no change), 'array' (numpy), 'dense' (dense dataframe/series).
    sparse_dataframe_deserialized_format=None,
)

__series__ = '_series_'


class SerializationError(Exception):
    pass


def is_serializable_data(data):
    np, pd, sp = _import_data_libraries()
    return isinstance(data, (np.ndarray, sp.spmatrix, pd.DataFrame, pd.Series))


def is_sparse(data):
    np, pd, sp = _import_data_libraries()
    return ((sp and isinstance(data, sp.spmatrix))  # sparse matrix
            or (pd and isinstance(data, pd.Series)
                and pd.api.types.is_sparse(data.dtype))  # sparse Series
            or (pd and isinstance(data, pd.DataFrame)
                # if one column is sparse, the dataframe is considered sparse
                and any(pd.api.types.is_sparse(dt) for dt in data.dtypes)))


def unsparsify(*data, fmt='dense'):
    if len(data) == 1:
        return _unsparsify(data[0], fmt=fmt)
    else:
        return tuple(_unsparsify(d, fmt=fmt) for d in data)


def _unsparsify(data, fmt=None):
    """
    :param data: the matrix to process.
    :param fmt: one of None, 'array', 'dense'
    :return: the original matrix if fmt is None,
        a numpy array if fmt is 'array',
        a dense version of the data type if fmt is 'dense'.
    """
    if fmt is None:
        return data
    np, pd, sp = _import_data_libraries()
    if sp and isinstance(data, sp.spmatrix):
        return (data.toarray() if fmt == 'array'
                else data.todense() if fmt == 'dense'
                else data)
    elif pd and isinstance(data, (pd.DataFrame, pd.Series)):
        return (data.to_numpy(copy=False) if fmt == 'array'
                else _pd_to_dense(pd, data) if fmt == 'dense' and is_sparse(data)
                else data)
    else:
        return data


def _pd_to_dense(pd, df):
    if hasattr(df, 'sparse'):
        return df.sparse.to_dense()
    data = {k: (v.sparse.to_dense() if hasattr(v, 'sparse') else v) for k, v in df.items()}
    return pd.DataFrame(data, index=df.index, columns=df.columns)


def _pd_dtypes_to_str(pd, df):
    return {k: str(v) for k, v in df.dtypes.items()}


def _pd_dtypes_from_str(pd, dt):
    def dt_from_str(s):
        m_sparse = re.match(r"Sparse\[(.*)]", s)
        if m_sparse:
            sub_type, fill_value = [t.strip() for t in m_sparse.group(1).split(",", 1)]
            try:
                fill_value = eval(fill_value, {'nan': math.nan, '<NA>': pd.NA})
            except ValueError:
                pass
            dt = pd.api.types.pandas_dtype(f"Sparse[{sub_type}]")
            return pd.SparseDtype(dt, fill_value=fill_value)
        else:
            return pd.api.types.pandas_dtype(s)

    return {k: dt_from_str(v) for k, v in dt.items()}


@profile(log)
def serialize_data(data, path, config: Optional[ns] = None):
    config = (config | ser_config) if config else ser_config
    root, ext = os.path.splitext(path)
    np, pd, sp = _import_data_libraries()
    if np and isinstance(data, np.ndarray):
        path = f"{root}.npy"
        np.save(path, data, allow_pickle=config.numpy_allow_pickle)
    elif sp and isinstance(data, sp.spmatrix):
        # use a custom extension to recognize sparse matrices from the file name.
        # .npz is automatically appended if missing, and can also potentially be used for numpy arrays.
        path = f"{root}.spy.npz"
        sp.save_npz(path, data, compressed=config.sparse_matrix_compression)
    elif pd and isinstance(data, (pd.DataFrame, pd.Series)):
        path = f"{root}.pd"
        if isinstance(data, pd.DataFrame):
            # pandas has this habit of inferring value types when data are loaded from file,
            # for example, 'true' and 'false' are converted automatically to booleans, even for column names…
            data.rename(str, axis='columns', inplace=True)
        ser = config.pandas_serializer
        if ser == 'pickle':
            data.to_pickle(path, compression=config.pandas_compression)
        elif ser == 'parquet':
            if isinstance(data, pd.Series):
                data = pd.DataFrame({__series__: data})
            # parquet serialization doesn't support sparse dataframes
            if is_sparse(data):
                path = f"{root}.sparse.pd"
                dtypes = _pd_dtypes_to_str(pd, data)
                json_dump(dtypes, f"{path}.dtypes", style='compact')
                data = unsparsify(data)
            data.to_parquet(path, compression=config.pandas_parquet_compression)
        elif ser == 'hdf':
            data.to_hdf(path, os.path.basename(path), mode='w', format='table')
        elif ser == 'json':
            data.to_json(path, compression=config.pandas_compression)
    else:  # fallback serializer
        if config.fallback_serializer == 'json':
            path = f"{root}.json"
            json_dump(data, path, style='compact')
        else:
            path = f"{root}.pkl"
            with open(path, 'wb') as f:
                pickle.dump(data, f)
    return path


@profile(log)
def deserialize_data(path, config: Optional[ns] = None):
    config = (config | ser_config) if config else ser_config
    np, pd, sp = _import_data_libraries()
    base, ext = os.path.splitext(path)
    if ext == '.npy':
        if np is None:
            raise SerializationError(f"Numpy is required to deserialize {path}.")
        return np.load(path, allow_pickle=config.numpy_allow_pickle)
    elif ext == '.npz':
        _, ext2 = os.path.splitext(base)
        if ext2 == '.spy':
            if sp is None:
                raise SerializationError(f"Scipy is required to deserialize {path}.")
            sp_matrix = sp.load_npz(path)
            return unsparsify(sp_matrix, fmt=config.sparse_matrix_deserialized_format)
        else:
            if np is None:
                raise SerializationError(f"Numpy is required to deserialize {path}.")
            with np.load(path, allow_pickle=config.numpy_allow_pickle) as loaded:
                return loaded
    elif ext == '.pd':
        if pd is None:
            raise SerializationError(f"Pandas is required to deserialize {path}.")
        ser = config.pandas_serializer
        df = None
        if ser == 'pickle':
            df = pd.read_pickle(path, compression=config.pandas_compression)
        elif ser == 'parquet':
            df = pd.read_parquet(path)
            if len(df.columns) == 1 and df.columns[0] == __series__:
                df = df.squeeze()
            _, ext2 = os.path.splitext(base)
            if config.sparse_dataframe_deserialized_format is None and ext2 == '.sparse':
                # try to restore the dataframe as sparse if it was sparse before serialization
                # and if the dataframe format should remain unchanged
                j_dtypes = json_load(f"{path}.dtypes")
                dtypes = _pd_dtypes_from_str(pd, j_dtypes)
                df = df.astype(dtypes, copy=False)
        elif ser == 'hdf':
            df = pd.read_hdf(path, os.path.basename(path))
        elif ser == 'json':
            df = pd.read_json(path, compression=config.pandas_compression)
        return unsparsify(df, fmt=config.sparse_dataframe_deserialized_format)
    elif ext == '.json':
        return json_load(path)
    elif ext == '.pkl':
        with open(path, 'rb') as f:
            return pickle.load(f)
    else:
        raise SerializationError(f"Cannot deserialize file `{path}` in unknown format.")
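A minimal round-trip sketch for the serializer above; the module name `serialization` and the temp path are illustrative assumptions, not part of the original file.

# Sketch: serialize a numpy array and read it back (assumes numpy is installed
# and that the module above is importable as `serialization` — an assumption).
import numpy as np
import serialization

arr = np.arange(6).reshape(2, 3)
saved = serialization.serialize_data(arr, '/tmp/payload')  # extension is rewritten to .npy
restored = serialization.deserialize_data(saved)
assert (restored == arr).all()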
"""Utilities related to formatting job metrics for human consumption."""<class_stmt>JobMetricFormatter<block_start>"""Format job metric key-value pairs for human consumption in Web UI."""<def_stmt>format self key value<block_start><return>(str(key) str(value))<block_end><block_end><def_stmt>seconds_to_str value<block_start>"""Convert seconds to a simple simple string describing the amount of time."""<line_sep>mins,secs=divmod(value 60)<line_sep>hours,mins=divmod(mins 60)<if_stmt>value<l>60<block_start><return>f"{secs} second{'s'<if>secs<ne>1<else>''}"<block_end><elif_stmt>value<l>3600<block_start><return>f"{mins} minute{'s'<if>mins<ne>1<else>''}"<block_end><else_stmt><block_start><return>f"{hours} hour{'s'<if>hours<ne>1<else>''} and {mins} minute{'s'<if>mins<ne>1<else>''}"<block_end><block_end>
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Awaitable, List

from azure.security.attestation import AttestationType

try:
    from typing import TYPE_CHECKING
except ImportError:
    TYPE_CHECKING = False

from typing import Awaitable, Callable, Dict, Optional, Any, TypeVar, overload

T = TypeVar("T")


def AllAttestationTypes(func: Callable[..., Awaitable[T]] = None, **kwargs: Any):
    """Decorator to apply to function to add attestation_type kwarg for each attestation type."""

    async def wrapper(*args, **kwargs) -> Callable[..., Awaitable[T]]:
        for attestation_type in [
            AttestationType.SGX_ENCLAVE,
            AttestationType.OPEN_ENCLAVE,
            AttestationType.TPM,
        ]:
            await func(*args, attestation_type=attestation_type, **kwargs)

    return wrapper


def AllInstanceTypes(
    func: Callable[..., Awaitable[T]] = None, include_shared: bool = True, **kwargs: Any
):
    """Decorator to apply to function to add instance_url kwarg for each instance type."""

    async def wrapper(*args, **kwargs) -> Callable[..., Awaitable[T]]:
        instances = []  # type: List[str]
        instances.append(kwargs.get("attestation_aad_url"))
        instances.append(kwargs.get("attestation_isolated_url"))
        if include_shared:
            instances.append(
                "https://shared"
                + kwargs.get("attestation_location_short_name")
                + "."
                + kwargs.get("attestation_location_short_name")
                + ".attest.azure.net"
            )
        for instance_url in instances:
            await func(*args, instance_url=instance_url, **kwargs)

    return wrapper
class Evaluator(object):
    """
    Compute metrics for recommendations that have been written to file.

    Parameters
    ----------
    compute_metrics : function(list,list)
        The evaluation function which should accept two lists of predicted
        and actual item indices.
    max_items : int
        The number of recommendations needed to compute the evaluation function.
    """

    def __init__(self, compute_metrics, max_items):
        self.compute_metrics = compute_metrics
        self.max_items = max_items

    def _add_metrics(self, predicted, actual):
        metrics = self.compute_metrics(predicted, actual)
        if metrics:
            for m, val in metrics.iteritems():
                self.cum_metrics[m] += val
            self.count += 1

    def process(self, testdata, recsfile, start, end, offset=1):
        """
        Parameters
        ----------
        testdata : scipy sparse matrix
            The test items for each user.
        recsfile : str
            Filepath to the recommendations. The file should contain TSV
            of the form: user, item, score. IMPORTANT: the recommendations
            must be sorted by user and score.
        start : int
            First user to evaluate.
        end : int
            One after the last user to evaluate.
        offset : int
            Index offset for users and items in recommendations file.

        Returns
        -------
        cum_metrics : dict
            Aggregated metrics i.e. total values for all users.
        count : int
            The number of users for whom metrics were computed.
        """
        from collections import defaultdict

        self.cum_metrics = defaultdict(float)
        self.count = 0

        last_user = start
        recs = []
        for line in open(recsfile):
            user, item, score = line.strip().split('\t')
            user = int(user) - 1  # convert to 0-indexed
            item = int(item) - 1
            if user >= end:
                break
            if user < start:
                continue
            if user != last_user:
                self._add_metrics(recs, testdata[last_user, :].indices.tolist())
                last_user = user
                recs = []
            if len(recs) < self.max_items:
                recs.append(item)
        self._add_metrics(recs, testdata[last_user, :].indices.tolist())

        return self.cum_metrics, self.count
# A Python program for all operations performed on a singly linked list.
# Time-Complexity = O(n)
# Space-Complexity = O(n)

class Node:
    def __init__(self, data=None, next=None):  # creation of a node
        self.data = data
        self.next = next


class LinkedList:
    def __init__(self):
        self.head = None  # head points to the first node

    def print(self):
        if self.head is None:
            print("Linked list is empty")
            return
        itr = self.head
        llstr = ''  # empty string
        while itr:
            llstr += str(itr.data) + ' --> ' if itr.next else str(itr.data)
            itr = itr.next
        print(llstr)

    def length(self):  # will calculate the length of the linked list
        count = 0
        itr = self.head
        while itr:
            count += 1
            itr = itr.next
        return count

    def insert_at_begining(self, data):
        node = Node(data, self.head)  # creating a new node via the Node class
        self.head = node

    def insert_at_end(self, data):
        if self.head is None:
            self.head = Node(data, None)
            return
        itr = self.head
        while itr.next:
            itr = itr.next
        itr.next = Node(data, None)

    def insert_at(self, index, data):
        if index < 0 or index > self.length():
            raise Exception("Invalid Index")
        if index == 0:
            self.insert_at_begining(data)
            return
        count = 0
        itr = self.head
        while itr:
            if count == index - 1:
                node = Node(data, itr.next)
                itr.next = node
                break
            itr = itr.next
            count += 1

    def remove_at(self, index):
        if index < 0 or index >= self.length():
            raise Exception("Invalid Index")
        if index == 0:
            self.head = self.head.next
            return
        count = 0
        itr = self.head
        while itr:
            if count == index - 1:
                itr.next = itr.next.next  # to delete the specified node
                break
            itr = itr.next
            count += 1

    def insert_values(self, data_list):
        self.head = None
        for data in data_list:
            self.insert_at_end(data)

    # removing an element from the linked list by value
    def removeval(self, value):
        count = 0
        temp = self.head
        while temp:
            if value != temp.data:
                count += 1
            temp = temp.next
        if count == self.length():
            print("Value is not present")
        else:
            if value == self.head.data:
                self.head = self.head.next
                return
            temp = self.head
            while temp:
                if value == temp.next.data:
                    temp.next = temp.next.next
                    break
                temp = temp.next


if __name__ == '__main__':
    node1 = LinkedList()
    ins = list(input("Enter values to be inserted, separated by spaces [eg: python c++ java]: ").rstrip().split())
    node1.insert_values(ins)
    node1.print()
    ind = int(input("Enter the index to be added: "))
    val = input('Enter the value: ')
    node1.insert_at(ind, val)
    node1.print()
    remm = int(input('Enter the index to be removed: '))
    node1.remove_at(remm)
    node1.print()
    remval = input('Enter the value to be removed: ')
    node1.removeval(remval)
    node1.print()
    inss = list(input("Enter values to be inserted, separated by spaces [eg: 45 30 22]: ").rstrip().split())
    node1.insert_values(inss)
    node1.print()
    inend = int(input('Enter the number to be inserted at the end: '))
    node1.insert_at_end(inend)
    node1.print()
    remval1 = input('Enter the value to be removed: ')
    node1.removeval(remval1)
    node1.print()
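A scripted usage sketch of the same LinkedList API, without the interactive prompts; the sample values are chosen for illustration.

ll = LinkedList()
ll.insert_values(['banana', 'mango', 'grapes'])
ll.insert_at(1, 'apple')    # banana --> apple --> mango --> grapes
ll.remove_at(2)             # banana --> apple --> grapes
ll.insert_at_end('orange')  # banana --> apple --> grapes --> orange
ll.removeval('apple')       # banana --> grapes --> orange
ll.print()
print(ll.length())          # 3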
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WordpieceTokenizer classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unicodedata

from .utils import (load_vocab, convert_to_unicode, clean_text,
                    split_on_whitespace, convert_by_vocab,
                    tokenize_chinese_chars)


class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab=None, unk_token="[UNK]", max_input_chars_per_word=200):
        self.vocab = load_vocab(vocab)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through `BasicTokenizer`.

        Returns:
            output_tokens: A list of wordpiece tokens.
            current_positions: A list of the current positions for the
                original words in text.
        """
        text = convert_to_unicode(text)
        text = clean_text(text)
        text = tokenize_chinese_chars(text)

        output_tokens = []
        current_positions = []
        token_list = split_on_whitespace(text)
        for chars in token_list:
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                if start > 0:
                    substr = "##" + chars[start:end]
                else:
                    substr = chars[start:end]
                cur_substr = None
                while start < end:
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                    substr = substr[:-1]
                if cur_substr is None:
                    is_bad = True
                    break
                else:
                    sub_tokens.append(cur_substr)
                    start = end
            current_positions.append([])
            if is_bad:
                current_positions[-1].append(len(output_tokens))
                output_tokens.append(self.unk_token)
                current_positions[-1].append(len(output_tokens))
            else:
                current_positions[-1].append(len(output_tokens))
                output_tokens.extend(sub_tokens)
                current_positions[-1].append(len(output_tokens))
        return output_tokens, current_positions

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)
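A hedged usage sketch for the tokenizer above; it assumes a `vocab.txt` file (one wordpiece per line) exists and that the package's `load_vocab` maps it to a {token: id} dict, as in the standard BERT reference code.

tokenizer = WordpieceTokenizer(vocab="vocab.txt")
tokens, positions = tokenizer.tokenize("unaffable")
print(tokens)     # e.g. ["un", "##aff", "##able"] if those pieces are in the vocab
print(positions)  # e.g. [[0, 3]] -> the word spans output tokens 0..3
ids = tokenizer.convert_tokens_to_ids(tokens)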
import os
import configparser
import traceback

from . import Logger


# util class
class Empty:
    pass


def evaluation(value):
    # find value type
    try:
        evalValue = eval(value)
        if type(evalValue) in [int, float, list, tuple, dict]:
            return evalValue
    except:
        return value


def getValue(config, section, option, default_value=None):
    return evaluation(config[section][option]) if config.has_option(section, option) else default_value


def setValue(config, section, option, value):
    if not config.has_section(section):
        config.add_section(section)
    config.set(section, option, value)


# ------------------------------ #
# CLASS : Config
# Usage :
#     config = Config('Config.ini')
#     # get value example, section:Screen, option:width
#     print(config.Screen.width)
# ------------------------------ #
class Config:
    def __init__(self, configFilename, log_level=Logger.WARN, prevent_lowercase=True):
        self.log_level = log_level
        self.isChanged = False
        self.filename = configFilename
        self.config = configparser.ConfigParser()
        self.config.read(configFilename)

        # prevent the option names being lowercased
        if prevent_lowercase:
            self.config.optionxform = lambda option_name: option_name

        if self.log_level <= Logger.INFO:
            print("Load Config : %s" % self.filename)

        # set sections
        for section in self.config.sections():
            if self.log_level == Logger.DEBUG:
                print("[%s]" % section)
            if not hasattr(self, section):
                setattr(self, section, Empty())
            # set value to member variables
            current_section = getattr(self, section)
            for option in self.config[section]:
                value = self.config.get(section, option)
                if self.log_level == Logger.DEBUG:
                    print("%s = %s" % (option, value))
                setattr(current_section, option, evaluation(value))

    def hasValue(self, section, option):
        return self.config.has_option(section, option)

    def getValue(self, section, option, default_value=None):
        return evaluation(self.config[section][option]) if self.config.has_option(section, option) else default_value

    def setValue(self, section, option, value):
        # set value
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.config[section][option] = str(value)

        # set value to member variables
        if not hasattr(self, section):
            setattr(self, section, Empty())
            self.isChanged = True
        elif not self.isChanged:
            self.isChanged = value != getattr(self, section)
        current_section = getattr(self, section)
        setattr(current_section, option, value)

    def setDefaultValue(self, section, option, value):
        if not self.hasValue(section, option):
            self.setValue(section, option, value)

    def save(self):
        if self.isChanged or not os.path.exists(self.filename):
            with open(self.filename, 'w') as configfile:
                self.config.write(configfile)
                if self.log_level <= Logger.INFO:
                    print("Saved Config : " + self.filename)
        self.isChanged = False

    def getFilename(self):
        return self.filename


if __name__ == '__main__':
    import unittest

    class test(unittest.TestCase):
        def testConfig(self):
            # load test
            testConfig = Config("TestConfig.ini")
            # set value
            testConfig.setValue("TestSection", "test_int", 45)
            testConfig.setValue("TestSection", "test_float", 0.1)
            testConfig.setValue("TestSection", "test_string", "Hello, World")
            testConfig.setValue("TestSection", "test_list", [1, 2, 3])
            testConfig.setValue("TestSection", "test_tuple", (4, 5, 6))
            testConfig.setValue("TestSection", "test_dict", {"x": 7.0, "y": 8.0})
            # call test
            self.assertEqual(testConfig.TestSection.test_int, 45)
            self.assertEqual(testConfig.TestSection.test_float, 0.1)
            self.assertEqual(testConfig.TestSection.test_string, "Hello, World")
            self.assertEqual(testConfig.TestSection.test_list, [1, 2, 3])
            self.assertEqual(testConfig.TestSection.test_tuple, (4, 5, 6))
            self.assertEqual(testConfig.TestSection.test_dict['x'], 7.0)
            self.assertEqual(testConfig.TestSection.test_dict['y'], 8.0)
            # set value test
            testConfig.setValue("TestSection", "test_int", 99)
            self.assertEqual(testConfig.TestSection.test_int, 99)
            testConfig.save()

    unittest.main()
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the google.colab._installation_commands package."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import unittest

import IPython
from IPython.utils import io
from google.colab import load_ipython_extension

MOCKED_COMMANDS = {
    'pip install pandas': """
Requirement already satisfied: pandas in /usr/local/lib/python2.7/dist-packages (0.22.0)
Requirement already satisfied: pytz>=2011k in /usr/local/lib/python2.7/dist-packages (from pandas) (2018.9)
Requirement already satisfied: python-dateutil in /usr/local/lib/python2.7/dist-packages (from pandas) (2.5.3)
Requirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python2.7/dist-packages (from pandas) (1.16.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python2.7/dist-packages (from python-dateutil->pandas) (1.11.0)
""",
    'pip install -U numpy': """
Collecting numpy
  Downloading https://files.pythonhosted.org/packages/c4/33/8ec8dcdb4ede5d453047bbdbd01916dbaccdb63e98bba60989718f5f0876/numpy-1.16.2-cp27-cp27mu-manylinux1_x86_64.whl (17.0MB)
    100% |============================| 17.0MB 660kB/s
fastai 0.7.0 has requirement torch<0.4, but you'll have torch 1.0.1.post2 which is incompatible.
albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.8 which is incompatible.
featuretools 0.4.1 has requirement pandas>=0.23.0, but you'll have pandas 0.22.0 which is incompatible.
Installing collected packages: numpy
  Found existing installation: numpy 1.14.6
    Uninstalling numpy-1.14.6:
      Successfully uninstalled numpy-1.14.6
Successfully installed numpy-1.16.2
""",
}


class MockInteractiveShell(IPython.InteractiveShell):
    """Interactive shell that mocks some commands."""

    def system(self, cmd):
        if cmd in MOCKED_COMMANDS:
            sys.stderr.write('')
            sys.stdout.write(MOCKED_COMMANDS[cmd])
            self.user_ns['_exit_code'] = 0
        else:
            return super(MockInteractiveShell, self).system(cmd)


class InstallationCommandsTest(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        super(InstallationCommandsTest, cls).setUpClass()
        cls.ip = MockInteractiveShell()
        load_ipython_extension(cls.ip)

    def testPipMagicPandas(self):
        output = self.run_cell('%pip install pandas')
        self.assertEqual([], output.outputs)
        self.assertEqual('', output.stderr)
        self.assertIn('pandas', output.stdout)

    def testPipMagicNumpy(self):
        output = self.run_cell('%pip install -U numpy')
        self.assertEqual([], output.outputs)
        self.assertEqual('', output.stderr)
        self.assertIn('numpy', output.stdout)

    def run_cell(self, cell_contents):
        with io.capture_output() as captured:
            self.ip.run_cell(cell_contents)
        return captured
import boto3

log_client = boto3.client('logs')


def handler(event, context):
    task_id = event['taskId']
    result = log_client.describe_export_tasks(taskId=task_id)

    # per documentation, only one export can run at a time per account,
    # therefore ensure none are running in this account
    # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs.html#CloudWatchLogs.Client.describe_export_tasks
    # result = log_client.describe_export_tasks(statusCode='CANCELLED' | 'PENDING' | 'PENDING_CANCEL' | 'RUNNING')
    status = 'RUNNING'
    task_status = result.get('exportTasks')
    if len(task_status) != 0:
        task_status = task_status[0].get('status').get('code')
    if task_status not in ['PENDING', 'PENDING_CANCEL', 'RUNNING']:
        status = 'NOT_RUNNING'
    return {"Status": status}
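A hedged local-invocation sketch for the Lambda handler above; the task id is illustrative, and running it for real requires AWS credentials and an existing CloudWatch Logs export task.

if __name__ == '__main__':
    fake_event = {'taskId': 'example-task-id'}  # hypothetical id, not a real export task
    print(handler(fake_event, None))            # -> {'Status': 'RUNNING'} or {'Status': 'NOT_RUNNING'}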
"""Tests for ChainerCV related custom ops."""<import_stmt>chainer<import_stmt>chainer.functions<as>F<import_stmt>chainer.links<as>L<import_stmt>numpy<as>np<import_stmt>onnx<import_stmt>onnx_script<import_stmt>test_case<line_sep>_has_chnainercv=<true><try_stmt><block_start><import_stmt>chainercv_rpn<block_end><except_stmt>ImportError<block_start>_has_chnainercv=<false><block_end><def_stmt>aranges *shape<block_start>r=np.prod(shape)<line_sep>v=np.arange(r).reshape(shape).astype(np.float32)<line_sep>v<augsub>r/2+0.1<line_sep><return>v<block_end><def_stmt>_get_scales <block_start><return>(1/4 1/8 1/16 1/32 1/64)<block_end><def_stmt>_get_hs num_channels<block_start>hs=[]<for_stmt>h,w [(200 272) (100 136) (50 68) (25 34) (13 17)]<block_start>hs.append(aranges(1 num_channels h w))<block_end><return>hs<block_end><def_stmt>_get_rpn_locs_confs <block_start>locs=[]<line_sep>confs=[]<for_stmt>i [163200 40800 10200 2550 663]<block_start>locs.append(aranges(1 i 4))<line_sep>confs.append(aranges(1 i))<block_end><return>locs confs<block_end><def_stmt>chainercv_test_rpn_decode test_name<block_start>rpn=chainercv_rpn.RPN(_get_scales())<line_sep>hs=_get_hs(1)<line_sep>locs,confs=_get_rpn_locs_confs()<line_sep>anchors=rpn.anchors(h.shape[2:]<for>h hs)<line_sep>in_shape=(1 3 800 1088)<line_sep>rois,roi_indices=rpn.decode([chainer.Variable(l)<for>l locs] [chainer.Variable(c)<for>c confs] anchors in_shape)<line_sep>gb=onnx_script.GraphBuilder(test_name)<line_sep>hs_v=[gb.input('hs_%d'%i h)<for>i,h enumerate(hs)]<line_sep>locs_v=[gb.input('loc_%d'%i l)<for>i,l enumerate(locs)]<line_sep>confs_v=[gb.input('conf_%d'%i c)<for>i,c enumerate(confs)]<line_sep>in_shape_v=gb.input('in_shape' np.array(in_shape))<line_sep>rois_v='rois'<line_sep>roi_indices_v='roi_indices'<line_sep>gb.ChainerDoSomething(hs_v+locs_v+confs_v+[in_shape_v] outputs=[rois_v roi_indices_v] function_name='ChainerCVRPNDecode')<line_sep>gb.output(rois_v rois)<line_sep>gb.output(roi_indices_v roi_indices)<line_sep>gb.gen_test()<block_end><class_stmt>TestCase(test_case.TestCase)<block_start><def_stmt>__init__ self name func **kwargs<block_start>super(TestCase self).__init__('out' name **kwargs)<line_sep>self.func=func<block_end><block_end><def_stmt>get_tests <block_start><if_stmt><not>_has_chnainercv<block_start><return>[]<block_end>tests=[]<def_stmt>test name func **kwargs<block_start>tests.append(TestCase(name func **kwargs))<block_end>test('chainercv_test_rpn_decode' chainercv_test_rpn_decode)<line_sep><return>tests<block_end>
# Generated by Django 2.0.4 on 2018-04-21 15:39

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('description', models.TextField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.TextField(max_length=500)),
                ('created_date', models.DateField(auto_now=True)),
                ('created_time', models.TimeField(auto_now=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('created_date', 'created_time'),
            },
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('details', models.TextField(max_length=1000)),
                ('venue', models.CharField(max_length=50)),
                ('date', models.DateField(help_text='Please use the following format: <em>YYYY-MM-DD</em>.')),
                ('time', models.TimeField()),
                ('attendees', models.ManyToManyField(blank=True, related_name='attending', to=settings.AUTH_USER_MODEL)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='events.Category')),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'events',
                'verbose_name': 'event',
            },
        ),
        migrations.AddField(
            model_name='comment',
            name='event',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='events.Event'),
        ),
    ]
from lib.actions import BaseAction


class ArticleCreate(BaseAction):
    def run(self, title, body, topic=None, status=0):
        if topic:
            topic = self._convert_slug(topic)
            path = '/topics/%s/articles' % topic
        else:
            path = '/articles'
        payload = self._create_article(title=title, body=body, status=status)
        response = self._api_post(path, json=payload)
        return response

    def _create_article(self, title, body, status=0):
        payload = {'article': {'title': title, 'body': body, 'status': int(status)}}
        return payload
from rest_framework import routers

from kitsune.questions.api import QuestionViewSet, AnswerViewSet

router = routers.SimpleRouter()
router.register(r"question", QuestionViewSet)
router.register(r"answer", AnswerViewSet)
urlpatterns = router.urls
"""These tests ensure that when running in App Engine standard with the App Engine sandbox enabled that urllib3 appropriately uses the App Engine-patched version of httplib to make requests."""<import_stmt>httplib<import_stmt>StringIO<import_from_stmt>mock patch<import_stmt>pytest<import_from_stmt>..test_no_ssl TestWithoutSSL<class_stmt>MockResponse(object)<block_start><def_stmt>__init__ self content status_code content_was_truncated final_url headers<block_start>self.content=content<line_sep>self.status_code=status_code<line_sep>self.content_was_truncated=content_was_truncated<line_sep>self.final_url=final_url<line_sep>self.header_msg=httplib.HTTPMessage(StringIO.StringIO("".join(["%s: %s\n"%(k v)<for>k,v headers.iteritems()]+["\n"])))<line_sep>self.headers=headers<block_end><block_end>@pytest.mark.usefixtures("sandbox")<class_stmt>TestHTTP(TestWithoutSSL)<block_start><def_stmt>test_urlfetch_called_with_http self<block_start>"""Check that URLFetch is used to fetch non-https resources."""<line_sep>resp=MockResponse("OK" 200 <false> "http://www.google.com" {"content-type":"text/plain"})<line_sep>fetch_patch=patch("google.appengine.api.urlfetch.fetch" return_value=resp)<with_stmt>fetch_patch<as>fetch_mock<block_start><import_stmt>urllib3<line_sep>pool=urllib3.HTTPConnectionPool("www.google.com" "80")<line_sep>r=pool.request("GET" "/")<assert_stmt>r.status<eq>200 r.data<assert_stmt>fetch_mock.call_count<eq>1<block_end><block_end><block_end>@pytest.mark.usefixtures("sandbox")<class_stmt>TestHTTPS(object)<block_start>@pytest.mark.xfail(reason="This is not yet supported by urlfetch, presence of the ssl "<concat>"module will bypass urlfetch.")<def_stmt>test_urlfetch_called_with_https self<block_start>""" Check that URLFetch is used when fetching https resources """<line_sep>resp=MockResponse("OK" 200 <false> "https://www.google.com" {"content-type":"text/plain"})<line_sep>fetch_patch=patch("google.appengine.api.urlfetch.fetch" return_value=resp)<with_stmt>fetch_patch<as>fetch_mock<block_start><import_stmt>urllib3<line_sep>pool=urllib3.HTTPSConnectionPool("www.google.com" "443")<line_sep>pool.ConnectionCls=urllib3.connection.UnverifiedHTTPSConnection<line_sep>r=pool.request("GET" "/")<assert_stmt>r.status<eq>200 r.data<assert_stmt>fetch_mock.call_count<eq>1<block_end><block_end><block_end>
import FWCore.ParameterSet.Config as cms

#
# module for filtering of rechits. user provides noise threshold in GeV units
# Author: <NAME>, University of Rome & INFN
#
rechitFilter = cms.EDProducer("RecHitFilter",
                              noiseEnergyThreshold=cms.double(0.08),
                              noiseChi2Threshold=cms.double(40),
                              hitCollection=cms.InputTag('EcalRecHit', 'EcalRecHitsEB'),
                              reducedHitCollection=cms.string('FilteredEcalRecHitCollection'))
import math

import torch
import torch.nn as nn
from mmcv.cnn.bricks.registry import ATTENTION
from mmcv.runner.base_module import BaseModule


@ATTENTION.register_module()
class DGCNNAttn(BaseModule):
    """A wrapper for DGCNN-type self-attention.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads. Same as
            `nn.MultiheadAttention`.
        dropout (float): A Dropout layer on attn_output_weights. Default: 0..
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    def __init__(self, embed_dims, num_heads, dropout=0., init_cfg=None, **kwargs):
        super(DGCNNAttn, self).__init__(init_cfg)
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.dropout = dropout
        self.conv1 = nn.Sequential(
            nn.Conv2d(self.embed_dims * 2, self.embed_dims, kernel_size=1, bias=False),
            nn.BatchNorm2d(self.embed_dims),
            nn.ReLU(inplace=True))
        self.conv2 = nn.Sequential(
            nn.Conv2d(self.embed_dims * 2, self.embed_dims, kernel_size=1, bias=False),
            nn.BatchNorm2d(self.embed_dims),
            nn.ReLU(inplace=True))
        self.K = kwargs['K']
        self.dropout = nn.Dropout(dropout)

    def forward(self, query, key=None, value=None, residual=None,
                query_pos=None, key_pos=None, attn_mask=None,
                key_padding_mask=None, **kwargs):
        """Forward function for `DGCNN`.

        **kwargs allow passing a more general data flow when combining
        with other operations in `DGCNN`.

        Args:
            query (Tensor): The input query with shape [num_queries, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
            residual (Tensor): This tensor, with the same shape as x,
                will be used for the residual link. If None, `x` will be
                used. Defaults to None.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. If not None, it will be added to
                `x` before forward function. Defaults to None.

        Returns:
            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
        """
        if residual is None:
            residual = query
        if query_pos is not None:
            query = query + query_pos

        query = query.permute(1, 0, 2)  # [bs, num_queries, embed_dims]
        edge_feats = self.edge_feats(query, K=self.K)
        edge_feats1 = self.conv1(edge_feats)
        edge_feats1 = edge_feats1.max(dim=-1)[0]
        out = edge_feats1
        edge_feats1 = self.edge_feats(edge_feats1.permute(0, 2, 1))
        edge_feats2 = self.conv2(edge_feats1)
        edge_feats2 = edge_feats2.max(dim=-1)[0]
        out = out + edge_feats2
        out = out.permute(2, 0, 1)
        return residual + self.dropout(out)

    def edge_feats(self, query, K=16):
        # pairwise distances between queries: (B, N, N)
        affinity = torch.cdist(query, query)
        # indices of the K nearest neighbours: (B, N, K)
        _, topk = torch.topk(affinity, k=K, dim=2)
        B, N, C = query.size()
        idx_base = torch.arange(0, B, device=query.device).view(-1, 1, 1) * N
        idx = topk + idx_base
        idx = idx.view(-1)
        query = query.reshape(B * N, C)
        query_neighbor = query[idx, :].view(B, N, K, C)
        query = query.reshape(B, N, 1, C).repeat(1, 1, K, 1)
        out = torch.cat((query_neighbor, query), dim=-1).permute(0, 3, 1, 2).contiguous()
        return out
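A shape sanity sketch for the module above; it assumes torch and mmcv are installed, and the query/batch sizes are illustrative.

attn = DGCNNAttn(embed_dims=256, num_heads=8, dropout=0.1, K=16)
query = torch.randn(900, 2, 256)   # [num_queries, bs, embed_dims]
out = attn(query)                   # residual defaults to query itself
assert out.shape == (900, 2, 256)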
import os
import sys
import atexit
import unittest
import tempfile

import async
import _async
import socket

from socket import (
    AF_INET,
    SOCK_STREAM,
)


def tcpsock():
    return socket.socket(AF_INET, SOCK_STREAM)


CHARGEN = [
    r""" !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefg""",
    r"""!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh""",
    r""""#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghi""",
    r"""#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghij""",
    r"""$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijk""",
]

QOTD = 'An apple a day keeps the doctor away.\r\n'

ECHO_HOST = ('echo.snakebite.net', 7)
QOTD_HOST = ('qotd.snakebite.net', 17)
DISCARD_HOST = ('discard.snakebite.net', 9)
DAYTIME_HOST = ('daytime.snakebite.net', 13)
CHARGEN_HOST = ('chargen.snakebite.net', 19)

SERVICES_IP = socket.getaddrinfo(*ECHO_HOST)[0][4][0]

ECHO_IP = (SERVICES_IP, 7)
DISCARD_IP = (SERVICES_IP, 9)
DAYTIME_IP = (SERVICES_IP, 13)
CHARGEN_IP = (SERVICES_IP, 19)

NO_CB = None
NO_EB = None

HOST = '1192.168.3.11'
ADDR = (HOST, 0)

TEMPDIR = None


def rmtempdir():
    if TEMPDIR:
        TEMPDIR.cleanup()


def _tempfile():
    # helper; distinct name so it does not shadow the tempfile module it uses
    global TEMPDIR
    if not TEMPDIR:
        TEMPDIR = tempfile.TemporaryDirectory()
        assert os.path.isdir(TEMPDIR.name)
        atexit.register(rmtempdir)
    assert os.path.isdir(TEMPDIR.name)
    f = tempfile.NamedTemporaryFile(dir=TEMPDIR.name, delete=False)
    assert os.path.isfile(f.name)
    return f


def tempfilename():
    f = _tempfile()
    f.close()
    return f.name


class TestBasic(unittest.TestCase):
    def test_calling_run_with_no_events_fails(self):
        self.assertRaises(AsyncRunCalledWithoutEventsError, _async.run_once)


class TestSubmitWork(unittest.TestCase):

    def test_submit_simple_work(self):
        def f(i):
            return i * 2

        def cb(r):
            _async.call_from_main_thread(self.assertEqual, (r, 4))

        _async.submit_work(f, 2, None, cb, None)
        _async.run()

    def test_value_error_in_callback(self):
        def f():
            return laksjdflaskjdflsakjdfsalkjdf

        _async.submit_work(f, None, None, None, None)
        self.assertRaises(NameError, _async.run)

    def test_value_error_in_callback_then_run(self):
        def f():
            return laksjdflaskjdflsakjdfsalkjdf

        _async.submit_work(f, None, None, None, None)
        self.assertRaises(NameError, _async.run)
        _async.run()

    def test_multiple_value_errors_in_callback_then_run(self):
        def f():
            return laksjdflaskjdflsakjdfsalkjdf

        _async.submit_work(f, None, None, None, None)
        _async.submit_work(f, None, None, None, None)
        self.assertRaises(NameError, _async.run)
        self.assertRaises(NameError, _async.run)
        _async.run()

    def test_call_from_main_thread(self):
        d = {}

        def f(i):
            _async.call_from_main_thread_and_wait(d.__setitem__, ('foo', i * 2))
            return _async.call_from_main_thread_and_wait(d.__getitem__, 'foo')

        def cb(r):
            _async.call_from_main_thread(self.assertEqual, (r, 4))

        _async.submit_work(f, 2, None, cb, None)
        _async.run()

    def test_call_from_main_thread_decorator(self):
        @async.call_from_main_thread
        def f():
            self.assertFalse(_async.is_parallel_thread)

        _async.submit_work(f, None, None, None, None)
        _async.run()

    def test_submit_simple_work_errback_invoked(self):
        def f():
            return laksjdflaskjdflsakjdfsalkjdf

        def test_e(et, ev, eb):
            try:
                f()
            except NameError as e2:
                self.assertEqual(et, e2.__class__)
                self.assertEqual(ev, e2.args[0])
                self.assertEqual(eb.__class__, e2.__traceback__.__class__)
            else:
                self.assertEqual(0, 1)

        def cb(r):
            _async.call_from_main_thread(self.assertEqual, (0, 1))

        def eb(e):
            _async.call_from_main_thread_and_wait(test_e, e)

        _async.submit_work(f, None, None, cb, eb)
        _async.run()


class TestSubmitFileIO(unittest.TestCase):
    def test_write(self):
        n = tempfilename()
        f = open(n, 'w')
        _async.submit_io(f.write, b'foo', None, None, None)
        _async.run()
        f.close()
        with open(n, 'r') as f:
            self.assertEqual(f.read(), b'foo')

    def test_read(self):
        @async.call_from_main_thread
        def cb(d):
            self.assertEqual(d, b'foo')

        n = tempfilename()
        with open(n, 'w') as f:
            f.write(b'foo')
        f = open(n, 'r')
        _async.submit_io(f.read, None, None, cb, None)
        _async.run()


class TestConnectSocketIO(unittest.TestCase):
    def test_backlog(self):
        sock = tcpsock()
        port = sock.bind(ADDR)
        sock.listen(100)
        self.assertEqual(sock.backlog, 100)
        sock.close()

    def test_connect(self):
        @async.call_from_main_thread
        def cb():
            self.assertEqual(1, 1)

        sock = tcpsock()
        _async.connect(sock, DISCARD_IP, 1, None, cb, NO_EB)
        _async.run()

    def test_connect_with_data(self):
        @async.call_from_main_thread
        def cb(sock):
            self.assertEqual(1, 1)

        sock = tcpsock()
        _async.connect(sock, DISCARD_IP, 1, b'buf', cb, NO_EB)
        _async.run()

    def test_connect_then_recv(self):
        @async.call_from_main_thread
        def _check(data):
            self.assertEqual(data, QOTD)

        def read_cb(sock, data):
            _check(data)

        def connect_cb(sock):
            _async.recv(sock, read_cb, NO_EB)

        sock = tcpsock()
        _async.connect(sock, QOTD_IP, 1, None, connect_cb, NO_EB)
        _async.run()

    def test_connect_with_data_then_recv(self):
        @async.call_from_main_thread
        def _check(data):
            self.assertEqual(data, b'hello')

        def read_cb(sock, data):
            _check(data)

        def connect_cb(sock):
            _async.recv(sock, read_cb, NO_EB)

        sock = tcpsock()
        _async.connect(sock, ECHO_IP, 1, b'hello', connect_cb, NO_EB)
        _async.run()

    def test_connect_then_send_then_recv(self):
        @async.call_from_main_thread
        def _check(data):
            self.assertEqual(data, b'hello')

        def read_cb(sock, data):
            _check(data)

        def connect_cb(sock):
            _async.recv(sock, read_cb, NO_EB)
            _async.send(sock, b'hello', NO_CB, NO_EB)

        sock = tcpsock()
        _async.connect(sock, ECHO_IP, 1, None, connect_cb, NO_EB)
        _async.run()

    def test_recv_before_connect_with_data_then_recv(self):
        @async.call_from_main_thread
        def _check(data):
            self.assertEqual(data, b'hello')

        def read_cb(sock, data):
            _check(data)

        sock = tcpsock()
        _async.recv(sock, read_cb, NO_EB)
        _async.connect(sock, ECHO_IP, 1, b'hello', NO_CB, NO_EB)
        _async.run()

    def test_recv_before_connect_then_send_then_recv(self):
        @async.call_from_main_thread
        def _check(data):
            self.assertEqual(data, b'hello')

        def read_cb(sock, data):
            _check(data)

        def connect_cb(sock):
            _async.send(sock, b'hello', NO_CB, NO_EB)

        sock = tcpsock()
        _async.recv(sock, read_cb, NO_EB)
        _async.connect(sock, ECHO_IP, 1, None, connect_cb, NO_EB)
        _async.run()


class TestAcceptSocketIO(unittest.TestCase):
    def test_accept(self):
        @async.call_from_main_thread
        def new_connection(sock, data):
            self.assertEqual(data, b'hello')

        sock = tcpsock()
        port = sock.bind(ADDR)
        addr = sock.getsockname()
        sock.listen(1)
        _async.accept(sock, new_connection, NO_EB)

        client = tcpsock()
        _async.connect(client, addr, 1, b'hello', NO_CB, NO_EB)
        _async.run()
        sock.close()

    def test_accept_backlog2(self):
        counter = 0

        @async.call_from_main_thread
        def new_connection(sock, data):
            nonlocal counter
            self.assertEqual(data, b'hello')
            counter += 1

        sock = tcpsock()
        port = sock.bind(ADDR)
        addr = sock.getsockname()
        sock.listen(2)
        _async.accept(sock, new_connection, NO_EB)

        client = tcpsock()
        _async.connect(client, addr, 2, b'hello', NO_CB, NO_EB)
        _async.run()
        self.assertEqual(counter, 2)


if __name__ == '__main__':
    unittest.main()

# vim:set ts=8 sw=4 sts=4 tw=78 et:
<import_stmt>unittest<import_from_stmt>vehicle_info_after VehicleInfo<class_stmt>TestVehicleInfoMethods(unittest.TestCase)<block_start><pass><line_sep># def test_compute_tax_non_electric(self): # v = VehicleInfo("BMW", False, 10000) # self.assertEqual(v.compute_tax(), 500) # def test_compute_tax_electric(self): # v = VehicleInfo("BMW", True, 10000) # self.assertEqual(v.compute_tax(), 200) # def test_compute_tax_exemption(self): # v = VehicleInfo("BMW", False, 10000) # self.assertEqual(v.compute_tax(5000), 250) # def test_compute_tax_exemption_negative(self): # v = VehicleInfo("BMW", False, 10000) # self.assertRaises(ValueError, v.compute_tax, -5000) # def test_compute_tax_exemption_high(self): # v = VehicleInfo("BMW", False, 10000) # self.assertEqual(v.compute_tax(20000), 0) # def test_can_lease_false(self): # v = VehicleInfo("BMW", False, 10000) # self.assertEqual(v.can_lease(5000), False) # def test_can_lease_true(self): # v = VehicleInfo("BMW", False, 10000) # self.assertEqual(v.can_lease(15000), True) # def test_can_lease_negative_income(self): # v = VehicleInfo("BMW", False, 10000) # self.assertRaises(ValueError, v.can_lease, -5000) <block_end># run the actual unittests unittest.main()<line_sep>
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- <import_from_stmt>azure.cli.command_modules.lab.validators validate_artifacts validate_template_id<import_from_stmt>azure.cli.core.util get_json_object<def_stmt>load_arguments self _<block_start><with_stmt>self.argument_context('lab custom-image create')<as>c<block_start>c.argument('name' options_list=['--name' '-n'])<block_end><with_stmt>self.argument_context('lab vm create')<as>c<block_start>c.argument('name' options_list=['--name' '-n'])<line_sep># Authentication related arguments <for_stmt>arg_name ['admin_username' 'admin_password' 'authentication_type' 'ssh_key' 'generate_ssh_keys' 'saved_secret']<block_start>c.argument(arg_name arg_group='Authentication')<block_end>c.argument('generate_ssh_keys' action='store_true')<line_sep># Add Artifacts from json object c.argument('artifacts' type=get_json_object)<line_sep># Image related arguments c.ignore('os_type' 'gallery_image_reference' 'custom_image_id')<line_sep># Network related arguments <for_stmt>arg_name ['ip_configuration' 'subnet' 'vnet_name']<block_start>c.argument(arg_name arg_group='Network')<block_end>c.ignore('lab_subnet_name' 'lab_virtual_network_id' 'disallow_public_ip_address' 'network_interface')<line_sep># Creating a VM in a different location than the lab is an officially unsupported scenario c.ignore('location')<line_sep>c.argument('allow_claim' action='store_true')<block_end><with_stmt>self.argument_context('lab vm list')<as>c<block_start><for_stmt>arg_name ['filters' 'all' 'claimable' 'environment']<block_start>c.argument(arg_name arg_group='Filter')<block_end><for_stmt>arg_name ['all' 'claimable']<block_start>c.argument(arg_name action='store_true')<block_end><block_end><with_stmt>self.argument_context('lab vm claim')<as>c<block_start>c.argument('name' options_list=['--name' '-n'] id_part='child_name_1')<line_sep>c.argument('lab_name' id_part='name')<block_end><with_stmt>self.argument_context('lab vm apply-artifacts')<as>c<block_start>c.argument('artifacts' type=get_json_object validator=validate_artifacts)<line_sep>c.argument('name' options_list=['--name' '-n'])<block_end><with_stmt>self.argument_context('lab formula')<as>c<block_start>c.argument('name' options_list=['--name' '-n'])<block_end><with_stmt>self.argument_context('lab secret')<as>c<block_start><import_from_stmt>azure.mgmt.devtestlabs.models Secret<line_sep>c.argument('name' options_list=['--name' '-n'])<line_sep>c.argument('secret' options_list=['--value'] type=<lambda>x:Secret(value=x))<line_sep>c.ignore('user_name')<block_end><with_stmt>self.argument_context('lab formula export-artifacts')<as>c# Exporting artifacts does not need expand filter <block_start>c.ignore('expand')<block_end><with_stmt>self.argument_context('lab environment')<as>c<block_start>c.argument('name' options_list=['--name' '-n'])<line_sep>c.ignore('user_name')<block_end><with_stmt>self.argument_context('lab environment create')<as>c<block_start>c.argument('arm_template' validator=validate_template_id)<line_sep>c.argument('parameters' type=get_json_object)<block_end><with_stmt>self.argument_context('lab arm-template')<as>c<block_start>c.argument('name' options_list=['--name' '-n'])<block_end><with_stmt>self.argument_context('lab arm-template 
show')<as>c<block_start>c.argument('export_parameters' action='store_true')<block_end><block_end>
<import_stmt>os<import_stmt>requests<import_stmt>requests_cache<import_stmt>config<import_from_stmt>templates.text TextTemplate<line_sep>WORDS_API_KEY=os.environ.get('WORDS_API_KEY' config.WORDS_API_KEY)<def_stmt>process input entities<block_start>output={}<try_stmt><block_start>word=entities['word'][0]['value']<with_stmt>requests_cache.enabled('dictionary_cache' backend='sqlite' expire_after=86400)<block_start>r=requests.get('https://wordsapiv1.p.mashape.com/words/'+word+'/definitions' headers={'X-Mashape-Key':WORDS_API_KEY})<line_sep>data=r.json()<block_end>output['input']=input<line_sep>output['output']=TextTemplate('Definition of '+word+':\n'+data['definitions'][0]['definition']).get_message()<line_sep>output['success']=<true><block_end><except_stmt>Exception# catch lookup/network failures without swallowing system exits <block_start>error_message='I couldn\'t find that definition.'<line_sep>error_message<augadd>'\nPlease ask me something else, like:'<line_sep>error_message<augadd>'\n - define comfort'<line_sep>error_message<augadd>'\n - cloud definition'<line_sep>error_message<augadd>'\n - what does an accolade mean?'<line_sep>output['error_msg']=TextTemplate(error_message).get_message()<line_sep>output['success']=<false><block_end><return>output<block_end>
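# Hedged usage sketch (not part of the original module): the entity payload # shape mirrors the entities['word'][0]['value'] lookup in process() above; # the sample phrase and word are arbitrary, and a failed API call simply # takes the except branch and still returns a result dict. <if_stmt>__name__<eq>'__main__'<block_start>demo=process('define comfort' {'word':[{'value':'comfort'}]})<line_sep>print(demo['success'])<block_end>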
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: proto/core/auth/signed_message.proto """Generated protocol buffer code."""<line_sep># third party <import_from_stmt>google.protobuf descriptor<as>_descriptor<import_from_stmt>google.protobuf descriptor_pool<as>_descriptor_pool<import_from_stmt>google.protobuf message<as>_message<import_from_stmt>google.protobuf reflection<as>_reflection<import_from_stmt>google.protobuf symbol_database<as>_symbol_database<line_sep># @@protoc_insertion_point(imports) _sym_db=_symbol_database.Default()<line_sep># third party <import_from_stmt>google.protobuf empty_pb2<as>google_dot_protobuf_dot_empty__pb2<line_sep># syft absolute <import_from_stmt>syft.proto.core.common common_object_pb2<as>proto_dot_core_dot_common_dot_common__object__pb2 <line_sep>DESCRIPTOR=_descriptor_pool.Default().AddSerializedFile(b'\n$proto/core/auth/signed_message.proto\x12\x0esyft.core.auth\x1a%proto/core/common/common_object.proto\x1a\x1bgoogle/protobuf/empty.proto"\x80\x01\n\rSignedMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x10\n\x08obj_type\x18\x02 \x01(\t\x12\x11\n\tsignature\x18\x03 \x01(\x0c\x12\x12\n\nverify_key\x18\x04 \x01(\x0c\x12\x0f\n\x07message\x18\x05 \x01(\x0c"\x1f\n\tVerifyKey\x12\x12\n\nverify_key\x18\x01 \x01(\x0c"0\n\tVerifyAll\x12#\n\x03\x61ll\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3')<line_sep>_SIGNEDMESSAGE=DESCRIPTOR.message_types_by_name["SignedMessage"]<line_sep>_VERIFYKEY=DESCRIPTOR.message_types_by_name["VerifyKey"]<line_sep>_VERIFYALL=DESCRIPTOR.message_types_by_name["VerifyAll"]<line_sep>SignedMessage=_reflection.GeneratedProtocolMessageType("SignedMessage" (_message.Message ) {"DESCRIPTOR":_SIGNEDMESSAGE "__module__":"proto.core.auth.signed_message_pb2"# @@protoc_insertion_point(class_scope:syft.core.auth.SignedMessage) } )<line_sep>_sym_db.RegisterMessage(SignedMessage)<line_sep>VerifyKey=_reflection.GeneratedProtocolMessageType("VerifyKey" (_message.Message ) {"DESCRIPTOR":_VERIFYKEY "__module__":"proto.core.auth.signed_message_pb2"# @@protoc_insertion_point(class_scope:syft.core.auth.VerifyKey) } )<line_sep>_sym_db.RegisterMessage(VerifyKey)<line_sep>VerifyAll=_reflection.GeneratedProtocolMessageType("VerifyAll" (_message.Message ) {"DESCRIPTOR":_VERIFYALL "__module__":"proto.core.auth.signed_message_pb2"# @@protoc_insertion_point(class_scope:syft.core.auth.VerifyAll) } )<line_sep>_sym_db.RegisterMessage(VerifyAll)<if_stmt>_descriptor._USE_C_DESCRIPTORS<eq><false><block_start>DESCRIPTOR._options=<none><line_sep>_SIGNEDMESSAGE._serialized_start=125<line_sep>_SIGNEDMESSAGE._serialized_end=253<line_sep>_VERIFYKEY._serialized_start=255<line_sep>_VERIFYKEY._serialized_end=286<line_sep>_VERIFYALL._serialized_start=288<line_sep>_VERIFYALL._serialized_end=336<block_end># @@protoc_insertion_point(module_scope)
# Copyright (c) 2020 Foundry. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## <import_from_future_stmt> print_function<import_stmt>sys<import_stmt>os<import_stmt>time<import_stmt>scipy.misc<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>tensorflow<as>tf<line_sep>tf.compat.v1.disable_eager_execution()# For TF 2.x compatibility <import_from_stmt>models.baseModel BaseModel<import_from_stmt>models.common.model_builder baseline_model<import_from_stmt>models.common.util print_ get_ckpt_list linear_to_srgb srgb_to_linear<import_stmt>message_pb2<class_stmt>Model(BaseModel)<block_start>"""Load your trained model and do inference in Nuke"""<def_stmt>__init__ self<block_start>super(Model self).__init__()<line_sep>self.name='Regression Template TF'<line_sep>self.n_levels=3<line_sep>self.scale=0.5<line_sep>dir_path=os.path.dirname(os.path.realpath(__file__))<line_sep>self.checkpoints_dir=os.path.join(dir_path 'checkpoints')<line_sep>self.patch_size=50<line_sep>self.output_param_number=1<line_sep># Initialise checkpoint name to the latest checkpoint ckpt_names=get_ckpt_list(self.checkpoints_dir)<if_stmt><not>ckpt_names# empty list <block_start>self.checkpoint_name=''<block_end><else_stmt><block_start>latest_ckpt=tf.compat.v1.train.latest_checkpoint(self.checkpoints_dir)<if_stmt>latest_ckpt<is><not><none><block_start>self.checkpoint_name=latest_ckpt.split('/')[-1]<block_end><else_stmt><block_start>self.checkpoint_name=ckpt_names[-1]<block_end><block_end>self.prev_ckpt_name=self.checkpoint_name<line_sep># Silence TF log when creating tf.Session() os.environ['TF_CPP_MIN_LOG_LEVEL']='2'<line_sep># Define options self.gamma_to_predict=1.0<line_sep>self.predict=<false><line_sep>self.options=('checkpoint_name' 'gamma_to_predict' )<line_sep>self.buttons=('predict' )<line_sep># Define inputs/outputs self.inputs={'input':3}<line_sep>self.outputs={'output':3}<block_end><def_stmt>load self model# Check if empty or invalid checkpoint name <block_start><if_stmt>self.checkpoint_name<eq>''<block_start>ckpt_names=get_ckpt_list(self.checkpoints_dir)<if_stmt><not>ckpt_names<block_start><raise>ValueError("No checkpoints found in {}".format(self.checkpoints_dir))<block_end><else_stmt><block_start><raise>ValueError("Empty checkpoint name, try an available checkpoint in {} (ex: {})".format(self.checkpoints_dir ckpt_names[-1]))<block_end><block_end>print_("Loading trained model checkpoint...\n" 'm')<line_sep># Load from given checkpoint file name self.saver.restore(self.sess os.path.join(self.checkpoints_dir self.checkpoint_name))<line_sep>print_("...Checkpoint {} loaded\n".format(self.checkpoint_name) 'm')<block_end><def_stmt>inference self image_list<block_start>"""Do an inference on the model with a set of inputs. # Arguments: image_list: The input image list Return the result of the inference. 
"""<line_sep>image=image_list[0]<line_sep>image=linear_to_srgb(image).copy()<if_stmt><not>hasattr(self 'sess')# Initialise tensorflow graph <block_start>tf.compat.v1.reset_default_graph()<line_sep>config=tf.compat.v1.ConfigProto()<line_sep>config.gpu_options.allow_growth=<true><line_sep>self.sess=tf.compat.v1.Session(config=config)<line_sep># Input is stacked histograms of original and gamma-graded images. input_shape=[1 2 100]<line_sep># Initialise input placeholder size self.input=tf.compat.v1.placeholder(tf.float32 shape=input_shape)<line_sep>self.model=baseline_model(input_shape=input_shape[1:] output_param_number=self.output_param_number)<line_sep>self.infer_op=self.model(self.input)<line_sep># Load latest model checkpoint self.saver=tf.compat.v1.train.Saver()<line_sep>self.load(self.model)<line_sep>self.prev_ckpt_name=self.checkpoint_name<block_end># If checkpoint name has changed, load new checkpoint <if_stmt>self.prev_ckpt_name<ne>self.checkpoint_name<or>self.checkpoint_name<eq>''<block_start>self.load(self.model)<line_sep># If checkpoint correctly loaded, update previous checkpoint name self.prev_ckpt_name=self.checkpoint_name<block_end># Preprocess image same way we preprocessed it for training # Here for gamma correction compute histograms <def_stmt>histogram x value_range=[0.0 1.0] nbins=100<block_start>"""Return histogram of tensor x"""<line_sep>h,w,c=x.shape<line_sep>hist=tf.histogram_fixed_width(x value_range nbins=nbins)<line_sep>hist=tf.divide(hist h<times>w<times>c)<line_sep><return>hist<block_end><with_stmt>tf.compat.v1.Session()<as>sess# Convert to grayscale <block_start>img_gray=tf.image.rgb_to_grayscale(image)<line_sep>img_gray=tf.image.resize(img_gray [self.patch_size self.patch_size])<line_sep># Apply gamma correction img_gray_grade=tf.math.pow(img_gray self.gamma_to_predict)<line_sep>img_grade=tf.math.pow(image self.gamma_to_predict)<line_sep># Compute histograms img_hist=histogram(img_gray)<line_sep>img_grade_hist=histogram(img_gray_grade)<line_sep>hists_op=tf.stack([img_hist img_grade_hist] axis=0)<line_sep>hists,img_grade=sess.run([hists_op img_grade])<line_sep>res_img=srgb_to_linear(img_grade)<block_end>hists_batch=np.expand_dims(hists 0)<line_sep>start=time.time()<line_sep># Run model inference inference=self.sess.run(self.infer_op feed_dict={self.input:hists_batch})<line_sep>duration=time.time()-start<line_sep>print('Inference duration: {:4.3f}s'.format(duration))<line_sep>res=inference[-1]<line_sep>print("Predicted gamma: {}".format(res))<line_sep># If predict button is pressed in Nuke <if_stmt>self.predict<block_start>script_msg=message_pb2.FieldValuePairAttrib()<line_sep>script_msg.name="PythonScript"<line_sep># Create a Python script message to run in Nuke python_script=self.nuke_script(res)<line_sep>script_msg_val=script_msg.values.add()<line_sep>script_msg_str=script_msg_val.string_attributes.add()<line_sep>script_msg_str.values.extend([python_script])<line_sep><return>[res_img script_msg]<block_end><return>[res_img]<block_end><def_stmt>nuke_script self res<block_start>"""Return the Python script function to create a pop up window in Nuke."""<line_sep>popup_msg="Predicted gamma: {}".format(res)<line_sep>script="nuke.message('{}')\n".format(popup_msg)<line_sep><return>script<block_end><block_end>
<import_stmt>logging<import_from_stmt>easyprocess EasyProcess<import_from_stmt>pyscreenshot.plugins.backend CBackend<import_from_stmt>pyscreenshot.tempexport RunProgError read_func_img<import_from_stmt>pyscreenshot.util extract_version<line_sep>log=logging.getLogger(__name__)<line_sep>PROGRAM="xwd"<line_sep># wikipedia: https://en.wikipedia.org/wiki/Xwd # xwd | xwdtopnm | pnmtopng > Screenshot.png # xwdtopnm is buggy: https://bugs.launchpad.net/ubuntu/+source/netpbm-free/+bug/1379480 # solution : imagemagick convert # xwd -root -display :0 | convert xwd:- file.png # TODO: xwd sometimes grabs the wrong window, so this backend will not be added for now <def_stmt>read_xwd_img <block_start><def_stmt>run_prog fpng bbox=<none><block_start>fxwd=fpng+".xwd"<line_sep>pxwd=EasyProcess([PROGRAM "-root" "-out" fxwd])<line_sep>pxwd.call()<if_stmt>pxwd.return_code<ne>0<block_start><raise>RunProgError(pxwd.stderr)<block_end>pconvert=EasyProcess(["convert" "xwd:"+fxwd fpng])<line_sep>pconvert.call()<if_stmt>pconvert.return_code<ne>0<block_start><raise>RunProgError(pconvert.stderr)<block_end><block_end>im=read_func_img(run_prog)<line_sep><return>im<block_end><class_stmt>XwdWrapper(CBackend)<block_start>name="xwd"<line_sep>is_subprocess=<true><def_stmt>grab self bbox=<none><block_start>im=read_xwd_img()<if_stmt>bbox<block_start>im=im.crop(bbox)<block_end><return>im<block_end><def_stmt>backend_version self<block_start><return>extract_version(EasyProcess([PROGRAM "-version"]).call().stdout)<block_end><block_end>
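# Hedged smoke test (not part of the original module): it assumes a running X # display plus the xwd and ImageMagick convert binaries on PATH, and that # read_func_img returns a PIL-style image object with crop() and save(). <if_stmt>__name__<eq>'__main__'<block_start>img=XwdWrapper().grab()<line_sep>img.save('xwd_grab.png')<block_end>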
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_stmt>os<import_stmt>time<import_stmt>collections<import_stmt>typing<as>tp<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>.optimizerlib registry<import_from_stmt>. test_optimizerlib<line_sep>KEY="NEVERGRAD_SPECIAL_TESTS"<if_stmt><not>os.environ.get(KEY "")<block_start>pytest.skip(f"These tests only run if {KEY} is set in the environment" allow_module_level=<true>)<block_end>@pytest.mark.parametrize("dimension" (2 4 7 77))@pytest.mark.parametrize("num_workers" (1 ))@pytest.mark.parametrize("scale" (4.0 ))@pytest.mark.parametrize("baseline" ["MetaModel" "CMA" "ECMA"])@pytest.mark.parametrize("budget" [400 4000])@pytest.mark.parametrize("ellipsoid" [<true> <false>])<def_stmt>test_metamodel_sqp_chaining dimension:int num_workers:int scale:float budget:int ellipsoid:bool baseline:str<arrow><none><block_start>"""The test can operate on the sphere or on an elliptic function."""<line_sep>target=test_optimizerlib.QuadFunction(scale=scale ellipse=ellipsoid)<line_sep>baseline=baseline<if>dimension<g>1<else>"OnePlusOne"<line_sep>chaining="ChainMetaModelSQP"<line_sep># In both cases we compare MetaModel and CMA for a same given budget. # But we expect MetaModel to be clearly better only for a larger budget in the ellipsoid case. contextual_budget=budget<if>ellipsoid<else>3<times>budget<line_sep>contextual_budget<augmul>5<times>int(max(1 np.sqrt(scale)))<line_sep>num_trials=27<line_sep>successes=0.0<line_sep>durations:tp.Dict[str float]=collections.defaultdict(int)<for_stmt>_ range(num_trials)<block_start><if_stmt>successes<ge>num_trials/2<block_start><break><block_end># Let us run the comparison. recoms:tp.Dict[str np.ndarray]={}<for_stmt>name (chaining baseline)<block_start>opt=registry[name](dimension contextual_budget num_workers=num_workers)<line_sep>t0=time.time()<line_sep>recoms[name]=opt.minimize(target).value<line_sep>durations[name]<augadd>time.time()-t0<block_end><if_stmt>target(recoms[baseline])<l>target(recoms[chaining])<block_start>successes<augadd>1<block_end><if_stmt>target(recoms[baseline])<eq>target(recoms[chaining])<block_start>successes<augadd>0.5<block_end><block_end><if_stmt>successes<le>num_trials<floordiv>2<block_start>print(f"ChainMetaModelSQP fails ({successes}/{num_trials}) for d={dimension}, scale={scale}, "<concat>f"num_workers={num_workers}, ellipsoid={ellipsoid}, budget={budget}, vs {baseline}")<line_sep><raise>AssertionError("ChainMetaModelSQP fails by performance.")<block_end>print(f"ChainMetaModelSQP wins for d={dimension}, scale={scale}, num_workers={num_workers}, "<concat>f"ellipsoid={ellipsoid}, budget={budget}, vs {baseline}")<assert_stmt>durations[chaining]<l>7<times>durations[baseline] "Computationally more than 7x more expensive."<block_end>@pytest.mark.parametrize("args" test_optimizerlib.get_metamodel_test_settings(special=<true>))@pytest.mark.parametrize("baseline" ("CMA" "ECMA"))<def_stmt>test_metamodel_special baseline:str args:tp.Tuple[tp.Any <ellipsis>]<arrow><none><block_start>"""The test can operate on the sphere or on an elliptic function."""<line_sep>kwargs=dict(zip(test_optimizerlib.META_TEST_ARGS args))<line_sep>test_optimizerlib.check_metamodel(baseline=baseline **kwargs)<block_end>
<import_stmt>numpy<as>np<import_from_stmt>pyquil.gates RZ RX I CZ ISWAP CPHASE<import_from_stmt>pyquil.noise_gates _get_qvm_noise_supported_gates THETA<def_stmt>test_get_qvm_noise_supported_gates_from_compiler_isa compiler_isa<block_start>gates=_get_qvm_noise_supported_gates(compiler_isa)<for_stmt>q [0 1 2]<block_start><for_stmt>g [I(q) RX(np.pi/2 q) RX(-np.pi/2 q) RX(np.pi q) RX(-np.pi q) RZ(THETA q) ]<block_start><assert_stmt>g<in>gates<block_end><block_end><assert_stmt>CZ(0 1)<in>gates<assert_stmt>CZ(1 0)<in>gates<assert_stmt>ISWAP(1 2)<in>gates<assert_stmt>ISWAP(2 1)<in>gates<assert_stmt>CPHASE(THETA 2 0)<in>gates<assert_stmt>CPHASE(THETA 0 2)<in>gates<block_end>ASPEN_8_QUBITS_NO_RX={8 9 10 18 19 28 29 31}<line_sep>ASPEN_8_QUBITS_NO_RZ={8 9 10 18 19 28 29 31}<line_sep>ASPEN_8_EDGES_NO_CZ={(0 1) (10 11) (1 2) (21 22) (17 10) (12 25)}<def_stmt>test_get_qvm_noise_supported_gates_from_aspen8_isa qcs_aspen8_quantum_processor noise_model_dict<block_start>gates=_get_qvm_noise_supported_gates(qcs_aspen8_quantum_processor.to_compiler_isa())<for_stmt>q range(len(qcs_aspen8_quantum_processor._isa.architecture.nodes))<block_start><if_stmt>q<not><in>ASPEN_8_QUBITS_NO_RX<block_start><for_stmt>g [RX(np.pi/2 q) RX(-np.pi/2 q) RX(np.pi q) RX(-np.pi q) ]<block_start><assert_stmt>g<in>gates<block_end><block_end><if_stmt>q<not><in>ASPEN_8_QUBITS_NO_RZ<block_start><assert_stmt>RZ(THETA q)<in>gates<block_end><block_end><for_stmt>edge qcs_aspen8_quantum_processor._isa.architecture.edges<block_start><if_stmt>(edge.node_ids[0] edge.node_ids[1] )<in>ASPEN_8_EDGES_NO_CZ<block_start><continue><block_end><assert_stmt>CZ(edge.node_ids[0] edge.node_ids[1])<in>gates<assert_stmt>CZ(edge.node_ids[1] edge.node_ids[0])<in>gates<block_end><block_end>
"""logging utils for the downloader"""<import_stmt>wandb<import_stmt>time<import_from_stmt>collections Counter<import_stmt>fsspec<import_stmt>json<import_from_stmt>multiprocessing Process Queue<import_stmt>queue<class_stmt>CappedCounter<block_start>"""Maintain a counter with a capping to avoid memory issues"""<def_stmt>__init__ self max_size=10<power>5<block_start>self.max_size=max_size<line_sep>self.counter=Counter()<block_end><def_stmt>increment self key<block_start><if_stmt>len(self.counter)<ge>self.max_size<block_start>self._keep_most_frequent()<block_end>self.counter[key]<augadd>1<block_end><def_stmt>_keep_most_frequent self<block_start>self.counter=Counter(dict(self.counter.most_common(int(self.max_size/2))))<block_end><def_stmt>most_common self k<block_start><return>self.counter.most_common(k)<block_end><def_stmt>update self counter<block_start>self.counter.update(counter.counter)<if_stmt>len(self.counter)<ge>self.max_size<block_start>self._keep_most_frequent()<block_end><block_end><def_stmt>dump self<block_start><return>self.counter<block_end>@classmethod<def_stmt>load cls d max_size=10<power>5<block_start>c=CappedCounter(max_size)<line_sep>c.counter=Counter(d)<line_sep><return>c<block_end><block_end><class_stmt>Logger<block_start>"""logger which logs when number of calls reaches a value or a time interval has passed"""<def_stmt>__init__ self processes_count=1 min_interval=0<block_start>"""Log only every processes_count and if min_interval (seconds) have elapsed since last log"""<line_sep># wait for all processes to return self.processes_count=processes_count<line_sep>self.processes_returned=0<line_sep># min time (in seconds) before logging a new table (avoids too many logs) self.min_interval=min_interval<line_sep>self.last=time.perf_counter()<line_sep># keep track of whether we logged the last call self.last_call_logged=<false><line_sep>self.last_args=<none><line_sep>self.last_kwargs=<none><block_end><def_stmt>__call__ self *args **kwargs<block_start>self.processes_returned<augadd>1<if_stmt>self.processes_returned%self.processes_count<eq>0<and>time.perf_counter()-self.last<g>self.min_interval<block_start>self.do_log(*args **kwargs)<line_sep>self.last=time.perf_counter()<line_sep>self.last_call_logged=<true><block_end><else_stmt><block_start>self.last_call_logged=<false><line_sep>self.last_args=args<line_sep>self.last_kwargs=kwargs<block_end><block_end><def_stmt>do_log self *args **kwargs<block_start><raise>NotImplementedError()<block_end><def_stmt>sync self<block_start>"""Ensure last call is logged"""<if_stmt><not>self.last_call_logged<block_start>self.do_log(*self.last_args **self.last_kwargs)<line_sep># reset for next file self.processes_returned=0<block_end><block_end><block_end><class_stmt>SpeedLogger(Logger)<block_start>"""Log performance metrics"""<def_stmt>__init__ self prefix enable_wandb **logger_args<block_start>super().__init__(**logger_args)<line_sep>self.prefix=prefix<line_sep>self.start=time.perf_counter()<line_sep>self.count=0<line_sep>self.success=0<line_sep>self.failed_to_download=0<line_sep>self.failed_to_resize=0<line_sep>self.enable_wandb=enable_wandb<block_end><def_stmt>__call__ self duration count success failed_to_download failed_to_resize# pylint: disable=arguments-differ <block_start>self.count<augadd>count<line_sep>self.success<augadd>success<line_sep>self.failed_to_download<augadd>failed_to_download<line_sep>self.failed_to_resize<augadd>failed_to_resize<line_sep>super().__call__(duration self.count self.success self.failed_to_download 
self.failed_to_resize)<block_end><def_stmt>do_log self duration count success failed_to_download failed_to_resize# pylint: disable=arguments-differ <block_start>img_per_sec=count/duration<line_sep>success_ratio=1.0<times>success/count<line_sep>failed_to_download_ratio=1.0<times>failed_to_download/count<line_sep>failed_to_resize_ratio=1.0<times>failed_to_resize/count<line_sep>print(" - ".join([f"{self.prefix:<7}" f"success: {success_ratio:.3f}" f"failed to download: {failed_to_download_ratio:.3f}" f"failed to resize: {failed_to_resize_ratio:.3f}" f"images per sec: {img_per_sec:.0f}" f"count: {count}" ]))<if_stmt>self.enable_wandb<block_start>wandb.log({f"{self.prefix}/img_per_sec":img_per_sec f"{self.prefix}/success":success_ratio f"{self.prefix}/failed_to_download":failed_to_download_ratio f"{self.prefix}/failed_to_resize":failed_to_resize_ratio f"{self.prefix}/count":count })<block_end><block_end><block_end><class_stmt>StatusTableLogger(Logger)<block_start>"""Log status table to W&B, up to `max_status` most frequent items"""<def_stmt>__init__ self max_status=100 min_interval=60 enable_wandb=<false> **logger_args<block_start>super().__init__(min_interval=min_interval **logger_args)<line_sep># avoids too many errors unique to a specific website (SSL certificates, etc) self.max_status=max_status<line_sep>self.enable_wandb=enable_wandb<block_end><def_stmt>do_log self status_dict count# pylint: disable=arguments-differ <block_start><if_stmt>self.enable_wandb<block_start>status_table=wandb.Table(columns=["status" "frequency" "count"] data=[[k 1.0<times>v/count v]<for>k,v status_dict.most_common(self.max_status)] )<line_sep>wandb.run.log({"status":status_table})<block_end><block_end><block_end><def_stmt>write_stats output_folder shard_id count successes failed_to_download failed_to_resize start_time end_time status_dict oom_shard_count <block_start>"""Write stats to disk"""<line_sep>stats={"count":count "successes":successes "failed_to_download":failed_to_download "failed_to_resize":failed_to_resize "duration":end_time-start_time "status_dict":status_dict.dump() }<line_sep>fs,output_path=fsspec.core.url_to_fs(output_folder)<line_sep>shard_name="{shard_id:0{oom_shard_count}d}".format(shard_id=shard_id oom_shard_count=oom_shard_count)<line_sep>json_file=f"{output_path}/{shard_name}_stats.json"<with_stmt>fs.open(json_file "w")<as>f<block_start>json.dump(stats f indent=4)<block_end><block_end># https://docs.python.org/3/library/multiprocessing.html # logger process that reads stats files regularly, aggregates and send to wandb / print to terminal <class_stmt>LoggerProcess(Process)<block_start>"""Logger process that reads stats files regularly, aggregates and send to wandb / print to terminal"""<def_stmt>__init__ self output_folder enable_wandb wandb_project config_parameters processes_count log_interval=60<block_start>super().__init__()<line_sep>self.log_interval=log_interval<line_sep>self.enable_wandb=enable_wandb<line_sep>self.fs,self.output_path=fsspec.core.url_to_fs(output_folder)<line_sep>self.stats_files=set()<line_sep>self.wandb_project=wandb_project<line_sep>self.config_parameters=config_parameters<line_sep>self.processes_count=processes_count<line_sep>self.q=Queue()<block_end><def_stmt>run self<block_start>"""Run logger process"""<if_stmt>self.enable_wandb<block_start>self.current_run=wandb.init(project=self.wandb_project config=self.config_parameters anonymous="allow")<block_end><else_stmt><block_start>self.current_run=<none><block_end>self.total_speed_logger=SpeedLogger("total" 
processes_count=self.processes_count enable_wandb=self.enable_wandb)<line_sep>self.status_table_logger=StatusTableLogger(processes_count=self.processes_count enable_wandb=self.enable_wandb)<line_sep>start_time=time.perf_counter()<line_sep>last_check=0<line_sep>total_status_dict=CappedCounter()<while_stmt><true><block_start>time.sleep(0.1)<try_stmt><block_start>self.q.get(<false>)<line_sep>last_one=<true><block_end><except_stmt>queue.Empty<as>_<block_start>last_one=<false><block_end><if_stmt><not>last_one<and>time.perf_counter()-last_check<l>self.log_interval<block_start><continue><block_end><try_stmt># read stats files <block_start>stats_files=self.fs.glob(self.output_path+"/*.json")<line_sep># get new stats files new_stats_files=set(stats_files)-self.stats_files<if_stmt>len(new_stats_files)<eq>0<block_start><if_stmt>last_one<block_start>self.finish()<line_sep><return><block_end><block_end># read new stats files <for_stmt>stats_file new_stats_files<block_start><with_stmt>self.fs.open(stats_file "r")<as>f<block_start>stats=json.load(f)<line_sep>SpeedLogger("worker" enable_wandb=self.enable_wandb)(duration=stats["duration"] count=stats["count"] success=stats["successes"] failed_to_download=stats["failed_to_download"] failed_to_resize=stats["failed_to_resize"] )<line_sep>self.total_speed_logger(duration=time.perf_counter()-start_time count=stats["count"] success=stats["successes"] failed_to_download=stats["failed_to_download"] failed_to_resize=stats["failed_to_resize"] )<line_sep>status_dict=CappedCounter.load(stats["status_dict"])<line_sep>total_status_dict.update(status_dict)<line_sep>self.status_table_logger(total_status_dict self.total_speed_logger.count)<block_end>self.stats_files.add(stats_file)<block_end>last_check=time.perf_counter()<if_stmt>last_one<block_start>self.finish()<line_sep><return><block_end><block_end><except_stmt>Exception<as>e# pylint: disable=broad-except <block_start>print(e)<line_sep>self.finish()<line_sep><return><block_end><block_end><block_end><def_stmt>finish self<block_start>"""Finish logger process"""<line_sep>self.total_speed_logger.sync()<line_sep>self.status_table_logger.sync()<if_stmt>self.current_run<is><not><none><block_start>self.current_run.finish()<block_end><block_end><def_stmt>join self timeout=<none><block_start>"""Stop logger process"""<line_sep>self.q.put("stop")<line_sep>super().join()<line_sep>self.q.close()<block_end><block_end>
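# Hedged self-test for CappedCounter (not part of the original module): with # max_size=4, the fifth distinct key triggers _keep_most_frequent(), which # keeps only the max_size/2 most common keys before counting the new one. <if_stmt>__name__<eq>'__main__'<block_start>c=CappedCounter(max_size=4)<line_sep><for_stmt>key ['a' 'a' 'a' 'b' 'b' 'c' 'd']<block_start>c.increment(key)<block_end>c.increment('e')<line_sep>print(c.most_common(3))# [('a', 3), ('b', 2), ('e', 1)] <block_end>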
# Generated by Django 2.2.13 on 2020-09-30 13:14 <import_stmt>django.db.models.deletion<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('orders' '0006_PromoCodeComments') ]<line_sep>operations=[migrations.AddField(model_name='order' name='promocode' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.PROTECT to='orders.PromoCode') ) ]<block_end>
<import_stmt>subprocess<import_from_stmt>datetime datetime timedelta<import_from_stmt>i3pystatus IntervalModule<import_from_stmt>i3pystatus.core.desktop DesktopNotification<line_sep>STOPPED=0<line_sep>RUNNING=1<line_sep>BREAK=2<class_stmt>Pomodoro(IntervalModule)<block_start>""" This plugin shows Pomodoro timer. Left click starts/restarts timer. Right click stops it. Example color settings. .. code-block:: python color_map = { 'stopped': '#2ECCFA', 'running': '#FFFF00', 'break': '#37FF00' } """<line_sep>settings=(('sound' 'Path to sound file to play as alarm. Played by "aplay" utility') ('pomodoro_duration' 'Working (pomodoro) interval duration in seconds') ('break_duration' 'Short break duration in seconds') ('long_break_duration' 'Long break duration in seconds') ('short_break_count' 'Short break count before first long break') ('format' 'format string, available formatters: current_pomodoro, '<concat>'total_pomodoro, time') ('inactive_format' 'format string to display when no timer is running') ('color' 'dictionary containing a mapping of statuses to colours'))<line_sep>inactive_format='Start Pomodoro'<line_sep>color_map={'stopped':'#2ECCFA' 'running':'#FFFF00' 'break':'#37FF00'}<line_sep>color=<none><line_sep>sound=<none><line_sep>interval=1<line_sep>short_break_count=3<line_sep>format='☯ {current_pomodoro}/{total_pomodoro} {time}'<line_sep>pomodoro_duration=25<times>60<line_sep>break_duration=5<times>60<line_sep>long_break_duration=15<times>60<line_sep>on_rightclick="stop"<line_sep>on_leftclick="start"<def_stmt>init self# state could be either running/break or stopped <block_start>self.state=STOPPED<line_sep>self.current_pomodoro=0<line_sep>self.total_pomodoro=self.short_break_count+1# and 1 long break self.time=<none><if_stmt>self.color<is><not><none><and>type(self.color)<eq>dict<block_start>self.color_map.update(self.color)<block_end><block_end><def_stmt>run self<block_start><if_stmt>self.time<and>datetime.utcnow()<ge>self.time<block_start><if_stmt>self.state<eq>RUNNING<block_start>self.state=BREAK<if_stmt>self.current_pomodoro<eq>self.short_break_count<block_start>self.time=datetime.utcnow()+timedelta(seconds=self.long_break_duration)<block_end><else_stmt><block_start>self.time=datetime.utcnow()+timedelta(seconds=self.break_duration)<block_end>text='Go for a break!'<block_end><else_stmt><block_start>self.state=RUNNING<line_sep>self.time=datetime.utcnow()+timedelta(seconds=self.pomodoro_duration)<line_sep>text='Back to work!'<line_sep>self.current_pomodoro=(self.current_pomodoro+1)%self.total_pomodoro<block_end>self._alarm(text)<block_end><if_stmt>self.state<eq>RUNNING<or>self.state<eq>BREAK<block_start>min,sec=divmod((self.time-datetime.utcnow()).total_seconds() 60)<line_sep>text='{:02}:{:02}'.format(int(min) int(sec))<line_sep>sdict={'time':text 'current_pomodoro':self.current_pomodoro+1 'total_pomodoro':self.total_pomodoro}<line_sep>color=self.color_map['running']<if>self.state<eq>RUNNING<else>self.color_map['break']<line_sep>text=self.format.format(**sdict)<block_end><else_stmt><block_start>text=self.inactive_format<line_sep>color=self.color_map['stopped']<block_end>self.output={'full_text':text 'color':color}<block_end><def_stmt>start self<block_start>self.state=RUNNING<line_sep>self.time=datetime.utcnow()+timedelta(seconds=self.pomodoro_duration)<line_sep>self.current_pomodoro=0<block_end><def_stmt>stop self<block_start>self.state=STOPPED<line_sep>self.time=<none><block_end><def_stmt>_alarm self text<block_start>notification=DesktopNotification(title='Alarm!' 
body=text)<line_sep>notification.display()<if_stmt>self.sound<is><not><none><block_start>subprocess.Popen(['aplay' self.sound '-q'] stdout=subprocess.DEVNULL stderr=subprocess.DEVNULL)<block_end><block_end><block_end>
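# Hedged usage sketch (not part of the module itself): in an i3pystatus config # this module is typically registered by name with keyword settings, e.g. # from i3pystatus import Status # status = Status() # status.register('pomodoro', pomodoro_duration=25 * 60, break_duration=5 * 60) # status.run()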
<import_stmt>base64<import_stmt>json<import_stmt>re<import_stmt>unittest<import_stmt>boto3<import_from_stmt>tests get_version get_function_name is_local<import_from_stmt>tests.sam LocalLambdaServer start_local_lambda<class_stmt>TestRuntimeLayer(unittest.TestCase)<block_start>lambda_server:LocalLambdaServer=<none><line_sep>@classmethod<def_stmt>setUpClass cls<block_start><if_stmt>is_local()<block_start>cls.lambda_server=start_local_lambda(template_path="test-template.yaml" parameter_overrides={'Version':get_version()} )<block_end><block_end><def_stmt>get_client self<block_start><return>self.lambda_server.get_client()<if>is_local()<else>boto3.client('lambda')<block_end><def_stmt>test_script self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("ExampleFunction") Payload=json.dumps({'x':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>result=json.loads(raw_payload)<line_sep>self.assertEqual(2 result)<block_end><def_stmt>test_lowercase_extension self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("LowerCaseExtensionFunction") Payload=json.dumps({'x':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>result=json.loads(raw_payload)<line_sep>self.assertEqual(2 result)<block_end><def_stmt>test_multiple_arguments self<block_start>lambda_client=self.get_client()<line_sep>payload={'x':'bar' 'y':1}<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("MultipleArgumentsFunction") Payload=json.dumps(payload) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>result=json.loads(raw_payload)<line_sep>self.assertDictEqual(payload result)<block_end>@unittest.skipIf(is_local() 'Lambda local does not support log retrieval')<def_stmt>test_debug_logging self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("LoggingFunction") LogType='Tail' Payload=json.dumps({'x':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>result=json.loads(raw_payload)<line_sep>self.assertEqual(1 result)<line_sep>log=base64.b64decode(response['LogResult']).decode('utf-8')<line_sep>self.assertIn("runtime:Sourcing 'script.R'" log)<line_sep>self.assertIn("runtime:Invoking function 'handler_with_debug_logging' with parameters:\n$x\n[1] 1" log)<line_sep>self.assertIn("runtime:Function returned:\n[1] 1" log)<line_sep>self.assertIn("runtime:Posted result:\n" log)<block_end>@unittest.skipIf(is_local() 'Lambda local does not support log retrieval')<def_stmt>test_no_debug_logging self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("ExampleFunction") LogType='Tail' Payload=json.dumps({'x':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>result=json.loads(raw_payload)<line_sep>self.assertEqual(2 result)<line_sep>log=base64.b64decode(response['LogResult']).decode('utf-8')<line_sep>self.assertNotIn("Sourcing " log)<line_sep>self.assertNotIn("Invoking function " log)<line_sep>self.assertNotIn("Function returned:" log)<line_sep>self.assertNotIn("Posted result:" log)<block_end>@unittest.skipIf(is_local() 'Lambda local does not pass errors properly')<def_stmt>test_missing_source_file self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("MissingSourceFileFunction") 
Payload=json.dumps({'y':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>json_payload=json.loads(raw_payload)<line_sep>self.assertEqual('Unhandled' response['FunctionError'])<line_sep>self.assertIn('Source file does not exist: missing.[R|r]' json_payload['errorMessage'])<line_sep>self.assertEqual('simpleError' json_payload['errorType'])<block_end>@unittest.skipIf(is_local() 'Lambda local does not pass errors properly')<def_stmt>test_missing_function self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("MissingFunctionFunction") Payload=json.dumps({'y':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>json_payload=json.loads(raw_payload)<line_sep>self.assertEqual('Unhandled' response['FunctionError'])<line_sep>self.assertIn('Function "handler_missing" does not exist' json_payload['errorMessage'])<line_sep>self.assertEqual('simpleError' json_payload['errorType'])<block_end>@unittest.skipIf(is_local() 'Lambda local does not pass errors properly')<def_stmt>test_function_as_variable self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("HandlerAsVariableFunction") Payload=json.dumps({'y':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>json_payload=json.loads(raw_payload)<line_sep>self.assertEqual('Unhandled' response['FunctionError'])<line_sep>self.assertIn('Function "handler_as_variable" does not exist' json_payload['errorMessage'])<line_sep>self.assertEqual('simpleError' json_payload['errorType'])<block_end>@unittest.skipIf(is_local() 'Lambda local does not pass errors properly')<def_stmt>test_missing_argument self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("ExampleFunction"))<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>json_payload=json.loads(raw_payload)<line_sep>self.assertEqual('Unhandled' response['FunctionError'])<line_sep>self.assertIn('argument "x" is missing, with no default' json_payload['errorMessage'])<line_sep>self.assertEqual('simpleError' json_payload['errorType'])<block_end>@unittest.skipIf(is_local() 'Lambda local does not pass errors properly')<def_stmt>test_unused_argument self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("ExampleFunction") Payload=json.dumps({'x':1 'y':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>json_payload=json.loads(raw_payload)<line_sep>self.assertEqual('Unhandled' response['FunctionError'])<line_sep>self.assertIn('unused argument (y = 1)' json_payload['errorMessage'])<line_sep>self.assertEqual('simpleError' json_payload['errorType'])<block_end># @unittest.skipIf(is_local(), 'Fails locally with "argument list too long"') @unittest.skip('Fails with timeout')<def_stmt>test_long_argument self<block_start>lambda_client=self.get_client()<line_sep>payload={x:x<for>x range(0 100000)}<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("VariableArgumentsFunction") Payload=json.dumps(payload) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>result=json.loads(raw_payload)<line_sep>self.assertEqual(1 result)<block_end>@unittest.skipIf(is_local() 'Lambda local does not pass errors properly')<def_stmt>test_missing_library 
self<block_start>lambda_client=self.get_client()<line_sep>response=lambda_client.invoke(FunctionName=get_function_name("MissingLibraryFunction") Payload=json.dumps({'y':1}) )<line_sep>raw_payload=response['Payload'].read().decode('utf-8')<line_sep>json_payload=json.loads(raw_payload)<line_sep>self.assertEqual('Unhandled' response['FunctionError'])<line_sep>self.assertIn('there is no package called ‘Matrix’' json_payload['errorMessage'])<line_sep>error_type='packageNotFoundError'<if>get_version()<eq>'3_6_0'<else>'simpleError'<line_sep>self.assertEqual(error_type json_payload['errorType'])<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><if_stmt>is_local()<block_start>cls.lambda_server.kill()<block_end><block_end><block_end>
<import_stmt>os<import_stmt>sys<line_sep># train test1 test2 test3 <def_stmt>readtst tstfn<block_start>outlist=list()<with_stmt>open(tstfn)<as>br<block_start><for_stmt>aline br.readlines()<block_start>aline=aline.strip()<line_sep>outlist.append(aline)<block_end><block_end><return>outlist<block_end><def_stmt>split_train_tests_xml xmlpath test1fn test2fn test3fn<block_start>test1list=readtst(test1fn)<line_sep>test2list=readtst(test2fn)<line_sep>test3list=readtst(test3fn)<line_sep>outtrainlist=list()# full path ".xml.simp" files outt1list=list()# test 1, full path ".xml.simp" files outt2list=list()<line_sep>outt3list=list()<for_stmt>afile os.listdir(xmlpath)<block_start><if_stmt><not>afile.endswith('.xml.simp')<block_start><continue><block_end>afile2=xmlpath+'/'+afile<line_sep>aid=afile.split('.')[0]<if_stmt>aid<in>test1list<block_start>outt1list.append(afile2)<block_end><elif_stmt>aid<in>test2list<block_start>outt2list.append(afile2)<block_end><elif_stmt>aid<in>test3list<block_start>outt3list.append(afile2)<block_end><else_stmt><block_start>outtrainlist.append(afile2)<block_end><block_end><return>outtrainlist outt1list outt2list outt3list<block_end><def_stmt>all_wavs wavpath<block_start>wavlist=list()<for_stmt>afile os.listdir(wavpath)<block_start><if_stmt><not>afile.endswith('.wav')<block_start><continue><block_end>afile2=wavpath+'/'+afile<line_sep>wavlist.append(afile2)<block_end><return>wavlist<block_end><def_stmt>gen_text xmllist outpath# id \t text # e.g., /workspace/asr/wenet/examples/csj/s0/data/xml/S11M1689.xml.simp # ID = S11M1689_stime_etime <block_start>outtxtfn=os.path.join(outpath 'text')<with_stmt>open(outtxtfn 'w')<as>bw<block_start><for_stmt>xmlfn xmllist<block_start>aid=xmlfn.split('/')[-1]<line_sep>aid2=aid.split('.')[0]<with_stmt>open(xmlfn)<as>br<block_start><for_stmt>aline br.readlines()<block_start>aline=aline.strip()<line_sep># stime \t etime \t text1 \t text2 \t text3 \t text4 \t text5 cols=aline.split('\t')<line_sep># TODO: what is the difference between "< 7" and "< 4"? strange # -> use "< 4", DO NOT use "< 7" !
<if_stmt>len(cols)<l>4<block_start><continue><block_end>stime=cols[0]<line_sep>etime=cols[1]<line_sep>atxt=cols[3].replace(' ' '')<line_sep>afullid='{}_{}_{}'.format(aid2 stime etime)<line_sep>aoutline='{}\t{}\n'.format(afullid atxt)<line_sep>bw.write(aoutline)<block_end><block_end><block_end><block_end><block_end><def_stmt>parse_xml_set xmllist<block_start>outset=set()<for_stmt>xml xmllist<block_start>aid=xml.split('/')[-1]<line_sep>aid2=aid.split('.')[0]<line_sep>outset.add(aid2)<block_end><return>outset<block_end><def_stmt>gen_wav_scp xmllist wavlist outpath# xmlset = pure id set, alike 'S04F1228' # can be from train, test1, test2, or test3 <block_start>xmlset=parse_xml_set(xmllist)<line_sep>outwavscpfn=os.path.join(outpath 'wav.scp')<with_stmt>open(outwavscpfn 'w')<as>bw<block_start><for_stmt>wav wavlist# wav is alike "/workspace/asr/wenet/examples/csj/s0/data # /wav/S04F1228.wav_00458.875_00459.209.wav" <block_start>aid=wav.split('/')[-1]<line_sep>cols=aid.split('_')<line_sep>aid2=cols[0].split('.')[0]<if_stmt>aid2<not><in>xmlset<block_start><continue><block_end>stime=cols[1]<line_sep>etime=cols[2].replace('.wav' '')<line_sep>afullid='{}_{}_{}'.format(aid2 stime etime)<line_sep>wavabspath=os.path.abspath(wav)<line_sep>aoutline='{}\t{}\n'.format(afullid wavabspath)<line_sep>bw.write(aoutline)<block_end><block_end><block_end><def_stmt>prep_text_wavscp xmlpath wavpath test1fn test2fn test3fn outtrainpath out1path out2path out3path<block_start>trainlist,t1list,t2list,t3list=split_train_tests_xml(xmlpath test1fn test2fn test3fn)<line_sep>wavlist=all_wavs(wavpath)<line_sep>gen_text(trainlist outtrainpath)<line_sep>gen_text(t1list out1path)<line_sep>gen_text(t2list out2path)<line_sep>gen_text(t3list out3path)<line_sep>gen_wav_scp(trainlist wavlist outtrainpath)<line_sep>gen_wav_scp(t1list wavlist out1path)<line_sep>gen_wav_scp(t2list wavlist out2path)<line_sep>gen_wav_scp(t3list wavlist out3path)<block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt>len(sys.argv)<l>10<block_start>print("Usage: {}".format(sys.argv[0])+"<xmlpath> "+"<wavpath> <test1fn> <test2fn> <test3fn> "+"<outtrainpath> <out1path> <out2path> <out3path>")<line_sep>exit(1)<block_end>xmlpath=sys.argv[1]<line_sep>wavpath=sys.argv[2]<line_sep>test1fn=sys.argv[3]<line_sep>test2fn=sys.argv[4]<line_sep>test3fn=sys.argv[5]<line_sep>outtrainpath=sys.argv[6]<line_sep>out1path=sys.argv[7]<line_sep>out2path=sys.argv[8]<line_sep>out3path=sys.argv[9]<line_sep>prep_text_wavscp(xmlpath wavpath test1fn test2fn test3fn outtrainpath out1path out2path out3path)<block_end>
# struct backs the stack-argument unpacking below <import_stmt>struct<import_stmt>sys<import_from_stmt>unicorn.arm_const *<import_from_stmt>...util *<import_from_stmt>..fuzz fuzz_remaining get_fuzz<import_from_stmt>...models.i2c I2CModel<def_stmt>HAL_I2C_Init uc<block_start><pass><block_end><def_stmt>HAL_I2C_Mem_Read uc# HAL_StatusTypeDef __fastcall HAL_I2C_Mem_Read(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout) <block_start>device_id=uc.reg_read(UC_ARM_REG_R0)<line_sep>dev_addr=uc.reg_read(UC_ARM_REG_R1)<line_sep>mem_addr=uc.reg_read(UC_ARM_REG_R2)<line_sep>mem_addr_size=uc.reg_read(UC_ARM_REG_R3)<line_sep>dst_buf=struct.unpack("<I" uc.mem_read(uc.reg_read(UC_ARM_REG_SP) 4))[0]<line_sep>dst_buf_size=struct.unpack("<I" uc.mem_read(uc.reg_read(UC_ARM_REG_SP)+0x4 4))[0]<line_sep>timeout=struct.unpack("<I" uc.mem_read(uc.reg_read(UC_ARM_REG_SP)+0x8 4))[0]<assert_stmt>(dst_buf<ne>0)<assert_stmt>(dst_buf_size<l>1000)<assert_stmt>(mem_addr<l>65535)<assert_stmt>(dst_buf_size<ge>mem_addr_size)<line_sep>#stuff = I2CModel.rx(device_id, dev_addr, mem_addr_size) stuff=get_fuzz(mem_addr_size)<line_sep>uc.mem_write(dst_buf stuff)<line_sep>uc.reg_write(UC_ARM_REG_R0 0)<line_sep>print(b"<<< "+stuff)<block_end><def_stmt>HAL_I2C_Mem_Write uc<block_start>uc.reg_write(UC_ARM_REG_R0 0)<block_end>
<import_stmt>pandas<as>pd<import_stmt>multiprocessing<import_from_stmt>multiprocessing Pool<def_stmt>train index df<block_start><import_stmt>tensorflow<as>tf<import_stmt>keras<import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Dense Activation<line_sep>#------------------------------ #this block enables GPU enabled multiprocessing core_config=tf.ConfigProto()<line_sep>core_config.gpu_options.allow_growth=<true><line_sep>session=tf.Session(config=core_config)<line_sep>keras.backend.set_session(session)<line_sep>#------------------------------ #prepare input and output values df=df.drop(columns=['index'])<line_sep>data=df.drop(columns=['target']).values<line_sep>target=df['target']<line_sep>#------------------------------ model=Sequential()<line_sep>model.add(Dense(5#num of hidden units input_shape=(data.shape[1] )))<line_sep>#num of features in input layer model.add(Activation('sigmoid'))<line_sep>model.add(Dense(1))#number of nodes in output layer model.add(Activation('sigmoid'))<line_sep>model.compile(loss='mse' optimizer=keras.optimizers.Adam())<line_sep>#------------------------------ model.fit(data target epochs=5000 verbose=1)<line_sep>model.save("model_for_%s.hdf5"%index)<line_sep>#------------------------------ #finally, close sessions session.close()<line_sep>keras.backend.clear_session()<block_end>#----------------------------- #main program multiprocessing.set_start_method('spawn' force=<true>)<line_sep>df=pd.read_csv("dataset.csv")<line_sep>my_tuple=[(i df[df['index']<eq>i])<for>i range(0 20)]<with_stmt>Pool(10)<as>pool<block_start>pool.starmap(train my_tuple)<block_end>
<import_from_stmt>torch nn<import_stmt>torch.nn.functional<as>F<class_stmt>LeNet(nn.Module)<block_start>"""LeNet-like network for tests with MNIST (28x28)."""<def_stmt>__init__ self in_channels=1 num_classes=10 **kwargs<block_start>super().__init__()<line_sep># main part of the network self.conv1=nn.Conv2d(in_channels 6 5)<line_sep>self.conv2=nn.Conv2d(6 16 5)<line_sep>self.fc1=nn.Linear(16<times>16 120)<line_sep>self.fc2=nn.Linear(120 84)<line_sep># last classifier layer (head) with as many outputs as classes self.fc=nn.Linear(84 num_classes)<line_sep># and `head_var` with the name of the head, so it can be removed when doing incremental learning experiments self.head_var='fc'<block_end><def_stmt>forward self x<block_start>out=F.relu(self.conv1(x))<line_sep>out=F.max_pool2d(out 2)<line_sep>out=F.relu(self.conv2(out))<line_sep>out=F.max_pool2d(out 2)<line_sep>out=out.view(out.size(0) -1)<line_sep>out=F.relu(self.fc1(out))<line_sep>out=F.relu(self.fc2(out))<line_sep>out=self.fc(out)<line_sep><return>out<block_end><block_end>
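# Hedged shape check (not part of the original module): assuming the MNIST-sized # 28x28 single-channel input named in the docstring, the conv/pool stages reduce # each image to 16*4*4 = 256 features (matching fc1's 16*16 input), so a batch of # N images should yield N x num_classes logits. <if_stmt>__name__<eq>'__main__'<block_start><import_stmt>torch<line_sep>net=LeNet()<line_sep>logits=net(torch.randn(4 1 28 28))<line_sep>print(logits.shape)# torch.Size([4, 10]) <block_end>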
# Copyright (c) 2020 Agenium Scale # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # This file gives the implementation of platform ARM, i.e. ARM SIMD. # Reading this file is rather straightforward. ARM SIMD extensions are rather # coherent and consistent. It implements the following architectures: # - ARMv7 -> 128 bits registers without f16 and f64 support # - Aarch32 -> 128 bits registers with optional f16 and without f64 support # - Aarch64 -> 128 bits registers with optional f16 and f64 support # - SVE -> up to 2048 bits registers # The first three SIMD extensions are collectively called NEON. Aarch32 and # Aarch64 correspond respectively to ARMv8 32 and 64 bits chips. Note that # the ARM documentation says that ARMv7, Aarch32 are different but it seems # that they differ by only a handful of intrinsics which are not in the scope # of NSIMD so we have implemented the following: # # - ARMv7 \ -> neon128 # - Aarch32 / # - Aarch64 -> aarch64 # - SVE -> sve <import_stmt>common<line_sep># ----------------------------------------------------------------------------- # Helpers <def_stmt>neon_typ typ<block_start>prefix={'i':'int' 'u':'uint' 'f':'float'}<line_sep><return>'{}{}x{}_t'.format(prefix[typ[0]] typ[1:] 128<floordiv>int(typ[1:]))<block_end><def_stmt>half_neon64_typ typ<block_start>prefix={'i':'int' 'u':'uint' 'f':'float'}<line_sep><return>'{}{}x{}_t'.format(prefix[typ[0]] typ[1:] 64<floordiv>int(typ[1:]))<block_end><def_stmt>sve_typ typ<block_start>prefix={'i':'svint' 'u':'svuint' 'f':'svfloat'}<line_sep><return>'{}{}_t'.format(prefix[typ[0]] typ[1:])<block_end><def_stmt>suf typ<block_start><if_stmt>typ[0]<eq>'i'<block_start><return>'s{}'.format(typ[1:])<block_end><else_stmt><block_start><return>typ<block_end><block_end>neon=['neon128' 'aarch64']<line_sep>fixed_sized_sve=['sve128' 'sve256' 'sve512' 'sve1024' 'sve2048']<line_sep>sve=['sve']+fixed_sized_sve<line_sep>fmtspec={}<def_stmt>convert_from_predicate opts op<block_start><if_stmt>opts.sve_emulate_bool<block_start><return>'''svsel({op}, svdup_n_u{typnbits}_x({svtrue}, (u{typnbits})~0), svdup_n_u{typnbits}_x({svtrue}, 0))'''.format(op=op **fmtspec)<block_end><else_stmt><block_start><return>op<block_end><block_end><def_stmt>convert_to_predicate opts op<block_start><if_stmt>opts.sve_emulate_bool# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve # it needs to be deleted when the bug is corrected <block_start><return>'''svcmpeq({svtrue}, (svuint{typnbits}_t){op}, svdup_n_u{typnbits}_x({svtrue}, (u{typnbits})~0))'''.format(op=op 
**fmtspec)<block_end><else_stmt><block_start><return>op<block_end><block_end># ----------------------------------------------------------------------------- # Implementation of mandatory functions for this module <def_stmt>get_simd_exts <block_start><return>['neon128' 'aarch64' 'sve' 'sve128' 'sve256' 'sve512' 'sve1024' 'sve2048']<block_end><def_stmt>get_prev_simd_ext simd_ext<block_start><if_stmt>simd_ext<in>['neon128' 'aarch64']<block_start><return>'cpu'<block_end><elif_stmt>simd_ext<in>sve<block_start><return>'aarch64'<block_end><raise>ValueError('Unknown SIMD extension "{}"'.format(simd_ext))<block_end><def_stmt>emulate_fp16 simd_ext<block_start><if_stmt><not>simd_ext<in>get_simd_exts()<block_start><raise>ValueError('Unknown SIMD extension "{}"'.format(simd_ext))<block_end><if_stmt>simd_ext<in>sve<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><def_stmt>get_type opts simd_ext typ nsimd_typ<block_start><if_stmt>simd_ext<in>neon<block_start><if_stmt>typ<eq>'f64'<block_start><if_stmt>simd_ext<eq>'neon128'<block_start><return>'typedef struct {{ double v0; double v1; }} {};'.format(nsimd_typ)<block_end><else_stmt><block_start><return>'typedef {} {};'.format(neon_typ('f64') nsimd_typ)<block_end><block_end><elif_stmt>typ<eq>'f16'<block_start><return>''' #ifdef NSIMD_ARM_FP16 typedef float16x8_t {nsimd_typ}; #else typedef struct {{ float32x4_t v0; float32x4_t v1; }} {nsimd_typ}; #endif '''.format(nsimd_typ=nsimd_typ)<line_sep># extra \n are necessary <block_end><else_stmt><block_start><return>'typedef {} {};'.format(neon_typ(typ) nsimd_typ)<block_end><block_end><elif_stmt>simd_ext<eq>'sve'<block_start><return>'typedef {} {};'.format(sve_typ(typ) nsimd_typ)<block_end><elif_stmt>simd_ext<in>fixed_sized_sve<block_start><return>'typedef {} {} __attribute__((arm_sve_vector_bits({})));'.format(sve_typ(typ) nsimd_typ simd_ext[3:])<block_end><else_stmt><block_start><raise>ValueError('Unknown SIMD extension "{}"'.format(simd_ext))<block_end><block_end><def_stmt>get_logical_type opts simd_ext typ nsimd_typ<block_start><if_stmt>typ<not><in>common.types<block_start><raise>ValueError('Unknown type "{}"'.format(typ))<block_end><if_stmt>simd_ext<not><in>get_simd_exts()<block_start><raise>ValueError('Unknown SIMD extension "{}"'.format(simd_ext))<block_end><if_stmt>typ<in>common.ftypes+common.itypes<block_start>typ2='u{}'.format(typ[1:])<block_end><else_stmt><block_start>typ2=typ<block_end><if_stmt>simd_ext<eq>'neon128'<block_start><if_stmt>typ<eq>'f16'<block_start><return>''' #ifdef NSIMD_ARM_FP16 typedef uint16x8_t {nsimd_typ}; #else typedef struct {{ uint32x4_t v0; uint32x4_t v1; }} {nsimd_typ}; #endif '''.format(nsimd_typ=nsimd_typ)<line_sep># extra \n are necessary <block_end><elif_stmt>typ<eq>'f64'<block_start><return>'typedef struct {{ u64 v0; u64 v1; }} {};'.format(nsimd_typ)<block_end><else_stmt><block_start><return>get_type(opts simd_ext typ2 nsimd_typ)<block_end><block_end><if_stmt>simd_ext<eq>'aarch64'<block_start><if_stmt>typ<eq>'f16'<block_start><return>get_logical_type(opts 'neon128' 'f16' nsimd_typ)<block_end><else_stmt><block_start><return>get_type(opts simd_ext typ2 nsimd_typ)<block_end><block_end><elif_stmt>simd_ext<in>sve<block_start><if_stmt>opts.sve_emulate_bool<block_start><return>get_type(opts simd_ext 'u'+typ[1:] nsimd_typ)<block_end><elif_stmt>simd_ext<in>fixed_sized_sve<block_start><return>'typedef svbool_t {} __attribute__((arm_sve_vector_bits({})));'.format(nsimd_typ simd_ext[3:])<block_end><else_stmt><block_start><return>'typedef 
svbool_t {};'.format(nsimd_typ)<block_end><block_end><block_end><def_stmt>get_nb_registers simd_ext<block_start><if_stmt>simd_ext<in>neon<block_start><return>'16'<block_end><elif_stmt>simd_ext<in>sve<block_start><return>'32'<block_end><else_stmt><block_start><raise>ValueError('Unknown SIMD extension "{}"'.format(simd_ext))<block_end><block_end><def_stmt>get_native_soa_typ simd_ext typ deg<block_start>prefix={'i':'int' 'u':'uint' 'f':'float'}[typ[0]]<if_stmt>simd_ext<in>sve<block_start><return>'sv{}x{}_t'.format(prefix+typ[1:] deg)<block_end><else_stmt><block_start><return>'{}{}x{}x{}_t'.format(prefix typ[1:] 128<floordiv>int(typ[1:]) deg)<block_end><block_end><def_stmt>get_SoA_type simd_ext typ deg nsimd_typ<block_start><if_stmt>simd_ext<ne>'sve'<block_start><raise>ValueError('SIMD extension must be "sve"')<block_end>prefix={'i':'int' 'u':'uint' 'f':'float'}[typ[0]]<line_sep><return>'typedef {} {};'.format(get_native_soa_typ(simd_ext typ deg) nsimd_typ)<block_end><def_stmt>has_compatible_SoA_types simd_ext<block_start><if_stmt>simd_ext<not><in>neon+sve<block_start><raise>ValueError('Unknown SIMD extension "{}"'.format(simd_ext))<block_end><return><false><block_end># ----------------------------------------------------------------------------- <def_stmt>get_additional_include func platform simd_ext<block_start>ret='''#include <nsimd/cpu/cpu/{}.h> '''.format(func)<if_stmt>simd_ext<in>sve<block_start>ret<augadd>'''#include <nsimd/arm/aarch64/{}.h> '''.format(func)<block_end><if_stmt>func<in>['load2u' 'load3u' 'load4u' 'load2a' 'load3a' 'load4a']<block_start>deg=func[4]<line_sep>ret<augadd>'''#if NSIMD_CXX > 0 extern "C" {{ #endif NSIMD_INLINE nsimd_{simd_ext}_vu16x{deg} nsimd_{func}_{simd_ext}_u16(const u16*); # if NSIMD_CXX > 0 }} // extern "C" #endif '''.format(func=func deg=deg simd_ext=simd_ext)<block_end><if_stmt>func<in>['mask_storea1' 'mask_storeu1' 'masko_loada1' 'masko_loadu1' 'maskz_loada1' 'maskz_loadu1']<and>simd_ext<not><in>sve<block_start>ret<augadd>'''#include <nsimd/scalar_utilities.h> '''<block_end><if_stmt>func<eq>'mask_for_loop_tail'<and>simd_ext<not><in>sve<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/set1.h> #include <nsimd/arm/{simd_ext}/set1l.h> #include <nsimd/arm/{simd_ext}/iota.h> #include <nsimd/arm/{simd_ext}/lt.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>simd_ext<eq>'neon128'<and>func<eq>'notl'<block_start>ret<augadd>'''#include <nsimd/arm/neon128/notb.h> '''<block_end><if_stmt>simd_ext<in>neon<and>func<eq>'ne'<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/eq.h> # include <nsimd/arm/{simd_ext}/notl.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>simd_ext<in>neon<and>func<in>['fms' 'fnms']<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/ne.h> #include <nsimd/arm/{simd_ext}/fma.h> #include <nsimd/arm/{simd_ext}/fnma.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>func<eq>'shra'<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/shr.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>func<in>['loadlu' 'loadla']<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/eq.h> # include <nsimd/arm/{simd_ext}/set1.h> # include <nsimd/arm/{simd_ext}/{load}.h> # include <nsimd/arm/{simd_ext}/notl.h> '''.format(load='load'+func[5] simd_ext=simd_ext)<block_end><if_stmt>func<in>['storelu' 'storela']<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/if_else1.h> # include <nsimd/arm/{simd_ext}/set1.h> # include <nsimd/arm/{simd_ext}/{store}.h> '''.format(store='store'+func[6] 
simd_ext=simd_ext)<block_end><if_stmt>func<eq>'to_logical'<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/reinterpret.h> #include <nsimd/arm/{simd_ext}/ne.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>func<eq>'zip'<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/ziplo.h> #include <nsimd/arm/{simd_ext}/ziphi.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>func<eq>'unzip'<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/unziplo.h> #include <nsimd/arm/{simd_ext}/unziphi.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>func<eq>'adds'<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/add.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>func<eq>'subs'<block_start>ret<augadd>'''#include <nsimd/arm/{simd_ext}/sub.h> '''.format(simd_ext=simd_ext)<block_end><if_stmt>func<in>['gather' 'scatter']<and>simd_ext<eq>'sve'<block_start>ret<augadd>'''#include <nsimd/arm/sve/len.h> '''<block_end><return>ret<block_end># ----------------------------------------------------------------------------- # Emulators <def_stmt>emulate_op1 op simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start>le=128<floordiv>int(typ[1:])<line_sep><return>'''int i; {typ} buf[{le}]; vst1q_{suf}(buf, {in0}); for (i=0; i < {le}; i += nsimd_len_cpu_{typ}()) {{ nsimd_storeu_cpu_{typ}( & buf[i], nsimd_{op}_cpu_{typ}( nsimd_loadu_cpu_{typ}(&buf[i])));}} return vld1q_{suf}(buf); '''.format(op=op le=le **fmtspec)<block_end><if_stmt>simd_ext<in>sve<block_start>le=2048<floordiv>int(typ[1:])<line_sep><return>'''int i; {typ} buf[{le}]; svst1_{suf}({svtrue}, buf, {in0}); for (i=0; i < nsimd_len_{simd_ext}_{typ}(); i += nsimd_len_cpu_{typ}()) {{ nsimd_storeu_cpu_{typ}( & buf[i], nsimd_{op}_cpu_{typ}( nsimd_loadu_cpu_{typ}(&buf[i])));}} return svld1_{suf}({svtrue}, buf); '''.format(op=op le=le **fmtspec)<block_end><block_end><def_stmt>emulate_op2 op simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start>le=128<floordiv>int(typ[1:])<line_sep><return>'''int i; {typ} buf0[{le}], buf1[{le}]; vst1q_{suf}(buf0, {in0}); vst1q_{suf}(buf1, {in1}); for (i=0; i < {le}; i++) {{ buf0[i] = ({typ})(buf0[i] {op} buf1[i]);}} return vld1q_{suf}(buf0); '''.format(op=op le=le **fmtspec)<block_end><if_stmt>simd_ext<in>sve<block_start>le=2048<floordiv>int(typ[1:])<line_sep><return>'''int i; {typ} buf0[{le}], buf1[{le}]; svst1_{suf}({svtrue}, buf0, {in0}); svst1_{suf}({svtrue}, buf1, {in1}); for (i=0; i < nsimd_len_{simd_ext}_{typ}(); i++) {{ buf0[i] = ({typ})(buf0[i] {op} buf1[i]);}} return svld1_{suf}({svtrue}, buf0); '''.format(op=op le=le **fmtspec)<block_end><block_end><def_stmt>emulate_lop2_neon opts op simd_ext typ<block_start>le=128<floordiv>int(typ[1:])<line_sep>ltyp=get_logical_type(opts simd_ext typ)<line_sep>lsuf=suf(ltyp)<line_sep><return>'''int i; {ltyp} buf0[{le}], buf1[{le}]; vst1q_{lsuf}(buf0, {in0}); vst1q_{lsuf}(buf1, {in1}); for (i = 0; i < {le}; i++) {{ buf0[i] = buf0[i] {op} buf1[i] ? 
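/* per-lane scalar emulation: an all-ones lane encodes true, zero encodes false */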
({ltyp})-1 : 0; }} return vld1q_{lsuf}(buf0);'''.format(op=op le=le ltyp=ltyp lsuf=lsuf **fmtspec)<block_end><def_stmt>emulate_op3_neon op simd_ext typ<block_start>le=128<floordiv>int(typ[1:])<line_sep><return>'''int i; {typ} buf0[{le}], buf1[{le}], buf2[{le}]; vst1q_{suf}(buf0, {in0}); vst1q_{suf}(buf1, {in1}); vst1q_{suf}(buf2, {in2}); for (i = 0; i < {le}; i += nsimd_len_cpu_{typ}()) {{ nsimd_storeu_cpu_{typ}(&buf0[i], nsimd_{op}_cpu_{typ}( nsimd_loadu_cpu_{typ}(&buf0[i]), nsimd_loadu_cpu_{typ}(&buf1[i]), nsimd_loadu_cpu_{typ}(&buf2[i]))); }} return vld1q_{suf}(buf0);'''.format(op=op le=le **fmtspec)<block_end><def_stmt>emulate_f64_neon simd_ext op params<block_start>fmtspec2=fmtspec.copy()<line_sep>fmtspec2['op']=op<line_sep>fmtspec2['buf_ret_decl']='nsimd_cpu_{}f64 buf_ret;'.format('v'<if>params[0]<eq>'v'<else>'vl')<line_sep>fmtspec2['buf_decl']='\n'.join(['nsimd_cpu_{}f64 buf{};'.format('v'<if>p[1]<eq>'v'<else>'vl' p[0])<for>p common.enum(params[1:])])<line_sep>fmtspec2['bufs']=','.join(['buf{}'.format(i)<for>i range(0 len(params)-1)])<line_sep>fmtspec2['ret_decl']='nsimd_{}_{}f64 ret;'.format(simd_ext 'v'<if>params[0]<eq>'v'<else>'vl')<line_sep>buf_set='\n'.join('''buf{i}.v0 = {ini}.v0; buf{i}.v1 = {ini}.v1;'''.format(i=i ini=fmtspec['in{}'.format(i)])<for>i range(0 len(params)-1))<line_sep><return>'''{buf_ret_decl} {buf_decl} {ret_decl} {buf_set} buf_ret = nsimd_{op}_cpu_f64({bufs}); ret.v0 = buf_ret.v0; ret.v1 = buf_ret.v1; return ret;'''.format(buf_set=buf_set **fmtspec2)<block_end># ----------------------------------------------------------------------------- <def_stmt>f16f64 simd_ext typ op armop arity forced_intrinsics=''<block_start>fmtspec2=fmtspec.copy()<line_sep>tmpl=', '.join(['{{in{}}}.v{{{{i}}}}'.format(i).format(**fmtspec)<for>i range(0 arity)])<line_sep>fmtspec2['args1']=tmpl.format(i='0')<line_sep>fmtspec2['args2']=tmpl.format(i='1')<line_sep>fmtspec2['armop']=armop<line_sep>fmtspec2['op']=op<if_stmt>simd_ext<in>neon<and>typ<eq>'f16'<block_start><if_stmt>forced_intrinsics<ne>''<block_start>fmtspec2['intrinsics']=forced_intrinsics<block_end><else_stmt><block_start>temp=', '.join(['{{in{}}}'.format(i).format(**fmtspec)<for>i range(0 arity)])<line_sep>fmtspec2['intrinsics']='return v{}q_f16({});'.format(armop temp)<block_end><return>'''#ifdef NSIMD_ARM_FP16 {intrinsics} #else nsimd_{simd_ext}_vf16 ret; ret.v0 = nsimd_{op}_{simd_ext}_f32({args1}); ret.v1 = nsimd_{op}_{simd_ext}_f32({args2}); return ret; #endif'''.format(**fmtspec2)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>emulate_f64_neon(simd_ext op ['v']<times>(arity+1))<block_end><return>''<block_end># ----------------------------------------------------------------------------- # Lengths <def_stmt>max_len simd_ext typ<block_start><if_stmt>simd_ext<eq>'sve'<block_start><return>2048<floordiv>int(typ[1:])<block_end><elif_stmt>simd_ext<in>fixed_sized_sve<block_start><return>int(simd_ext[3:])<floordiv>int(typ[1:])<block_end><else_stmt><block_start><return>128<floordiv>int(typ[1:])<block_end><block_end><def_stmt>real_len simd_ext typ<block_start><if_stmt>simd_ext<eq>'sve'<block_start><return>'nsimd_len_sve_{typ}()'.format(**fmtspec)<block_end><else_stmt><block_start><return>max_len(simd_ext typ)<block_end><block_end># ----------------------------------------------------------------------------- # Loads of degree 1, 2, 3 and 4 <def_stmt>load1234 opts simd_ext typ deg<block_start><if_stmt>simd_ext<in>neon<block_start><if_stmt>deg<eq>1<block_start>normal='return 
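/* deg == 1 is a plain full-register load, e.g. vld1q_f32 for f32 */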
vld{deg}q_{suf}({in0});'.format(deg=deg **fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else /* Note that we can do much better but is it useful? */ nsimd_{simd_ext}_vf16 ret; f32 buf[4]; buf[0] = nsimd_u16_to_f32(*(u16*){in0}); buf[1] = nsimd_u16_to_f32(*((u16*){in0} + 1)); buf[2] = nsimd_u16_to_f32(*((u16*){in0} + 2)); buf[3] = nsimd_u16_to_f32(*((u16*){in0} + 3)); ret.v0 = vld1q_f32(buf); buf[0] = nsimd_u16_to_f32(*((u16*){in0} + 4)); buf[1] = nsimd_u16_to_f32(*((u16*){in0} + 5)); buf[2] = nsimd_u16_to_f32(*((u16*){in0} + 6)); buf[3] = nsimd_u16_to_f32(*((u16*){in0} + 7)); ret.v1 = vld1q_f32(buf); return ret; #endif'''.format(normal=normal **fmtspec)<block_end><elif_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = *{in0}; ret.v1 = *({in0} + 1); return ret;'''.format(**fmtspec)<block_end><else_stmt><block_start><return>normal<block_end><block_end><else_stmt><block_start>normal='''nsimd_{simd_ext}_v{typ}x{deg} ret; {soa_typ} buf = vld{deg}q_{suf}({in0}); {assignment} return ret;'''.format(deg=deg soa_typ=get_native_soa_typ(simd_ext typ deg) assignment='\n'.join(['ret.v{i} = buf.val[{i}];'.format(i=i)<for>i range(0 deg)]) **fmtspec)<if_stmt>typ<eq>'f16'<block_start>assignment='''vst1q_u16(buf, temp.val[{{i}}]); ret.v{{i}} = nsimd_loadu_{simd_ext}_f16((f16 *)buf);'''.format(**fmtspec)<line_sep><return>'''{soa_typ} temp = vld{deg}q_u16((u16 *){in0}); u16 buf[8]; nsimd_{simd_ext}_vf16x{deg} ret; {assignment} return ret;'''.format(deg=deg assignment='\n'.join([assignment.format(i=i)<for>i range(0 deg)]) soa_typ=get_native_soa_typ(simd_ext 'u16' deg) **fmtspec)<block_end><elif_stmt>typ<in>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'nsimd_neon128_vf64x{} ret;\n'.format(deg)+'\n'.join(['ret.v{i}.v0 = *({in0} + {i});'.format(i=i **fmtspec)<for>i range(0 deg)])+'\n'.join(['ret.v{i}.v1 = *({in0} + {ipd});'.format(i=i ipd=i+deg **fmtspec)<for>i range(0 deg)])+'\nreturn ret;\n'<block_end><elif_stmt>typ<in>['i64' 'u64']<and>simd_ext<eq>'neon128'<block_start><return>'''nsimd_neon128_v{typ}x{deg} ret; {typ} buf[2];'''.format(deg=deg **fmtspec)+'\n'.join(['''buf[0] = *({in0} + {i}); buf[1] = *({in0} + {ipd}); ret.v{i} = vld1q_{suf}(buf);'''.format(i=i ipd=i+deg **fmtspec)<for>i range(0 deg)])+'\nreturn ret;\n'<block_end><else_stmt><block_start><return>normal<block_end><block_end><block_end><else_stmt><block_start><if_stmt>deg<eq>1<block_start><return>'return svld{deg}_{suf}({svtrue}, {in0});'.format(deg=deg **fmtspec)<block_end><else_stmt><block_start><return>'''nsimd_{simd_ext}_v{typ}x{deg} ret; {sve_typ} buf = svld{deg}_{suf}({svtrue}, {in0}); {assignment} return ret;'''.format(assignment='\n'.join(['ret.v{i} = svget{deg}_{suf}(buf, {i});'.format(i=i deg=deg **fmtspec)<for>i range(deg)]) sve_typ=get_native_soa_typ('sve' typ deg) deg=deg **fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Mask loads <def_stmt>maskoz_load oz simd_ext typ<block_start><if_stmt>simd_ext<in>sve<block_start><return>'return svsel_{suf}({in0}, svld1_{suf}({in0}, {in1}), {oz});'.format(oz='{in2}'.format(**fmtspec)<if>oz<eq>'o'<else>'svdup_n_{suf}(({typ})0)'.format(**fmtspec) **fmtspec)<block_end><if_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'''nsimd_neon128_vf64 ret; if ({in0}.v0) {{ ret.v0 = {in1}[0]; }} else {{ ret.v0 = {oz0}; }} if ({in0}.v1) {{ ret.v1 = {in1}[1]; }} else {{ ret.v1 = {oz1}; }} return 
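/* neon128 has no f64 vector, so the mask is applied lane by lane in plain C */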
ret;'''.format(oz0='0.0f'<if>oz<eq>'z'<else>'{in2}.v0'.format(**fmtspec) oz1='0.0f'<if>oz<eq>'z'<else>'{in2}.v1'.format(**fmtspec) **fmtspec)<block_end>le=128<floordiv>int(typ[1:])<line_sep>normal='''int i; {typ} buf[{le}]; u{typnbits} mask[{le}]; vst1q_{suf}(buf, {oz}); vst1q_u{typnbits}(mask, {in0}); for (i = 0; i < {le}; i++) {{ if (mask[i]) {{ buf[i] = {in1}[i]; }} }} return vld1q_{suf}(buf);'''.format(oz='vdupq_n_{suf}(({typ})0)'.format(**fmtspec)<if>oz<eq>'z'<else>'{in2}'.format(**fmtspec) le=le **fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else int i; nsimd_{simd_ext}_vf16 ret; f32 buf[8]; u32 mask[8]; vst1q_f32(buf, {oz0}); vst1q_f32(buf + 4, {oz1}); vst1q_u32(mask, {in0}.v0); vst1q_u32(mask + 4, {in0}.v1); for (i = 0; i < 8; i++) {{ if (mask[i]) {{ buf[i] = nsimd_f16_to_f32({in1}[i]); }} }} ret.v0 = vld1q_f32(buf); ret.v1 = vld1q_f32(buf + 4); return ret; #endif'''.format(oz0='vdupq_n_f32(0.0f)'.format(**fmtspec)<if>oz<eq>'z'<else>'{in2}.v0'.format(**fmtspec) oz1='vdupq_n_f32(0.0f)'.format(**fmtspec)<if>oz<eq>'z'<else>'{in2}.v1'.format(**fmtspec) normal=normal **fmtspec)<block_end><return>normal<block_end># ----------------------------------------------------------------------------- # Stores of degree 1, 2, 3 and 4 <def_stmt>store1234 opts simd_ext typ deg<block_start><if_stmt>simd_ext<in>neon<block_start><if_stmt>deg<eq>1<block_start>normal='vst{deg}q_{suf}({in0}, {in1});'.format(deg=deg **fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else f32 buf[4]; vst1q_f32(buf, {in1}.v0); *((u16*){in0} ) = nsimd_f32_to_u16(buf[0]); *((u16*){in0} + 1) = nsimd_f32_to_u16(buf[1]); *((u16*){in0} + 2) = nsimd_f32_to_u16(buf[2]); *((u16*){in0} + 3) = nsimd_f32_to_u16(buf[3]); vst1q_f32(buf, {in1}.v1); *((u16*){in0} + 4) = nsimd_f32_to_u16(buf[0]); *((u16*){in0} + 5) = nsimd_f32_to_u16(buf[1]); *((u16*){in0} + 6) = nsimd_f32_to_u16(buf[2]); *((u16*){in0} + 7) = nsimd_f32_to_u16(buf[3]); #endif'''.format(normal=normal **fmtspec)<block_end><elif_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'''*{in0} = {in1}.v0; *({in0} + 1) = {in1}.v1;'''.format(**fmtspec)<block_end><else_stmt><block_start><return>normal<block_end><block_end><else_stmt><block_start>normal='''{soa_typ} buf; {assignment} vst{deg}q_{suf}({in0}, buf);'''.format(deg=deg assignment='\n'.join(['buf.val[{{}}] = {{in{}}};'.format(i).format(i-1 **fmtspec)<for>i range(1 deg+1)]) soa_typ=get_native_soa_typ(simd_ext typ deg) **fmtspec)<if_stmt>typ<eq>'f16'<block_start>assignment='''nsimd_storeu_{{simd_ext}}_f16((f16 *)buf, {{in{}}}); temp.val[{{}}] = vld1q_u16(buf);'''<line_sep><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else {soa_typ} temp; u16 buf[8]; {assignment} vst{deg}q_u16((u16 *){in0}, temp); #endif'''.format(assignment='\n'.join([assignment.format(i).format(i-1 **fmtspec)<for>i range(1 deg+1)]) deg=deg normal=normal soa_typ=get_native_soa_typ(simd_ext 'u16' deg) **fmtspec)<block_end><elif_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'\n'.join(['*({{in0}} + {}) = {{in{}}}.v0;'.format(i-1 i).format(**fmtspec)<for>i range(1 deg+1)])+'\n'+'\n'.join(['*({{in0}} + {}) = {{in{}}}.v1;'.format(i+deg-1 i).format(**fmtspec)<for>i range(1 deg+1)])<block_end><elif_stmt>typ<in>['i64' 'u64']<and>simd_ext<eq>'neon128'<block_start><return>'{typ} buf[{biglen}];'.format(biglen=2<times>deg **fmtspec)+'\n'.join(['vst1q_{{suf}}(buf + {im1x2}, {{in{i}}});'.format(im1x2=2<times>(i-1) i=i).format(**fmtspec)<for>i range(1 
deg+1)])+'\n'.join(['''*({in0} + {i}) = buf[{ix2}]; *({in0} + {ipd}) = buf[{ix2p1}];'''.format(i=i ipd=i+deg ix2=i<times>2 ix2p1=2<times>i+1 **fmtspec)<for>i range(0 deg)])<block_end><else_stmt><block_start><return>normal<block_end><block_end><block_end><else_stmt><block_start><if_stmt>deg<eq>1<block_start><return>'svst{deg}_{suf}({svtrue}, {in0}, {in1});'.format(deg=deg **fmtspec)<block_end>fill_soa_typ='\n'.join(['tmp = svset{{deg}}_{{suf}}(tmp, {im1}, {{in{i}}});'.format(im1=i-1 i=i).format(deg=deg **fmtspec)<for>i range(1 deg+1)])<line_sep><return>'''{soa_typ} tmp = svundef{deg}_{suf}(); {fill_soa_typ} svst{deg}_{suf}({svtrue}, {in0}, tmp);'''.format(soa_typ=get_native_soa_typ('sve' typ deg) deg=deg fill_soa_typ=fill_soa_typ **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Mask stores <def_stmt>mask_store simd_ext typ<block_start><if_stmt>simd_ext<in>sve<block_start><return>'svst1_{suf}({in0}, {in1}, {in2});'.format(**fmtspec)<block_end><if_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'''if ({in0}.v0) {{ {in1}[0] = {in2}.v0; }} if ({in0}.v1) {{ {in1}[1] = {in2}.v1; }}'''.format(**fmtspec)<block_end>le=128<floordiv>int(typ[1:])<line_sep>normal='''int i; {typ} buf[{le}]; u{typnbits} mask[{le}]; vst1q_{suf}(buf, {in2}); vst1q_u{typnbits}(mask, {in0}); for (i = 0; i < {le}; i++) {{ if (mask[i]) {{ {in1}[i] = buf[i]; }} }}'''.format(le=le **fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else f32 buf[8]; u32 mask[8]; int i; vst1q_u32(mask, {in0}.v0); vst1q_u32(mask + 4, {in0}.v1); vst1q_f32(buf, {in2}.v0); vst1q_f32(buf + 4, {in2}.v1); for (i = 0; i < 8; i++) {{ if (mask[i]) {{ {in1}[i] = nsimd_f32_to_f16(buf[i]); }} }} #endif'''.format(normal=normal **fmtspec)<block_end><return>normal<block_end># ----------------------------------------------------------------------------- # Length <def_stmt>len1 simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start><return>'return {};'.format(128<floordiv>int(typ[1:]))<block_end><elif_stmt>simd_ext<eq>'sve'<block_start><return>'return (int)svcntp_b{typnbits}({svtrue}, {svtrue});'.format(**fmtspec)<block_end><elif_stmt>simd_ext<in>fixed_sized_sve<block_start><return>'return {};'.format(int(simd_ext[3:])<floordiv>int(typ[1:]))<block_end><block_end># ----------------------------------------------------------------------------- # Add/sub <def_stmt>addsub op simd_ext typ<block_start>ret=f16f64(simd_ext typ op op 2)<if_stmt>ret<ne>''<block_start><return>ret<block_end><if_stmt>simd_ext<in>neon<block_start><return>'return v{op}q_{suf}({in0}, {in1});'.format(op=op **fmtspec)<block_end><else_stmt><block_start><return>'return sv{op}_{suf}_x({svtrue}, {in0}, {in1});'.format(op=op **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Multiplication <def_stmt>mul2 simd_ext typ<block_start>ret=f16f64(simd_ext typ 'mul' 'mul' 2)<if_stmt>ret<ne>''<block_start><return>ret<block_end><elif_stmt>simd_ext<in>neon<and>typ<in>['i64' 'u64']<block_start><return>emulate_op2('*' simd_ext typ)<block_end><else_stmt><block_start><if_stmt>simd_ext<in>neon<block_start><return>'return vmulq_{suf}({in0}, {in1});'.format(**fmtspec)<block_end><else_stmt><block_start><return>'return svmul_{suf}_x({svtrue}, {in0}, {in1});'.format(**fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Division <def_stmt>div2 simd_ext 
typ<block_start><if_stmt>simd_ext<eq>'aarch64'<and>typ<in>['f32' 'f64']<block_start><return>'return vdivq_{suf}({in0}, {in1});'.format(**fmtspec)<block_end><elif_stmt>simd_ext<in>sve<and>typ<in>['f16' 'f32' 'f64' 'i32' 'u32' 'i64' 'u64']<block_start><return>'return svdiv_{suf}_x({svtrue}, {in0}, {in1});'.format(**fmtspec)<block_end><else_stmt><block_start>ret=f16f64(simd_ext typ 'div' 'div' 2)<if_stmt>ret<ne>''<block_start><return>ret<block_end><block_end><return>emulate_op2('/' simd_ext typ)<block_end># ----------------------------------------------------------------------------- # Binary operators: and, or, xor, andnot <def_stmt>binop2 op simd_ext typ<block_start>armop={'orb':'orr' 'xorb':'eor' 'andb':'and' 'andnotb':'bic'}<if_stmt>typ<in>common.iutypes<block_start><if_stmt>simd_ext<in>neon<block_start><return>'return v{armop}q_{suf}({in0}, {in1});'.format(armop=armop[op] **fmtspec)<block_end><else_stmt><block_start><return>'return sv{armop}_{suf}_x({svtrue}, {in0}, {in1});'.format(armop=armop[op] **fmtspec)<block_end><block_end># From here only float types <if_stmt>typ<eq>'f16'<block_start>intrinsics='''return vreinterpretq_f16_u16(v{armop}q_u16(vreinterpretq_u16_f16( {in0}), vreinterpretq_u16_f16({in1})));'''.format(armop=armop[op] **fmtspec)<block_end><else_stmt><block_start>intrinsics=''<block_end>ret=f16f64(simd_ext typ op armop[op] 2 intrinsics)<if_stmt>ret<ne>''<block_start><return>ret<block_end><if_stmt>simd_ext<in>neon<block_start><return>'''return vreinterpretq_f{typnbits}_u{typnbits}(v{armop}q_u{typnbits}( vreinterpretq_u{typnbits}_f{typnbits}({in0}), vreinterpretq_u{typnbits}_f{typnbits}({in1})));'''.format(armop=armop[op] **fmtspec)<block_end><else_stmt><block_start><return>'''return svreinterpret_f{typnbits}_u{typnbits}( sv{armop}_u{typnbits}_x({svtrue}, svreinterpret_u{typnbits}_f{typnbits}({in0}), svreinterpret_u{typnbits}_f{typnbits}({in1})));'''.format(armop=armop[op] **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Binary not <def_stmt>not1 simd_ext typ<block_start><if_stmt>typ<in>common.iutypes<block_start><if_stmt>simd_ext<in>neon<block_start><if_stmt>typ<in>['i8' 'u8' 'i16' 'u16' 'i32' 'u32']<block_start><return>'return vmvnq_{suf}({in0});'.format(**fmtspec)<block_end><else_stmt><block_start><return>'''return vreinterpretq_{suf}_u32(vmvnq_u32( vreinterpretq_u32_{suf}({in0})));'''.format(**fmtspec)<block_end><block_end><if_stmt>simd_ext<in>sve<block_start><return>'return svnot_{suf}_x({svtrue}, {in0});'.format(**fmtspec)<block_end><block_end># From here only float types <if_stmt>typ<eq>'f16'<block_start>intrinsics='''return vreinterpretq_f16_u16(vmvnq_u16(vreinterpretq_u16_f16( {in0})));'''.format(**fmtspec)<block_end><else_stmt><block_start>intrinsics=''<block_end>ret=f16f64(simd_ext typ 'notb' 'mvn' 1 intrinsics)<if_stmt>ret<ne>''<block_start><return>ret<block_end><if_stmt>simd_ext<in>neon<block_start><return>'''return vreinterpretq_{suf}_u32(vmvnq_u32( vreinterpretq_u32_{suf}({in0})));'''.format(**fmtspec)<block_end><else_stmt><block_start><return>'''return svreinterpret_{suf}_u{typnbits}(svnot_u{typnbits}_x( {svtrue}, svreinterpret_u{typnbits}_{suf}({in0})));'''.format(**fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Logical operators: and, or, xor, andnot <def_stmt>lop2 opts op simd_ext typ<block_start>armop={'orl':'orr' 'xorl':'eor' 'andl':'and' 
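# NEON and SVE spell and-not as 'bic' (bit clear): vbicq/svbic compute a & ~b, matching andnotb(a, b)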
'andnotl':'bic'}<if_stmt>simd_ext<in>neon<block_start><if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return v{armop}q_u16({in0}, {in1}); #else nsimd_{simd_ext}_vlf16 ret; ret.v0 = v{armop}q_u32({in0}.v0, {in1}.v0); ret.v1 = v{armop}q_u32({in0}.v1, {in1}.v1); return ret; #endif'''.format(armop=armop[op] **fmtspec)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><if_stmt>op<eq>'andnotl'<block_start><return>'''nsimd_{simd_ext}_vlf64 ret; ret.v0 = {in0}.v0 & (~{in1}.v0); ret.v1 = {in0}.v1 & (~{in1}.v1); return ret;'''.format(**fmtspec)<block_end><else_stmt><block_start>cpuop={'orl':'|' 'xorl':'^' 'andl':'&'}<line_sep><return>'''nsimd_{simd_ext}_vlf64 ret; ret.v0 = {in0}.v0 {cpuop} {in1}.v0; ret.v1 = {in0}.v1 {cpuop} {in1}.v1; return ret;'''.format(cpuop=cpuop[op] **fmtspec)<block_end><block_end><else_stmt><block_start><return>'return v{armop}q_u{typnbits}({in0}, {in1});'.format(armop=armop[op] **fmtspec)<block_end><block_end><else_stmt><block_start><if_stmt>opts.sve_emulate_bool# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve # it needs to be deleted when the bug is corrected <block_start><return>'''return sv{armop}_x({svtrue}, (svuint{typnbits}_t){in0}, (svuint{typnbits}_t){in1});'''.format(armop=armop[op] **fmtspec)<block_end><else_stmt><block_start><return>'''return sv{armop}_z({svtrue}, {in0}, {in1});'''.format(armop=armop[op] **fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Logical not <def_stmt>lnot1 opts simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start><if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return vmvnq_u16({in0}); #else nsimd_{simd_ext}_vlf16 ret; ret.v0 = vmvnq_u32({in0}.v0); ret.v1 = vmvnq_u32({in0}.v1); return ret; #endif'''.format(**fmtspec)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''nsimd_neon128_vlf64 ret; ret.v0 = ~{in0}.v0; ret.v1 = ~{in0}.v1; return ret;'''.format(**fmtspec)<block_end><elif_stmt>typ<in>['i64' 'u64' 'f64']<block_start><return>'''return vreinterpretq_u{typnbits}_u32(vmvnq_u32( vreinterpretq_u32_u{typnbits}({in0})));'''.format(**fmtspec)<block_end><else_stmt><block_start><return>'return vmvnq_u{typnbits}({in0});'.format(**fmtspec)<block_end><block_end><elif_stmt>simd_ext<in>sve<block_start><if_stmt>opts.sve_emulate_bool# TODO: the cast is a workaround to avoid a bug in gcc trunk for sve # it needs to be deleted when the bug is corrected <block_start><return>'return svnot_x({svtrue}, (svuint{typnbits}_t){in0});'.format(**fmtspec)<block_end><else_stmt><block_start><return>'return svnot_z({svtrue}, {in0});'.format(**fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Square root <def_stmt>sqrt1 simd_ext typ<block_start><if_stmt>simd_ext<eq>'neon128'<block_start><if_stmt>typ<in>'f16'<block_start><return>'''nsimd_neon128_vf16 ret; ret.v0 = nsimd_sqrt_neon128_f32({in0}.v0); ret.v1 = nsimd_sqrt_neon128_f32({in0}.v1); return ret;'''.format(**fmtspec)<block_end><elif_stmt>typ<eq>'f64'<block_start><return>f16f64('neon128' 'f64' 'sqrt' 'sqrt' 1)<block_end><else_stmt><block_start><return>emulate_op1('sqrt' simd_ext typ)<block_end><block_end><elif_stmt>simd_ext<eq>'aarch64'<block_start><if_stmt>typ<eq>'f16'<block_start><return>f16f64('aarch64' 'f16' 'sqrt' 'sqrt' 1)<block_end><else_stmt><block_start><return>'return 
vsqrtq_{suf}({in0});'.format(**fmtspec)<block_end><block_end><else_stmt><block_start><return>'return svsqrt_{suf}_x({svtrue}, {in0});'.format(**fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Shifts <def_stmt>shl_shr op simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start>sign='-'<if>op<eq>'shr'<else>''<if_stmt>typ<in>common.utypes<block_start><return>'''return vshlq_{suf}({in0}, vdupq_n_s{typnbits}( (i{typnbits})({sign}{in1})));'''.format(sign=sign **fmtspec)<block_end><else_stmt><block_start><return>'''return vreinterpretq_s{typnbits}_u{typnbits}(vshlq_u{typnbits}( vreinterpretq_u{typnbits}_s{typnbits}({in0}), vdupq_n_s{typnbits}((i{typnbits})({sign}{in1}))));'''.format(sign=sign **fmtspec)<block_end><block_end><else_stmt><block_start>armop='lsl'<if>op<eq>'shl'<else>'lsr'<if_stmt>op<eq>'shr'<and>typ<in>common.itypes<block_start><return>'''return svreinterpret_{suf}_{suf2}(sv{armop}_{suf2}_x({svtrue}, svreinterpret_{suf2}_{suf}({in0}), svdup_n_u{typnbits}((u{typnbits}){in1})));'''.format(suf2=common.bitfield_type[typ] armop=armop **fmtspec)<block_end><else_stmt><block_start><return>'''return sv{armop}_{suf}_x({svtrue}, {in0}, svdup_n_u{typnbits}((u{typnbits}){in1}));'''.format(armop=armop **fmtspec)<block_end><block_end><block_end><def_stmt>shra simd_ext typ<block_start><if_stmt>typ<in>common.utypes<block_start><return>'''return nsimd_shr_{simd_ext}_{typ}({in0}, {in1});'''.format(**fmtspec)<block_end><if_stmt>simd_ext<in>neon<block_start><return>'''return vshlq_{suf}( {in0}, vdupq_n_s{typnbits}((i{typnbits})-{in1}));'''.format(**fmtspec)<block_end><elif_stmt>simd_ext<in>sve<block_start><if_stmt>typ[0]<eq>'i'<block_start><return>'''return svasr_n_{suf}_x({svtrue}, {in0}, (u{typnbits}){in1});'''.format(**fmtspec)<block_end><elif_stmt>typ[0]<eq>'u'<block_start><return>'return svlsr_n_{suf}_x({svtrue}, {in0}, (u64){in1});'.format(**fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Set1 <def_stmt>set1 simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start><if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return vdupq_n_f16({in0}); #else nsimd_{simd_ext}_vf16 ret; f32 f = nsimd_f16_to_f32({in0}); ret.v0 = nsimd_set1_{simd_ext}_f32(f); ret.v1 = nsimd_set1_{simd_ext}_f32(f); return ret; #endif'''.format(**fmtspec)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = {in0}; ret.v1 = {in0}; return ret;'''.format(**fmtspec)<block_end><else_stmt><block_start><return>'return vdupq_n_{suf}({in0});'.format(**fmtspec)<block_end><block_end><else_stmt><block_start><return>'return svdup_n_{suf}({in0});'.format(**fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Set1l <def_stmt>lset1 simd_ext typ<block_start><if_stmt>simd_ext<in>sve<block_start><return>'''if ({in0}) {{ return svptrue_b{typnbits}(); }} else {{ return svpfalse_b(); }}'''.format(**fmtspec)<block_end># getting here means no SVE: neon128 and aarch64 only mask='vdupq_n_u{typnbits}((u{typnbits}){{}})'.format(**fmtspec)<line_sep>normal='''if ({in0}) {{ return {ones}; }} else {{ return {zeros}; }}'''.format(ones=mask.format('-1') zeros=mask.format('0') **fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else nsimd_{simd_ext}_vlf16 ret; ret.v0 = nsimd_set1l_{simd_ext}_f32({in0}); ret.v1 = ret.v0; return ret; 
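/* without native FP16, a logical f16 vector is a pair of u32x4 masks, hence both halves are set from the f32 path */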
#endif'''.format(normal=normal **fmtspec)<block_end><if_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'''nsimd_neon128_vlf64 ret; ret.v0 = (u64)({in0} ? -1 : 0); ret.v1 = ret.v0; return ret;'''.format(**fmtspec)<block_end><return>normal<block_end># ----------------------------------------------------------------------------- # Comparison operators: ==, <, <=, >, >= <def_stmt>cmp2 opts op simd_ext typ<block_start>binop={'eq':'==' 'lt':'<' 'le':'<=' 'gt':'>' 'ge':'>='}<line_sep>armop={'eq':'eq' 'lt':'lt' 'le':'le' 'gt':'gt' 'ge':'ge'}<if_stmt>simd_ext<in>neon<block_start>emul_f16='''nsimd_{simd_ext}_vlf16 ret; ret.v0 = nsimd_{op}_{simd_ext}_f32({in0}.v0, {in1}.v0); ret.v1 = nsimd_{op}_{simd_ext}_f32({in0}.v1, {in1}.v1); return ret;'''.format(op=op **fmtspec)<line_sep>normal='return vc{armop}q_{suf}({in0}, {in1});'.format(armop=armop[op] **fmtspec)<if_stmt>typ<eq>'f16'<block_start><if_stmt>simd_ext<eq>'neon128'<block_start><return>emul_f16<block_end><else_stmt><block_start><return>'''#ifdef NSIMD_ARM_FP16 {} #else {} #endif'''.format(normal emul_f16)<block_end><block_end><if_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''nsimd_{simd_ext}_vl{typ} ret; ret.v0 = {in0}.v0 {op} {in1}.v0 ? (u64)-1 : 0; ret.v1 = {in0}.v1 {op} {in1}.v1 ? (u64)-1 : 0; return ret;'''.format(op=binop[op] **fmtspec)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<in>['i64' 'u64']<block_start><return>'''{typ} buf0[2], buf1[2]; u64 ret[2]; vst1q_{suf}(buf0, {in0}); vst1q_{suf}(buf1, {in1}); ret[0] = buf0[0] {op} buf1[0] ? (u64)-1 : 0; ret[1] = buf0[1] {op} buf1[1] ? (u64)-1 : 0; return vld1q_u64(ret);'''.format(op=binop[op] **fmtspec)<block_end><else_stmt><block_start><return>normal<block_end><block_end><elif_stmt>simd_ext<in>sve<block_start><if_stmt>opts.sve_emulate_bool# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve # it needs to be deleted when the bug is corrected <block_start>comp='svcmp{op}_{suf}({svtrue}, ({svetyp}){in0}, ({svetyp}){in1})'.format(op=armop[op] **fmtspec)<line_sep><return>'return {};'.format(convert_from_predicate(opts comp))<block_end><else_stmt><block_start><return>'return svcmp{op}_{suf}({svtrue}, {in0}, {in1});'.format(op=armop[op] **fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Not equal <def_stmt>neq2 opts simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start><return>'''return nsimd_notl_{simd_ext}_{typ}( nsimd_eq_{simd_ext}_{typ}({in0}, {in1}));'''.format(**fmtspec)<block_end><elif_stmt>simd_ext<in>sve<block_start>comp='svcmpne_{suf}({svtrue}, {in0}, {in1})'.format(**fmtspec)<line_sep><return>'return {};'.format(convert_from_predicate(opts comp))<block_end><block_end># ----------------------------------------------------------------------------- # If_else <def_stmt>if_else3 opts simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start>intrinsic='return vbslq_{suf}({in0}, {in1}, {in2});'.format(**fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {intrinsic} #else nsimd_{simd_ext}_vf16 ret; ret.v0 = nsimd_if_else1_{simd_ext}_f32( {in0}.v0, {in1}.v0, {in2}.v0); ret.v1 = nsimd_if_else1_{simd_ext}_f32( {in0}.v1, {in1}.v1, {in2}.v1); return ret; #endif'''.format(intrinsic=intrinsic **fmtspec)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = {in0}.v0 != 0u ? {in1}.v0 : {in2}.v0; ret.v1 = {in0}.v1 != 0u ? 
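/* a nonzero mask lane selects from the second argument, zero from the third */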
{in1}.v1 : {in2}.v1; return ret;'''.format(**fmtspec)<block_end><else_stmt><block_start><return>intrinsic<block_end><block_end><elif_stmt>simd_ext<in>sve<block_start><if_stmt>opts.sve_emulate_bool# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve # it needs to be deleted when the bug is corrected <block_start><return>'return svsel_{suf}({cond}, ({svetyp}){in1}, ({svetyp}){in2});'.format(cond=convert_to_predicate(opts '{in0}'.format(**fmtspec)) **fmtspec)<block_end><else_stmt><block_start><return>'return svsel_{suf}({in0}, {in1}, {in2});'.format(**fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Minimum and maximum <def_stmt>minmax2 op simd_ext typ<block_start>ret=f16f64(simd_ext typ op op 2)<if_stmt>ret<ne>''<block_start><return>ret<block_end><if_stmt>simd_ext<in>neon<block_start><if_stmt>typ<in>['i64' 'u64']<block_start>binop='<'<if>op<eq>'min'<else>'>'<line_sep><return>'''{typ} buf0[2], buf1[2]; vst1q_{suf}(buf0, {in0}); vst1q_{suf}(buf1, {in1}); buf0[0] = buf0[0] {binop} buf1[0] ? buf0[0] : buf1[0]; buf0[1] = buf0[1] {binop} buf1[1] ? buf0[1] : buf1[1]; return vld1q_{suf}(buf0);'''.format(binop=binop **fmtspec)<block_end><else_stmt><block_start><return>'return v{op}q_{suf}({in0}, {in1});'.format(op=op **fmtspec)<block_end><block_end><else_stmt><block_start><return>'return sv{op}_{suf}_x({svtrue}, {in0}, {in1});'.format(op=op **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Abs <def_stmt>abs1 simd_ext typ<block_start><if_stmt>typ<in>common.utypes<block_start><return>'return {in0};'.format(**fmtspec)<block_end><elif_stmt>simd_ext<in>neon<block_start><if_stmt>typ<eq>'f16'<block_start><return>f16f64(simd_ext 'f16' 'abs' 'abs' 1)<block_end><elif_stmt>(typ<in>['i8' 'i16' 'i32' 'f32'])<or>(simd_ext<eq>'aarch64'<and>typ<in>['i64' 'f64'])<block_start><return>'return vabsq_{suf}({in0});'.format(**fmtspec)<block_end><elif_stmt>typ<eq>'i64'<block_start><return>emulate_op1('abs' 'neon128' 'i64')<block_end><else_stmt><block_start><return>f16f64(simd_ext 'f64' 'abs' 'abs' 1)<block_end><block_end><else_stmt><block_start><return>'return svabs_{suf}_x({svtrue}, {in0});'.format(**fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Round, trunc, ceil and round_to_even <def_stmt>round1 op simd_ext typ<block_start><if_stmt>typ<in>common.iutypes<block_start><return>'return {in0};'.format(**fmtspec)<block_end>armop={'floor':'rndm' 'ceil':'rndp' 'trunc':'rnd' 'round_to_even':'rndn'}<if_stmt>simd_ext<eq>'neon128'<block_start>ret=f16f64('neon128' typ op armop[op] 1)<if_stmt>ret<ne>''<block_start><return>ret<block_end><return>emulate_op1(op 'neon128' typ)<block_end><elif_stmt>simd_ext<eq>'aarch64'<block_start><if_stmt>typ<eq>'f16'<block_start><return>f16f64('aarch64' 'f16' op armop[op] 1)<block_end><else_stmt><block_start><return>'return v{armop}q_{suf}({in0});'.format(armop=armop[op] **fmtspec)<block_end><block_end><else_stmt><block_start>armop={'floor':'rintm' 'ceil':'rintp' 'trunc':'rintz' 'round_to_even':'rintn'}<line_sep><return>'return sv{armop}_{suf}_x({svtrue}, {in0});'.format(armop=armop[op] **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # FMA and FNMA <def_stmt>fmafnma3 op simd_ext typ<block_start><if_stmt>typ<in>common.ftypes<and>simd_ext<eq>'aarch64'<block_start>armop={'fma':'fma' 
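# on aarch64 floats map to the fused vfmaq/vfmsq forms; note the emitted operand order below: accumulator first, i.e. vfmaq(a2, a1, a0) for fma(a0, a1, a2)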
'fnma':'fms'}<block_end><else_stmt><block_start>armop={'fma':'mla' 'fnma':'mls'}<block_end><if_stmt>simd_ext<in>neon<block_start>normal='return v{armop}q_{suf}({in2}, {in1}, {in0});'.format(armop=armop[op] **fmtspec)<line_sep>emul=emulate_op3_neon(op simd_ext typ)<if_stmt>typ<eq>'f16'<block_start>using_f32='''nsimd_{simd_ext}_vf16 ret; ret.v0 = nsimd_{op}_{simd_ext}_f32({in0}.v0, {in1}.v0, {in2}.v0); ret.v1 = nsimd_{op}_{simd_ext}_f32({in0}.v1, {in1}.v1, {in2}.v1); return ret;'''.format(op=op **fmtspec)<if_stmt>simd_ext<eq>'aarch64'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {} #else {} #endif'''.format(emul using_f32)<block_end><else_stmt><block_start><return>using_f32<block_end><block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>emulate_f64_neon('neon128' op ['v']<times>4)<block_end><elif_stmt>simd_ext<eq>'aarch64'<and>typ<eq>'f64'<block_start><return>normal<block_end><elif_stmt>typ<in>['i64' 'u64']<block_start><return>emul<block_end><else_stmt><block_start><return>normal<block_end><block_end><else_stmt><block_start><return>'return sv{armop}_{suf}_x({svtrue}, {in2}, {in1}, {in0});'.format(armop=armop[op] **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # FMS and FNMS <def_stmt>fmsfnms3 op simd_ext typ<block_start><if_stmt>typ<in>common.iutypes<block_start><return>'''return nsimd_neg_{simd_ext}_{typ}(nsimd_{op2}_{simd_ext}_{typ}( {in0}, {in1}, {in2}));'''.format(op2='fma'<if>op<eq>'fnms'<else>'fnma' **fmtspec)<block_end><if_stmt>simd_ext<in>neon<block_start><return>'''return nsimd_{op2}_{simd_ext}_{typ}({in0}, {in1}, nsimd_neg_{simd_ext}_{typ}({in2}));'''.format(op2='fma'<if>op<eq>'fms'<else>'fnma' **fmtspec)<block_end><else_stmt><block_start>armop={'fnms':'nmla' 'fms':'nmls'}<line_sep><return>'return sv{armop}_{suf}_x({svtrue}, {in2}, {in1}, {in0});'.format(armop=armop[op] **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Neg <def_stmt>neg1 simd_ext typ<block_start><if_stmt>simd_ext<in>neon<block_start>normal='return vnegq_{suf}({in0});'.format(**fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>f16f64(simd_ext 'f16' 'neg' 'neg' 1)<block_end><elif_stmt>typ<in>['i8' 'i16' 'i32' 'f32']<block_start><return>normal<block_end><elif_stmt>typ<in>['u8' 'u16' 'u32']<block_start><return>'''return vreinterpretq_{suf}_s{typnbits}( vnegq_s{typnbits}( vreinterpretq_s{typnbits}_{suf}({in0})));'''.format(**fmtspec)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<in>['i64' 'u64']<block_start><return>emulate_op1('neg' simd_ext typ)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = -{in0}.v0; ret.v1 = -{in0}.v1; return ret;'''.format(**fmtspec)<block_end><elif_stmt>simd_ext<eq>'aarch64'<and>typ<in>['f64' 'i64']<block_start><return>normal<block_end><elif_stmt>simd_ext<eq>'aarch64'<and>typ<eq>'u64'<block_start><return>'''return vreinterpretq_u64_s64(vnegq_s64( vreinterpretq_s64_u64({in0})));'''.format(**fmtspec)<block_end><block_end><else_stmt><block_start><if_stmt>typ<in>common.utypes<block_start><return>'''return svreinterpret_{suf}_s{typnbits}( svneg_s{typnbits}_x({svtrue}, svreinterpret_s{typnbits}_{suf}({in0})));'''.format(**fmtspec)<block_end><else_stmt><block_start><return>'return svneg_{suf}_x({svtrue}, {in0});'.format(**fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Reciprocals <def_stmt>recs1 op 
simd_ext typ<block_start>cte='({typ})1'.format(**fmtspec)<if>typ<ne>'f16'<else>'nsimd_f32_to_f16(1.0f)'<if_stmt>op<in>['rec' 'rec11']<block_start><return>'''return nsimd_div_{simd_ext}_{typ}( nsimd_set1_{simd_ext}_{typ}({cte}), {in0});'''.format(cte=cte **fmtspec)<block_end><elif_stmt>op<eq>'rsqrt11'<block_start><return>'''return nsimd_div_{simd_ext}_{typ}( nsimd_set1_{simd_ext}_{typ}({cte}), nsimd_sqrt_{simd_ext}_{typ}({in0}));'''.format(cte=cte **fmtspec)<block_end><elif_stmt>op<in>['rec8' 'rsqrt8']<block_start>armop='recpe'<if>op<eq>'rec8'<else>'rsqrte'<if_stmt>simd_ext<in>sve<block_start><return>'return sv{armop}_{suf}({in0});'.format(armop=armop **fmtspec)<block_end><else_stmt><block_start>ret=f16f64(simd_ext typ op armop 1)<if_stmt>ret<ne>''<block_start><return>ret<block_end><return>'return v{armop}q_{suf}({in0});'.format(armop=armop **fmtspec)<block_end><block_end><block_end># Rec11 and rsqrt11 # According to http://infocenter.arm.com/help/topic/com.arm.doc.faqs/ka14282.html # reciprocal estimates only work when inputs are restricted to some small # interval, so we comment these out for now and return full-precision # reciprocals. # def rec11rsqrt11(op, simd_ext, typ): # armop = {'rec11': 'recpe', 'rsqrt11': 'rsqrte'} # if simd_ext in neon: # ret = f16f64(simd_ext, typ, op, armop[op], 1) # if ret != '': # return ret # return 'return v{armop}q_{suf}({in0});'. \ # format(armop=armop[op], **fmtspec) # else: # return 'return sv{armop}_{suf}({in0});'. \ # format(armop=armop[op], **fmtspec) # ----------------------------------------------------------------------------- # Load of logicals <def_stmt>loadl aligned simd_ext typ<block_start><return>'''/* This can surely be improved but it is not our priority. */ return nsimd_notl_{simd_ext}_{typ}(nsimd_eq_{simd_ext}_{typ}( nsimd_load{align}_{simd_ext}_{typ}( {in0}), nsimd_set1_{simd_ext}_{typ}({zero})));'''.format(align='a'<if>aligned<else>'u' zero='nsimd_f32_to_f16(0.0f)'<if>typ<eq>'f16'<else>'({})0'.format(typ) **fmtspec)<block_end># ----------------------------------------------------------------------------- # Store of logicals <def_stmt>storel aligned simd_ext typ<block_start><return>'''/* This can surely be improved but it is not our priority. 
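Each logical lane is materialized as 1 or 0 through if_else1 against set1 vectors before the plain store.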
*/ nsimd_store{align}_{simd_ext}_{typ}({in0}, nsimd_if_else1_{simd_ext}_{typ}({in1}, nsimd_set1_{simd_ext}_{typ}({one}), nsimd_set1_{simd_ext}_{typ}({zero})));'''.format(align='a'<if>aligned<else>'u' one='nsimd_f32_to_f16(1.0f)'<if>typ<eq>'f16'<else>'({})1'.format(typ) zero='nsimd_f32_to_f16(0.0f)'<if>typ<eq>'f16'<else>'({})0'.format(typ) **fmtspec)<block_end># ----------------------------------------------------------------------------- # All and any <def_stmt>allany1 opts op simd_ext typ<block_start>binop='&&'<if>op<eq>'all'<else>'||'<if_stmt>simd_ext<eq>'neon128'<block_start><if_stmt>typ<eq>'f16'<block_start><return>'''return nsimd_{op}_neon128_f32({in0}.v0) {binop} nsimd_{op}_neon128_f32({in0}.v1);'''.format(op=op binop=binop **fmtspec)<block_end><elif_stmt>typ<eq>'f64'<block_start><return>'return {in0}.v0 {binop} {in0}.v1;'.format(binop=binop **fmtspec)<block_end><else_stmt><block_start><return>'return '+binop.join(['vgetq_lane_u{typnbits}({in0}, {i})'.format(i=i **fmtspec)<for>i range(0 128<floordiv>int(fmtspec['typnbits']))])+';'<block_end><block_end><elif_stmt>simd_ext<eq>'aarch64'<block_start>armop={'all':'min' 'any':'max'}<line_sep>normal='return v{armop}vq_u{typnbits}({in0}) != 0;'.format(armop=armop[op] **fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else return nsimd_{op}_aarch64_f32({in0}.v0) {binop} nsimd_{op}_aarch64_f32({in0}.v1); #endif'''.format(normal=normal op=op binop=binop **fmtspec)<block_end><elif_stmt>typ<in>['i64' 'u64' 'f64']<block_start><return>'return v{armop}vq_u32(vreinterpretq_u32_u64({in0})) != 0;'.format(armop=armop[op] **fmtspec)<block_end><else_stmt><block_start><return>normal<block_end><block_end><elif_stmt>simd_ext<in>sve<block_start><if_stmt>op<eq>'any'<block_start>operand=convert_to_predicate(opts '{in0}'.format(**fmtspec))<line_sep><return>'''return svptest_any({svtrue}, {operand});'''.format(operand=operand **fmtspec)<block_end><else_stmt><block_start>operand='svnot_z({svtrue}, {op})'.format(op=convert_to_predicate(opts '{in0}'.format(**fmtspec)) **fmtspec)<line_sep><return>'''return !svptest_any({svtrue}, {operand});'''.format(operand=operand **fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # nbtrue <def_stmt>nbtrue1 opts simd_ext typ<block_start><if_stmt>simd_ext<eq>'neon128'<block_start><if_stmt>typ<eq>'f16'<block_start><return>'''return nsimd_nbtrue_neon128_f32({in0}.v0) + nsimd_nbtrue_neon128_f32({in0}.v1);'''.format(**fmtspec)<block_end><elif_stmt>typ<eq>'f64'<block_start><return>'return -(int)((i64){in0}.v0 + (i64){in0}.v1);'.format(**fmtspec)<block_end><else_stmt><block_start><return>'''nsimd_neon128_vi{typnbits} temp = vreinterpretq_s{typnbits}_u{typnbits}({in0}); return -(int)('''.format(**fmtspec)+'+'.join(['vgetq_lane_s{typnbits}(temp, {i})'.format(i=i **fmtspec)<for>i range(0 128<floordiv>int(fmtspec['typnbits']))])+');'<block_end><block_end><elif_stmt>simd_ext<eq>'aarch64'<block_start>normal='''return -(int)vaddvq_s{typnbits}( vreinterpretq_s{typnbits}_u{typnbits}({in0}));'''.format(**fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else return nsimd_nbtrue_aarch64_f32({in0}.v0) + nsimd_nbtrue_aarch64_f32({in0}.v1); #endif'''.format(normal=normal **fmtspec)<block_end><elif_stmt>typ<in>['i64' 'u64' 'f64']<block_start><return>'''return -(vaddvq_s32(vreinterpretq_s32_u64({in0})) >> 
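/* a true 64-bit lane yields two -1 s32 lanes, so the sum counts twice */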
1);'''.format(**fmtspec)<block_end><else_stmt><block_start><return>normal<block_end><block_end><elif_stmt>simd_ext<in>sve<block_start><return>'return (int)svcntp_b{typnbits}({svtrue}, {op});'.format(op=convert_to_predicate(opts '{in0}'.format(**fmtspec)) **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Reinterpret logical <def_stmt>reinterpretl1 simd_ext from_typ to_typ<block_start><if_stmt>from_typ<eq>to_typ<or>simd_ext<in>sve<block_start><return>'return {in0};'.format(**fmtspec)<block_end>to_f16_with_f32='''nsimd_{simd_ext}_vlf16 ret; u32 buf[4]; buf[0] = (vgetq_lane_u16({in0}, 0) ? (u32)-1 : 0); buf[1] = (vgetq_lane_u16({in0}, 1) ? (u32)-1 : 0); buf[2] = (vgetq_lane_u16({in0}, 2) ? (u32)-1 : 0); buf[3] = (vgetq_lane_u16({in0}, 3) ? (u32)-1 : 0); ret.v0 = vld1q_u32(buf); buf[0] = (vgetq_lane_u16({in0}, 4) ? (u32)-1 : 0); buf[1] = (vgetq_lane_u16({in0}, 5) ? (u32)-1 : 0); buf[2] = (vgetq_lane_u16({in0}, 6) ? (u32)-1 : 0); buf[3] = (vgetq_lane_u16({in0}, 7) ? (u32)-1 : 0); ret.v1 = vld1q_u32(buf); return ret;'''.format(**fmtspec)<line_sep>from_f16_with_f32='''u16 buf[8]; buf[0] = (vgetq_lane_u32({in0}.v0, 0) ? (u16)-1 : 0); buf[1] = (vgetq_lane_u32({in0}.v0, 1) ? (u16)-1 : 0); buf[2] = (vgetq_lane_u32({in0}.v0, 2) ? (u16)-1 : 0); buf[3] = (vgetq_lane_u32({in0}.v0, 3) ? (u16)-1 : 0); buf[4] = (vgetq_lane_u32({in0}.v1, 0) ? (u16)-1 : 0); buf[5] = (vgetq_lane_u32({in0}.v1, 1) ? (u16)-1 : 0); buf[6] = (vgetq_lane_u32({in0}.v1, 2) ? (u16)-1 : 0); buf[7] = (vgetq_lane_u32({in0}.v1, 3) ? (u16)-1 : 0); return vld1q_u16(buf);'''.format(**fmtspec)<if_stmt>simd_ext<eq>'neon128'<block_start><if_stmt>to_typ<eq>'f16'<block_start><return>to_f16_with_f32<block_end><elif_stmt>from_typ<eq>'f16'<block_start><return>from_f16_with_f32<block_end><elif_stmt>to_typ<eq>'f64'<block_start><return>'''nsimd_neon128_vlf64 ret; ret.v0 = vgetq_lane_u64({in0}, 0); ret.v1 = vgetq_lane_u64({in0}, 1); return ret;'''.format(**fmtspec)<block_end><elif_stmt>from_typ<eq>'f64'<block_start><return>'''u64 buf[2]; buf[0] = {in0}.v0; buf[1] = {in0}.v1; return vld1q_u64(buf);'''.format(**fmtspec)<block_end><else_stmt><block_start><return>'return {in0};'.format(**fmtspec)<block_end><block_end><elif_stmt>simd_ext<eq>'aarch64'<block_start><if_stmt>to_typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return {in0}; #else {using_f32} #endif'''.format(using_f32=to_f16_with_f32 **fmtspec)<block_end><elif_stmt>from_typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return {in0}; #else {using_f32} #endif'''.format(using_f32=from_f16_with_f32 **fmtspec)<block_end><else_stmt><block_start><return>'return {in0};'.format(**fmtspec)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Convert <def_stmt>convert1 simd_ext from_typ to_typ<block_start>fmtspec2=fmtspec.copy()<line_sep>fmtspec2['to_suf']=suf(to_typ)<line_sep>fmtspec2['from_suf']=suf(from_typ)<if_stmt>from_typ<eq>to_typ<block_start><return>'return {in0};'.format(**fmtspec)<block_end><if_stmt>from_typ<in>common.iutypes<and>to_typ<in>common.iutypes<block_start><if_stmt>simd_ext<in>neon<block_start><return>'return vreinterpretq_{to_suf}_{from_suf}({in0});'.format(**fmtspec2)<block_end><else_stmt><block_start><return>'return svreinterpret_{to_suf}_{from_suf}({in0});'.format(**fmtspec2)<block_end><block_end><if_stmt>simd_ext<in>sve<block_start><return>'return svcvt_{to_suf}_{from_suf}_x({svtrue}, 
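/* single predicated convert, e.g. svcvt_f32_s32_x */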
{in0});'.format(**fmtspec2)<block_end>to_f16_with_f32='''nsimd_{simd_ext}_vf16 ret; f32 buf[4]; buf[0] = (f32)vgetq_lane_{from_suf}({in0}, 0); buf[1] = (f32)vgetq_lane_{from_suf}({in0}, 1); buf[2] = (f32)vgetq_lane_{from_suf}({in0}, 2); buf[3] = (f32)vgetq_lane_{from_suf}({in0}, 3); ret.v0 = vld1q_f32(buf); buf[0] = (f32)vgetq_lane_{from_suf}({in0}, 4); buf[1] = (f32)vgetq_lane_{from_suf}({in0}, 5); buf[2] = (f32)vgetq_lane_{from_suf}({in0}, 6); buf[3] = (f32)vgetq_lane_{from_suf}({in0}, 7); ret.v1 = vld1q_f32(buf); return ret;'''.format(**fmtspec2)<line_sep>from_f16_with_f32='''{to_typ} buf[8]; buf[0] = ({to_typ})vgetq_lane_f32({in0}.v0, 0); buf[1] = ({to_typ})vgetq_lane_f32({in0}.v0, 1); buf[2] = ({to_typ})vgetq_lane_f32({in0}.v0, 2); buf[3] = ({to_typ})vgetq_lane_f32({in0}.v0, 3); buf[4] = ({to_typ})vgetq_lane_f32({in0}.v1, 0); buf[5] = ({to_typ})vgetq_lane_f32({in0}.v1, 1); buf[6] = ({to_typ})vgetq_lane_f32({in0}.v1, 2); buf[7] = ({to_typ})vgetq_lane_f32({in0}.v1, 3); return vld1q_{to_suf}(buf);'''.format(**fmtspec2)<if_stmt>simd_ext<eq>'neon128'<block_start><if_stmt>to_typ<eq>'f16'<block_start><return>to_f16_with_f32<block_end><elif_stmt>from_typ<eq>'f16'<block_start><return>from_f16_with_f32<block_end><elif_stmt>to_typ<eq>'f64'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = (f64)vgetq_lane_{from_suf}({in0}, 0); ret.v1 = (f64)vgetq_lane_{from_suf}({in0}, 1); return ret;'''.format(**fmtspec2)<block_end><elif_stmt>from_typ<eq>'f64'<block_start><return>'''{to_typ} buf[2]; buf[0] = ({to_typ}){in0}.v0; buf[1] = ({to_typ}){in0}.v1; return vld1q_{to_suf}(buf);'''.format(**fmtspec2)<block_end><else_stmt><block_start><return>'return vcvtq_{to_suf}_{from_suf}({in0});'.format(**fmtspec2)<block_end><block_end><elif_stmt>simd_ext<eq>'aarch64'<block_start><if_stmt>to_typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return vcvtq_{to_suf}_{from_suf}({in0}); #else {using_f32} #endif'''.format(using_f32=to_f16_with_f32 **fmtspec2)<block_end><elif_stmt>from_typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return vcvtq_{to_suf}_{from_suf}({in0}); #else {using_f32} #endif'''.format(using_f32=from_f16_with_f32 **fmtspec2)<block_end><else_stmt><block_start><return>'return vcvtq_{to_suf}_{from_suf}({in0});'.format(**fmtspec2)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # Reinterpret <def_stmt>reinterpret1 simd_ext from_typ to_typ<block_start>fmtspec2=fmtspec.copy()<line_sep>fmtspec2['to_suf']=suf(to_typ)<line_sep>fmtspec2['from_suf']=suf(from_typ)<if_stmt>from_typ<eq>to_typ<block_start><return>'return {in0};'.format(**fmtspec)<block_end><if_stmt>simd_ext<in>sve<block_start><return>'return svreinterpret_{to_suf}_{from_suf}({in0});'.format(**fmtspec2)<block_end>to_f16_with_f32='''nsimd_{simd_ext}_vf16 ret; f32 buf[4]; buf[0] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 0)); buf[1] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 1)); buf[2] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 2)); buf[3] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 3)); ret.v0 = vld1q_f32(buf); buf[0] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 4)); buf[1] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 5)); buf[2] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 6)); buf[3] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 7)); ret.v1 = vld1q_f32(buf); return ret;'''.format(**fmtspec2)<line_sep>from_f16_with_f32='''{to_typ} buf[8]; buf[0] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 
0)); buf[1] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 1)); buf[2] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 2)); buf[3] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 3)); buf[4] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 0)); buf[5] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 1)); buf[6] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 2)); buf[7] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 3)); return vld1q_{to_suf}(buf);'''.format(**fmtspec2)<if_stmt>simd_ext<eq>'neon128'<block_start><if_stmt>to_typ<eq>'f16'<block_start><return>to_f16_with_f32<block_end><elif_stmt>from_typ<eq>'f16'<block_start><return>from_f16_with_f32<block_end><elif_stmt>to_typ<eq>'f64'<block_start><return>'''nsimd_neon128_vf64 ret; union {{ f64 to; {from_typ} from; }} buf; buf.from = vgetq_lane_{from_suf}({in0}, 0); ret.v0 = buf.to; buf.from = vgetq_lane_{from_suf}({in0}, 1); ret.v1 = buf.to; return ret;'''.format(**fmtspec2)<block_end><elif_stmt>from_typ<eq>'f64'<block_start><return>'''union {{ f64 from; {to_typ} to; }} buf_; {to_typ} buf[2]; buf_.from = {in0}.v0; buf[0] = buf_.to; buf_.from = {in0}.v1; buf[1] = buf_.to; return vld1q_{to_suf}(buf);'''.format(**fmtspec2)<block_end><else_stmt><block_start><return>'return vreinterpretq_{to_suf}_{from_suf}({in0});'.format(**fmtspec2)<block_end><block_end><elif_stmt>simd_ext<eq>'aarch64'<block_start><if_stmt>to_typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return vreinterpretq_{to_suf}_{from_suf}({in0}); #else {using_f32} #endif'''.format(using_f32=to_f16_with_f32 **fmtspec2)<block_end><elif_stmt>from_typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return vreinterpretq_{to_suf}_{from_suf}({in0}); #else {using_f32} #endif'''.format(using_f32=from_f16_with_f32 **fmtspec2)<block_end><else_stmt><block_start><return>'return vreinterpretq_{to_suf}_{from_suf}({in0});'.format(**fmtspec2)<block_end><block_end><block_end># ----------------------------------------------------------------------------- # reverse <def_stmt>reverse1 simd_ext typ<block_start>armtyp=suf(typ)<if_stmt>simd_ext<in>sve<block_start><return>'''return svrev_{suf}({in0});'''.format(**fmtspec)<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = {in0}.v1; ret.v1 = {in0}.v0; return ret;'''.format(**fmtspec)<block_end><elif_stmt>typ<in>['i64' 'u64' 'f64']<block_start><return>'''return vcombine_{armtyp}(vget_high_{armtyp}({in0}), vget_low_{armtyp}({in0}));'''.format(armtyp=armtyp **fmtspec)<block_end><elif_stmt>typ<eq>'f16'<block_start><return>'''nsimd_{simd_ext}_vf16 ret; ret.v0 = nsimd_reverse_{simd_ext}_f32({in0}.v1); ret.v1 = nsimd_reverse_{simd_ext}_f32({in0}.v0); return ret;'''.format(**fmtspec)<block_end><else_stmt><block_start><return>'''{in0} = vrev64q_{armtyp}({in0}); return vcombine_{armtyp}(vget_high_{armtyp}({in0}), vget_low_{armtyp}({in0}));'''.format(armtyp=armtyp **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Horizontal sum <def_stmt>addv simd_ext typ<block_start><if_stmt>simd_ext<eq>'neon128'<block_start><if_stmt>typ<eq>'f64'<block_start><return>'return ({typ})({in0}.v0 + {in0}.v1);'.format(**fmtspec)<block_end><elif_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {t} tmp = vadd_{suf}(vget_low_{suf}({in0}), vget_high_{suf}({in0})); tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 2)); tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1)); return vget_lane_{suf}(tmp, 0); #else 
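/* no native FP16 arithmetic: reduce each f32 half with a pairwise fold, then add the two partial sums as scalars */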
float32x2_t tmp0 = vadd_f32(vget_low_f32({in0}.v0), vget_high_f32({in0}.v0)); tmp0 = vadd_f32(tmp0, vext_f32(tmp0, tmp0, 1)); float32x2_t tmp1 = vadd_f32(vget_low_f32({in0}.v1), vget_high_f32({in0}.v1)); tmp1 = vadd_f32(tmp1, vext_f32(tmp1, tmp1, 1)); return nsimd_f32_to_f16(vget_lane_f32(tmp0, 0) + vget_lane_f32(tmp1, 0)); #endif'''.format(t=half_neon64_typ(typ) **fmtspec)<block_end><elif_stmt>typ<eq>'f32'<block_start><return>'''{t} tmp = vadd_{suf}(vget_low_{suf}({in0}), vget_high_{suf}({in0})); tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1)); return vget_lane_{suf}(tmp, 0);'''.format(t=half_neon64_typ(typ) **fmtspec)<block_end><elif_stmt>typ[0]<in>['i' 'u']<block_start>le=128<floordiv>int(typ[1:])<line_sep><return>'''{typ} res = ({typ})0; {typ} buf[{le}]; vst1q_{suf}(buf, {in0}); for (int i = 0; i < {le}; i++) {{ res += buf[i]; }} return res;'''.format(le=le **fmtspec)<block_end><block_end><elif_stmt>simd_ext<eq>'aarch64'<block_start><if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {t} tmp = vadd_{suf}(vget_low_{suf}({in0}), vget_high_{suf}({in0})); tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 2)); tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1)); return vget_lane_{suf}(tmp, 0); #else float32x2_t tmp0 = vadd_f32(vget_low_f32({in0}.v0), vget_high_f32({in0}.v0)); tmp0 = vadd_f32(tmp0, vext_f32(tmp0, tmp0, 1)); float32x2_t tmp1 = vadd_f32(vget_low_f32({in0}.v1), vget_high_f32({in0}.v1)); tmp1 = vadd_f32(tmp1, vext_f32(tmp1, tmp1, 1)); return nsimd_f32_to_f16(vget_lane_f32(tmp0, 0) + vget_lane_f32(tmp1, 0)); #endif'''.format(t=half_neon64_typ(typ) **fmtspec)<block_end><elif_stmt>typ<in>['f32' 'f64']<block_start><return>'return vaddvq_{suf}({in0});'.format(**fmtspec)<block_end><block_end><elif_stmt>simd_ext<in>sve<block_start><return>'return svaddv_{suf}({svtrue}, {in0});'.format(**fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Up convert <def_stmt>upcvt1 simd_ext from_typ to_typ# For integer upcast, due to 2's complement representation # _s : signed -> bigger signed # _s : signed -> bigger unsigned # _u : unsigned -> bigger signed # _u : unsigned -> bigger unsigned <block_start><if_stmt>simd_ext<in>neon<block_start><if_stmt>from_typ<eq>'f16'<and>to_typ<eq>'f32'<block_start><return>'''#ifdef NSIMD_ARM_FP16 nsimd_{simd_ext}_vf32x2 ret; ret.v0 = vcvt_f32_f16(vget_low_{suf}({in0})); ret.v1 = vcvt_f32_f16(vget_high_{suf}({in0})); return ret; #else nsimd_{simd_ext}_vf32x2 ret; ret.v0 = {in0}.v0; ret.v1 = {in0}.v1; return ret; #endif'''.format(**fmtspec)<block_end><elif_stmt>from_typ<eq>'f32'<and>to_typ<eq>'f64'<block_start><if_stmt>simd_ext<eq>'neon128'<block_start><return>'''nsimd_neon128_vf64x2 ret; f32 buf[4]; vst1q_f32(buf, {in0}); ret.v0.v0 = (f64)buf[0]; ret.v0.v1 = (f64)buf[1]; ret.v1.v0 = (f64)buf[2]; ret.v1.v1 = (f64)buf[3]; return ret;'''.format(**fmtspec)<block_end><else_stmt><block_start><return>'''nsimd_aarch64_vf64x2 ret; ret.v0 = vcvt_f64_f32(vget_low_{suf}({in0})); ret.v1 = vcvt_f64_f32(vget_high_{suf}({in0})); return ret;'''.format(**fmtspec)<block_end><block_end><elif_stmt>(from_typ<in>common.itypes<and>to_typ<in>common.itypes)<or>(from_typ<in>common.utypes<and>to_typ<in>common.utypes)<block_start><return>'''nsimd_{simd_ext}_v{to_typ}x2 ret; ret.v0 = vmovl_{suf}(vget_low_{suf}({in0})); ret.v1 = vmovl_{suf}(vget_high_{suf}({in0})); return
ret;'''.format(**fmtspec)<block_end><elif_stmt>(from_typ<in>common.itypes<and>to_typ<in>common.utypes)<or>(from_typ<in>common.utypes<and>to_typ<in>common.itypes)<block_start><return>'''nsimd_{simd_ext}_v{to_typ}x2 ret; ret.v0 = vreinterpretq_{suf_to_typ}_{suf_int_typ}( vmovl_{suf}(vget_low_{suf}({in0}))); ret.v1 = vreinterpretq_{suf_to_typ}_{suf_int_typ}( vmovl_{suf}(vget_high_{suf}({in0}))); return ret;'''.format(suf_to_typ=suf(to_typ) suf_int_typ=suf(from_typ[0]+to_typ[1:]) **fmtspec)<block_end><else_stmt><block_start><return>'''nsimd_{simd_ext}_v{to_typ}x2 ret; nsimd_{simd_ext}_v{int_typ}x2 tmp; tmp = nsimd_upcvt_{simd_ext}_{int_typ}_{from_typ}({in0}); ret.v0 = nsimd_cvt_{simd_ext}_{to_typ}_{int_typ}(tmp.v0); ret.v1 = nsimd_cvt_{simd_ext}_{to_typ}_{int_typ}(tmp.v1); return ret;'''.format(int_typ=from_typ[0]+to_typ[1:] **fmtspec)<block_end><block_end># Getting here means that we deal with SVE <if_stmt>(from_typ<in>common.itypes<and>to_typ<in>common.itypes)<or>(from_typ<in>common.utypes<and>to_typ<in>common.utypes)<block_start><return>'''nsimd_{simd_ext}_v{to_typ}x2 ret; ret.v0 = svunpklo_{suf_to_typ}({in0}); ret.v1 = svunpkhi_{suf_to_typ}({in0}); return ret;'''.format(suf_to_typ=suf(to_typ) **fmtspec)<block_end><elif_stmt>(from_typ<in>common.itypes<and>to_typ<in>common.utypes)<or>(from_typ<in>common.utypes<and>to_typ<in>common.itypes)<block_start><return>'''nsimd_{simd_ext}_v{to_typ}x2 ret; ret.v0 = svreinterpret_{suf_to_typ}_{suf_int_typ}( svunpklo_{suf_int_typ}({in0})); ret.v1 = svreinterpret_{suf_to_typ}_{suf_int_typ}( svunpkhi_{suf_int_typ}({in0})); return ret;'''.format(suf_to_typ=suf(to_typ) suf_int_typ=suf(from_typ[0]+to_typ[1:]) **fmtspec)<block_end><elif_stmt>from_typ<in>common.iutypes<and>to_typ<in>common.ftypes<block_start><return>'''nsimd_{simd_ext}_v{to_typ}x2 ret; ret.v0 = svcvt_{suf_to_typ}_{suf_int_typ}_x( {svtrue}, svunpklo_{suf_int_typ}({in0})); ret.v1 = svcvt_{suf_to_typ}_{suf_int_typ}_x( {svtrue}, svunpkhi_{suf_int_typ}({in0})); return ret;'''.format(suf_to_typ=suf(to_typ) suf_int_typ=suf(from_typ[0]+to_typ[1:]) **fmtspec)<block_end><else_stmt><block_start><return>'''nsimd_{simd_ext}_v{to_typ}x2 ret; ret.v0 = svcvt_{suf_to_typ}_{suf}_x({svtrue}, svzip1_{suf}( {in0}, {in0})); ret.v1 = svcvt_{suf_to_typ}_{suf}_x({svtrue}, svzip2_{suf}( {in0}, {in0})); return ret;'''.format(suf_to_typ=suf(to_typ) **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # Down convert <def_stmt>downcvt1 simd_ext from_typ to_typ<block_start><if_stmt>simd_ext<in>neon<block_start><if_stmt>from_typ<eq>'f64'<and>to_typ<eq>'f32'<block_start><if_stmt>simd_ext<eq>'neon128'<block_start><return>'''f32 buf[4]; buf[0] = (f32){in0}.v0; buf[1] = (f32){in0}.v1; buf[2] = (f32){in1}.v0; buf[3] = (f32){in1}.v1; return vld1q_f32(buf);'''.format(**fmtspec)<block_end><else_stmt><block_start><return>'''return vcombine_f32(vcvt_f32_f64({in0}), vcvt_f32_f64({in1}));'''.format(**fmtspec)<block_end><block_end><elif_stmt>from_typ<eq>'f32'<and>to_typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 return vcombine_f16(vcvt_f16_f32({in0}), vcvt_f16_f32({in1})); #else nsimd_{simd_ext}_vf16 ret; ret.v0 = {in0}; ret.v1 = {in1}; return ret; #endif'''.format(**fmtspec)<block_end><elif_stmt>(from_typ<in>common.itypes<and>to_typ<in>common.itypes)<or>(from_typ<in>common.utypes<and>to_typ<in>common.utypes)<block_start><return>'''return vcombine_{suf_to_typ}(vmovn_{suf}({in0}), vmovn_{suf}({in1}));'''.format(suf_to_typ=suf(to_typ) 
**fmtspec)<block_end><elif_stmt>(from_typ<in>common.itypes<and>to_typ<in>common.utypes)<or>(from_typ<in>common.utypes<and>to_typ<in>common.itypes)<block_start><return>'''return vreinterpretq_{suf_to_typ}_{suf_int_typ}( vcombine_{suf_int_typ}(vmovn_{suf}({in0}), vmovn_{suf}({in1})));'''.format(suf_to_typ=suf(to_typ) suf_int_typ=suf(from_typ[0]+to_typ[1:]) **fmtspec)<block_end><else_stmt><block_start><return>'''return nsimd_downcvt_{simd_ext}_{to_typ}_{int_typ}( nsimd_cvt_{simd_ext}_{int_typ}_{from_typ}({in0}), nsimd_cvt_{simd_ext}_{int_typ}_{from_typ}({in1}));'''.format(int_typ=to_typ[0]+from_typ[1:] **fmtspec)<block_end><block_end># Getting here means that we deal with SVE <if_stmt>from_typ<in>common.iutypes<and>to_typ<in>common.iutypes<block_start><return>'''return svuzp1_{suf_to_typ}( svreinterpret_{suf_to_typ}_{suf}({in0}), svreinterpret_{suf_to_typ}_{suf}({in1}));'''.format(suf_to_typ=suf(to_typ) **fmtspec)<block_end><elif_stmt>from_typ<in>common.ftypes<and>to_typ<in>common.iutypes<block_start><return>'''return svuzp1_{suf_to_typ}(svreinterpret_{suf_to_typ}_{suf_int_typ}( svcvt_{suf_int_typ}_{suf}_x({svtrue}, {in0})), svreinterpret_{suf_to_typ}_{suf_int_typ}( svcvt_{suf_int_typ}_{suf}_x({svtrue}, {in1})));'''.format(suf_to_typ=suf(to_typ) suf_int_typ=suf(to_typ[0]+from_typ[1:]) **fmtspec)<block_end><else_stmt><block_start><return>'''return svuzp1_{suf_to_typ}(svcvt_{suf_to_typ}_{suf}_x( {svtrue}, {in0}), svcvt_{suf_to_typ}_{suf}_x( {svtrue}, {in1}));'''.format(suf_to_typ=suf(to_typ) **fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # adds <def_stmt>adds simd_ext from_typ<block_start><if_stmt>from_typ<in>common.ftypes<block_start><return>'return nsimd_add_{simd_ext}_{from_typ}({in0}, {in1});'.format(**fmtspec)<block_end><if_stmt>simd_ext<in>neon<block_start><return>'return vqaddq_{suf}({in0}, {in1});'.format(**fmtspec)<block_end><else_stmt><block_start><return>'return svqadd_{suf}({in0}, {in1});'.format(**fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # subs <def_stmt>subs simd_ext from_typ<block_start><if_stmt>from_typ<in>common.ftypes<block_start><return>'return nsimd_sub_{simd_ext}_{from_typ}({in0}, {in1});'.format(**fmtspec)<block_end><elif_stmt>simd_ext<in>neon<block_start><return>'return vqsubq_{suf}({in0}, {in1});'.format(**fmtspec)<block_end><else_stmt><block_start><return>'return svqsub_{suf}({in0}, {in1});'.format(**fmtspec)<block_end><block_end># ----------------------------------------------------------------------------- # to_mask <def_stmt>to_mask1 opts simd_ext typ<block_start><if_stmt>typ<in>common.itypes+common.ftypes<block_start>normal='return vreinterpretq_{suf}_u{typnbits}({in0});'.format(**fmtspec)<block_end><else_stmt><block_start>normal='return {in0};'.format(**fmtspec)<block_end>emulate_f16='''nsimd_{simd_ext}_vf16 ret; ret.v0 = nsimd_to_mask_{simd_ext}_f32({in0}.v0); ret.v1 = nsimd_to_mask_{simd_ext}_f32({in0}.v1); return ret;'''.format(**fmtspec)<if_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f16'<block_start><return>emulate_f16<block_end><elif_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = nsimd_scalar_reinterpret_f64_u64({in0}.v0); ret.v1 = nsimd_scalar_reinterpret_f64_u64({in0}.v1); return ret;'''.format(**fmtspec)<block_end><elif_stmt>simd_ext<eq>'aarch64'<and>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else {emulate_f16} #endif'''.format(normal=normal
emulate_f16=emulate_f16)<block_end><elif_stmt>simd_ext<in>sve<block_start><if_stmt>opts.sve_emulate_bool<block_start><return>'return svreinterpret_{suf}_u{typnbits}({in0});'.format(**fmtspec)<block_end><else_stmt><block_start>utyp='u{}'.format(fmtspec['typnbits'])<line_sep><return>'''return svreinterpret_{suf}_{utyp}(svsel_{utyp}( {in0}, svdup_n_{utyp}(({utyp})-1), svdup_n_{utyp}(({utyp})0)));'''.format(utyp=utyp **fmtspec)<block_end><block_end><else_stmt><block_start><return>normal<block_end><block_end># ----------------------------------------------------------------------------- # iota <def_stmt>iota simd_ext typ<block_start><if_stmt>simd_ext<in>sve<block_start><if_stmt>typ<in>common.iutypes<block_start><return>'return svindex_{suf}(0, 1);'.format(**fmtspec)<block_end><else_stmt><block_start><return>'''return svcvt_{suf}_s{typnbits}_x({svtrue}, svindex_s{typnbits}(0, 1));'''.format(**fmtspec)<block_end><block_end><if_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = 0.0; ret.v1 = 1.0; return ret;'''.format(**fmtspec)<block_end>typ2='f32'<if>typ<eq>'f16'<else>typ<line_sep>le=128<floordiv>int(typ[1:])<line_sep>iota=', '.join(['({typ2}){i}'.format(typ2=typ2 i=i)<for>i range(le)])<line_sep>normal='''{typ} buf[{le}] = {{ {iota} }}; return vld1q_{suf}(buf);'''.format(le=le iota=iota **fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal} #else f32 buf[8] = {{ {iota} }}; nsimd_{simd_ext}_vf16 ret; ret.v0 = vld1q_f32(buf); ret.v1 = vld1q_f32(buf + 4); return ret; #endif'''.format(iota=iota normal=normal **fmtspec)<block_end><return>normal<block_end># ----------------------------------------------------------------------------- # mask_for_loop_tail <def_stmt>mask_for_loop_tail simd_ext typ<block_start><if_stmt>typ<eq>'f16'<block_start>threshold='nsimd_f32_to_f16((f32)({in1} - {in0}))'.format(**fmtspec)<block_end><else_stmt><block_start>threshold='({typ})({in1} - {in0})'.format(**fmtspec)<block_end><if_stmt>simd_ext<eq>'sve'<block_start>le='nsimd_len_sve_{typ}()'.format(**fmtspec)<block_end><elif_stmt>simd_ext<in>fixed_sized_sve<block_start>le=int(simd_ext[3:])<floordiv>int(typ[1:])<block_end><else_stmt><block_start>le=128<floordiv>int(typ[1:])<block_end><return>'''if ({in0} >= {in1}) {{ return nsimd_set1l_{simd_ext}_{typ}(0); }} if ({in1} - {in0} < {le}) {{ nsimd_{simd_ext}_v{typ} n = nsimd_set1_{simd_ext}_{typ}({threshold}); return nsimd_lt_{simd_ext}_{typ}( nsimd_iota_{simd_ext}_{typ}(), n); }} else {{ return nsimd_set1l_{simd_ext}_{typ}(1); }}'''.format(le=le threshold=threshold **fmtspec)<block_end># ----------------------------------------------------------------------------- # to_logical <def_stmt>to_logical1 opts simd_ext typ<block_start><if_stmt>typ<in>common.iutypes<block_start><return>'''return nsimd_ne_{simd_ext}_{typ}({in0}, nsimd_set1_{simd_ext}_{typ}(({typ})0));'''.format(**fmtspec)<block_end>normal_fp='''return nsimd_reinterpretl_{simd_ext}_{suf}_{utyp}( nsimd_ne_{simd_ext}_{utyp}( nsimd_reinterpret_{simd_ext}_{utyp}_{typ}( {in0}), nsimd_set1_{simd_ext}_{utyp}(({utyp})0)));'''.format(utyp='u{}'.format(fmtspec['typnbits']) **fmtspec)<if_stmt>typ<in>['f32' 'f64']<or>(typ<eq>'f16'<and>simd_ext<in>sve)<block_start><return>normal_fp<block_end>emulate_fp16='''nsimd_{simd_ext}_vlf16 ret; ret.v0 = nsimd_to_logical_{simd_ext}_f32({in0}.v0); ret.v1 = nsimd_to_logical_{simd_ext}_f32({in0}.v1); return ret;'''.format(**fmtspec)<if_stmt>simd_ext<eq>'aarch64'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {normal_fp} 
#else {emulate_fp16} #endif'''.format(normal_fp=normal_fp emulate_fp16=emulate_fp16)<block_end><elif_stmt>simd_ext<eq>'neon128'<block_start><return>emulate_fp16<block_end><block_end># ----------------------------------------------------------------------------- # unpack functions <def_stmt>zip_unzip_half func simd_ext typ<block_start><if_stmt>simd_ext<eq>'aarch64'<or>simd_ext<in>sve<block_start><if_stmt>typ<eq>'f16'<and>simd_ext<eq>'aarch64'<block_start><if_stmt>func<in>['zip1' 'zip2']<block_start><return>'''\ #ifdef NSIMD_ARM_FP16 return {s}v{op}{q}_{suf}({in0}, {in1}); #else nsimd_{simd_ext}_v{typ} ret; ret.v0 = {s}vzip1{q}_f32({in0}.v{i}, {in1}.v{i}); ret.v1 = {s}vzip2{q}_f32({in0}.v{i}, {in1}.v{i}); return ret; #endif '''.format(op=func i='0'<if>func<in>['zip1' 'uzp1']<else>'1' s='s'<if>simd_ext<in>sve<else>'' q=''<if>simd_ext<in>sve<else>'q' **fmtspec)<block_end><else_stmt><block_start><return>'''\ #ifdef NSIMD_ARM_FP16 return {s}v{op}{q}_{suf}({in0}, {in1}); #else nsimd_{simd_ext}_v{typ} ret; ret.v0 = {s}v{func}{q}_f32({in0}.v0, {in0}.v1); ret.v1 = {s}v{func}{q}_f32({in1}.v0, {in1}.v1); return ret; #endif'''.format(op=func func=func s='s'<if>simd_ext<in>sve<else>'' q=''<if>simd_ext<in>sve<else>'q' **fmtspec)<block_end><block_end><else_stmt><block_start><return>'return {s}v{op}{q}_{suf}({in0}, {in1});'.format(op=func s='s'<if>simd_ext<in>sve<else>'' q=''<if>simd_ext<in>sve<else>'q' **fmtspec)<block_end><block_end><elif_stmt>simd_ext<eq>'neon128'<block_start>armop={'zip1':'zipq' 'zip2':'zipq' 'uzp1':'uzpq' 'uzp2':'uzpq'}<line_sep>prefix={'i':'int' 'u':'uint' 'f':'float'}<line_sep>neon_typ='{}{}x{}x2_t'.format(prefix[typ[0]] typ[1:] 128<floordiv>int(typ[1:]))<if_stmt>typ<eq>'f16'<block_start><if_stmt>func<in>['zip1' 'zip2']<block_start><return>'''\ nsimd_{simd_ext}_v{typ} ret; float32x4x2_t tmp = v{op}_f32({in0}.v{i}, {in1}.v{i}); ret.v0 = tmp.val[0]; ret.v1 = tmp.val[1]; return ret; '''.format(i='0'<if>func<eq>'zip1'<else>'1' op=armop[func] **fmtspec)<block_end><else_stmt><block_start><return>'''\ nsimd_{simd_ext}_v{typ} ret; float32x4x2_t tmp0 = vuzpq_f32({in0}.v0, {in0}.v1); float32x4x2_t tmp1 = vuzpq_f32({in1}.v0, {in1}.v1); ret.v0 = tmp0.val[{i}]; ret.v1 = tmp1.val[{i}]; return ret; '''.format(i='0'<if>func<eq>'uzp1'<else>'1' **fmtspec)<block_end><block_end><elif_stmt>typ<in>['i64' 'u64']<block_start><return>'''\ {typ} buf0[2], buf1[2]; {typ} ret[2]; vst1q_{suf}(buf0, {in0}); vst1q_{suf}(buf1, {in1}); ret[0] = buf0[{i}]; ret[1] = buf1[{i}]; return vld1q_{suf}(ret);'''.format(**fmtspec i='0'<if>func<in>['zip1' 'uzp1']<else>'1')<block_end><elif_stmt>typ<eq>'f64'<block_start><return>'''\ nsimd_{simd_ext}_v{typ} ret; ret.v0 = {in0}.v{i}; ret.v1 = {in1}.v{i}; return ret;'''.format(**fmtspec i='0'<if>func<in>['zip1' 'uzp1']<else>'1')<block_end><else_stmt><block_start><return>'''\ {neon_typ} res; res = v{op}_{suf}({in0}, {in1}); return res.val[{i}];'''.format(neon_typ=neon_typ op=armop[func] **fmtspec i='0'<if>func<in>['zip1' 'uzp1']<else>'1')<block_end><block_end><block_end><def_stmt>zip_unzip func simd_ext typ<block_start>lo_hi='''\ nsimd_{simd_ext}_v{typ}x2 ret; ret.v0 = nsimd_{func}lo_{simd_ext}_{typ}({in0}, {in1}); ret.v1 = nsimd_{func}hi_{simd_ext}_{typ}({in0}, {in1}); return ret; '''.format(func='zip'<if>func<eq>'zip'<else>'unzip' **fmtspec)<if_stmt>simd_ext<eq>'aarch64'<or>simd_ext<in>sve<block_start>content='''\ nsimd_{simd_ext}_v{typ}x2 ret; ret.v0 = {s}v{func}1{q}_{suf}({in0}, {in1}); ret.v1 = {s}v{func}2{q}_{suf}({in0}, {in1}); return 
ret;'''.format(s='s'<if>simd_ext<in>sve<else>'' q=''<if>simd_ext<in>sve<else>'q' func=func **fmtspec)<if_stmt>typ<eq>'f16'<block_start><return>'''\ #ifdef NSIMD_ARM_FP16 {c} #else {default} #endif'''.format(c=content default=lo_hi s='s'<if>simd_ext<in>sve<else>'' **fmtspec)<block_end><else_stmt><block_start><return>content<block_end><block_end><else_stmt><block_start>prefix={'i':'int' 'u':'uint' 'f':'float'}<line_sep>neon_typ='{}{}x{}x2_t'.format(prefix[typ[0]] typ[1:] 128<floordiv>int(typ[1:]))<line_sep>content='''\ nsimd_{simd_ext}_v{typ}x2 ret; {neon_typ} tmp = v{func}q_{suf}({in0}, {in1}); ret.v0 = tmp.val[0]; ret.v1 = tmp.val[1]; return ret;'''.format(func=func neon_typ=neon_typ **fmtspec)<if_stmt>typ<in>['u64' 'i64' 'f64']<block_start><return>lo_hi<block_end><elif_stmt>typ<eq>'f16'<block_start><return>'''\ #ifdef NSIMD_ARM_FP16 {content} #else {default} #endif'''.format(content=content default=lo_hi f='zip'<if>func<eq>'zip'<else>'unzip' **fmtspec)<block_end><else_stmt><block_start><return>content<block_end><block_end><block_end># ----------------------------------------------------------------------------- # gather <def_stmt>gather simd_ext typ<block_start>le=max_len(simd_ext typ)<line_sep>real_le=real_len(simd_ext typ)<if_stmt>simd_ext<in>sve<block_start>emul='''int i; {typ} buf[{le}]; i{typnbits} offset_buf[{le}]; svst1_s{typnbits}({svtrue}, offset_buf, {in1}); for (i = 0; i < {real_le}; i++) {{ buf[i] = {in0}[offset_buf[i]]; }} return svld1_{suf}({svtrue}, buf);'''.format(le=le real_le=real_le **fmtspec)<block_end><else_stmt><block_start>emul='''nsimd_{simd_ext}_v{typ} ret; ret = vdupq_n_{suf}({in0}[vgetq_lane_s{typnbits}({in1}, 0)]);'''.format(**fmtspec)+''.join(['''ret = vsetq_lane_{suf}({in0}[ vgetq_lane_s{typnbits}({in1}, {i})], ret, {i});\n'''.format(i=i **fmtspec)<for>i range(1 le)])+'return ret;'<block_end><if_stmt>typ<eq>'f16'<block_start><if_stmt>simd_ext<in>sve<block_start><return>emul<block_end><return>'''#ifdef NSIMD_ARM_FP16 {emul} #else nsimd_{simd_ext}_vf16 ret; f32 buf[8]; '''.format(emul=emul **fmtspec)+''.join(['buf[{i}] = nsimd_f16_to_f32({in0}['<concat>'vgetq_lane_s16({in1}, {i})]);\n'.format(i=i **fmtspec)<for>i range(4)])+''.join(['buf[4 + {i}] = nsimd_f16_to_f32({in0}['<concat>'vgetq_lane_s16({in1}, 4 + {i})]);\n'.format(i=i **fmtspec)<for>i range(4)])+''' ret.v0 = vld1q_f32(buf); ret.v1 = vld1q_f32(buf + 4); return ret; #endif'''.format(**fmtspec)<block_end><if_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''nsimd_neon128_vf64 ret; i64 offset_buf[2]; vst1q_s64(offset_buf, {in1}); ret.v0 = {in0}[offset_buf[0]]; ret.v1 = {in0}[offset_buf[1]]; return ret;'''.format(**fmtspec)<block_end><if_stmt>simd_ext<in>neon<or>typ<in>['i8' 'u8' 'i16' 'u16']<block_start><return>emul<block_end># getting here means SVE <return>'return svld1_gather_s{typnbits}index_{suf}({svtrue}, {in0}, '<concat>'{in1});'.format(**fmtspec)<block_end># ----------------------------------------------------------------------------- # linear gather <def_stmt>gather_linear simd_ext typ<block_start><if_stmt>simd_ext<in>sve<block_start><if_stmt>typ<in>['i8' 'u8' 'i16' 'u16' 'f16']<block_start>le=max_len(simd_ext typ)<line_sep>real_le=real_len(simd_ext typ)<line_sep><return>'''{typ} buf[{le}]; int i; for (i = 0; i < {real_le}; i++) {{ buf[i] = {in0}[i * {in1}]; }} return svld1_{suf}({svtrue}, buf);'''.format(le=le real_le=real_le **fmtspec)<block_end><else_stmt><block_start><return>'return svld1_gather_s{typnbits}index_{suf}({svtrue}, '<concat>'{in0}, svindex_s{typnbits}(0, 
(i{typnbits}){in1}));'.format(**fmtspec)<block_end><block_end># getting here means neon128 and aarch64 intrinsic='''nsimd_{simd_ext}_v{typ} ret; ret = vdupq_n_{suf}({in0}[0]); '''.format(**fmtspec)+''.join(['ret = vsetq_lane_{suf}({in0}[{i} * {in1}], ret, {i});\n'.format(i=i **fmtspec)<for>i range(1 128<floordiv>int(fmtspec['typnbits']))])+'''return ret;'''<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {intrinsic} #else nsimd_{simd_ext}_vf16 ret; f32 buf[8]; int i; for (i = 0; i < 8; i++) {{ buf[i] = nsimd_f16_to_f32({in0}[i * {in1}]); }} ret.v0 = vld1q_f32(buf); ret.v1 = vld1q_f32(buf + 4); return ret; #endif'''.format(intrinsic=intrinsic **fmtspec)<block_end><if_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'''nsimd_neon128_vf64 ret; ret.v0 = {in0}[0]; ret.v1 = {in0}[{in1}]; return ret;'''.format(**fmtspec)<block_end><return>intrinsic<block_end># ----------------------------------------------------------------------------- # masked gather <def_stmt>maskoz_gather oz simd_ext typ<block_start>le=max_len(simd_ext typ)<line_sep>real_le=real_len(simd_ext typ)<if_stmt>simd_ext<in>sve<block_start>utyp='u{typnbits}'.format(**fmtspec)<line_sep>store='''svst1_s{typnbits}({svtrue}, offset_buf, {in2}); svst1_{utyp}({svtrue}, mask, svsel_{utyp}( {in0}, svdup_n_{utyp}(({utyp})-1), svdup_n_{utyp}( ({utyp})0))); '''.format(utyp=utyp **fmtspec)<if_stmt>oz<eq>'z'<block_start>store<augadd>'svst1_{suf}({svtrue}, buf, svdup_n_{suf}(({typ})0));'.format(**fmtspec)<block_end><else_stmt><block_start>store<augadd>'svst1_{suf}({svtrue}, buf, {in3});'.format(**fmtspec)<block_end>load='svld1_{suf}({svtrue}, buf)'.format(**fmtspec)<block_end><else_stmt><block_start>store='''vst1q_s{typnbits}(offset_buf, {in2}); vst1q_u{typnbits}(mask, {in0});'''.format(**fmtspec)<if_stmt>oz<eq>'z'<block_start>store<augadd>'vst1q_{suf}(buf, vdupq_n_{suf}(({typ})0));'.format(**fmtspec)<block_end><else_stmt><block_start>store<augadd>'vst1q_{suf}(buf, {in3});'.format(**fmtspec)<block_end>load='vld1q_{suf}(buf)'.format(**fmtspec)<block_end>emul='''int i; {typ} buf[{le}]; u{typnbits} mask[{le}]; i{typnbits} offset_buf[{le}]; {store} for (i = 0; i < {real_le}; i++) {{ if (mask[i]) {{ buf[i] = {in1}[offset_buf[i]]; }} }} return {load};'''.format(le=le real_le=real_le store=store load=load **fmtspec)<if_stmt>typ<eq>'f16'<block_start><if_stmt>simd_ext<in>sve<block_start><return>emul<block_end><if_stmt>oz<eq>'z'<block_start>oz0='vdupq_n_f32(0.0f)'<line_sep>oz1=oz0<block_end><else_stmt><block_start>oz0='{in3}.v0'.format(**fmtspec)<line_sep>oz1='{in3}.v1'.format(**fmtspec)<block_end><return>'''#ifdef NSIMD_ARM_FP16 {emul} #else nsimd_{simd_ext}_vf16 ret; int i; f32 buf[{le}]; u32 mask[{le}]; i16 offset_buf[{le}]; vst1q_s16(offset_buf, {in2}); vst1q_f32(buf, {oz0}); vst1q_f32(buf + {leo2}, {oz1}); vst1q_u32(mask, {in0}.v0); vst1q_u32(mask + {leo2}, {in0}.v1); for (i = 0; i < {le}; i++) {{ if (mask[i]) {{ buf[i] = nsimd_f16_to_f32({in1}[offset_buf[i]]); }} }} ret.v0 = vld1q_f32(buf); ret.v1 = vld1q_f32(buf + {leo2}); return ret; #endif'''.format(emul=emul leo2=le<floordiv>2 le=le oz0=oz0 oz1=oz1 **fmtspec)<block_end><if_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start>oz0='0.0'<if>oz<eq>'z'<else>'{in3}.v0'.format(**fmtspec)<line_sep>oz1='0.0'<if>oz<eq>'z'<else>'{in3}.v1'.format(**fmtspec)<line_sep><return>'''nsimd_neon128_vf64 ret; i64 offset_buf[2]; vst1q_s64(offset_buf, {in2}); if ({in0}.v0) {{ ret.v0 = {in1}[offset_buf[0]]; }} else {{ ret.v0 = {oz0}; }} if ({in0}.v1) {{ ret.v1 = {in1}[offset_buf[1]]; 
}} else {{ ret.v1 = {oz1}; }} return ret;'''.format(oz0=oz0 oz1=oz1 **fmtspec)<block_end><if_stmt>simd_ext<in>neon<or>typ<in>['i8' 'u8' 'i16' 'u16']<block_start><return>emul<block_end># getting here means SVE oz0='svdup_n_{suf}(({typ})0)'.format(**fmtspec)<if>oz<eq>'z'<else>'{in3}'.format(**fmtspec)<line_sep><return>'''return svsel_{suf}({in0}, svld1_gather_s{typnbits}index_{suf}( {in0}, {in1}, {in2}), {oz0});'''.format(oz0=oz0 **fmtspec)<block_end># ----------------------------------------------------------------------------- # scatter <def_stmt>scatter simd_ext typ<block_start>le=max_len(simd_ext typ)<line_sep>real_le=real_len(simd_ext typ)<if_stmt>simd_ext<in>sve<block_start>emul='''int i; {typ} buf[{le}]; i{typnbits} offset_buf[{le}]; svst1_s{typnbits}({svtrue}, offset_buf, {in1}); svst1_{suf}({svtrue}, buf, {in2}); for (i = 0; i < {real_le}; i++) {{ {in0}[offset_buf[i]] = buf[i]; }}'''.format(le=le real_le=real_le **fmtspec)<block_end><else_stmt><block_start>emul='\n'.join(['{in0}[vgetq_lane_s{typnbits}({in1}, {i})] = '<concat>'vgetq_lane_{suf}({in2}, {i});\n'.format(i=i **fmtspec)<for>i range(int(le))])<block_end><if_stmt>typ<eq>'f16'<block_start><if_stmt>simd_ext<in>sve<block_start><return>emul<block_end><return>'''#ifdef NSIMD_ARM_FP16 {emul} #else '''.format(emul=emul)+'\n'.join(['{in0}[vgetq_lane_s16({in1}, {i})] = '<concat>'nsimd_f32_to_f16(vgetq_lane_f32({in2}.v0, '<concat>'{i}));\n'.format(i=i **fmtspec)<for>i range(4)])+'\n'.join(['{in0}[vgetq_lane_s16({in1}, 4 + {i})] = '<concat>'nsimd_f32_to_f16(vgetq_lane_f32({in2}.v1, '<concat>'{i}));\n'.format(i=i **fmtspec)<for>i range(4)])+''' #endif'''<block_end><if_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''i64 offset_buf[2]; vst1q_s64(offset_buf, {in1}); {in0}[offset_buf[0]] = {in2}.v0; {in0}[offset_buf[1]] = {in2}.v1;'''.format(**fmtspec)<block_end><if_stmt>simd_ext<in>neon<or>typ<in>['i8' 'u8' 'i16' 'u16']<block_start><return>emul<block_end># getting here means SVE <return>'svst1_scatter_s{typnbits}index_{suf}({svtrue}, {in0}, '<concat>'{in1}, {in2});'.format(le=le **fmtspec)<block_end># ----------------------------------------------------------------------------- # linear scatter <def_stmt>scatter_linear simd_ext typ<block_start><if_stmt>simd_ext<in>sve<block_start><if_stmt>typ<in>['i8' 'u8' 'i16' 'u16' 'f16']<block_start>le=max_len(simd_ext typ)<line_sep>real_le=real_len(simd_ext typ)<line_sep><return>'''{typ} buf[{le}]; int i; svst1_{suf}({svtrue}, buf, {in2}); for (i = 0; i < {real_le}; i++) {{ {in0}[i * {in1}] = buf[i]; }}'''.format(le=le real_le=real_le **fmtspec)<block_end><else_stmt><block_start><return>'svst1_scatter_s{typnbits}index_{suf}({svtrue}, {in0}, '<concat>'svindex_s{typnbits}(0, (i{typnbits}){in1}), {in2});'.format(**fmtspec)<block_end><block_end># getting here means neon128 and aarch64 intrinsic='\n'.join(['{in0}[{i} * {in1}] = vgetq_lane_{suf}({in2}, {i});'.format(i=i **fmtspec)<for>i range(128<floordiv>int(fmtspec['typnbits']))])<if_stmt>typ<eq>'f16'<block_start><return>'''#ifdef NSIMD_ARM_FP16 {intrinsic} #else f32 buf[8]; int i; vst1q_f32(buf, {in2}.v0); vst1q_f32(buf + 4, {in2}.v1); for (i = 0; i < 8; i++) {{ {in0}[i * {in1}] = nsimd_f32_to_f16(buf[i]); }} #endif'''.format(intrinsic=intrinsic **fmtspec)<block_end><if_stmt>typ<eq>'f64'<and>simd_ext<eq>'neon128'<block_start><return>'''{in0}[0] = {in2}.v0; {in0}[{in1}] = {in2}.v1;'''.format(**fmtspec)<block_end><return>intrinsic<block_end># ----------------------------------------------------------------------------- # mask_scatter 
<def_stmt>mask_scatter simd_ext typ<block_start>le=max_len(simd_ext typ)<line_sep>real_le=real_len(simd_ext typ)<if_stmt>simd_ext<in>sve<block_start>store='''svst1_s{typnbits}({svtrue}, offset_buf, {in2}); svst1_u{typnbits}({svtrue}, mask, svsel_u{typnbits}( {in0}, svdup_n_u{typnbits}((u{typnbits})1), svdup_n_u{typnbits}((u{typnbits})0))); svst1_{suf}({svtrue}, buf, {in3});'''.format(**fmtspec)<block_end><else_stmt><block_start>store='''vst1q_s{typnbits}(offset_buf, {in2}); vst1q_{suf}(buf, {in3}); vst1q_u{typnbits}(mask, {in0});'''.format(**fmtspec)<block_end>emul='''int i; {typ} buf[{le}]; u{typnbits} mask[{le}]; i{typnbits} offset_buf[{le}]; {store} for (i = 0; i < {real_le}; i++) {{ if (mask[i]) {{ {in1}[offset_buf[i]] = buf[i]; }} }}'''.format(le=le real_le=real_le store=store **fmtspec)<if_stmt>typ<eq>'f16'<block_start><if_stmt>simd_ext<in>sve<block_start><return>emul<block_end><return>'''#ifdef NSIMD_ARM_FP16 {emul} #else int i; f32 buf[{le}]; u32 mask[{le}]; i16 offset_buf[{le}]; vst1q_s16(offset_buf, {in2}); vst1q_f32(buf, {in3}.v0); vst1q_f32(buf + {leo2}, {in3}.v1); vst1q_u32(mask, {in0}.v0); vst1q_u32(mask + {leo2}, {in0}.v1); for (i = 0; i < {le}; i++) {{ if (mask[i]) {{ {in1}[offset_buf[i]] = nsimd_f32_to_f16(buf[i]); }} }} #endif'''.format(emul=emul le=le leo2=le<floordiv>2 **fmtspec)<block_end><if_stmt>simd_ext<eq>'neon128'<and>typ<eq>'f64'<block_start><return>'''i64 offset_buf[2]; vst1q_s64(offset_buf, {in2}); if ({in0}.v0) {{ {in1}[offset_buf[0]] = {in3}.v0; }} if ({in0}.v1) {{ {in1}[offset_buf[1]] = {in3}.v1; }}'''.format(**fmtspec)<block_end><if_stmt>simd_ext<in>neon<or>typ<in>['i8' 'u8' 'i16' 'u16']<block_start><return>emul<block_end># getting here means SVE <return>'svst1_scatter_s{typnbits}index_{suf}({in0}, {in1}, '<concat>'{in2}, {in3});'.format(le=le **fmtspec)<block_end># ----------------------------------------------------------------------------- # get_impl function <def_stmt>get_impl opts func simd_ext from_typ to_typ<block_start><global>fmtspec<line_sep>simd_ext2=simd_ext<if><not>simd_ext<in>fixed_sized_sve<else>'sve'<line_sep>fmtspec={'simd_ext':simd_ext 'simd_ext2':simd_ext2 'typ':from_typ 'from_typ':from_typ 'to_typ':to_typ 'suf':suf(from_typ) 'in0':common.in0 'in1':common.in1 'in2':common.in2 'in3':common.in3 'in4':common.in4 'in5':common.in5 'typnbits':from_typ[1:] 'svtrue':'svptrue_b{}()'.format(from_typ[1:]) 'svetyp':sve_typ(from_typ) }<line_sep>impls={'loada':<lambda>:load1234(opts simd_ext from_typ 1) 'masko_loada1':<lambda>:maskoz_load('o' simd_ext from_typ) 'maskz_loada1':<lambda>:maskoz_load('z' simd_ext from_typ) 'load2a':<lambda>:load1234(opts simd_ext from_typ 2) 'load3a':<lambda>:load1234(opts simd_ext from_typ 3) 'load4a':<lambda>:load1234(opts simd_ext from_typ 4) 'loadu':<lambda>:load1234(opts simd_ext from_typ 1) 'masko_loadu1':<lambda>:maskoz_load('o' simd_ext from_typ) 'maskz_loadu1':<lambda>:maskoz_load('z' simd_ext from_typ) 'load2u':<lambda>:load1234(opts simd_ext from_typ 2) 'load3u':<lambda>:load1234(opts simd_ext from_typ 3) 'load4u':<lambda>:load1234(opts simd_ext from_typ 4) 'storea':<lambda>:store1234(opts simd_ext from_typ 1) 'mask_storea1':<lambda>:mask_store(simd_ext from_typ) 'store2a':<lambda>:store1234(opts simd_ext from_typ 2) 'store3a':<lambda>:store1234(opts simd_ext from_typ 3) 'store4a':<lambda>:store1234(opts simd_ext from_typ 4) 'storeu':<lambda>:store1234(opts simd_ext from_typ 1) 'mask_storeu1':<lambda>:mask_store(simd_ext from_typ) 'store2u':<lambda>:store1234(opts simd_ext from_typ 2) 
'store3u':<lambda>:store1234(opts simd_ext from_typ 3) 'store4u':<lambda>:store1234(opts simd_ext from_typ 4) 'gather':<lambda>:gather(simd_ext from_typ) 'gather_linear':<lambda>:gather_linear(simd_ext from_typ) 'maskz_gather':<lambda>:maskoz_gather('z' simd_ext from_typ) 'masko_gather':<lambda>:maskoz_gather('o' simd_ext from_typ) 'scatter':<lambda>:scatter(simd_ext from_typ) 'scatter_linear':<lambda>:scatter_linear(simd_ext from_typ) 'mask_scatter':<lambda>:mask_scatter(simd_ext from_typ) 'andb':<lambda>:binop2("andb" simd_ext2 from_typ) 'xorb':<lambda>:binop2("xorb" simd_ext2 from_typ) 'orb':<lambda>:binop2("orb" simd_ext2 from_typ) 'andl':<lambda>:lop2(opts "andl" simd_ext2 from_typ) 'xorl':<lambda>:lop2(opts "xorl" simd_ext2 from_typ) 'orl':<lambda>:lop2(opts "orl" simd_ext2 from_typ) 'notb':<lambda>:not1(simd_ext2 from_typ) 'notl':<lambda>:lnot1(opts simd_ext2 from_typ) 'andnotb':<lambda>:binop2("andnotb" simd_ext2 from_typ) 'andnotl':<lambda>:lop2(opts "andnotl" simd_ext2 from_typ) 'add':<lambda>:addsub("add" simd_ext2 from_typ) 'sub':<lambda>:addsub("sub" simd_ext2 from_typ) 'adds':<lambda>:adds(simd_ext2 from_typ) 'subs':<lambda>:subs(simd_ext2 from_typ) 'div':<lambda>:div2(simd_ext2 from_typ) 'sqrt':<lambda>:sqrt1(simd_ext2 from_typ) 'len':<lambda>:len1(simd_ext from_typ) 'mul':<lambda>:mul2(simd_ext2 from_typ) 'shl':<lambda>:shl_shr("shl" simd_ext2 from_typ) 'shr':<lambda>:shl_shr("shr" simd_ext2 from_typ) 'shra':<lambda>:shra(simd_ext2 from_typ) 'set1':<lambda>:set1(simd_ext2 from_typ) 'set1l':<lambda>:lset1(simd_ext2 from_typ) 'eq':<lambda>:cmp2(opts "eq" simd_ext2 from_typ) 'lt':<lambda>:cmp2(opts "lt" simd_ext2 from_typ) 'le':<lambda>:cmp2(opts "le" simd_ext2 from_typ) 'gt':<lambda>:cmp2(opts "gt" simd_ext2 from_typ) 'ge':<lambda>:cmp2(opts "ge" simd_ext2 from_typ) 'ne':<lambda>:neq2(opts simd_ext2 from_typ) 'if_else1':<lambda>:if_else3(opts simd_ext2 from_typ) 'min':<lambda>:minmax2("min" simd_ext2 from_typ) 'max':<lambda>:minmax2("max" simd_ext2 from_typ) 'loadla':<lambda>:loadl(<true> simd_ext2 from_typ) 'loadlu':<lambda>:loadl(<false> simd_ext2 from_typ) 'storela':<lambda>:storel(<true> simd_ext2 from_typ) 'storelu':<lambda>:storel(<false> simd_ext2 from_typ) 'abs':<lambda>:abs1(simd_ext2 from_typ) 'fma':<lambda>:fmafnma3("fma" simd_ext2 from_typ) 'fnma':<lambda>:fmafnma3("fnma" simd_ext2 from_typ) 'fms':<lambda>:fmsfnms3("fms" simd_ext2 from_typ) 'fnms':<lambda>:fmsfnms3("fnms" simd_ext2 from_typ) 'ceil':<lambda>:round1("ceil" simd_ext2 from_typ) 'floor':<lambda>:round1("floor" simd_ext2 from_typ) 'trunc':<lambda>:round1("trunc" simd_ext2 from_typ) 'round_to_even':<lambda>:round1("round_to_even" simd_ext2 from_typ) 'all':<lambda>:allany1(opts "all" simd_ext2 from_typ) 'any':<lambda>:allany1(opts "any" simd_ext2 from_typ) 'reinterpret':<lambda>:reinterpret1(simd_ext2 from_typ to_typ) 'reinterpretl':<lambda>:reinterpretl1(simd_ext2 from_typ to_typ) 'cvt':<lambda>:convert1(simd_ext2 from_typ to_typ) 'rec11':<lambda>:recs1("rec11" simd_ext2 from_typ) 'rec8':<lambda>:recs1("rec8" simd_ext2 from_typ) 'rsqrt11':<lambda>:recs1("rsqrt11" simd_ext2 from_typ) 'rsqrt8':<lambda>:recs1("rsqrt8" simd_ext2 from_typ) 'rec':<lambda>:recs1("rec" simd_ext2 from_typ) 'neg':<lambda>:neg1(simd_ext2 from_typ) 'nbtrue':<lambda>:nbtrue1(opts simd_ext2 from_typ) 'reverse':<lambda>:reverse1(simd_ext2 from_typ) 'addv':<lambda>:addv(simd_ext2 from_typ) 'upcvt':<lambda>:upcvt1(simd_ext2 from_typ to_typ) 'downcvt':<lambda>:downcvt1(simd_ext2 from_typ to_typ) 'to_logical':<lambda>:to_logical1(opts 
simd_ext2 from_typ) 'to_mask':<lambda>:to_mask1(opts simd_ext2 from_typ) 'ziplo':<lambda>:zip_unzip_half("zip1" simd_ext2 from_typ) 'ziphi':<lambda>:zip_unzip_half("zip2" simd_ext2 from_typ) 'unziplo':<lambda>:zip_unzip_half("uzp1" simd_ext2 from_typ) 'unziphi':<lambda>:zip_unzip_half("uzp2" simd_ext2 from_typ) 'zip':<lambda>:zip_unzip("zip" simd_ext2 from_typ) 'unzip':<lambda>:zip_unzip("uzp" simd_ext2 from_typ) 'mask_for_loop_tail':<lambda>:mask_for_loop_tail(simd_ext from_typ) 'iota':<lambda>:iota(simd_ext2 from_typ)}<if_stmt>simd_ext<not><in>get_simd_exts()<block_start><raise>ValueError('Unknown SIMD extension "{}"'.format(simd_ext))<block_end><if_stmt><not>from_typ<in>common.types<block_start><raise>ValueError('Unknown type "{}"'.format(from_typ))<block_end><if_stmt><not>func<in>impls<block_start><return>common.NOT_IMPLEMENTED<block_end><else_stmt><block_start><return>impls[func]()<block_end><block_end>
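# ----------------------------------------------------------------------------- # Usage sketch. Illustrative only, not part of the generator: it shows how the # get_impl() dispatcher above can be driven to emit one C implementation per # (operator, type) pair. The _ExampleOpts stub and the operator list are # assumptions made for this example; the real driver passes a fully populated # options object. <class_stmt>_ExampleOpts(object)<block_start>sve_emulate_bool=<false><block_end><def_stmt>_emit_example_kernels <block_start>opts=_ExampleOpts()<for_stmt>func ['adds' 'reverse' 'addv']<block_start><for_stmt>typ ['f32' 'f64']<block_start>print('/* {} aarch64 {} */'.format(func typ))<line_sep>print(get_impl(opts func 'aarch64' typ typ))<block_end><block_end><block_end>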
""" Simple water flow example using ANUGA Water driven up a linear slope and time varying boundary, similar to a beach environment This is a very simple test of the parallel algorithm using the simplified parallel API """<import_from_future_stmt> print_function<import_from_future_stmt> division<line_sep>#------------------------------------------------------------------------------ # Import necessary modules #------------------------------------------------------------------------------ <import_from_stmt>past.utils old_div<import_from_stmt>future.utils raise_<import_stmt>unittest<import_stmt>os<import_stmt>sys<line_sep>#import pypar <import_stmt>numpy<as>num<import_stmt>anuga<import_from_stmt>anuga Domain<import_from_stmt>anuga Reflective_boundary<import_from_stmt>anuga Dirichlet_boundary<import_from_stmt>anuga Time_boundary<import_from_stmt>anuga Transmissive_boundary<import_from_stmt>anuga rectangular_cross_domain<import_from_stmt>anuga distribute myid numprocs send receive barrier finalize<import_from_stmt>anuga.parallel.sequential_distribute sequential_distribute_dump<import_from_stmt>anuga.parallel.sequential_distribute sequential_distribute_load<import_stmt>anuga.utilities.plot_utils<as>util<line_sep>#-------------------------------------------------------------------------- # Setup parameters #-------------------------------------------------------------------------- yieldstep=0.25<line_sep>finaltime=1.0<line_sep>nprocs=4<line_sep>N=29<line_sep>M=29<line_sep>verbose=<false><line_sep>new_parameters={}<line_sep>new_parameters['ghost_layer_width']=2<line_sep>#--------------------------------- # Setup Functions #--------------------------------- <def_stmt>topography x y<block_start><return>old_div(-x 2)<block_end>########################################################################### # Setup Test ########################################################################## <def_stmt>run_simulation parallel=<false> verbose=<false>#-------------------------------------------------------------------------- # Setup computational domain and quantities #-------------------------------------------------------------------------- <block_start><if_stmt>myid<eq>0<block_start>domain=rectangular_cross_domain(M N)<line_sep>domain.set_name('odomain')# Set sww filename domain.set_datadir('.')<line_sep>domain.set_quantity('elevation' topography)# Use function for elevation domain.set_quantity('friction' 0.0)# Constant friction domain.set_quantity('stage' expression='elevation')# Dry initial stage <block_end><else_stmt><block_start>domain=<none><block_end>#-------------------------------------------------------------------------- # Create pickled partition #-------------------------------------------------------------------------- <if_stmt>myid<eq>0<block_start><if_stmt>verbose<block_start>print('DUMPING PARTITION DATA')<block_end>sequential_distribute_dump(domain numprocs verbose=verbose parameters=new_parameters)<block_end>#-------------------------------------------------------------------------- # Create the parallel domains #-------------------------------------------------------------------------- <if_stmt>parallel<block_start><if_stmt>myid<eq>0<and>verbose<block_start>print('DISTRIBUTING TO PARALLEL DOMAIN')<block_end>pdomain=distribute(domain verbose=verbose parameters=new_parameters)<line_sep>pdomain.set_name('pdomain')<if_stmt>myid<eq>0<and>verbose<block_start>print('LOADING IN PARALLEL DOMAIN')<block_end>sdomain=sequential_distribute_load(filename='odomain' 
verbose=verbose)<line_sep>sdomain.set_name('sdomain')<block_end><if_stmt>myid<eq>0<and>verbose<block_start>print('EVOLVING pdomain')<block_end>setup_and_evolve(pdomain verbose=verbose)<if_stmt>myid<eq>0<and>verbose<block_start>print('EVOLVING sdomain')<block_end>setup_and_evolve(sdomain verbose=verbose)<if_stmt>myid<eq>0<block_start><if_stmt>verbose<block_start>print('EVOLVING odomain')<block_end>setup_and_evolve(domain verbose=verbose)<block_end><if_stmt>myid<eq>0<and>verbose<block_start>parameter_file=open('odomain.txt' 'w')<import_from_stmt>pprint pprint<line_sep>pprint(domain.get_algorithm_parameters() parameter_file indent=4)<line_sep>parameter_file.close()<line_sep>parameter_file=open('sdomain.txt' 'w')<import_from_stmt>pprint pprint<line_sep>pprint(sdomain.get_algorithm_parameters() parameter_file indent=4)<line_sep>parameter_file.close()<line_sep>parameter_file=open('pdomain.txt' 'w')<import_from_stmt>pprint pprint<line_sep>pprint(pdomain.get_algorithm_parameters() parameter_file indent=4)<line_sep>parameter_file.close()<block_end><assert_stmt>num.allclose(pdomain.quantities['stage'].centroid_values sdomain.quantities['stage'].centroid_values)<assert_stmt>num.allclose(pdomain.quantities['stage'].vertex_values sdomain.quantities['stage'].vertex_values)<assert_stmt>num.allclose(pdomain.vertex_coordinates sdomain.vertex_coordinates)<assert_stmt>num.allclose(pdomain.centroid_coordinates sdomain.centroid_coordinates)<line_sep>#--------------------------------- # Now compare the merged sww files #--------------------------------- <if_stmt>myid<eq>0<block_start><if_stmt>verbose<block_start>print('COMPARING SWW FILES')<block_end>odomain_v=util.get_output('odomain.sww')<line_sep>odomain_c=util.get_centroids(odomain_v)<line_sep>pdomain_v=util.get_output('pdomain.sww')<line_sep>pdomain_c=util.get_centroids(pdomain_v)<line_sep>sdomain_v=util.get_output('sdomain.sww')<line_sep>sdomain_c=util.get_centroids(sdomain_v)<line_sep># Test some values against the original ordering <if_stmt>verbose<block_start>order=2<line_sep>print('PDOMAIN CENTROID VALUES')<line_sep>print(num.linalg.norm(odomain_c.x-pdomain_c.x ord=order))<line_sep>print(num.linalg.norm(odomain_c.y-pdomain_c.y ord=order))<line_sep>print(num.linalg.norm(odomain_c.stage[-1]-pdomain_c.stage[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_c.xmom[-1]-pdomain_c.xmom[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_c.ymom[-1]-pdomain_c.ymom[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_c.xvel[-1]-pdomain_c.xvel[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_c.yvel[-1]-pdomain_c.yvel[-1] ord=order))<line_sep>print('SDOMAIN CENTROID VALUES')<line_sep>print(num.linalg.norm(odomain_c.x-sdomain_c.x ord=order))<line_sep>print(num.linalg.norm(odomain_c.y-sdomain_c.y ord=order))<line_sep>print(num.linalg.norm(odomain_c.stage[-1]-sdomain_c.stage[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_c.xmom[-1]-sdomain_c.xmom[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_c.ymom[-1]-sdomain_c.ymom[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_c.xvel[-1]-sdomain_c.xvel[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_c.yvel[-1]-sdomain_c.yvel[-1] ord=order))<line_sep>print('PDOMAIN VERTEX VALUES')<line_sep>print(num.linalg.norm(odomain_v.stage[-1]-pdomain_v.stage[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_v.xmom[-1]-pdomain_v.xmom[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_v.ymom[-1]-pdomain_v.ymom[-1] 
ord=order))<line_sep>print(num.linalg.norm(odomain_v.xvel[-1]-pdomain_v.xvel[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_v.yvel[-1]-pdomain_v.yvel[-1] ord=order))<line_sep>print('SDOMAIN VERTEX VALUES')<line_sep>print(num.linalg.norm(odomain_v.stage[-1]-sdomain_v.stage[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_v.xmom[-1]-sdomain_v.xmom[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_v.ymom[-1]-sdomain_v.ymom[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_v.xvel[-1]-sdomain_v.xvel[-1] ord=order))<line_sep>print(num.linalg.norm(odomain_v.yvel[-1]-sdomain_v.yvel[-1] ord=order))<block_end><assert_stmt>num.allclose(odomain_c.stage pdomain_c.stage)<assert_stmt>num.allclose(odomain_c.xmom pdomain_c.xmom)<assert_stmt>num.allclose(odomain_c.ymom pdomain_c.ymom)<assert_stmt>num.allclose(odomain_c.xvel pdomain_c.xvel)<assert_stmt>num.allclose(odomain_c.yvel pdomain_c.yvel)<assert_stmt>num.allclose(odomain_v.x pdomain_v.x)<assert_stmt>num.allclose(odomain_v.y pdomain_v.y)<assert_stmt>num.linalg.norm(odomain_v.x-pdomain_v.x ord=0)<eq>0<assert_stmt>num.linalg.norm(odomain_v.y-pdomain_v.y ord=0)<eq>0<assert_stmt>num.linalg.norm(odomain_v.stage[-1]-pdomain_v.stage[-1] ord=0)<l>100<assert_stmt>num.linalg.norm(odomain_v.xmom[-1]-pdomain_v.xmom[-1] ord=0)<l>100<assert_stmt>num.linalg.norm(odomain_v.ymom[-1]-pdomain_v.ymom[-1] ord=0)<l>100<assert_stmt>num.linalg.norm(odomain_v.xvel[-1]-pdomain_v.xvel[-1] ord=0)<l>100<assert_stmt>num.linalg.norm(odomain_v.yvel[-1]-pdomain_v.yvel[-1] ord=0)<l>100<assert_stmt>num.allclose(odomain_c.x sdomain_c.x)<assert_stmt>num.allclose(odomain_c.y sdomain_c.y)<assert_stmt>num.allclose(odomain_c.stage sdomain_c.stage)<assert_stmt>num.allclose(odomain_c.xmom sdomain_c.xmom)<assert_stmt>num.allclose(odomain_c.ymom sdomain_c.ymom)<assert_stmt>num.allclose(odomain_c.xvel sdomain_c.xvel)<assert_stmt>num.allclose(odomain_c.yvel sdomain_c.yvel)<assert_stmt>num.allclose(odomain_v.x sdomain_v.x)<assert_stmt>num.allclose(odomain_v.y sdomain_v.y)<line_sep>order=0<assert_stmt>num.linalg.norm(odomain_v.x-sdomain_v.x ord=order)<eq>0<assert_stmt>num.linalg.norm(odomain_v.y-sdomain_v.y ord=order)<eq>0<assert_stmt>num.linalg.norm(odomain_v.stage[-1]-sdomain_v.stage[-1] ord=order)<l>100<assert_stmt>num.linalg.norm(odomain_v.xmom[-1]-sdomain_v.xmom[-1] ord=order)<l>100<assert_stmt>num.linalg.norm(odomain_v.ymom[-1]-sdomain_v.ymom[-1] ord=order)<l>100<assert_stmt>num.linalg.norm(odomain_v.xvel[-1]-sdomain_v.xvel[-1] ord=order)<l>100<assert_stmt>num.linalg.norm(odomain_v.yvel[-1]-sdomain_v.yvel[-1] ord=order)<l>100<line_sep># COMPARE CENTROID PDOMAIN SDOMAIN <assert_stmt>num.allclose(pdomain_c.x sdomain_c.x)<assert_stmt>num.allclose(pdomain_c.y sdomain_c.y)<assert_stmt>num.allclose(pdomain_c.stage[-1] sdomain_c.stage[-1])<assert_stmt>num.allclose(pdomain_c.xmom[-1] sdomain_c.xmom[-1])<assert_stmt>num.allclose(pdomain_c.ymom[-1] sdomain_c.ymom[-1])<assert_stmt>num.allclose(pdomain_c.xvel[-1] sdomain_c.xvel[-1])<assert_stmt>num.allclose(pdomain_c.yvel[-1] sdomain_c.yvel[-1])<line_sep># COMPARE VERTEX PDOMAIN SDOMAIN <assert_stmt>num.allclose(pdomain_v.x sdomain_v.x)<assert_stmt>num.allclose(pdomain_v.y sdomain_v.y)<assert_stmt>num.allclose(pdomain_v.stage[-1] sdomain_v.stage[-1])<assert_stmt>num.allclose(pdomain_v.xmom[-1] sdomain_v.xmom[-1])<assert_stmt>num.allclose(pdomain_v.ymom[-1] sdomain_v.ymom[-1])<assert_stmt>num.allclose(pdomain_v.xvel[-1] sdomain_v.xvel[-1])<assert_stmt>num.allclose(pdomain_v.yvel[-1] 
sdomain_v.yvel[-1])<line_sep>os.remove('odomain.sww')<line_sep>os.remove('pdomain.sww')<line_sep>os.remove('sdomain.sww')<line_sep>os.remove('odomain_P3_0.pickle')<line_sep>os.remove('odomain_P3_1.pickle')<line_sep>os.remove('odomain_P3_2.pickle')<line_sep>#os.remove('odomain_P4_3.pickle') <import_stmt>glob<line_sep><for_stmt>fl glob.glob('*.npy')<block_start>os.remove(fl)<block_end><block_end><block_end><def_stmt>setup_and_evolve domain verbose=<false>#-------------------------------------------------------------------------- # Setup domain parameters #-------------------------------------------------------------------------- <block_start>domain.set_flow_algorithm('DE0')<line_sep>#domain.set_store_vertices_uniquely() #------------------------------------------------------------------------------ # Setup boundary conditions # This must currently happen *AFTER* domain has been distributed #------------------------------------------------------------------------------ Br=Reflective_boundary(domain)# Solid reflective wall Bd=Dirichlet_boundary([-0.2 0. 0.])# Constant boundary values # Associate boundary tags with boundary objects domain.set_boundary({'left':Br 'right':Bd 'top':Br 'bottom':Br})<line_sep>#------------------------------------------------------------------------------ # Evolve #------------------------------------------------------------------------------ <for_stmt>t domain.evolve(yieldstep=yieldstep finaltime=finaltime)<block_start><if_stmt>myid<eq>0<and>verbose<block_start>domain.write_time()<block_end>#if myid == 0 and verbose : print domain.quantities['stage'].get_maximum_value() <block_end>domain.sww_merge(delete_old=<true>)<block_end># Test an nprocs-way run of the shallow water equations # against the sequential code. <class_stmt>Test_parallel_sw_flow(unittest.TestCase)<block_start><def_stmt>test_parallel_sw_flow self<block_start><if_stmt>verbose<block_start>print("Expect this test to fail if not run from the parallel directory.")<block_end>cmd=anuga.mpicmd(os.path.abspath(__file__))<line_sep>result=os.system(cmd)<line_sep>assert_(result<eq>0)<block_end><block_end># Because we are doing assertions outside of the TestCase class # the PyUnit defined assert_ function can't be used. <def_stmt>assert_ condition msg="Assertion Failed"<block_start><if_stmt><not>condition#pypar.finalize() <block_start>raise_(AssertionError msg)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><if_stmt>numprocs<eq>1<block_start>runner=unittest.TextTestRunner()<line_sep>suite=unittest.makeSuite(Test_parallel_sw_flow 'test')<line_sep>runner.run(suite)<block_end><else_stmt><block_start><import_from_stmt>anuga.utilities.parallel_abstraction global_except_hook<line_sep>sys.excepthook=global_except_hook<line_sep>#------------------------------------------ # Run the code and compare sequential # results at 4 gauge stations #------------------------------------------ <if_stmt>myid<eq>0<and>verbose<block_start>print('PARALLEL START')<block_end>run_simulation(parallel=<true> verbose=verbose)<line_sep>finalize()<block_end><block_end>
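# ----------------------------------------------------------------------------- # Minimal sketch (illustrative only; not called by the test above) of the # dump/load round trip this test exercises: rank 0 partitions and pickles the # sequential domain, then every rank reloads its own sub-domain by name. The # explicit barrier() is an assumption added for clarity, so that no rank tries # to load before the pickles exist. <def_stmt>_dump_then_load_example domain<block_start><if_stmt>myid<eq>0<block_start>sequential_distribute_dump(domain numprocs verbose=<false> parameters=new_parameters)<block_end>barrier()<line_sep><return>sequential_distribute_load(filename='odomain' verbose=<false>)<block_end>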
<import_from_stmt>typing Callable<import_from_stmt>tensorflow.python.layers base<import_from_stmt>tensorflow.python.eager context<import_from_stmt>tensorflow.python.estimator util<as>estimator_util<import_from_stmt>tensorflow.python.framework dtypes<import_from_stmt>tensorflow.python.framework ops<import_from_stmt>tensorflow.python.framework tensor_shape<import_from_stmt>tensorflow.python.layers utils<as>layers_util<import_from_stmt>tensorflow.python.layers utils<import_from_stmt>tensorflow.python.ops array_ops<import_from_stmt>tensorflow.python.ops variable_scope<as>vs<import_from_stmt>tensorflow.python.ops variables<as>tf_variables<import_from_stmt>tensorflow.python.platform tf_logging<as>logging<import_from_stmt>tensorflow.python.util nest<import_from_stmt>tensorflow.python.ops init_ops<import_from_stmt>tensorflow.python.ops math_ops<import_from_stmt>tensorflow.python.ops nn<import_from_stmt>tensorflow.python.ops standard_ops<import_from_stmt>tensorflow.contrib.layers fully_connected<import_stmt>tensorflow<as>tf<import_stmt>sys<import_from_stmt>helpers.misc_utils debug_tensor debug_shape<import_from_stmt>helpers.ops safe_log<line_sep>FLAGS=tf.app.flags.FLAGS<class_stmt>CopyLayer(base.Layer)<block_start>"""Copy-mechanism ("pointer/generator") output layer. The layer expects the decoder output at each step with the attention context vector and the source alignments concatenated on the last axis. It splits that input into the two parts, projects the attention part through a dense softmax over the shortlist vocabulary, and turns the alignments into a copy distribution over source positions. A small gating MLP (the "switch"), computed from the attention part and the condition encoding, mixes the two, so the output is a distribution over `vocab_size` shortlist tokens followed by `max_copy_size` source positions. Arguments: embedding_dim: Integer, width of the attention part of the input. units: Integer, length of the alignment (copy) part of the input. switch_units: Integer, hidden width of the switch MLP. source_provider: Callable returning the source token ids, shape [batch_size, len_source]. source_provider_sl: Callable returning the source ids mapped into the shortlist vocabulary. condition_encoding: Callable returning the encoding fed to the switch. training_mode: Boolean, enables dropout in the switch MLP. vocab_size: Integer, size of the shortlist vocabulary. context_as_set: Boolean, whether copy probabilities are aggregated per unique context token rather than per source position. max_copy_size: Integer, maximum supported source length; the copy part of the output is padded to this size. mask_oovs: Boolean tensor; if true, the OOV shortlist token is zeroed out. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. """<def_stmt>__init__ self embedding_dim units switch_units=64 activation=<none> use_bias=<false> kernel_initializer=<none> bias_initializer=init_ops.zeros_initializer() kernel_regularizer=<none> bias_regularizer=<none> activity_regularizer=<none> kernel_constraint=<none> bias_constraint=<none> trainable=<true> name=<none> source_provider:Callable[[] tf.Tensor]=<none> source_provider_sl:Callable[[] tf.Tensor]=<none> condition_encoding:Callable[[] tf.Tensor]=<none> output_mask:Callable[[] tf.Tensor]=<none> training_mode=<false> vocab_size=<none> context_as_set=<false> max_copy_size=<none> mask_oovs=<false> **kwargs<block_start>super(CopyLayer self).__init__(trainable=trainable name=name activity_regularizer=activity_regularizer **kwargs)<line_sep>self.vocab_size=vocab_size<line_sep>self.source_provider=source_provider<line_sep>self.source_provider_sl=source_provider_sl<line_sep>self.embedding_dim=embedding_dim<line_sep>self.units=units<line_sep>self.switch_units=switch_units<line_sep>self.activation=activation<line_sep>self.use_bias=use_bias<line_sep>self.kernel_initializer=kernel_initializer<line_sep>self.bias_initializer=bias_initializer<line_sep>self.kernel_regularizer=kernel_regularizer<line_sep>self.bias_regularizer=bias_regularizer<line_sep>self.kernel_constraint=kernel_constraint<line_sep>self.bias_constraint=bias_constraint<line_sep>self.input_spec=base.InputSpec(min_ndim=2)<line_sep>self.training_mode=training_mode<line_sep># self.output_mask=output_mask self.max_copy_size=max_copy_size<line_sep>self.mask_oovs=mask_oovs<line_sep>self.context_as_set=context_as_set<line_sep>self.condition_encoding=condition_encoding<block_end><def_stmt>build self input_shape<block_start>input_shape=tensor_shape.TensorShape(input_shape)<line_sep># print("building copy layer") # print(input_shape) self.built=<true><block_end><def_stmt>call self inputs<block_start>inputs=ops.convert_to_tensor(inputs dtype=self.dtype)# batch x len_source+emb_dim # inputs = debug_shape(inputs, "inputs") # print(inputs) # [batch_size, emb_dim + len_source] in eval, # [len_target, batch_size,emb_dim + len_source] in train source=self.source_provider()# [batch_size, len_source] # source = debug_shape(source,"src") source_sl=self.source_provider_sl()<line_sep>condition_encoding=self.condition_encoding()<line_sep># condition_encoding = debug_shape(condition_encoding, "cond enc") batch_size=tf.shape(source)[0]<line_sep>len_source=tf.shape(source)[1]<line_sep>shape=tf.shape(inputs)<line_sep>is_eval=len(inputs.get_shape())<eq>2<line_sep>beam_width=tf.constant(1)<if>is_eval<else>shape[1]<line_sep># len_target = tf.Print(len_target, [len_target, batch_size, shape[-1]], "input reshape") # inputs = tf.reshape(inputs, [-1, shape[-1]]) # [len_target * batch_size, len_source + emb_dim] inputs_new=tf.reshape(inputs [batch_size<times>beam_width shape[-1]])<line_sep># [len_target, batch_size, len_source + emb_dim] # 
inputs_new = debug_shape(inputs_new, "inputs_new") # -- [len_target, batch_size, embedding_dim] attention, [] # -- [len_target, batch_size, len_source] alignments # attention, alignments = tf.split(inputs, [self.embedding_dim, -1], axis=1) attention,alignments=tf.split(inputs_new num_or_size_splits=[self.embedding_dim -1] axis=-1)<line_sep># [len_target, batch_size, vocab_size] <if_stmt>FLAGS.out_vocab_cpu<block_start><with_stmt>tf.device('/cpu:*')<block_start>shortlist=tf.layers.dense(attention self.vocab_size activation=tf.nn.softmax use_bias=<false>)<block_end><block_end><else_stmt><block_start>shortlist=tf.layers.dense(attention self.vocab_size activation=tf.nn.softmax use_bias=<false>)<block_end># attention = debug_shape(attention, "attn") # alignments = debug_shape(alignments, "align ("+str(self.units)+" desired)") # alignments = debug_tensor(alignments, "alignments") # print(alignments) # shortlist = debug_shape(shortlist, "shortlist") # TEMP: kill OOVs s=tf.shape(shortlist)<line_sep>mask=tf.concat([tf.ones((s[0] 1)) tf.zeros((s[0] 1)) tf.ones((s[0] s[1]-2))] axis=1)<line_sep>shortlist=tf.cond(self.mask_oovs <lambda>:shortlist<times>mask <lambda>:shortlist)<line_sep># pad the alignments to the longest possible source st output vocab is fixed size # TODO: Check for non zero alignments outside the seq length # alignments_padded = debug_shape(alignments_padded, "align padded") # switch takes st, vt and yt−1 as inputs # vt = concat(weighted context encoding at t; condition encoding) # st = hidden state at t # y_t-1 is previous generated token condition_encoding_tiled=tf.contrib.seq2seq.tile_batch(condition_encoding multiplier=beam_width)<line_sep>vt=tf.concat([attention condition_encoding_tiled] axis=1)<line_sep># NOTE: this is missing the previous input y_t-1 and s_t switch_input=tf.concat([vt] axis=1)<line_sep>switch_h1=tf.layers.dropout(tf.layers.dense(switch_input self.switch_units activation=tf.nn.tanh kernel_initializer=tf.glorot_uniform_initializer()) rate=0.3 training=self.training_mode)<line_sep>switch_h2=tf.layers.dropout(tf.layers.dense(switch_h1 self.switch_units activation=tf.nn.tanh kernel_initializer=tf.glorot_uniform_initializer()) rate=0.3 training=self.training_mode)<line_sep>self.switch=tf.layers.dense(switch_h2 1 activation=tf.sigmoid kernel_initializer=tf.glorot_uniform_initializer())<line_sep># switch = debug_shape(switch, "switch") <if_stmt>FLAGS.disable_copy<block_start>self.switch=0<block_end><elif_stmt>FLAGS.disable_shortlist<block_start>self.switch=1<block_end># if self.output_mask is not None: # alignments = self.output_mask() * alignments source_tiled=tf.contrib.seq2seq.tile_batch(source multiplier=beam_width)<line_sep>source_tiled_sl=tf.contrib.seq2seq.tile_batch(source_sl multiplier=beam_width)<line_sep>shortlist=(1-self.switch)<times>shortlist<line_sep>alignments=self.switch<times>alignments<line_sep># Take any tokens that are the same in either vocab and combine their probabilities # old: mult by a big sparse matrix - not v mem efficient.. # opt1: mult the copy dist by a vocab x copy matrix and add to vocab part # opt2: do an nd_gather to copy the relevant prob mass, then mask carefully to remove it <if_stmt>FLAGS.combine_vocab# copy everything in real shortlist except special toks # print(len_source, self.max_copy_size) <block_start>source_tiled_sl_padded=tf.pad(source_tiled_sl [[0 0] [0 self.max_copy_size-tf.shape(source_tiled_sl)[-1]]] 'CONSTANT' constant_values=0)<line_sep># attempt 2! 
batch_ix=tf.tile(tf.expand_dims(tf.range(batch_size<times>beam_width) axis=-1) [1 len_source])<line_sep># seq_ix = tf.tile(tf.expand_dims(tf.range(len_source),axis=0),[batch_size*beam_width,1]) tgt_indices=tf.reshape(tf.concat([tf.expand_dims(batch_ix -1) tf.expand_dims(source_tiled_sl -1)] axis=2) [-1 2])<line_sep>ident_indices=tf.where(tf.greater(source_tiled_sl -1))# get ixs of all elements # ident_indices = tf.where() # tgt_indices = debug_tensor(tgt_indices) # get the copy probs at each point in the source.. updates=tf.reshape(tf.gather_nd(alignments ident_indices) [-1])<line_sep># and send them to the their shortlist index sum_part=tf.scatter_nd(tgt_indices updates [batch_size<times>beam_width self.vocab_size+self.max_copy_size])<line_sep># then zero out the ix's that got copied align_zeroed=alignments<times>tf.cast(tf.greater_equal(source_tiled_sl self.vocab_size) tf.float32)<line_sep>align_moved=alignments<times>tf.cast(tf.less(source_tiled_sl self.vocab_size) tf.float32)# ie only let through stuff that *isnt* in SL # and add the correct pieces together alignments=align_zeroed<line_sep>shortlist=shortlist+sum_part[: :self.vocab_size]<line_sep># result = tf.Print(result, [tf.reduce_sum(result[:,:self.vocab_size],-1)], "result sl sum") # shortlist = tf.Print(shortlist, [tf.reduce_sum(align_moved,-1)], "sum align_moved") # shortlist = tf.Print(shortlist, [tf.reduce_sum(sum_part[:,:self.vocab_size],-1)], "sum sum_part") <block_end># convert position probs to ids <if_stmt>self.context_as_set# print(source) # batch x seq # print(alignments) # batch x seq <block_start>pos_to_id=tf.one_hot(source_tiled-self.vocab_size depth=self.max_copy_size)# batch x seq x vocab <if_stmt>FLAGS.maxout_pointer<block_start>copy_dist=tf.reduce_max(pos_to_id<times>tf.expand_dims(alignments 2) axis=1)<block_end><else_stmt><block_start>copy_dist=tf.squeeze(tf.matmul(tf.expand_dims(alignments 1) pos_to_id) axis=1)<block_end><block_end><else_stmt><block_start>copy_dist=alignments<block_end>copy_dist_padded=tf.pad(copy_dist [[0 0] [0 self.max_copy_size-tf.shape(copy_dist)[-1]]] 'CONSTANT' constant_values=0)<line_sep>result=tf.concat([shortlist copy_dist_padded] axis=1)# this used to be safe_log'd # if FLAGS.combine_vocab: # result = tf.Print(result, [tf.reduce_sum(result,-1)], "result sum") target_shape=tf.concat([shape[:-1] [-1]] 0)<line_sep>result=tf.reshape(result target_shape)<line_sep><return>result<line_sep># return tf.Print(result, [tf.reduce_max(switch), tf.reduce_max(shortlist), # tf.reduce_max(alignments)], summarize=10) <block_end><def_stmt>compute_output_shape self input_shape<block_start>input_shape=tensor_shape.TensorShape(input_shape)<line_sep>input_shape=input_shape.with_rank_at_least(2)<line_sep># print(input_shape) <if_stmt>input_shape[-1].value<is><none><block_start><raise>ValueError('The innermost dimension of input_shape must be defined, but saw: %s'%input_shape)<block_end><return>input_shape[:-1].concatenate(self.units+self.vocab_size<if><not>self.context_as_set<else>self.vocab_size+self.max_copy_size)<block_end># this for older tf versions <def_stmt>_compute_output_shape self input_shape<block_start><return>self.compute_output_shape(input_shape)<block_end><block_end><def_stmt>dense inputs units activation=<none> use_bias=<true> kernel_initializer=<none> bias_initializer=init_ops.zeros_initializer() kernel_regularizer=<none> bias_regularizer=<none> activity_regularizer=<none> kernel_constraint=<none> bias_constraint=<none> trainable=<true> name=<none> reuse=<none><block_start>"""Functional 
interface for the densely-connected layer. This layer implements the operation: `outputs = activation(inputs * kernel + bias)` Where `activation` is the activation function passed as the `activation` argument (if not `None`), `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only if `use_bias` is `True`). Note: if the `inputs` tensor has a rank greater than 2, then it is flattened prior to the initial matrix multiply by `kernel`. Arguments: inputs: Tensor input. units: Integer or Long, dimensionality of the output space. activation: Activation function (callable). Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the default initializer used by `tf.get_variable`. bias_initializer: Initializer function for the bias. kernel_regularizer: Regularizer function for the weight matrix. bias_regularizer: Regularizer function for the bias. activity_regularizer: Regularizer function for the output. kernel_constraint: An optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: An optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: String, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. """<line_sep># NOTE: CopyLayer takes embedding_dim as its first positional argument, so this call binds units to embedding_dim; supply embedding_dim before units when adapting this wrapper. layer=CopyLayer(units activation=activation use_bias=use_bias kernel_initializer=kernel_initializer bias_initializer=bias_initializer kernel_regularizer=kernel_regularizer bias_regularizer=bias_regularizer activity_regularizer=activity_regularizer kernel_constraint=kernel_constraint bias_constraint=bias_constraint trainable=trainable name=name dtype=inputs.dtype.base_dtype _scope=name _reuse=reuse)<line_sep><return>layer.apply(inputs)<block_end>
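# --- Editor's sketch (not part of the original module; sizes are illustrative): how to split the mixture distribution returned by CopyLayer.call, whose layout (shortlist vocab first, then padded copy positions) follows the final tf.concat above. ---
<import_stmt>tensorflow<as>tf<line_sep>vocab_size=1000<line_sep>max_copy_size=300<line_sep>result=tf.placeholder(tf.float32 [<none> vocab_size+max_copy_size])<line_sep>p_shortlist=result[: :vocab_size]<line_sep># generate-from-vocab probabilities, already scaled by (1 - switch)
p_copy=result[: vocab_size:]<line_sep># copy-from-source probabilities, scaled by switch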
expected_output={'pvrstag':{'foo':{'domain':'foo' 'interfaces':{'GigabitEthernet0/0/0/0':{'interface':'GigabitEthernet0/0/0/0' 'vlans':{'5':{'preempt_delay':<true> 'preempt_delay_state':'Sending startup BPDU until 13:38:03' 'sub_interface':'GigabitEthernet0/0/0/0.5' 'sub_interface_state':'Up' 'max_age':20 'root_priority':0 'root_bridge':'0000.0000.0000' 'root_cost':1 'bridge_priority':32768 'bridge_id':'0255.1dff.3c70' 'port_priority':128 'port_id':1 'hello_time':2 'active':<true> 'counters':{'bdpu_sent':6 'topology_changes':0 } } } } 'GigabitEthernet0/0/0/1':{'interface':'GigabitEthernet0/0/0/1' 'vlans':{'5':{'preempt_delay':<true> 'preempt_delay_state':'Sending standard BPDU' 'sub_interface':'GigabitEthernet0/0/0/1.5' 'sub_interface_state':'Up' 'max_age':20 'root_priority':0 'root_bridge':'0000.0000.0000' 'root_cost':0 'bridge_priority':32768 'bridge_id':'021a.9eff.5645' 'port_priority':128 'port_id':1 'hello_time':2 'active':<true> 'counters':{'bdpu_sent':7 'topology_changes':0 } } } } } } } }<line_sep>
<import_from_future_stmt> print_function<import_from_future_stmt> division<import_from_stmt>builtins range<import_from_stmt>past.utils old_div<import_stmt>os<import_stmt>numpy<as>num<import_from_stmt>anuga.file.netcdf NetCDFFile<import_stmt>pylab<as>P<import_stmt>anuga<import_from_stmt>anuga.abstract_2d_finite_volumes.mesh_factory rectangular<import_from_stmt>anuga.shallow_water.shallow_water_domain Domain<import_from_stmt>anuga.shallow_water.boundaries Reflective_boundary<import_from_stmt>anuga.coordinate_transforms.geo_reference Geo_reference<import_from_stmt>anuga.shallow_water.forcing *<import_from_stmt>anuga.utilities.numerical_tools ensure_numeric<import_from_stmt>anuga.file.sww Write_sww<import_from_stmt>anuga.config netcdf_mode_r netcdf_mode_w netcdf_mode_a netcdf_float<def_stmt>sts2sww_mesh basename_in basename_out=<none> spatial_thinning=1 verbose=<false><block_start><import_from_stmt>anuga.mesh_engine.mesh_engine NoTrianglesError<import_from_stmt>anuga.pmesh.mesh Mesh<if_stmt>verbose<block_start>print("Starting sts2sww_mesh")<block_end>mean_stage=0.<line_sep>zscale=1.<if_stmt>(basename_in[-4:]<eq>'.sts')<block_start>stsname=basename_in<block_end><else_stmt><block_start>stsname=basename_in+'.sts'<block_end><if_stmt>verbose<block_start>print("Reading sts NetCDF file: %s"%stsname)<block_end>infile=NetCDFFile(stsname netcdf_mode_r)<line_sep>cellsize=infile.cellsize<line_sep>ncols=infile.ncols<line_sep>nrows=infile.nrows<line_sep>no_data=infile.no_data<line_sep>refzone=infile.zone<line_sep>x_origin=infile.xllcorner<line_sep>y_origin=infile.yllcorner<line_sep>origin=num.array([x_origin y_origin])<line_sep>x=infile.variables['x'][:]<line_sep>y=infile.variables['y'][:]<line_sep>times=infile.variables['time'][:]<line_sep>wind_speed_full=infile.variables['wind_speed'][:]<line_sep>wind_angle_full=infile.variables['wind_angle'][:]<line_sep>pressure_full=infile.variables['barometric_pressure'][:]<line_sep>infile.close()<line_sep>number_of_points=nrows<times>ncols<line_sep>points_utm=num.zeros((number_of_points 2) num.float)<line_sep>points_utm[: 0]=x+x_origin<line_sep>points_utm[: 1]=y+y_origin<line_sep>thinned_indices=[]<for_stmt>i range(number_of_points)<block_start><if_stmt>(old_div(i ncols)<eq>0<or>old_div(i ncols)<eq>ncols-1<or>(old_div(i ncols))%(spatial_thinning)<eq>0)<block_start><if_stmt>(i%(spatial_thinning)<eq>0<or>i%nrows<eq>0<or>i%nrows<eq>nrows-1)<block_start>thinned_indices.append(i)<block_end><block_end><block_end>#Spatial thinning points_utm=points_utm[thinned_indices]<line_sep>number_of_points=points_utm.shape[0]<line_sep>number_of_timesteps=wind_speed_full.shape[0]<line_sep>wind_speed=num.empty((number_of_timesteps number_of_points) dtype=float)<line_sep>wind_angle=num.empty((number_of_timesteps number_of_points) dtype=float)<line_sep>barometric_pressure=num.empty((number_of_timesteps number_of_points) dtype=float)<if_stmt>verbose<block_start>print("Total number of points: " nrows<times>ncols)<line_sep>print("Number of thinned points: " number_of_points)<block_end><for_stmt>i range(number_of_timesteps)<block_start>wind_speed[i]=wind_speed_full[i thinned_indices]<line_sep>wind_angle[i]=wind_angle_full[i thinned_indices]<line_sep>barometric_pressure[i]=pressure_full[i thinned_indices]<block_end>#P.plot(points_utm[:,0],points_utm[:,1],'ro') #P.show() <if_stmt>verbose<block_start>print("Generating sww triangulation of gems data")<block_end>mesh=Mesh()<line_sep>mesh.add_vertices(points_utm)<line_sep>mesh.auto_segment(smooth_indents=<true>
expand_pinch=<true>)<line_sep>mesh.auto_segment(mesh.shape.get_alpha()<times>1.1)<try_stmt><block_start>mesh.generate_mesh(minimum_triangle_angle=0.0 verbose=<false>)<block_end><except_stmt>NoTrianglesError# This is a bit of a hack, going in and changing the data structure. <block_start>mesh.holes=[]<line_sep>mesh.generate_mesh(minimum_triangle_angle=0.0 verbose=<false>)<block_end>mesh_dic=mesh.Mesh2MeshList()<line_sep>points_utm=ensure_numeric(points_utm)<assert_stmt>num.alltrue(ensure_numeric(mesh_dic['generatedpointlist'])<eq>ensure_numeric(points_utm))<line_sep>volumes=mesh_dic['generatedtrianglelist']<line_sep># Write sww intro and grid stuff. <if_stmt>(basename_out<is><not><none><and>basename_out[-4:]<eq>'.sww')<block_start>swwname=basename_out<block_end><else_stmt><block_start>swwname=basename_in+'.sww'<block_end><if_stmt>verbose<block_start>print('Output to %s'%swwname)<block_end><if_stmt>verbose<block_start>print("Writing sww wind and pressure field file")<block_end>outfile=NetCDFFile(swwname netcdf_mode_w)<line_sep>sww=Write_sww([] ['wind_speed' 'wind_angle' 'barometric_pressure'])<line_sep>sww.store_header(outfile times len(volumes) len(points_utm) verbose=verbose sww_precision='d')<line_sep>outfile.mean_stage=mean_stage<line_sep>outfile.zscale=zscale<line_sep>sww.store_triangulation(outfile points_utm volumes refzone new_origin=origin #check effect of this line verbose=verbose)<if_stmt>verbose<block_start>print('Converting quantities')<block_end># Read in a time slice from the sts file and write it to the SWW file #print wind_angle[0,:10] <for_stmt>i range(len(times))<block_start>sww.store_quantities(outfile slice_index=i verbose=verbose wind_speed=wind_speed[i :] wind_angle=wind_angle[i :] barometric_pressure=barometric_pressure[i :] sww_precision=num.float)<block_end><if_stmt>verbose<block_start>sww.verbose_quantities(outfile)<block_end>outfile.close()<block_end>
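# --- Editor's usage sketch (hypothetical file name): convert a spatially thinned .sts wind/pressure field into an .sww triangulation next to it. ---
<if_stmt>__name__<eq>'__main__'<block_start>sts2sww_mesh('gems_boundary' spatial_thinning=4 verbose=<true>)<block_end># writes gems_boundary.sww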
<import_from_stmt>django.test TestCase<import_from_stmt>hs_core.hydroshare hs_requests<import_from_stmt>django.conf settings<class_stmt>TestRewrite(TestCase)<block_start>""" Test local rewriting that bypasses firewalls and hits local nginx server """<def_stmt>setUp self<block_start>self.prod_fqdn=getattr(settings "PROD_FQDN_OR_IP" "www.hydroshare.org")<line_sep>self.fqdn=getattr(settings "FQDN_OR_IP" "www.hydroshare.org")<line_sep>self.nginx_ip=hs_requests.get_nginx_ip()<block_end><def_stmt>test_localize_outer self<block_start>""" rewrite requests to outer host"""<line_sep>self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format(self.fqdn)) "https://{}/foo/bar/".format(self.nginx_ip))<line_sep>self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format(self.fqdn)) "http://{}/foo/bar/".format(self.nginx_ip))<block_end><def_stmt>test_localize_www self<block_start>""" rewrite requests to production host"""<line_sep>self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format(self.prod_fqdn)) "https://{}/foo/bar/".format(self.nginx_ip))<line_sep>self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format(self.prod_fqdn)) "http://{}/foo/bar/".format(self.nginx_ip))<block_end><def_stmt>test_do_not_localize_others self<block_start>""" don't rewrite other host addresses """<line_sep>self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format("www.foo.com")) "https://{}/foo/bar/".format("www.foo.com"))<line_sep>self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format("www.foo.com")) "http://{}/foo/bar/".format("www.foo.com"))<block_end><block_end>
<import_stmt>errno<import_stmt>os<import_stmt>locale<import_from_stmt>datetime datetime<try_stmt><block_start><import_stmt>pytz<line_sep>HAS_PYTZ=<true><block_end><except_stmt>ImportError<block_start>HAS_PYTZ=<false><block_end><import_from_stmt>i3pystatus IntervalModule<class_stmt>Clock(IntervalModule)<block_start>""" This class shows a clock. .. note:: Optionally requires `pytz` for time zone data when using time zones other than local time. Format can be passed in four different ways: - single string, no timezone, just the strftime-format - one two-tuple, first is the format, second the timezone - list of strings - no timezones - list of two tuples, first is the format, second is timezone Use mousewheel to cycle between formats. For complete time format specification see: :: man strftime All available timezones are located in directory: :: /usr/share/zoneinfo/ .. rubric:: Format examples :: # one format, local timezone format = '%a %b %-d %b %X' # multiple formats, local timezone format = [ '%a %b %-d %b %X', '%X' ] # one format, specified timezone format = ('%a %b %-d %b %X', 'Europe/Bratislava') # multiple formats, specified timezones format = [ ('%a %b %-d %b %X', 'America/New_York'), ('%X', 'Etc/GMT+9') ] """<line_sep>settings=(("format" "`None` means to use the default, locale-dependent format.") ("color" "RGB hexadecimal code color specifier, default to #ffffff") )<line_sep>format=<none><line_sep>color="#ffffff"<line_sep>interval=1<line_sep>on_upscroll=["scroll_format" 1]<line_sep>on_downscroll=["scroll_format" -1]<def_stmt>init self<block_start>env_lang=os.environ.get('LC_TIME' <none>)<if_stmt>env_lang<is><none><block_start>env_lang=os.environ.get('LANG' <none>)<block_end><if_stmt>env_lang<is><not><none><block_start><if_stmt>env_lang.find('.')<ne>-1<block_start>lang=tuple(env_lang.split('.' 1))<block_end><else_stmt><block_start>lang=(env_lang <none>)<block_end><block_end><else_stmt><block_start>lang=(<none> <none>)<block_end><if_stmt>lang<ne>locale.getlocale(locale.LC_TIME)# affects language of *.strftime() in whole program <block_start>locale.setlocale(locale.LC_TIME lang)<block_end><if_stmt>self.format<is><none><block_start><if_stmt>lang[0]<eq>'en_US'# MDY format - United States of America <block_start>self.format=["%a %b %-d %X"]<block_end><else_stmt># DMY format - almost all other countries <block_start>self.format=["%a %-d %b %X"]<block_end><block_end><elif_stmt>isinstance(self.format str)<or>isinstance(self.format tuple)<block_start>self.format=[self.format]<block_end>self.system_tz=self._get_system_tz()<line_sep>self.format=[self._expand_format(fmt)<for>fmt self.format]<line_sep>self.current_format_id=0<block_end><def_stmt>_expand_format self fmt<block_start><if_stmt>isinstance(fmt tuple)<block_start><if_stmt>len(fmt)<eq>1<block_start><return>(fmt[0] <none>)<block_end><else_stmt><block_start><if_stmt><not>HAS_PYTZ<block_start><raise>RuntimeError("Need `pytz` for timezone data")<block_end><return>(fmt[0] pytz.timezone(fmt[1]))<block_end><block_end><return>(fmt self.system_tz)<block_end><def_stmt>_get_system_tz self<block_start>''' Get the system timezone for use when no timezone is explicitly provided Requires pytz, if not available then no timezone will be set when not explicitly provided. 
'''<if_stmt><not>HAS_PYTZ<block_start><return><none><block_end><def_stmt>_etc_localtime <block_start><try_stmt><block_start><with_stmt>open('/etc/localtime' 'rb')<as>fp<block_start><return>pytz.tzfile.build_tzinfo('system' fp)<block_end><block_end><except_stmt>OSError<as>exc<block_start><if_stmt>exc.errno<ne>errno.ENOENT<block_start>self.logger.error('Unable to read from /etc/localtime: %s' exc.strerror)<block_end><block_end><except_stmt>pytz.UnknownTimeZoneError<block_start>self.logger.error('/etc/localtime contains unrecognized tzinfo')<block_end><return><none><block_end><def_stmt>_etc_timezone <block_start><try_stmt><block_start><with_stmt>open('/etc/timezone' 'r')<as>fp<block_start>tzname=fp.read().strip()<block_end><return>pytz.timezone(tzname)<block_end><except_stmt>OSError<as>exc<block_start><if_stmt>exc.errno<ne>errno.ENOENT<block_start>self.logger.error('Unable to read from /etc/timezone: %s' exc.strerror)<block_end><block_end><except_stmt>pytz.UnknownTimeZoneError<block_start>self.logger.error('/etc/timezone contains unrecognized timezone \'%s\'' tzname)<block_end><return><none><block_end><return>_etc_localtime()<or>_etc_timezone()<block_end><def_stmt>run self<block_start>time=datetime.now(self.format[self.current_format_id][1])<line_sep>self.output={"full_text":time.strftime(self.format[self.current_format_id][0]) "color":self.color "urgent":<false> }<block_end><def_stmt>scroll_format self step=1<block_start>self.current_format_id=(self.current_format_id+step)%len(self.format)<block_end><block_end>
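# --- Editor's usage sketch (assumes the standard i3pystatus Status API): register this Clock with one zone-aware and one local format; the mouse wheel then cycles between them. ---
<import_from_stmt>i3pystatus Status<line_sep>status=Status()<line_sep>status.register("clock" format=[('%a %-d %b %X' 'Europe/Bratislava') '%X'] color='#00ff00')<line_sep>status.run()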
<import_stmt>re<import_stmt>sys<line_sep>meetup_svg='.github/images/meetup.svg'<line_sep>readme_md='README.md'<line_sep>conf_py='docs/conf.py'<def_stmt>rm_announce # remove all announcement <block_start><with_stmt>open(readme_md)<as>fp<block_start>_old=fp.read()<line_sep>_new=re.sub(r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)' rf'\g<1>\g<2>' _old flags=re.DOTALL )<block_end><with_stmt>open(readme_md 'w')<as>fp<block_start>fp.write(_new)<block_end><with_stmt>open(conf_py)<as>fp<block_start>_old=fp.read()<line_sep>_new=re.sub(r'(# start-announce\s*?\n).*(\n\s*?# end-announce)' rf'\g<1>\g<2>' _old flags=re.DOTALL )<block_end><with_stmt>open(conf_py 'w')<as>fp<block_start>fp.write(_new)<block_end><block_end><if_stmt>len(sys.argv)<l>3<block_start>rm_announce()<block_end><else_stmt><block_start>text=sys.argv[1]<line_sep>url=sys.argv[2]<if_stmt><not>text<or><not>url<block_start>rm_announce()<block_end><else_stmt><block_start>announce_url=f''' "announcement": \'\'\' <a href="{url}">{text}</a> \'\'\', '''<line_sep>meetup_svg_url=f'<a href="{url}"><img src="https://github.com/jina-ai/jina/blob/master/{meetup_svg}?raw=true"></a>'<line_sep># update meetup_svg <with_stmt>open(meetup_svg)<as>fp<block_start>_old=fp.read()<line_sep>_new=re.sub(r'(<a href=").*(")' rf'\g<1>{url}\g<2>' _old)<line_sep>_new=re.sub(r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)' rf'\g<1>{text}\g<2>' _new flags=re.DOTALL )<block_end><with_stmt>open(meetup_svg 'w')<as>fp<block_start>fp.write(_new)<block_end># update readme_md <with_stmt>open(readme_md)<as>fp<block_start>_old=fp.read()<line_sep>_new=re.sub(r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)' rf'\g<1>{meetup_svg_url}\g<2>' _old flags=re.DOTALL )<block_end><with_stmt>open(readme_md 'w')<as>fp<block_start>fp.write(_new)<block_end># update conf <with_stmt>open(conf_py)<as>fp<block_start>_old=fp.read()<line_sep>_new=re.sub(r'(# start-announce\s*?\n).*(\n\s*?# end-announce)' rf'\g<1>{announce_url}\g<2>' _old flags=re.DOTALL )<block_end><with_stmt>open(conf_py 'w')<as>fp<block_start>fp.write(_new)<block_end><block_end><block_end>
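# --- Editor's usage note (a sketch; the script path is hypothetical): ---
# python scripts/update_announcement.py "Join the next meetup" "https://example.com/meetup" # sets the banner in meetup.svg, README.md and docs/conf.py
# python scripts/update_announcement.py # with fewer than two arguments, the banner is removed everywhere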
""" GENERATED FILE - DO NOT EDIT (created via @build_stack_rules_proto//cmd/depsgen) """<line_sep>load("@build_bazel_rules_nodejs//:index.bzl" "npm_install" "yarn_install")<def_stmt>_maybe repo_rule name **kwargs<block_start><if_stmt>name<not><in>native.existing_rules()<block_start>repo_rule(name=name **kwargs)<block_end><block_end><def_stmt>ts_proto_deps <block_start>npm_ts_proto()# via <TOP> npm_tsc()<block_end># via <TOP> <def_stmt>npm_ts_proto <block_start>_maybe(npm_install name="npm_ts_proto" package_json="@build_stack_rules_proto//plugin/stephenh/ts-proto:package.json" package_lock_json="@build_stack_rules_proto//plugin/stephenh/ts-proto:package-lock.json" symlink_node_modules=<false> )<block_end><def_stmt>npm_tsc <block_start>_maybe(yarn_install name="npm_tsc" package_json="@build_stack_rules_proto//rules/ts:package.json" yarn_lock="@build_stack_rules_proto//rules/ts:yarn.lock" frozen_lockfile=<true> )<block_end>
<import_from_stmt>polar Variable<import_from_stmt>sqlalchemy.orm Session<import_from_stmt>sqlalchemy_oso.partial partial_to_filter<import_from_stmt>.models User<def_stmt>test_partial_to_query_filter oso engine<block_start>oso.load_str('ok(_: User{username:"gwen"});')<line_sep>session=Session(bind=engine)<line_sep>gwen=User(username="gwen")<line_sep>session.add(gwen)<line_sep>steve=User(username="steve")<line_sep>session.add(steve)<line_sep>result=oso.query_rule("ok" Variable("actor") accept_expression=<true>)<line_sep>partial=next(result)["bindings"]["actor"]<line_sep>filter=partial_to_filter(partial session User oso.get_class)<line_sep>q=list(session.query(User).filter(filter))<assert_stmt>q<eq>[gwen]<block_end>
<import_stmt>numpy<as>np<import_from_stmt>scipy sparse<import_from_stmt>scipy.sparse.linalg lsqr cg eigsh<import_stmt>matplotlib.pyplot<as>plt<import_stmt>scipy.io<as>sio<import_stmt>pickle<import_stmt>sparseqr<import_stmt>time<line_sep>WEIGHT=1.0<line_sep>############################################################## ## Laplacian Mesh Editing ## ############################################################## #Purpose: To return a sparse matrix representing a Laplacian matrix with #the graph Laplacian (D - A) in the upper square part and anchors as the #lower rows #Inputs: mesh (polygon mesh object), anchorsIdx (indices of the anchor points) #Returns: L (An (N+K) x N sparse matrix, where N is the number of vertices #and K is the number of anchors) <def_stmt>getLaplacianMatrixUmbrella mesh anchorsIdx<block_start>n=mesh.n_vertices()# N x 3 k=anchorsIdx.shape[0]<line_sep>I=[]<line_sep>J=[]<line_sep>V=[]<line_sep>vv_idx_list=list(mesh.vertex_vertex_indices())<line_sep># Build sparse Laplacian Matrix coordinates and values <for_stmt>i range(n)<block_start>idx_nbr=list(filter(<lambda>x:x<ne>-1 vv_idx_list[i]))<line_sep># wrapped in list() so len() and concatenation below also work on Python 3 num_nbr=len(idx_nbr)<line_sep>I=I+([i]<times>(num_nbr+1))# repeated row J=J+idx_nbr+[i]# column indices and this row V=V+([-1]<times>num_nbr)+[num_nbr]<block_end># negative weights and row degree # augment Laplacian matrix with anchor weights <for_stmt>i range(k)<block_start>I=I+[n+i]<line_sep>J=J+[anchorsIdx[i]]<line_sep>V=V+[WEIGHT]<block_end># default anchor weight L=sparse.coo_matrix((V (I J)) shape=(n+k n)).tocsr()<line_sep><return>L<block_end># Modified for openmesh.mesh, Note that only suitable for watertight model #Purpose: To return a sparse matrix representing a laplacian matrix with #cotangent weights in the upper square part and anchors as the lower rows #Inputs: mesh (polygon mesh object), anchorsIdx (indices of the anchor points) #Returns: L (An (N+K) x N sparse matrix, where N is the number of vertices #and K is the number of anchors) <def_stmt>getLaplacianMatrixCotangent mesh anchorsIdx<block_start>n=mesh.n_vertices()# N x 3 k=anchorsIdx.shape[0]<line_sep>I=[]<line_sep>J=[]<line_sep>V=[]<line_sep>#l = mesh.vertex_vertex_indices() <for_stmt>v mesh.vertices()<block_start>weights=[]<line_sep>p_this=mesh.point(v)<line_sep>p_nbrs=[]<line_sep>id_this=v.idx()<line_sep>id_nbrs=[]<for_stmt>vv mesh.vv(v)<block_start>p_nbrs.append(mesh.point(vv))<line_sep>id_nbrs.append(vv.idx())<block_end>num_nbr=len(id_nbrs)<for_stmt>i range(num_nbr)<block_start># u, v below are edge vectors (v shadows the vertex handle, which is not reused afterwards) u=p_this-p_nbrs[(i+num_nbr-1)%num_nbr]<line_sep>v=p_nbrs[(i+num_nbr)%num_nbr]-p_nbrs[(i+num_nbr-1)%num_nbr]<line_sep>cotangent_1=(np.dot(u v)/np.sqrt(np.sum(np.square(np.cross(u v)))))<line_sep>u=p_this-p_nbrs[(i+num_nbr+1)%num_nbr]<line_sep>v=p_nbrs[(i+num_nbr)%num_nbr]-p_nbrs[(i+num_nbr+1)%num_nbr]<line_sep>cotangent_2=(np.dot(u v)/np.sqrt(np.sum(np.square(np.cross(u v)))))<line_sep>weights.append(-0.5<times>(cotangent_1+cotangent_2))<block_end># cotangent weights I=I+([id_this]<times>(num_nbr+1))# repeated row J=J+id_nbrs+[id_this]# column indices and this row V=V+weights+[(-1<times>np.sum(weights))]<block_end># n negative weights and row vertex sum # augment Laplacian matrix with anchor weights <for_stmt>i range(k)<block_start>I=I+[n+i]<line_sep>J=J+[anchorsIdx[i]]<line_sep>V=V+[WEIGHT]<block_end># default anchor weight L=sparse.coo_matrix((V (I J)) shape=(n+k n)).tocsr()<line_sep><return>L<block_end>#Purpose: Given a mesh, to perform Laplacian mesh editing by solving the system #of delta coordinates and anchors in the least-squares
sense #Inputs: mesh (polygon mesh object), anchors (a K x 3 numpy array of anchor #coordinates), anchorsIdx (a parallel array of the indices of the anchors) #Returns: Nothing (should update mesh.VPos) <def_stmt>solveLaplacianMesh mesh anchors anchorsIdx cotangent=<true><block_start>n=mesh.n_vertices()<line_sep>k=anchorsIdx.shape[0]<line_sep>operator=(getLaplacianMatrixUmbrella getLaplacianMatrixCotangent)<line_sep>L=operator[1](mesh anchorsIdx)<if>cotangent<else>operator[0](mesh anchorsIdx)<line_sep>delta=np.array(L.dot(mesh.points()))<line_sep># augment delta solution matrix with weighted anchors <for_stmt>i range(k)<block_start>delta[n+i :]=WEIGHT<times>anchors[i :]<block_end># update mesh vertices with least-squares solution <for_stmt>i range(3)#mesh.points()[:, i] = lsqr(L, delta[:, i])[0] <block_start>mesh.points()[: i]=sparseqr.solve(L delta[: i] tolerance=1e-8)<block_end><return>mesh<block_end>############################################################## ## High Speed Laplacian Mesh Editing ## ############################################################## # using umbrella weights for higher speed <class_stmt>fast_deform()<block_start><def_stmt>__init__ self f_ijv_pkl='../predef/dsa_IJV.pkl' f_achr_pkl='../predef/dsa_achr.pkl' weight=1.0 <block_start>self.weight=weight<with_stmt>open(f_ijv_pkl 'rb')<as>fp<block_start>dic_IJV=pickle.load(fp)<block_end>I=dic_IJV['I']<line_sep>J=dic_IJV['J']<line_sep>V=dic_IJV['V']<line_sep>self.n=dic_IJV['num_vert']<with_stmt>open(f_achr_pkl 'rb')<as>fp<block_start>dic_achr=pickle.load(fp)<block_end>#achr_id = dic_achr['achr_id'] self.k=dic_achr['achr_num']<if_stmt>weight<ne>1.0<block_start>num_V=len(V)<for_stmt>i range(num_V-self.k num_V)<block_start>V[i]=V[i]<times>self.weight<block_end><block_end>self.L=sparse.coo_matrix((V (I J)) shape=(self.n+self.k self.n)).tocsr()<block_end><def_stmt>deform self mesh anchors#t_start = time.time() <block_start>delta=np.array(self.L.dot(mesh.points()))<line_sep>#t_end = time.time() #print("delta computation time is %.5f seconds." % (t_end - t_start)) #t_start = time.time() # augment delta solution matrix with weighted anchors <for_stmt>i range(self.k)<block_start>delta[self.n+i :]=self.weight<times>anchors[i :]<block_end>#t_end = time.time() #print("give anchor value computation time is %.5f seconds." % (t_end - t_start)) #t_start = time.time() # update mesh vertices with least-squares solution <for_stmt>i range(3)<block_start>mesh.points()[: i]=sparseqr.solve(self.L delta[: i] tolerance=1e-8)<line_sep>#mesh.points()[:, i] = lsqr(self.L, delta[:, i])[0] <block_end>#t_end = time.time() #print("sparse lsqr time is %.5f seconds." 
% (t_end - t_start)) <return>mesh<block_end><block_end>############################################################## ## High Speed Laplacian Mesh Editing for DSA ## ############################################################## <class_stmt>fast_deform_dsa()<block_start><def_stmt>__init__ self f_ijv_pkl='../predef/dsa_IJV.pkl' f_achr_pkl='../predef/dsa_achr.pkl' weight=1.0 <block_start>self.weight=weight<with_stmt>open(f_ijv_pkl 'rb')<as>fp<block_start>dic_IJV=pickle.load(fp)<block_end>self.I=dic_IJV['I']<line_sep>self.J=dic_IJV['J']<line_sep>self.V=dic_IJV['V']<line_sep>self.n=dic_IJV['num_vert']<with_stmt>open(f_achr_pkl 'rb')<as>fp<block_start>dic_achr=pickle.load(fp)<block_end>#achr_id = dic_achr['achr_id'] self.k=dic_achr['achr_num']<line_sep>self.num_V=len(self.V)<if_stmt>self.weight<ne>1.0<block_start><for_stmt>i range(self.num_V-self.k self.num_V)<block_start>self.V[i]=self.V[i]<times>self.weight<block_end><block_end><block_end># for inactive index, zero means inactive, non-zeros means active <def_stmt>deform self verts achr_verts active_index=[]<block_start><if_stmt>active_index<ne>[]<block_start><for_stmt>i range(len(active_index))<block_start><if_stmt>active_index[i]<eq>0<block_start>self.V[self.num_V-self.k+i]=0<block_end><block_end><block_end>self.L=sparse.coo_matrix((self.V (self.I self.J)) shape=(self.n+self.k self.n)).tocsr()<line_sep>delta=np.array(self.L.dot(verts))<line_sep># augment delta solution matrix with weighted anchors <for_stmt>i range(self.k)<block_start>delta[self.n+i :]=self.weight<times>achr_verts[i :]<block_end># update mesh vertices with least-squares solution deformed_verts=np.zeros(verts.shape)<for_stmt>i range(3)<block_start>deformed_verts[: i]=sparseqr.solve(self.L delta[: i] tolerance=1e-8)<block_end><return>deformed_verts<block_end><block_end>############################################################## ## High Speed Laplacian Mesh Editing for Joint Adapt ## ############################################################## <class_stmt>fast_deform_dja()<block_start><def_stmt>__init__ self f_ijv_pkl='../predef/dja_IJV.pkl' f_achr_pkl='../predef/dja_achr.pkl' weight=1.0 <block_start>self.weight=weight<with_stmt>open(f_ijv_pkl 'rb')<as>fp<block_start>dic_IJV=pickle.load(fp)<block_end>self.I=dic_IJV['I']<line_sep>self.J=dic_IJV['J']<line_sep>self.V=dic_IJV['V']<line_sep>self.n=dic_IJV['num_vert']<with_stmt>open(f_achr_pkl 'rb')<as>fp<block_start>dic_achr=pickle.load(fp)<block_end>#achr_id = dic_achr['achr_id'] self.k=dic_achr['achr_num']<line_sep>self.num_V=len(self.V)<if_stmt>self.weight<ne>1.0<block_start><for_stmt>i range(self.num_V-self.k self.num_V)<block_start>self.V[i]=self.V[i]<times>self.weight<block_end><block_end><block_end># for inactive index, zero means inactive, non-zeros means active <def_stmt>deform self verts achr_verts<block_start>self.L=sparse.coo_matrix((self.V (self.I self.J)) shape=(self.n+self.k self.n)).tocsr()<line_sep>delta=np.array(self.L.dot(verts))<line_sep># augment delta solution matrix with weighted anchors <for_stmt>i range(self.k)<block_start>delta[self.n+i :]=self.weight<times>achr_verts[i :]<block_end># update mesh vertices with least-squares solution deformed_verts=np.zeros(verts.shape)<for_stmt>i range(3)<block_start>deformed_verts[: i]=sparseqr.solve(self.L delta[: i] tolerance=1e-8)<block_end><return>deformed_verts<block_end><block_end>
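# --- Editor's usage sketch (assumes an openmesh TriMesh; the mesh file and anchor choices are hypothetical): pin three vertices, lift them slightly, and re-solve the surface with cotangent weights. ---
<import_stmt>numpy<as>np<import_stmt>openmesh<as>om<line_sep>mesh=om.read_trimesh('body.obj')<line_sep>anchorsIdx=np.array([0 10 42])<line_sep># indices of the vertices to pin
anchors=mesh.points()[anchorsIdx]+np.array([0. 0. 0.1])<line_sep># target positions: the original anchors moved 0.1 along z
mesh=solveLaplacianMesh(mesh anchors anchorsIdx cotangent=<true>)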
<import_from_stmt>colorama Fore Style<def_stmt>colorize token:str color:str<arrow>str<block_start><return>f"{color}{token}{Style.RESET_ALL}"<block_end><def_stmt>colorize_gen tokenizer true_ids gen_ids mask<block_start>gen_ids=gen_ids.numpy()<line_sep>true_ids=true_ids.numpy()<line_sep>mask=mask.numpy()<line_sep>tokens=tokenizer.convert_ids_to_tokens(gen_ids)<line_sep>styled_tokens=tokens.copy()<for_stmt>i range(len(tokens))<block_start><if_stmt>mask[i]<block_start>styled_tokens[i]=colorize(tokens[i] color=Fore.GREEN<if>(true_ids[i]<eq>gen_ids[i])<else>Fore.RED)<block_end><else_stmt><block_start>styled_tokens[i]=tokens[i]<block_end><block_end><return>" ".join(styled_tokens)<block_end><def_stmt>colorize_dis tokenizer gen_ids dis_preds<block_start>gen_ids=gen_ids.numpy()<line_sep>dis_preds=dis_preds.numpy()<line_sep>tokens=tokenizer.convert_ids_to_tokens(gen_ids)<line_sep>styled_tokens=tokens.copy()<for_stmt>i range(len(tokens))<block_start><if_stmt>dis_preds[i]<block_start>styled_tokens[i]=colorize(tokens[i] color=Fore.YELLOW)<block_end><else_stmt><block_start>styled_tokens[i]=tokens[i]<block_end><block_end><return>" ".join(styled_tokens)<block_end>
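# --- Editor's usage sketch: colorize() also works on its own, outside the two token helpers above. ---
<import_from_stmt>colorama Fore<line_sep>print(colorize("replaced" color=Fore.YELLOW))<line_sep># prints the word in yellow, then resets styling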
<import_from_stmt>.gumbel_nac GumbelNACLayer<import_from_stmt>.gumbel_mnac GumbelMNACLayer<import_from_stmt>._abstract_nalu AbstractNALULayer<import_from_stmt>._abstract_recurrent_cell AbstractRecurrentCell<class_stmt>GumbelNALULayer(AbstractNALULayer)<block_start>"""Implements the Gumbel NALU (Neural Arithmetic Logic Unit) Arguments: in_features: number of ingoing features out_features: number of outgoing features """<def_stmt>__init__ self in_features out_features **kwargs<block_start>super().__init__(GumbelNACLayer GumbelMNACLayer in_features out_features **kwargs)<block_end><block_end><class_stmt>GumbelNALUCell(AbstractRecurrentCell)<block_start>"""Implements the Gumbel NALU (Neural Arithmetic Logic Unit) as a recurrent cell Arguments: input_size: number of ingoing features hidden_size: number of outgoing features """<def_stmt>__init__ self input_size hidden_size **kwargs<block_start>super().__init__(GumbelNALULayer GumbelMNACLayer input_size hidden_size **kwargs)<block_end><block_end>
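# --- Editor's note (grounded in the docstrings above): construction mirrors a linear layer, e.g. GumbelNALULayer(in_features=2, out_features=1) for feed-forward use, or GumbelNALUCell(input_size=2, hidden_size=1) inside a recurrent loop; any extra **kwargs are forwarded to the underlying Gumbel NAC/MNAC sublayers. ---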
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>functools reduce<import_from_stmt>torch.autograd Variable<class_stmt>LambdaBase(nn.Sequential)<block_start><def_stmt>__init__ self fn *args<block_start>super(LambdaBase self).__init__(*args)<line_sep>self.lambda_func=fn<block_end><def_stmt>forward_prepare self input<block_start>output=[]<for_stmt>module self._modules.values()<block_start>output.append(module(input))<block_end><return>output<if>output<else>input<block_end><block_end><class_stmt>Lambda(LambdaBase)<block_start><def_stmt>forward self input<block_start><return>self.lambda_func(self.forward_prepare(input))<block_end><block_end><class_stmt>LambdaMap(LambdaBase)<block_start><def_stmt>forward self input<block_start><return>list(map(self.lambda_func self.forward_prepare(input)))<block_end><block_end><class_stmt>LambdaReduce(LambdaBase)<block_start><def_stmt>forward self input<block_start><return>reduce(self.lambda_func self.forward_prepare(input))<block_end><block_end><def_stmt>get_model load_weights=<true># alphabet seems to be fine: <block_start>""" https://github.com/davek44/Basset/tree/master/src/dna_io.py#L145-L148 seq = seq.replace('A','0') seq = seq.replace('C','1') seq = seq.replace('G','2') seq = seq.replace('T','3') """<line_sep>pretrained_model_reloaded_th=nn.Sequential(# Sequential, nn.Conv2d(4 300 (19 1)) nn.BatchNorm2d(300) nn.ReLU() nn.MaxPool2d((3 1) (3 1)) nn.Conv2d(300 200 (11 1)) nn.BatchNorm2d(200) nn.ReLU() nn.MaxPool2d((4 1) (4 1)) nn.Conv2d(200 200 (7 1)) nn.BatchNorm2d(200) nn.ReLU() nn.MaxPool2d((4 1) (4 1)) Lambda(<lambda>x:x.view(x.size(0) -1)) # Reshape, nn.Sequential(Lambda(<lambda>x:x.view(1 -1)<if>1<eq>len(x.size())<else>x) nn.Linear(2000 1000)) # Linear, nn.BatchNorm1d(1000 1e-05 0.1 <true>) #BatchNorm1d, nn.ReLU() nn.Dropout(0.3) nn.Sequential(Lambda(<lambda>x:x.view(1 -1)<if>1<eq>len(x.size())<else>x) nn.Linear(1000 1000)) # Linear, nn.BatchNorm1d(1000 1e-05 0.1 <true>) #BatchNorm1d, nn.ReLU() nn.Dropout(0.3) nn.Sequential(Lambda(<lambda>x:x.view(1 -1)<if>1<eq>len(x.size())<else>x) nn.Linear(1000 164)) # Linear, nn.Sigmoid() )<if_stmt>load_weights<block_start>sd=torch.load('model_files/pretrained_model_reloaded_th.pth')<line_sep>pretrained_model_reloaded_th.load_state_dict(sd)<block_end><return>pretrained_model_reloaded_th<block_end>model=get_model(load_weights=<false>)<line_sep>
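# --- Editor's shape check (a sketch; the 600-bp input length is inferred from the 2000-unit flatten: 600 -conv19-> 582 -pool/3-> 194 -conv11-> 184 -pool/4-> 46 -conv7-> 40 -pool/4-> 10, and 200*10=2000) ---
model.eval()<line_sep># untrained BatchNorm running stats; eval() just makes the shape demo deterministic
x=torch.zeros(2 4 600 1)<line_sep># two one-hot encoded 600-bp sequences (channels A,C,G,T)
y=model(x)<line_sep># y.shape == torch.Size([2, 164]): one sigmoid score per accessibility target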
<import_from_stmt>skmob.utils gislib<import_stmt>math<class_stmt>TestClustering<block_start><def_stmt>setup_method self<block_start>self.point_1=(43.8430139 10.5079940)<line_sep>self.point_2=(43.5442700 10.3261500)<line_sep>self.decimal=43.8430139<line_sep>self.DMS=(43 50 34.85)<block_end><def_stmt>test_get_distance self<block_start>output=gislib.getDistance(self.point_1 self.point_2)<assert_stmt>(math.isclose(output 36.293701213))<line_sep>support=gislib.getDistanceByHaversine(self.point_1 self.point_2)<assert_stmt>(math.isclose(support output))<line_sep>output=gislib.getDistance(self.point_1 self.point_1)<assert_stmt>(math.isclose(output 0))<block_end><def_stmt>test_get_distance_by_haversine self<block_start>output=gislib.getDistanceByHaversine(self.point_1 self.point_2)<assert_stmt>(math.isclose(output 36.293701213))<line_sep>output=gislib.getDistanceByHaversine(self.point_1 self.point_1)<assert_stmt>(math.isclose(output 0))<block_end># def test_decimal_to_DMS(self): # output = gislib.DecimalToDMS(self.decimal) # assert (output[0] == 43) # assert (output[1] == 50) # assert (math.isclose(output[2], 34.85)) <def_stmt>test_DMS_to_decimal self<block_start>output=gislib.DMSToDecimal(self.DMS[0] self.DMS[1] self.DMS[2])<assert_stmt>(math.isclose(output 43.84301388888))<block_end><def_stmt>test_get_coordinates_for_distance self<block_start>output=gislib.getCoordinatesForDistance(self.point_1[0] self.point_1[1] 15)<assert_stmt>(math.isclose(output[0] 0.134989200863))<assert_stmt>(math.isclose(output[1] 0.187162559305))<block_end># def test_is_within_distance(self): # assert (gislib.isWithinDistance(self.point_1, self.point_2, 20)) # assert (gislib.isWithinDistance(self.point_1, self.point_2, 40) is False) <block_end>
<import_stmt>pprint<import_from_stmt>googlevoice Voice<def_stmt>run <block_start>voice=Voice()<line_sep>voice.login()<line_sep>pprint.pprint(voice.settings)<block_end><if_stmt>__name__<eq>'__main__'<block_start>run()<block_end>
<import_from_stmt>pycaption.scc.constants CHARACTERS SPECIAL_CHARS EXTENDED_CHARS<line_sep>ALL_CHARACTERS={**CHARACTERS **SPECIAL_CHARS **EXTENDED_CHARS}<line_sep>COMMAND_LABELS={"9420":"Resume Caption Loading" "9429":"Resume Direct Captioning" "9425":"Roll-Up Captions--2 Rows" "9426":"Roll-Up Captions--3 Rows" "94a7":"Roll-Up Captions--4 Rows" "942a":"Text Restart" "94ab":"Resume Text Display" "942c":"Erase Displayed Memory" "94ae":"Erase Non-displayed Memory" "942f":"End Of Caption" "9140":"row 01, column 00, with plain white text." "91c1":"row 01, column 00, with white underlined text." "91c2":"row 01, column 00, with plain green text." "9143":"row 01, column 00, with green underlined text." "91c4":"row 01, column 00, with plain blue text." "9145":"row 01, column 00, with blue underlined text." "9146":"row 01, column 00, with plain cyan text." "91c7":"row 01, column 00, with cyan underlined text." "91c8":"row 01, column 00, with plain red text." "9149":"row 01, column 00, with red underlined text." "914a":"row 01, column 00, with plain yellow text." "91cb":"row 01, column 00, with yellow underlined text." "914c":"row 01, column 00, with plain magenta text." "91cd":"row 01, column 00, with magenta underlined text." "91ce":"row 01, column 00, with white italicized text." "914f":"row 01, column 00, with white underlined italicized text." "91d0":"row 01, column 00, with plain white text." "9151":"row 01, column 00, with white underlined text." "9152":"row 01, column 04, with plain white text." "91d3":"row 01, column 04, with white underlined text." "9154":"row 01, column 08, with plain white text." "91d5":"row 01, column 08, with white underlined text." "91d6":"row 01, column 12, with plain white text." "9157":"row 01, column 12, with white underlined text." "9158":"row 01, column 16, with plain white text." "91d9":"row 01, column 16, with white underlined text." "91da":"row 01, column 20, with plain white text." "915b":"row 01, column 20, with white underlined text." "91dc":"row 01, column 24, with plain white text." "915d":"row 01, column 24, with white underlined text." "915e":"row 01, column 28, with plain white text." "91df":"row 01, column 28, with white underlined text." "91e0":"row 02, column 00, with plain white text." "9161":"row 02, column 00, with white underlined text." "9162":"row 02, column 00, with plain green text." "91e3":"row 02, column 00, with green underlined text." "9164":"row 02, column 00, with plain blue text." "91e5":"row 02, column 00, with blue underlined text." "91e6":"row 02, column 00, with plain cyan text." "9167":"row 02, column 00, with cyan underlined text." "9168":"row 02, column 00, with plain red text." "91e9":"row 02, column 00, with red underlined text." "91ea":"row 02, column 00, with plain yellow text." "916b":"row 02, column 00, with yellow underlined text." "91ec":"row 02, column 00, with plain magenta text." "916d":"row 02, column 00, with magenta underlined text." "916e":"row 02, column 00, with white italicized text." "91ef":"row 02, column 00, with white underlined italicized text." "9170":"row 02, column 00, with plain white text." "91f1":"row 02, column 00, with white underlined text." "91f2":"row 02, column 04, with plain white text." "9173":"row 02, column 04, with white underlined text." "91f4":"row 02, column 08, with plain white text." "9175":"row 02, column 08, with white underlined text." "9176":"row 02, column 12, with plain white text." "91f7":"row 02, column 12, with white underlined text." 
"91f8":"row 02, column 16, with plain white text." "9179":"row 02, column 16, with white underlined text." "917a":"row 02, column 20, with plain white text." "91fb":"row 02, column 20, with white underlined text." "91fc":"row 02, column 24, with plain white text." "91fd":"row 02, column 24, with white underlined text." "91fe":"row 02, column 28, with plain white text." "917f":"row 02, column 28, with white underlined text." "9240":"row 03, column 00, with plain white text." "92c1":"row 03, column 00, with white underlined text." "92c2":"row 03, column 00, with plain green text." "9243":"row 03, column 00, with green underlined text." "92c4":"row 03, column 00, with plain blue text." "9245":"row 03, column 00, with blue underlined text." "9246":"row 03, column 00, with plain cyan text." "92c7":"row 03, column 00, with cyan underlined text." "92c8":"row 03, column 00, with plain red text." "9249":"row 03, column 00, with red underlined text." "924a":"row 03, column 00, with plain yellow text." "92cb":"row 03, column 00, with yellow underlined text." "924c":"row 03, column 00, with plain magenta text." "92cd":"row 03, column 00, with magenta underlined text." "92ce":"row 03, column 00, with white italicized text." "924f":"row 03, column 00, with white underlined italicized text." "92d0":"row 03, column 00, with plain white text." "9251":"row 03, column 00, with white underlined text." "9252":"row 03, column 04, with plain white text." "92d3":"row 03, column 04, with white underlined text." "9254":"row 03, column 08, with plain white text." "92d5":"row 03, column 08, with white underlined text." "92d6":"row 03, column 12, with plain white text." "9257":"row 03, column 12, with white underlined text." "9258":"row 03, column 16, with plain white text." "92d9":"row 03, column 16, with white underlined text." "92da":"row 03, column 20, with plain white text." "925b":"row 03, column 20, with white underlined text." "92dc":"row 03, column 24, with plain white text." "925d":"row 03, column 24, with white underlined text." "925e":"row 03, column 28, with plain white text." "92df":"row 03, column 28, with white underlined text." "92e0":"row 04, column 00, with plain white text." "9261":"row 04, column 00, with white underlined text." "9262":"row 04, column 00, with plain green text." "92e3":"row 04, column 00, with green underlined text." "9264":"row 04, column 00, with plain blue text." "92e5":"row 04, column 00, with blue underlined text." "92e6":"row 04, column 00, with plain cyan text." "9267":"row 04, column 00, with cyan underlined text." "9268":"row 04, column 00, with plain red text." "92e9":"row 04, column 00, with red underlined text." "92ea":"row 04, column 00, with plain yellow text." "926b":"row 04, column 00, with yellow underlined text." "92ec":"row 04, column 00, with plain magenta text." "926d":"row 04, column 00, with magenta underlined text." "926e":"row 04, column 00, with white italicized text." "92ef":"row 04, column 00, with white underlined italicized text." "9270":"row 04, column 00, with plain white text." "92f1":"row 04, column 00, with white underlined text." "92f2":"row 04, column 04, with plain white text." "9273":"row 04, column 04, with white underlined text." "92f4":"row 04, column 08, with plain white text." "9275":"row 04, column 08, with white underlined text." "9276":"row 04, column 12, with plain white text." "92f7":"row 04, column 12, with white underlined text." "92f8":"row 04, column 16, with plain white text." 
"9279":"row 04, column 16, with white underlined text." "927a":"row 04, column 20, with plain white text." "92fb":"row 04, column 20, with white underlined text." "92fc":"row 04, column 24, with plain white text." "92fd":"row 04, column 24, with white underlined text." "92fe":"row 04, column 28, with plain white text." "927f":"row 04, column 28, with white underlined text." "1540":"row 05, column 00, with plain white text." "15c1":"row 05, column 00, with white underlined text." "15c2":"row 05, column 00, with plain green text." "1543":"row 05, column 00, with green underlined text." "15c4":"row 05, column 00, with plain blue text." "1545":"row 05, column 00, with blue underlined text." "1546":"row 05, column 00, with plain cyan text." "15c7":"row 05, column 00, with cyan underlined text." "15c8":"row 05, column 00, with plain red text." "1549":"row 05, column 00, with red underlined text." "154a":"row 05, column 00, with plain yellow text." "15cb":"row 05, column 00, with yellow underlined text." "154c":"row 05, column 00, with plain magenta text." "15cd":"row 05, column 00, with magenta underlined text." "15ce":"row 05, column 00, with white italicized text." "154f":"row 05, column 00, with white underlined italicized text." "15d0":"row 05, column 00, with plain white text." "1551":"row 05, column 00, with white underlined text." "1552":"row 05, column 04, with plain white text." "15d3":"row 05, column 04, with white underlined text." "1554":"row 05, column 08, with plain white text." "15d5":"row 05, column 08, with white underlined text." "15d6":"row 05, column 12, with plain white text." "1557":"row 05, column 12, with white underlined text." "1558":"row 05, column 16, with plain white text." "15d9":"row 05, column 16, with white underlined text." "15da":"row 05, column 20, with plain white text." "155b":"row 05, column 20, with white underlined text." "15dc":"row 05, column 24, with plain white text." "155d":"row 05, column 24, with white underlined text." "155e":"row 05, column 28, with plain white text." "15df":"row 05, column 28, with white underlined text." "15e0":"row 06, column 00, with plain white text." "1561":"row 06, column 00, with white underlined text." "15462":"row 06, column 00, with plain green text." "15e3":"row 06, column 00, with green underlined text." "1564":"row 06, column 00, with plain blue text." "15e5":"row 06, column 00, with blue underlined text." "15e6":"row 06, column 00, with plain cyan text." "1567":"row 06, column 00, with cyan underlined text." "1568":"row 06, column 00, with plain red text." "15e9":"row 06, column 00, with red underlined text." "15ea":"row 06, column 00, with plain yellow text." "156b":"row 06, column 00, with yellow underlined text." "15ec":"row 06, column 00, with plain magenta text." "156d":"row 06, column 00, with magenta underlined text." "156e":"row 06, column 00, with white italicized text." "15ef":"row 06, column 00, with white underlined italicized text." "1570":"row 06, column 00, with plain white text." "15f1":"row 06, column 00, with white underlined text." "15f2":"row 06, column 04, with plain white text." "1573":"row 06, column 04, with white underlined text." "15f4":"row 06, column 08, with plain white text." "1575":"row 06, column 08, with white underlined text." "1576":"row 06, column 12, with plain white text." "15f7":"row 06, column 12, with white underlined text." "15f8":"row 06, column 16, with plain white text." "1579":"row 06, column 16, with white underlined text." 
"157a":"row 06, column 20, with plain white text." "15fb":"row 06, column 20, with white underlined text." "15fc":"row 06, column 24, with plain white text." "15fd":"row 06, column 24, with white underlined text." "15fe":"row 06, column 28, with plain white text." "157f":"row 06, column 28, with white underlined text." "1640":"row 07, column 00, with plain white text." "16c1":"row 07, column 00, with white underlined text." "16c2":"row 07, column 00, with plain green text." "1643":"row 07, column 00, with green underlined text." "16c4":"row 07, column 00, with plain blue text." "1645":"row 07, column 00, with blue underlined text." "1646":"row 07, column 00, with plain cyan text." "16c7":"row 07, column 00, with cyan underlined text." "16c8":"row 07, column 00, with plain red text." "1649":"row 07, column 00, with red underlined text." "164a":"row 07, column 00, with plain yellow text." "16cb":"row 07, column 00, with yellow underlined text." "164c":"row 07, column 00, with plain magenta text." "16cd":"row 07, column 00, with magenta underlined text." "16ce":"row 07, column 00, with white italicized text." "164f":"row 07, column 00, with white underlined italicized text." "16d0":"row 07, column 00, with plain white text." "1651":"row 07, column 00, with white underlined text." "1652":"row 07, column 04, with plain white text." "16d3":"row 07, column 04, with white underlined text." "1654":"row 07, column 08, with plain white text." "16d5":"row 07, column 08, with white underlined text." "16d6":"row 07, column 12, with plain white text." "1657":"row 07, column 12, with white underlined text." "1658":"row 07, column 16, with plain white text." "16d9":"row 07, column 16, with white underlined text." "16da":"row 07, column 20, with plain white text." "165b":"row 07, column 20, with white underlined text." "16dc":"row 07, column 24, with plain white text." "165d":"row 07, column 24, with white underlined text." "165e":"row 07, column 28, with plain white text." "16df":"row 07, column 28, with white underlined text." "16e0":"row 08, column 00, with plain white text." "1661":"row 08, column 00, with white underlined text." "16462":"row 08, column 00, with plain green text." "16e3":"row 08, column 00, with green underlined text." "1664":"row 08, column 00, with plain blue text." "16e5":"row 08, column 00, with blue underlined text." "16e6":"row 08, column 00, with plain cyan text." "1667":"row 08, column 00, with cyan underlined text." "1668":"row 08, column 00, with plain red text." "16e9":"row 08, column 00, with red underlined text." "16ea":"row 08, column 00, with plain yellow text." "166b":"row 08, column 00, with yellow underlined text." "16ec":"row 08, column 00, with plain magenta text." "166d":"row 08, column 00, with magenta underlined text." "166e":"row 08, column 00, with white italicized text." "16ef":"row 08, column 00, with white underlined italicized text." "1670":"row 08, column 00, with plain white text." "16f1":"row 08, column 00, with white underlined text." "16f2":"row 08, column 04, with plain white text." "1673":"row 08, column 04, with white underlined text." "16f4":"row 08, column 08, with plain white text." "1675":"row 08, column 08, with white underlined text." "1676":"row 08, column 12, with plain white text." "16f7":"row 08, column 12, with white underlined text." "16f8":"row 08, column 16, with plain white text." "1679":"row 08, column 16, with white underlined text." "167a":"row 08, column 20, with plain white text." 
"16fb":"row 08, column 20, with white underlined text." "16fc":"row 08, column 24, with plain white text." "16fd":"row 08, column 24, with white underlined text." "16fe":"row 08, column 28, with plain white text." "167f":"row 08, column 28, with white underlined text." "9740":"row 09, column 00, with plain white text." "97c1":"row 09, column 00, with white underlined text." "97c2":"row 09, column 00, with plain green text." "9743":"row 09, column 00, with green underlined text." "97c4":"row 09, column 00, with plain blue text." "9745":"row 09, column 00, with blue underlined text." "9746":"row 09, column 00, with plain cyan text." "97c7":"row 09, column 00, with cyan underlined text." "97c8":"row 09, column 00, with plain red text." "9749":"row 09, column 00, with red underlined text." "974a":"row 09, column 00, with plain yellow text." "97cb":"row 09, column 00, with yellow underlined text." "974c":"row 09, column 00, with plain magenta text." "97cd":"row 09, column 00, with magenta underlined text." "97ce":"row 09, column 00, with white italicized text." "974f":"row 09, column 00, with white underlined italicized text." "97d0":"row 09, column 00, with plain white text." "9751":"row 09, column 00, with white underlined text." "9752":"row 09, column 04, with plain white text." "97d3":"row 09, column 04, with white underlined text." "9754":"row 09, column 08, with plain white text." "97d5":"row 09, column 08, with white underlined text." "97d6":"row 09, column 12, with plain white text." "9757":"row 09, column 12, with white underlined text." "9758":"row 09, column 16, with plain white text." "97d9":"row 09, column 16, with white underlined text." "97da":"row 09, column 20, with plain white text." "975b":"row 09, column 20, with white underlined text." "97dc":"row 09, column 24, with plain white text." "975d":"row 09, column 24, with white underlined text." "975e":"row 09, column 28, with plain white text." "97df":"row 09, column 28, with white underlined text." "97e0":"row 10, column 00, with plain white text." "9761":"row 10, column 00, with white underlined text." "9762":"row 10, column 00, with plain green text." "97e3":"row 10, column 00, with green underlined text." "9764":"row 10, column 00, with plain blue text." "97e5":"row 10, column 00, with blue underlined text." "97e6":"row 10, column 00, with plain cyan text." "9767":"row 10, column 00, with cyan underlined text." "9768":"row 10, column 00, with plain red text." "97e9":"row 10, column 00, with red underlined text." "97ea":"row 10, column 00, with plain yellow text." "976b":"row 10, column 00, with yellow underlined text." "97ec":"row 10, column 00, with plain magenta text." "976d":"row 10, column 00, with magenta underlined text." "976e":"row 10, column 00, with white italicized text." "97ef":"row 10, column 00, with white underlined italicized text." "9770":"row 10, column 00, with plain white text." "97f1":"row 10, column 00, with white underlined text." "97f2":"row 10, column 04, with plain white text." "9773":"row 10, column 04, with white underlined text." "97f4":"row 10, column 08, with plain white text." "9775":"row 10, column 08, with white underlined text." "9776":"row 10, column 12, with plain white text." "97f7":"row 10, column 12, with white underlined text." "97f8":"row 10, column 16, with plain white text." "9779":"row 10, column 16, with white underlined text." "977a":"row 10, column 20, with plain white text." "97fb":"row 10, column 20, with white underlined text." 
"97fc":"row 10, column 24, with plain white text." "97fd":"row 10, column 24, with white underlined text." "97fe":"row 10, column 28, with plain white text." "977f":"row 10, column 28, with white underlined text." "1040":"row 11, column 00, with plain white text." "10c1":"row 11, column 00, with white underlined text." "10c2":"row 11, column 00, with plain green text." "1043":"row 11, column 00, with green underlined text." "10c4":"row 11, column 00, with plain blue text." "1045":"row 11, column 00, with blue underlined text." "1046":"row 11, column 00, with plain cyan text." "10c7":"row 11, column 00, with cyan underlined text." "10c8":"row 11, column 00, with plain red text." "1049":"row 11, column 00, with red underlined text." "104a":"row 11, column 00, with plain yellow text." "10cb":"row 11, column 00, with yellow underlined text." "104c":"row 11, column 00, with plain magenta text." "10cd":"row 11, column 00, with magenta underlined text." "10ce":"row 11, column 00, with white italicized text." "104f":"row 11, column 00, with white underlined italicized text." "10d0":"row 11, column 00, with plain white text." "1051":"row 11, column 00, with white underlined text." "1052":"row 11, column 04, with plain white text." "10d3":"row 11, column 04, with white underlined text." "1054":"row 11, column 08, with plain white text." "10d5":"row 11, column 08, with white underlined text." "10d6":"row 11, column 12, with plain white text." "1057":"row 11, column 12, with white underlined text." "1058":"row 11, column 16, with plain white text." "10d9":"row 11, column 16, with white underlined text." "10da":"row 11, column 20, with plain white text." "105b":"row 11, column 20, with white underlined text." "10dc":"row 11, column 24, with plain white text." "105d":"row 11, column 24, with white underlined text." "105e":"row 11, column 28, with plain white text." "10df":"row 11, column 28, with white underlined text." "1340":"row 12, column 00, with plain white text." "13c1":"row 12, column 00, with white underlined text." "13c2":"row 12, column 00, with plain green text." "1343":"row 12, column 00, with green underlined text." "13c4":"row 12, column 00, with plain blue text." "1345":"row 12, column 00, with blue underlined text." "1346":"row 12, column 00, with plain cyan text." "13c7":"row 12, column 00, with cyan underlined text." "13c8":"row 12, column 00, with plain red text." "1349":"row 12, column 00, with red underlined text." "134a":"row 12, column 00, with plain yellow text." "13cb":"row 12, column 00, with yellow underlined text." "134c":"row 12, column 00, with plain magenta text." "13cd":"row 12, column 00, with magenta underlined text." "13ce":"row 12, column 00, with white italicized text." "134f":"row 12, column 00, with white underlined italicized text." "13d0":"row 12, column 00, with plain white text." "1351":"row 12, column 00, with white underlined text." "1352":"row 12, column 04, with plain white text." "13d3":"row 12, column 04, with white underlined text." "1354":"row 12, column 08, with plain white text." "13d5":"row 12, column 08, with white underlined text." "13d6":"row 12, column 12, with plain white text." "1357":"row 12, column 12, with white underlined text." "1358":"row 12, column 16, with plain white text." "13d9":"row 12, column 16, with white underlined text." "13da":"row 12, column 20, with plain white text." "135b":"row 12, column 20, with white underlined text." "13dc":"row 12, column 24, with plain white text." 
"135d":"row 12, column 24, with white underlined text." "135e":"row 12, column 28, with plain white text." "13df":"row 12, column 28, with white underlined text." "13e0":"row 13, column 00, with plain white text." "1361":"row 13, column 00, with white underlined text." "13462":"row 13, column 00, with plain green text." "13e3":"row 13, column 00, with green underlined text." "1364":"row 13, column 00, with plain blue text." "13e5":"row 13, column 00, with blue underlined text." "13e6":"row 13, column 00, with plain cyan text." "1367":"row 13, column 00, with cyan underlined text." "1368":"row 13, column 00, with plain red text." "13e9":"row 13, column 00, with red underlined text." "13ea":"row 13, column 00, with plain yellow text." "136b":"row 13, column 00, with yellow underlined text." "13ec":"row 13, column 00, with plain magenta text." "136d":"row 13, column 00, with magenta underlined text." "136e":"row 13, column 00, with white italicized text." "13ef":"row 13, column 00, with white underlined italicized text." "1370":"row 13, column 00, with plain white text." "13f1":"row 13, column 00, with white underlined text." "13f2":"row 13, column 04, with plain white text." "1373":"row 13, column 04, with white underlined text." "13f4":"row 13, column 08, with plain white text." "1375":"row 13, column 08, with white underlined text." "1376":"row 13, column 12, with plain white text." "13f7":"row 13, column 12, with white underlined text." "13f8":"row 13, column 16, with plain white text." "1379":"row 13, column 16, with white underlined text." "137a":"row 13, column 20, with plain white text." "13fb":"row 13, column 20, with white underlined text." "13fc":"row 13, column 24, with plain white text." "13fd":"row 13, column 24, with white underlined text." "13fe":"row 13, column 28, with plain white text." "137f":"row 13, column 28, with white underlined text." "9440":"row 14, column 00, with plain white text." "94c1":"row 14, column 00, with white underlined text." "94c2":"row 14, column 00, with plain green text." "9443":"row 14, column 00, with green underlined text." "94c4":"row 14, column 00, with plain blue text." "9445":"row 14, column 00, with blue underlined text." "9446":"row 14, column 00, with plain cyan text." "94c7":"row 14, column 00, with cyan underlined text." "94c8":"row 14, column 00, with plain red text." "9449":"row 14, column 00, with red underlined text." "944a":"row 14, column 00, with plain yellow text." "94cb":"row 14, column 00, with yellow underlined text." "944c":"row 14, column 00, with plain magenta text." "94cd":"row 14, column 00, with magenta underlined text." "94ce":"row 14, column 00, with white italicized text." "944f":"row 14, column 00, with white underlined italicized text." "94d0":"row 14, column 00, with plain white text." "9451":"row 14, column 00, with white underlined text." "9452":"row 14, column 04, with plain white text." "94d3":"row 14, column 04, with white underlined text." "9454":"row 14, column 08, with plain white text." "94d5":"row 14, column 08, with white underlined text." "94d6":"row 14, column 12, with plain white text." "9457":"row 14, column 12, with white underlined text." "9458":"row 14, column 16, with plain white text." "94d9":"row 14, column 16, with white underlined text." "94da":"row 14, column 20, with plain white text." "945b":"row 14, column 20, with white underlined text." "94dc":"row 14, column 24, with plain white text." "945d":"row 14, column 24, with white underlined text." 
"945e":"row 14, column 28, with plain white text." "94df":"row 14, column 28, with white underlined text." "94e0":"row 15, column 00, with plain white text." "9461":"row 15, column 00, with white underlined text." "9462":"row 15, column 00, with plain green text." "94e3":"row 15, column 00, with green underlined text." "9464":"row 15, column 00, with plain blue text." "94e5":"row 15, column 00, with blue underlined text." "94e6":"row 15, column 00, with plain cyan text." "9467":"row 15, column 00, with cyan underlined text." "9468":"row 15, column 00, with plain red text." "94e9":"row 15, column 00, with red underlined text." "94ea":"row 15, column 00, with plain yellow text." "946b":"row 15, column 00, with yellow underlined text." "94ec":"row 15, column 00, with plain magenta text." "946d":"row 15, column 00, with magenta underlined text." "946e":"row 15, column 00, with white italicized text." "94ef":"row 15, column 00, with white underlined italicized text." "9470":"row 15, column 00, with plain white text." "94f1":"row 15, column 00, with white underlined text." "94f2":"row 15, column 04, with plain white text." "9473":"row 15, column 04, with white underlined text." "94f4":"row 15, column 08, with plain white text." "9475":"row 15, column 08, with white underlined text." "9476":"row 15, column 12, with plain white text." "94f7":"row 15, column 12, with white underlined text." "94f8":"row 15, column 16, with plain white text." "9479":"row 15, column 16, with white underlined text." "947a":"row 15, column 20, with plain white text." "94fb":"row 15, column 20, with white underlined text." "94fc":"row 15, column 24, with plain white text." "94fd":"row 15, column 24, with white underlined text." "94fe":"row 15, column 28, with plain white text." "947f":"row 15, column 28, with white underlined text." 
"97a1":"Tab Offset 1 column" "97a2":"Tab Offset 2 columns" "9723":"Tab Offset 3 columns" "94a1":"BackSpace" "94a4":"Delete to End of Row" "94ad":"Carriage Return" "1020":"Background White" "10a1":"Background Semi-Transparent White" "10a2":"Background Green" "1023":"Background Semi-Transparent Green" "10a4":"Background Blue" "1025":"Background Semi-Transparent Blue" "1026":"Background Cyan" "10a7":"Background Semi-Transparent Cyan" "10a8":"Background Red" "1029":"Background Semi-Transparent Red" "102a":"Background Yellow" "10ab":"Background Semi-Transparent Yellow" "102c":"Background Magenta" "10ad":"Background Semi-Transparent Magenta" "10ae":"Background Black" "102f":"Background Semi-Transparent Black" "97ad":"Background Transparent" "97a4":"Standard Character Set" "9725":"Double-Size Character Set" "9726":"First Private Character Set" "97a7":"Second Private Character Set" "97a8":"People`s Republic of China Character Set" "9729":"Korean Standard Character Set" "972a":"First Registered Character Set" "9120":"White" "91a1":"White Underline" "91a2":"Green" "9123":"Green Underline" "91a4":"Blue" "9125":"Blue Underline" "9126":"Cyan" "91a7":"Cyan Underline" "91a8":"Red" "9129":"Red Underline" "912a":"Yellow" "91ab":"Yellow Underline" "912c":"Magenta" "91ad":"Magenta Underline" "97ae":"Black" "972f":"Black Underline" "91ae":"Italics" "912f":"Italics Underline" "94a8":"Flash ON" "9423":"Alarm Off" "94a2":"Alarm On"}<def_stmt>translate_scc scc_content brackets='[]'<block_start>""" Replaces hexadecimal words with their meaning In order to make SCC files more human readable and easier to debug, this function is used to replace command codes with their labels and character bytes with their actual characters :param scc_content: SCC captions to be translated :type scc_content: str :param brackets: Brackets to group the translated content of a command :type brackets: str :return: Translated SCC captions :rtype: str """<line_sep>opening_bracket,closing_bracket=brackets<if>brackets<else>('' '')<line_sep>scc_elements=set(scc_content.split())<for_stmt>elem scc_elements<block_start>name=COMMAND_LABELS.get(elem)<line_sep># If a 2 byte command was not found, try retrieving 1 byte characters <if_stmt><not>name<block_start>char1=ALL_CHARACTERS.get(elem[:2])<line_sep>char2=ALL_CHARACTERS.get(elem[2:])<if_stmt>char1<is><not><none><and>char2<is><not><none><block_start>name=f"{char1}{char2}"<block_end><block_end><if_stmt>name<block_start>scc_content=scc_content.replace(elem f"{opening_bracket}{name}{closing_bracket}")<block_end><block_end><return>scc_content<block_end>
"""Runs integration tests."""<import_stmt>os<import_stmt>subprocess<line_sep>os.environ['PYTEST_ADDOPTS']="--reruns 3 --reruns-delay 1"<line_sep>subprocess.check_call(['pytest' 'integration-tests/test_207.py' 'integration-tests/test_http.py' ])<if_stmt>os.environ.get('AWS_ACCESS_KEY_ID')<and>os.environ.get('AWS_SECRET_ACCESS_KEY')<block_start>subprocess.check_call(['pytest' '-v' 'integration-tests/test_s3_ported.py'])<block_end>
<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>pdb<line_sep># https://github.com/zju3dv/clean-pvnet/blob/master/lib/datasets/augmentation.py <def_stmt>debug_visualize image mask pts2d sym_cor name_prefix='debug'<block_start><import_from_stmt>random sample<line_sep>cv2.imwrite('{}_image.png'.format(name_prefix) image<times>255)<line_sep>cv2.imwrite('{}_mask.png'.format(name_prefix) mask<times>255)<line_sep>img_pts=image.copy()<times>255<for_stmt>i range(pts2d.shape[0])<block_start>x=int(round(pts2d[i 0]))<line_sep>y=int(round(pts2d[i 1]))<line_sep>img_pts=cv2.circle(img_pts (x y) 2 (0 0 255) thickness=-1)<block_end>cv2.imwrite('{}_pts.png'.format(name_prefix) img_pts)<line_sep>img_sym=image.copy()<times>255<line_sep>ys,xs=np.nonzero(mask)<for_stmt>i_pt sample([i<for>i range(len(ys))] min(100 len(ys)))<block_start>y=int(round(ys[i_pt]))<line_sep>x=int(round(xs[i_pt]))<line_sep>x_cor,y_cor=sym_cor[y x]<line_sep>x_cor=int(round(x+x_cor))<line_sep>y_cor=int(round(y+y_cor))<line_sep>img_sym=cv2.line(img_sym (x y) (x_cor y_cor) (0 0 255) 1)<block_end>cv2.imwrite('{}_sym.png'.format(name_prefix) img_sym)<block_end><def_stmt>rotate_sym_cor sym_cor mask R<block_start>h,w=sym_cor.shape[:2]<line_sep>ys,xs=np.nonzero(mask)<line_sep>source=np.float32(np.stack([xs ys] axis=-1))<line_sep>delta=np.float32(sym_cor[ys xs])<line_sep>target=source+delta<line_sep>last_col=np.ones((source.shape[0] 1) dtype=np.float32)<line_sep>source=np.concatenate([source last_col] axis=-1)<line_sep>target=np.concatenate([target last_col] axis=-1)<line_sep>last_row=np.asarray([[0 0 1]] dtype=np.float32)<line_sep>R=np.concatenate([R last_row] axis=0).transpose()<line_sep>source=np.matmul(source R)[: :2]<line_sep>target=np.matmul(target R)[: :2]<line_sep>source=np.uint32(np.round(source))<line_sep>delta=target-source<line_sep># remove invalid indices xs,ys=source[: 0] source[: 1]<line_sep>valid=(xs<g>0)&(xs<l>w)&(ys<g>0)&(ys<l>h)<line_sep>xs,ys,delta=xs[valid] ys[valid] delta[valid]<line_sep>sym_cor=np.zeros_like(sym_cor)<line_sep>sym_cor[ys xs]=delta<line_sep><return>sym_cor<block_end><def_stmt>rotate_instance img mask hcoords sym_cor rot_ang_min rot_ang_max<block_start>h,w=img.shape[0] img.shape[1]<line_sep>degree=np.random.uniform(rot_ang_min rot_ang_max)<line_sep>hs,ws=np.nonzero(mask)<line_sep>R=cv2.getRotationMatrix2D((np.mean(ws) np.mean(hs)) degree 1)<line_sep>sym_cor=rotate_sym_cor(sym_cor mask R)<line_sep>mask=cv2.warpAffine(mask R (w h) flags=cv2.INTER_NEAREST borderMode=cv2.BORDER_CONSTANT borderValue=0)<line_sep>img=cv2.warpAffine(img R (w h) flags=cv2.INTER_LINEAR borderMode=cv2.BORDER_CONSTANT borderValue=0)<line_sep>last_row=np.asarray([[0 0 1]] dtype=np.float32)<line_sep>R=np.concatenate([R last_row] axis=0).transpose()<line_sep>last_col=np.ones((hcoords.shape[0] 1) dtype=np.float32)<line_sep>hcoords=np.concatenate([hcoords last_col] axis=1)<line_sep>hcoords=np.float32(np.matmul(hcoords R))<line_sep>hcoords=hcoords[: :2]<line_sep><return>img mask hcoords sym_cor<block_end><def_stmt>crop_resize_instance_v1 img mask hcoords sym_cor imheight imwidth overlap_ratio=0.5 ratio_min=0.8 ratio_max=1.2<block_start>''' crop a region of size [imheight*resize_ratio, imwidth*resize_ratio] that overlaps the foreground bounding box by at least overlap_ratio '''<line_sep>hcoords_last_col=np.ones((hcoords.shape[0] 1) dtype=np.float32)<line_sep>hcoords=np.concatenate([hcoords hcoords_last_col] axis=1)<line_sep>resize_ratio=np.random.uniform(ratio_min 
ratio_max)<line_sep>target_height=int(imheight<times>resize_ratio)<line_sep>target_width=int(imwidth<times>resize_ratio)<line_sep>img,mask,hcoords,sym_cor=crop_or_padding_to_fixed_size_instance(img mask hcoords sym_cor target_height target_width overlap_ratio)<line_sep>img=cv2.resize(img (imwidth imheight) interpolation=cv2.INTER_LINEAR)<line_sep>mask=cv2.resize(mask (imwidth imheight) interpolation=cv2.INTER_NEAREST)<line_sep>sym_cor=cv2.resize(sym_cor (imwidth imheight) interpolation=cv2.INTER_NEAREST)<line_sep>sym_cor<augdiv>resize_ratio<line_sep>hcoords[: 0]=hcoords[: 0]/resize_ratio<line_sep>hcoords[: 1]=hcoords[: 1]/resize_ratio<line_sep>hcoords=hcoords[: :2]<line_sep><return>img mask hcoords sym_cor<block_end><def_stmt>crop_or_padding_to_fixed_size_instance img mask hcoords sym_cor th tw overlap_ratio=0.5<block_start>h,w,_=img.shape<line_sep>hs,ws=np.nonzero(mask)<line_sep>hmin,hmax=np.min(hs) np.max(hs)<line_sep>wmin,wmax=np.min(ws) np.max(ws)<line_sep>fh,fw=hmax-hmin wmax-wmin<line_sep>hpad,wpad=th<ge>h tw<ge>w<line_sep>hrmax=int(min(hmin+overlap_ratio<times>fh h-th))# h must > target_height else hrmax<0 hrmin=int(max(hmin+overlap_ratio<times>fh-th 0))<line_sep>wrmax=int(min(wmin+overlap_ratio<times>fw w-tw))# w must > target_width else wrmax<0 wrmin=int(max(wmin+overlap_ratio<times>fw-tw 0))<line_sep>hbeg=0<if>(hpad<or>hrmin<eq>hrmax)<else>np.random.randint(hrmin hrmax)<line_sep>hend=hbeg+th<line_sep>wbeg=0<if>(wpad<or>wrmin<eq>wrmax)<else>np.random.randint(wrmin wrmax)# if pad then [0,wend] will larger than [0,w], indexing it is safe wend=wbeg+tw<line_sep>img=img[hbeg:hend wbeg:wend]<line_sep>mask=mask[hbeg:hend wbeg:wend]<line_sep>sym_cor=sym_cor[hbeg:hend wbeg:wend]<line_sep>hcoords[: 0]<augsub>wbeg<times>hcoords[: 2]<line_sep>hcoords[: 1]<augsub>hbeg<times>hcoords[: 2]<if_stmt>hpad<or>wpad<block_start>nh,nw,_=img.shape<line_sep>new_img=np.zeros([th tw 3] dtype=img.dtype)<line_sep>new_mask=np.zeros([th tw] dtype=mask.dtype)<line_sep>new_sym_cor=np.zeros([th tw 2] dtype=sym_cor.dtype)<line_sep>hbeg=0<if><not>hpad<else>(th-h)<floordiv>2<line_sep>wbeg=0<if><not>wpad<else>(tw-w)<floordiv>2<line_sep>new_img[hbeg:hbeg+nh wbeg:wbeg+nw]=img<line_sep>new_mask[hbeg:hbeg+nh wbeg:wbeg+nw]=mask<line_sep>new_sym_cor[hbeg:hbeg+nh wbeg:wbeg+nw]=sym_cor<line_sep>hcoords[: 0]<augadd>wbeg<times>hcoords[: 2]<line_sep>hcoords[: 1]<augadd>hbeg<times>hcoords[: 2]<line_sep>img,mask,sym_cor=new_img new_mask new_sym_cor<block_end><return>img mask hcoords sym_cor<block_end><def_stmt>crop_or_padding_to_fixed_size img mask sym_cor th tw<block_start>h,w,_=img.shape<line_sep>hpad,wpad=th<ge>h tw<ge>w<line_sep>hbeg=0<if>hpad<else>np.random.randint(0 h-th)<line_sep>wbeg=0<if>wpad<else>np.random.randint(0 w-tw)<line_sep># if pad then [0,wend] will larger than [0,w], indexing it is safe hend=hbeg+th<line_sep>wend=wbeg+tw<line_sep>img=img[hbeg:hend wbeg:wend]<line_sep>mask=mask[hbeg:hend wbeg:wend]<line_sep>sym_cor=sym_cor[hbeg:hend wbeg:wend]<if_stmt>hpad<or>wpad<block_start>nh,nw,_=img.shape<line_sep>new_img=np.zeros([th tw 3] dtype=img.dtype)<line_sep>new_mask=np.zeros([th tw] dtype=mask.dtype)<line_sep>new_sym_cor=np.zeros([th tw 2] dtype=sym_cor.dtype)<line_sep>hbeg=0<if><not>hpad<else>(th-h)<floordiv>2<line_sep>wbeg=0<if><not>wpad<else>(tw-w)<floordiv>2<line_sep>new_img[hbeg:hbeg+nh wbeg:wbeg+nw]=img<line_sep>new_mask[hbeg:hbeg+nh wbeg:wbeg+nw]=mask<line_sep>new_sym_cor[hbeg:hbeg+nh wbeg:wbeg+nw]=sym_cor<line_sep>img,mask,sym_cor=new_img new_mask new_sym_cor<block_end><return>img mask sym_cor<block_end>
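# A minimal sanity-check sketch for crop_or_padding_to_fixed_size, assuming arbitrary random 100x200 inputs; guarded so it only runs when the module is executed directly. <if_stmt>__name__<eq>'__main__'<block_start>img=np.random.rand(100 200 3).astype(np.float32)<line_sep>mask=np.ones((100 200) dtype=np.uint8)<line_sep>sym_cor=np.zeros((100 200 2) dtype=np.float32)<line_sep>img,mask,sym_cor=crop_or_padding_to_fixed_size(img mask sym_cor 128 128)<line_sep># the 100-pixel dimension is padded and the 200-pixel dimension is cropped, so all outputs are 128x128 <assert_stmt>img.shape<eq>(128 128 3)<and>mask.shape<eq>(128 128)<and>sym_cor.shape<eq>(128 128 2)<block_end>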
<import_stmt>os<import_from_stmt>conans ConanFile CMake tools<line_sep>required_conan_version=">=1.33.0"<class_stmt>NsimdConan(ConanFile)<block_start>name="nsimd"<line_sep>homepage="https://github.com/agenium-scale/nsimd"<line_sep>description="Agenium Scale vectorization library for CPUs and GPUs"<line_sep>topics=("hpc" "neon" "cuda" "avx" "simd" "avx2" "sse2" "aarch64" "avx512" "sse42" "rocm" "sve" "neon128")<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>license="MIT"<line_sep>exports_sources=["CMakeLists.txt" "patches/*"]<line_sep>generators="cmake"<line_sep>settings="os" "compiler" "build_type" "arch"<line_sep>options={"shared":[<true> <false>] "fPIC":[<true> <false>] # This used only when building the library. # Most functionality is header only. "simd":[<none> "cpu" "sse2" "sse42" "avx" "avx2" "avx512_knl" "avx512_skylake" "neon128" "aarch64" "sve" "sve128" "sve256" "sve512" "sve1024" "sve2048" "cuda" "rocm"]}<line_sep>default_options={"shared":<false> "fPIC":<true> "simd":<none>}<line_sep>_cmake=<none><line_sep>@property<def_stmt>_source_subfolder self<block_start><return>"source_subfolder"<block_end>@property<def_stmt>_build_subfolder self<block_start><return>"build_subfolder"<block_end><def_stmt>config_options self<block_start><if_stmt>self.settings.os<eq>"Windows"<block_start><del_stmt>self.options.fPIC<block_end><block_end><def_stmt>configure self<block_start><if_stmt>self.options.shared<block_start><del_stmt>self.options.fPIC<block_end># Most of the library is header only. # cpp files do not use STL. <del_stmt>self.settings.compiler.libcxx<block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version] strip_root=<true> destination=self._source_subfolder)<block_end><def_stmt>_configure_cmake self<block_start><if_stmt>self._cmake<block_start><return>self._cmake<block_end>self._cmake=CMake(self)<if_stmt>self.options.simd<block_start>self._cmake.definitions["simd"]=self.options.simd<block_end><if_stmt>self.settings.arch<eq>"armv7hf"<block_start>self._cmake.definitions["NSIMD_ARM32_IS_ARMEL"]=<false><block_end>self._cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"]=self.options.get_safe("fPIC" <true>)<line_sep>self._cmake.configure(build_folder=self._build_subfolder)<line_sep><return>self._cmake<block_end><def_stmt>_patch_sources self<block_start>cmakefile_path=os.path.join(self._source_subfolder "CMakeLists.txt")<line_sep>tools.replace_in_file(cmakefile_path " SHARED " " ")<line_sep>tools.replace_in_file(cmakefile_path "RUNTIME DESTINATION lib" "RUNTIME DESTINATION bin")<line_sep>tools.replace_in_file(cmakefile_path "set_property(TARGET ${o} PROPERTY POSITION_INDEPENDENT_CODE ON)" "")<block_end><def_stmt>build self<block_start>self._patch_sources()<line_sep>cmake=self._configure_cmake()<line_sep>cmake.build()<block_end><def_stmt>package self<block_start>self.copy("LICENSE" dst="licenses" src=self._source_subfolder)<line_sep>cmake=self._configure_cmake()<line_sep>cmake.install()<block_end><def_stmt>package_info self<block_start>self.cpp_info.libs=tools.collect_libs(self)<block_end><block_end>
<import_stmt>pytest<line_sep>@pytest.mark.asyncio@pytest.mark.ttftt_engine@pytest.mark.parametrize("query,errors" [(""" subscription Sub { newDog { name } newHuman { name } } """ [{"message":"Subcription Sub must select only one top level field." "path":<none> "locations":[{"line":2 "column":13} {"line":2 "column":30} ] "extensions":{"rule":"5.2.3.1" "spec":"June 2018" "details":"https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field" "tag":"single-root-field" } }] ) (""" subscription Sub { newDog { name } __typename } """ [{"message":"Subcription Sub must select only one top level field." "path":<none> "locations":[{"line":2 "column":13} {"line":2 "column":30} ] "extensions":{"rule":"5.2.3.1" "spec":"June 2018" "details":"https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field" "tag":"single-root-field" } }] ) (""" fragment MultipleSubscriptionsFields on Subscription { newDog { name } newHuman { name } } subscription Sub { ...MultipleSubscriptionsFields } """ [{"message":"Subcription Sub must select only one top level field." "path":<none> "locations":[{"line":11 "column":13} {"line":2 "column":66} ] "extensions":{"rule":"5.2.3.1" "spec":"June 2018" "details":"https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field" "tag":"single-root-field" } }] ) (""" subscription Sub { ... on Subscription { newDog { name } newHuman { name } } } """ [{"message":"Subcription Sub must select only one top level field." "path":<none> "locations":[{"line":2 "column":13} {"line":3 "column":35} ] "extensions":{"rule":"5.2.3.1" "spec":"June 2018" "details":"https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field" "tag":"single-root-field" } }] ) (""" fragment MultipleSubscriptionsFields on Subscription { ... on Subscription { newDog { name } newHuman { name } } } subscription Sub { ...MultipleSubscriptionsFields } """ [{"message":"Subcription Sub must select only one top level field." "path":<none> "locations":[{"line":13 "column":13} {"line":3 "column":35} ] "extensions":{"rule":"5.2.3.1" "spec":"June 2018" "details":"https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field" "tag":"single-root-field" } }] ) ] )<async_keyword><def_stmt>test_issue87 engine query errors<block_start><assert_stmt><await>engine.execute(query)<eq>{"data":<none> "errors":errors}<block_end>
<import_stmt>attr<import_stmt>datetime<import_from_stmt>._common attrs_event Event UnknownEvent ThreadEvent<import_from_stmt>.. _util _threads _models<import_from_stmt>typing Sequence Optional<line_sep>@attrs_event<class_stmt>ColorSet(ThreadEvent)<block_start>"""Somebody set the color in a thread."""<line_sep>#: The new color. Not limited to the ones in `ThreadABC.set_color` color=attr.ib(type=str)<line_sep>#: When the color was set at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>color=_threads.ThreadABC._parse_color(data["untypedData"]["theme_color"])<line_sep><return>cls(author=author thread=thread color=color at=at)<block_end><block_end>@attrs_event<class_stmt>EmojiSet(ThreadEvent)<block_start>"""Somebody set the emoji in a thread."""<line_sep>#: The new emoji emoji=attr.ib(type=str)<line_sep>#: When the emoji was set at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>emoji=data["untypedData"]["thread_icon"]<line_sep><return>cls(author=author thread=thread emoji=emoji at=at)<block_end><block_end>@attrs_event<class_stmt>NicknameSet(ThreadEvent)<block_start>"""Somebody set the nickname of a person in a thread."""<line_sep>#: The person whose nickname was set subject=attr.ib(type=str)<line_sep>#: The new nickname. If ``None``, the nickname was cleared nickname=attr.ib(type=Optional[str])<line_sep>#: When the nickname was set at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>subject=_threads.User(session=session id=data["untypedData"]["participant_id"])<line_sep>nickname=data["untypedData"]["nickname"]<or><none># None if "" <return>cls(author=author thread=thread subject=subject nickname=nickname at=at)<block_end><block_end>@attrs_event<class_stmt>AdminsAdded(ThreadEvent)<block_start>"""Somebody added admins to a group."""<line_sep>#: The people that were set as admins added=attr.ib(type=Sequence["_threads.User"])<line_sep>#: When the admins were added at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>subject=_threads.User(session=session id=data["untypedData"]["TARGET_ID"])<line_sep><return>cls(author=author thread=thread added=[subject] at=at)<block_end><block_end>@attrs_event<class_stmt>AdminsRemoved(ThreadEvent)<block_start>"""Somebody removed admins from a group."""<line_sep>#: The people that were removed as admins removed=attr.ib(type=Sequence["_threads.User"])<line_sep>#: When the admins were removed at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>subject=_threads.User(session=session id=data["untypedData"]["TARGET_ID"])<line_sep><return>cls(author=author thread=thread removed=[subject] at=at)<block_end><block_end>@attrs_event<class_stmt>ApprovalModeSet(ThreadEvent)<block_start>"""Somebody changed the approval mode in a group."""<line_sep>require_admin_approval=attr.ib(type=bool)<line_sep>#: When the approval mode was set at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session 
data)<line_sep>raa=data["untypedData"]["APPROVAL_MODE"]<eq>"1"<line_sep><return>cls(author=author thread=thread require_admin_approval=raa at=at)<block_end><block_end>@attrs_event<class_stmt>CallStarted(ThreadEvent)<block_start>"""Somebody started a call."""<line_sep>#: When the call was started at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep><return>cls(author=author thread=thread at=at)<block_end><block_end>@attrs_event<class_stmt>CallEnded(ThreadEvent)<block_start>"""Somebody ended a call."""<line_sep>#: How long the call took duration=attr.ib(type=datetime.timedelta)<line_sep>#: When the call ended at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>duration=_util.seconds_to_timedelta(int(data["untypedData"]["call_duration"]))<line_sep><return>cls(author=author thread=thread duration=duration at=at)<block_end><block_end>@attrs_event<class_stmt>CallJoined(ThreadEvent)<block_start>"""Somebody joined a call."""<line_sep>#: When the call was joined at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep><return>cls(author=author thread=thread at=at)<block_end><block_end>@attrs_event<class_stmt>PollCreated(ThreadEvent)<block_start>"""Somebody created a group poll."""<line_sep>#: The new poll poll=attr.ib(type="_models.Poll")<line_sep>#: When the poll was created at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>poll_data=_util.parse_json(data["untypedData"]["question_json"])<line_sep>poll=_models.Poll._from_graphql(session poll_data)<line_sep><return>cls(author=author thread=thread poll=poll at=at)<block_end><block_end>@attrs_event<class_stmt>PollVoted(ThreadEvent)<block_start>"""Somebody voted in a group poll."""<line_sep>#: The updated poll poll=attr.ib(type="_models.Poll")<line_sep>#: Ids of the voted options added_ids=attr.ib(type=Sequence[str])<line_sep>#: Ids of the un-voted options removed_ids=attr.ib(type=Sequence[str])<line_sep>#: When the poll was voted in at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>poll_data=_util.parse_json(data["untypedData"]["question_json"])<line_sep>poll=_models.Poll._from_graphql(session poll_data)<line_sep>added_ids=_util.parse_json(data["untypedData"]["added_option_ids"])<line_sep>removed_ids=_util.parse_json(data["untypedData"]["removed_option_ids"])<line_sep><return>cls(author=author thread=thread poll=poll added_ids=[str(x)<for>x added_ids] removed_ids=[str(x)<for>x removed_ids] at=at )<block_end><block_end>@attrs_event<class_stmt>PlanCreated(ThreadEvent)<block_start>"""Somebody created a plan in a group."""<line_sep>#: The new plan plan=attr.ib(type="_models.PlanData")<line_sep>#: When the plan was created at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>plan=_models.PlanData._from_pull(session data["untypedData"])<line_sep><return>cls(author=author thread=thread plan=plan at=at)<block_end><block_end>@attrs_event<class_stmt>PlanEnded(ThreadEvent)<block_start>"""A plan ended."""<line_sep>#: The 
ended plan plan=attr.ib(type="_models.PlanData")<line_sep>#: When the plan ended at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>plan=_models.PlanData._from_pull(session data["untypedData"])<line_sep><return>cls(author=author thread=thread plan=plan at=at)<block_end><block_end>@attrs_event<class_stmt>PlanEdited(ThreadEvent)<block_start>"""Somebody changed a plan in a group."""<line_sep>#: The updated plan plan=attr.ib(type="_models.PlanData")<line_sep>#: When the plan was updated at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>plan=_models.PlanData._from_pull(session data["untypedData"])<line_sep><return>cls(author=author thread=thread plan=plan at=at)<block_end><block_end>@attrs_event<class_stmt>PlanDeleted(ThreadEvent)<block_start>"""Somebody removed a plan in a group."""<line_sep>#: The removed plan plan=attr.ib(type="_models.PlanData")<line_sep>#: When the plan was removed at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>plan=_models.PlanData._from_pull(session data["untypedData"])<line_sep><return>cls(author=author thread=thread plan=plan at=at)<block_end><block_end>@attrs_event<class_stmt>PlanResponded(ThreadEvent)<block_start>"""Somebody responded to a plan in a group."""<line_sep>#: The plan that was responded to plan=attr.ib(type="_models.PlanData")<line_sep>#: Whether the author will go to the plan or not take_part=attr.ib(type=bool)<line_sep>#: When the plan was responded to at=attr.ib(type=datetime.datetime)<line_sep>@classmethod<def_stmt>_parse cls session data<block_start>author,thread,at=cls._parse_metadata(session data)<line_sep>plan=_models.PlanData._from_pull(session data["untypedData"])<line_sep>take_part=data["untypedData"]["guest_status"]<eq>"GOING"<line_sep><return>cls(author=author thread=thread plan=plan take_part=take_part at=at)<block_end><block_end><def_stmt>parse_admin_message session data<block_start>type_=data["type"]<if_stmt>type_<eq>"change_thread_theme"<block_start><return>ColorSet._parse(session data)<block_end><elif_stmt>type_<eq>"change_thread_icon"<block_start><return>EmojiSet._parse(session data)<block_end><elif_stmt>type_<eq>"change_thread_nickname"<block_start><return>NicknameSet._parse(session data)<block_end><elif_stmt>type_<eq>"change_thread_admins"<block_start>event_type=data["untypedData"]["ADMIN_EVENT"]<if_stmt>event_type<eq>"add_admin"<block_start><return>AdminsAdded._parse(session data)<block_end><elif_stmt>event_type<eq>"remove_admin"<block_start><return>AdminsRemoved._parse(session data)<block_end><else_stmt><block_start><pass><block_end><block_end><elif_stmt>type_<eq>"change_thread_approval_mode"<block_start><return>ApprovalModeSet._parse(session data)<block_end><elif_stmt>type_<eq>"instant_game_update"<block_start><pass># TODO: This <block_end><elif_stmt>type_<eq>"messenger_call_log"# Previously "rtc_call_log" <block_start>event_type=data["untypedData"]["event"]<if_stmt>event_type<eq>"group_call_started"<block_start><return>CallStarted._parse(session data)<block_end><elif_stmt>event_type<in>["group_call_ended" "one_on_one_call_ended"]<block_start><return>CallEnded._parse(session 
data)<block_end><else_stmt><block_start><pass><block_end><block_end><elif_stmt>type_<eq>"participant_joined_group_call"<block_start><return>CallJoined._parse(session data)<block_end><elif_stmt>type_<eq>"group_poll"<block_start>event_type=data["untypedData"]["event_type"]<if_stmt>event_type<eq>"question_creation"<block_start><return>PollCreated._parse(session data)<block_end><elif_stmt>event_type<eq>"update_vote"<block_start><return>PollVoted._parse(session data)<block_end><else_stmt><block_start><pass><block_end><block_end><elif_stmt>type_<eq>"lightweight_event_create"<block_start><return>PlanCreated._parse(session data)<block_end><elif_stmt>type_<eq>"lightweight_event_notify"<block_start><return>PlanEnded._parse(session data)<block_end><elif_stmt>type_<eq>"lightweight_event_update"<block_start><return>PlanEdited._parse(session data)<block_end><elif_stmt>type_<eq>"lightweight_event_delete"<block_start><return>PlanDeleted._parse(session data)<block_end><elif_stmt>type_<eq>"lightweight_event_rsvp"<block_start><return>PlanResponded._parse(session data)<block_end><return>UnknownEvent(source="Delta type" data=data)<block_end>
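# A sketch of the payload shape this dispatcher expects, inferred from the parsers above; the field values here are assumptions, not captured traffic: example_delta={"type":"change_thread_theme" "untypedData":{"theme_color":"FF0084FF"}}<line_sep># parse_admin_message(session example_delta) would route this to ColorSet._parse; a real payload also carries the metadata fields consumed by cls._parse_metadata.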
<import_stmt>ast<import_stmt>pytest<import_stmt>dask<import_stmt>dask.array<as>da<import_stmt>dask.dataframe<as>dd<import_from_stmt>distributed.diagnostics SchedulerPlugin<import_from_stmt>distributed.utils_test gen_cluster<line_sep>np=pytest.importorskip("numpy")<line_sep>pd=pytest.importorskip("pandas")<import_from_stmt>numpy.testing assert_array_equal<line_sep>@gen_cluster(client=<true>)<async_keyword><def_stmt>test_combo_of_layer_types c s a b<block_start>"""Check pack/unpack of a HLG that has everything!"""<def_stmt>add x y z extra_arg<block_start><return>x+y+z+extra_arg<block_end>y=c.submit(<lambda>x:x 2)<line_sep>z=c.submit(<lambda>x:x 3)<line_sep>x=da.blockwise(add "x" da.zeros((3 ) chunks=(1 )) "x" da.ones((3 ) chunks=(1 )) "x" y <none> concatenate=<false> dtype=int extra_arg=z )<line_sep>df=dd.from_pandas(pd.DataFrame({"a":np.arange(3)}) npartitions=3)<line_sep>df=df.shuffle("a" shuffle="tasks")<line_sep>df=df["a"].to_dask_array()<line_sep>res=x.sum()+df.sum()<line_sep>res=<await>c.compute(res optimize_graph=<false>)<assert_stmt>res<eq>21<block_end>@gen_cluster(client=<true>)<async_keyword><def_stmt>test_blockwise c s a b<block_start>"""Check pack/unpack of blockwise layer"""<def_stmt>add x y z extra_arg<block_start><return>x+y+z+extra_arg<block_end>y=c.submit(<lambda>x:x 10)<line_sep>z=c.submit(<lambda>x:x 3)<line_sep>x=da.blockwise(add "x" da.zeros((3 ) chunks=(1 )) "x" da.ones((3 ) chunks=(1 )) "x" y <none> concatenate=<false> dtype=int extra_arg=z )<line_sep>res=<await>c.compute(x.sum() optimize_graph=<false>)<assert_stmt>res<eq>42<block_end>@gen_cluster(client=<true>)<async_keyword><def_stmt>test_shuffle c s a b<block_start>"""Check pack/unpack of a shuffled dataframe"""<line_sep>df=dd.from_pandas(pd.DataFrame({"a":np.arange(10 dtype=int) "b":np.arange(10 0 -1 dtype=float)}) npartitions=5 )<line_sep>df=df.shuffle("a" shuffle="tasks" max_branch=2)<line_sep>df=df["a"]+df["b"]<line_sep>res=<await>c.compute(df optimize_graph=<false>)<assert_stmt>res.dtypes<eq>np.float64<assert_stmt>(res<eq>10.0).all()<block_end><class_stmt>ExampleAnnotationPlugin(SchedulerPlugin)<block_start><def_stmt>__init__ self priority_fn=<none> qux="" resource="" retries=0<block_start>self.priority_fn=priority_fn<or>(<lambda>k:0)<line_sep>self.qux=qux<line_sep>self.resource=resource<line_sep>self.retries=retries<line_sep>self.priority_matches=0<line_sep>self.resource_matches=0<line_sep>self.retry_matches=0<line_sep>self.qux_matches=0<block_end><def_stmt>update_graph self scheduler dsk=<none> keys=<none> restrictions=<none> **kwargs<block_start>annots=kwargs["annotations"]<if_stmt>"priority"<in>annots<block_start>self.priority_matches=sum(int(self.priority_fn(ast.literal_eval(k))<eq>p)<for>k,p annots["priority"].items())<block_end><if_stmt>"qux"<in>annots<block_start>self.qux_matches=sum(int(self.qux<eq>v)<for>v annots["qux"].values())<block_end><if_stmt>"custom_resource"<in>annots<block_start>self.resource_matches=sum(int(self.resource<eq>v)<for>v annots["custom_resource"].values())<block_end><if_stmt>"retries"<in>annots<block_start>self.retry_matches=sum(int(self.retries<eq>v)<for>v annots["retries"].values())<block_end><block_end><block_end>@gen_cluster(client=<true>)<async_keyword><def_stmt>test_array_annotations c s a b<block_start><def_stmt>fn k<block_start><return>k[1]<times>5+k[2]<block_end>qux="baz"<line_sep>resource="widget"<line_sep>plugin=ExampleAnnotationPlugin(priority_fn=fn qux=qux 
resource=resource)<line_sep>s.add_plugin(plugin)<assert_stmt>plugin<in>s.plugins.values()<with_stmt>dask.annotate(priority=fn qux=qux)<block_start>A=da.ones((10 10) chunks=(2 2))<block_end><with_stmt>dask.annotate(custom_resource=resource)<block_start>B=A+1<block_end><with_stmt>dask.config.set(optimization__fuse__active=<false>)<block_start>result=<await>c.compute(B)<block_end>assert_array_equal(result 2)<line_sep># There are annotation matches per array chunk (i.e. task) <assert_stmt>plugin.qux_matches<eq>A.npartitions<assert_stmt>plugin.priority_matches<eq>A.npartitions<assert_stmt>plugin.resource_matches<eq>B.npartitions<block_end>@gen_cluster(client=<true>)<async_keyword><def_stmt>test_dataframe_annotations c s a b<block_start>retries=5<line_sep>plugin=ExampleAnnotationPlugin(retries=retries)<line_sep>s.add_plugin(plugin)<assert_stmt>plugin<in>s.plugins.values()<line_sep>df=dd.from_pandas(pd.DataFrame({"a":np.arange(10 dtype=int) "b":np.arange(10 0 -1 dtype=float)}) npartitions=5 )<line_sep>df=df.shuffle("a" shuffle="tasks" max_branch=2)<line_sep>acol=df["a"]<line_sep>bcol=df["b"]<with_stmt>dask.annotate(retries=retries)<block_start>df=acol+bcol<block_end><with_stmt>dask.config.set(optimization__fuse__active=<false>)<block_start>rdf=<await>c.compute(df)<block_end><assert_stmt>rdf.dtypes<eq>np.float64<assert_stmt>(rdf<eq>10.0).all()<line_sep># There is an annotation match per partition (i.e. task) <assert_stmt>plugin.retry_matches<eq>df.npartitions<block_end>
## # @file electric_overflow.py # @author <NAME> # @date Aug 2018 # <import_stmt>math<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.autograd Function<import_from_stmt>torch.nn functional<as>F<import_stmt>dreamplace.ops.electric_potential.electric_potential_cpp<as>electric_potential_cpp<import_stmt>dreamplace.configure<as>configure<if_stmt>configure.compile_configurations["CUDA_FOUND"]<eq>"TRUE"<block_start><import_stmt>dreamplace.ops.electric_potential.electric_potential_cuda<as>electric_potential_cuda<block_end><import_stmt>pdb<import_stmt>matplotlib<line_sep>matplotlib.use('Agg')<import_from_stmt>mpl_toolkits.mplot3d Axes3D<import_stmt>matplotlib.pyplot<as>plt<class_stmt>ElectricDensityMapFunction(Function)<block_start>""" @brief compute density overflow. @param ctx pytorch API to store data for backward propagation @param pos location of cells, x and then y @param node_size_x_clamped stretched size, max(bin_size*sqrt2, node_size) @param node_size_y_clamped stretched size, max(bin_size*sqrt2, node_size) @param offset_x (stretched size - node_size) / 2 @param offset_y (stretched size - node_size) / 2 @param ratio original area / stretched area @param initial_density_map density_map for fixed cells @param target_density target density @param xl left boundary @param yl lower boundary @param xh right boundary @param yh upper boundary @param bin_size_x bin width @param bin_size_y bin height @param num_movable_nodes number of movable cells @param num_filler_nodes number of filler cells @param padding bin padding to boundary of placement region @param padding_mask mask of 0s and 1s with the same dimensions as the density map, where padding regions are set to 1 @param num_bins_x number of bins in horizontal direction @param num_bins_y number of bins in vertical direction @param num_movable_impacted_bins_x number of impacted bins for any movable cell in x direction @param num_movable_impacted_bins_y number of impacted bins for any movable cell in y direction @param num_filler_impacted_bins_x number of impacted bins for any filler cell in x direction @param num_filler_impacted_bins_y number of impacted bins for any filler cell in y direction @param sorted_node_map the indices of the movable node map """<line_sep>@staticmethod<def_stmt>forward pos node_size_x_clamped node_size_y_clamped offset_x offset_y ratio bin_center_x bin_center_y initial_density_map target_density xl yl xh yh bin_size_x bin_size_y num_movable_nodes num_filler_nodes padding padding_mask # same dimensions as the density map, with padding regions set to 1 num_bins_x num_bins_y num_movable_impacted_bins_x num_movable_impacted_bins_y num_filler_impacted_bins_x num_filler_impacted_bins_y deterministic_flag sorted_node_map<block_start><if_stmt>pos.is_cuda<block_start>output=electric_potential_cuda.density_map(pos.view(pos.numel()) node_size_x_clamped node_size_y_clamped offset_x offset_y ratio bin_center_x bin_center_y initial_density_map target_density xl yl xh yh bin_size_x bin_size_y num_movable_nodes num_filler_nodes padding num_bins_x num_bins_y num_movable_impacted_bins_x num_movable_impacted_bins_y num_filler_impacted_bins_x num_filler_impacted_bins_y deterministic_flag sorted_node_map)<block_end><else_stmt><block_start>output=electric_potential_cpp.density_map(pos.view(pos.numel()) node_size_x_clamped node_size_y_clamped offset_x offset_y ratio bin_center_x bin_center_y initial_density_map target_density xl yl xh yh bin_size_x bin_size_y num_movable_nodes num_filler_nodes padding num_bins_x num_bins_y 
num_movable_impacted_bins_x num_movable_impacted_bins_y num_filler_impacted_bins_x num_filler_impacted_bins_y deterministic_flag)<block_end>density_map=output.view([num_bins_x num_bins_y])<line_sep># set padding density <if_stmt>padding<g>0<block_start>density_map.masked_fill_(padding_mask target_density<times>bin_size_x<times>bin_size_y)<block_end><return>density_map<block_end><block_end><class_stmt>ElectricOverflow(nn.Module)<block_start><def_stmt>__init__ self node_size_x node_size_y bin_center_x bin_center_y target_density xl yl xh yh bin_size_x bin_size_y num_movable_nodes num_terminals num_filler_nodes padding deterministic_flag # control whether to use deterministic routine sorted_node_map movable_macro_mask=<none><block_start>super(ElectricOverflow self).__init__()<line_sep>self.node_size_x=node_size_x<line_sep>self.node_size_y=node_size_y<line_sep>self.bin_center_x=bin_center_x<line_sep>self.bin_center_y=bin_center_y<line_sep>self.target_density=target_density<line_sep>self.xl=xl<line_sep>self.yl=yl<line_sep>self.xh=xh<line_sep>self.yh=yh<line_sep>self.bin_size_x=bin_size_x<line_sep>self.bin_size_y=bin_size_y<line_sep>self.num_movable_nodes=num_movable_nodes<line_sep>self.num_terminals=num_terminals<line_sep>self.num_filler_nodes=num_filler_nodes<line_sep>self.padding=padding<line_sep>self.sorted_node_map=sorted_node_map<line_sep>self.movable_macro_mask=movable_macro_mask<line_sep>self.deterministic_flag=deterministic_flag<line_sep>self.reset()<block_end><def_stmt>reset self<block_start>sqrt2=math.sqrt(2)<line_sep># clamped means stretch a cell to bin size # clamped = max(bin_size*sqrt2, node_size) # offset means half of the stretch size # ratio means the original area over the stretched area self.node_size_x_clamped=self.node_size_x.clamp(min=self.bin_size_x<times>sqrt2)<line_sep>self.offset_x=(self.node_size_x-self.node_size_x_clamped).mul(0.5)<line_sep>self.node_size_y_clamped=self.node_size_y.clamp(min=self.bin_size_y<times>sqrt2)<line_sep>self.offset_y=(self.node_size_y-self.node_size_y_clamped).mul(0.5)<line_sep>node_areas=self.node_size_x<times>self.node_size_y<line_sep>self.ratio=node_areas/(self.node_size_x_clamped<times>self.node_size_y_clamped)<line_sep># detect movable macros and scale down their density to avoid halos # note: the definition of a movable macro may differ between algorithms self.num_movable_macros=0<if_stmt>self.target_density<l>1<and>self.movable_macro_mask<is><not><none><block_start>self.num_movable_macros=self.movable_macro_mask.sum().data.item()<line_sep>self.ratio[:self.num_movable_nodes][self.movable_macro_mask]=self.target_density<block_end># compute maximum impacted bins 
self.num_bins_x=int(math.ceil((self.xh-self.xl)/self.bin_size_x))<line_sep>self.num_bins_y=int(math.ceil((self.yh-self.yl)/self.bin_size_y))<if_stmt>self.num_movable_nodes<block_start>self.num_movable_impacted_bins_x=int(((self.node_size_x[:self.num_movable_nodes].max()+2<times>sqrt2<times>self.bin_size_x)/self.bin_size_x).ceil().clamp(max=self.num_bins_x))<line_sep>self.num_movable_impacted_bins_y=int(((self.node_size_y[:self.num_movable_nodes].max()+2<times>sqrt2<times>self.bin_size_y)/self.bin_size_y).ceil().clamp(max=self.num_bins_y))<block_end><else_stmt><block_start>self.num_movable_impacted_bins_x=0<line_sep>self.num_movable_impacted_bins_y=0<block_end><if_stmt>self.num_filler_nodes<block_start>self.num_filler_impacted_bins_x=((self.node_size_x[-self.num_filler_nodes:].max()+2<times>sqrt2<times>self.bin_size_x)/self.bin_size_x).ceil().clamp(max=self.num_bins_x)<line_sep>self.num_filler_impacted_bins_y=((self.node_size_y[-self.num_filler_nodes:].max()+2<times>sqrt2<times>self.bin_size_y)/self.bin_size_y).ceil().clamp(max=self.num_bins_y)<block_end><else_stmt><block_start>self.num_filler_impacted_bins_x=0<line_sep>self.num_filler_impacted_bins_y=0<block_end><if_stmt>self.padding<g>0<block_start>self.padding_mask=torch.ones(self.num_bins_x self.num_bins_y dtype=torch.uint8 device=self.node_size_x.device)<line_sep>self.padding_mask[self.padding:self.num_bins_x-self.padding self.padding:self.num_bins_y-self.padding].fill_(0)<block_end><else_stmt><block_start>self.padding_mask=torch.zeros(self.num_bins_x self.num_bins_y dtype=torch.uint8 device=self.node_size_x.device)<block_end># initial density_map due to fixed cells self.initial_density_map=<none><block_end><def_stmt>compute_initial_density_map self pos<block_start><if_stmt>self.num_terminals<eq>0<block_start>num_fixed_impacted_bins_x=0<line_sep>num_fixed_impacted_bins_y=0<block_end><else_stmt><block_start>max_size_x=self.node_size_x[self.num_movable_nodes:self.num_movable_nodes+self.num_terminals].max()<line_sep>max_size_y=self.node_size_y[self.num_movable_nodes:self.num_movable_nodes+self.num_terminals].max()<line_sep>num_fixed_impacted_bins_x=((max_size_x+self.bin_size_x)/self.bin_size_x).ceil().clamp(max=self.num_bins_x)<line_sep>num_fixed_impacted_bins_y=((max_size_y+self.bin_size_y)/self.bin_size_y).ceil().clamp(max=self.num_bins_y)<block_end><if_stmt>pos.is_cuda<block_start>func=electric_potential_cuda.fixed_density_map<block_end><else_stmt><block_start>func=electric_potential_cpp.fixed_density_map<block_end>self.initial_density_map=func(pos self.node_size_x self.node_size_y self.bin_center_x self.bin_center_y self.xl self.yl self.xh self.yh self.bin_size_x self.bin_size_y self.num_movable_nodes self.num_terminals self.num_bins_x self.num_bins_y num_fixed_impacted_bins_x num_fixed_impacted_bins_y self.deterministic_flag)<line_sep># scale density of fixed macros self.initial_density_map.mul_(self.target_density)<block_end><def_stmt>forward self pos<block_start><if_stmt>self.initial_density_map<is><none><block_start>self.compute_initial_density_map(pos)<block_end>density_map=ElectricDensityMapFunction.forward(pos self.node_size_x_clamped self.node_size_y_clamped self.offset_x self.offset_y self.ratio self.bin_center_x self.bin_center_y self.initial_density_map self.target_density self.xl self.yl self.xh self.yh self.bin_size_x self.bin_size_y self.num_movable_nodes self.num_filler_nodes self.padding self.padding_mask self.num_bins_x self.num_bins_y self.num_movable_impacted_bins_x self.num_movable_impacted_bins_y 
self.num_filler_impacted_bins_x self.num_filler_impacted_bins_y self.deterministic_flag self.sorted_node_map)<line_sep>bin_area=self.bin_size_x<times>self.bin_size_y<line_sep>density_cost=(density_map-self.target_density<times>bin_area).clamp_(min=0.0).sum().unsqueeze(0)<line_sep><return>density_cost density_map.max().unsqueeze(0)/bin_area<block_end><block_end><def_stmt>plot plot_count density_map padding name<block_start>""" density map contour and heat map """<line_sep>density_map=density_map[padding:density_map.shape[0]-padding padding:density_map.shape[1]-padding]<line_sep>print("max density = %g @ %s"%(np.amax(density_map) np.unravel_index(np.argmax(density_map) density_map.shape)))<line_sep>print("mean density = %g"%(np.mean(density_map)))<line_sep>fig=plt.figure()<line_sep>ax=fig.gca(projection='3d')<line_sep>x=np.arange(density_map.shape[0])<line_sep>y=np.arange(density_map.shape[1])<line_sep>x,y=np.meshgrid(x y)<line_sep># looks like x and y should be swapped ax.plot_surface(y x density_map alpha=0.8)<line_sep>ax.set_xlabel('x')<line_sep>ax.set_ylabel('y')<line_sep>ax.set_zlabel('density')<line_sep># plt.tight_layout() plt.savefig(name+".3d.png")<line_sep>plt.close()<line_sep># plt.clf() #fig, ax = plt.subplots() # ax.pcolor(density_map) # Loop over data dimensions and create text annotations. # for i in range(density_map.shape[0]): # for j in range(density_map.shape[1]): # text = ax.text(j, i, density_map[i, j], # ha="center", va="center", color="w") # fig.tight_layout() #plt.savefig(name+".2d.%d.png" % (plot_count)) # plt.close() <block_end>
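# A minimal usage sketch for the plot helper, assuming an arbitrary random 32x32 density map and padding=1; it writes "debug_density.3d.png" and only runs when the module is executed directly. <if_stmt>__name__<eq>'__main__'<block_start>plot(0 np.random.rand(32 32) 1 "debug_density")<block_end>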
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. <import_from_stmt>typing Any Dict List Tuple<import_stmt>pytest<import_from_stmt>dataflow.multiwoz.trade_dst_utils BeliefState<def_stmt>convert_belief_dict_to_belief_state belief_dict:Dict[str str]<arrow>BeliefState<block_start>belief_state:BeliefState=[]<for_stmt>slot_fullname,slot_value sorted(belief_dict.items())<block_start>belief_state.append({"slots":[[slot_fullname slot_value]]})<block_end><return>belief_state<block_end><def_stmt>build_trade_dialogue dialogue_id:str turns:List[Tuple[str str Dict[str str]]]<arrow>Dict[str Any]<block_start>trade_dialogue={"dialogue_idx":dialogue_id "dialogue":[{# Our mock dialogues here use 1-based turn indices. # In real MultiWOZ/TRADE dialogues, turn index starts from 0. "turn_idx":turn_idx+1 "system_transcript":agent_utt "transcript":user_utt "belief_state":convert_belief_dict_to_belief_state(belief_dict) }<for>turn_idx,(agent_utt user_utt belief_dict) enumerate(turns)] }<line_sep><return>trade_dialogue<block_end>@pytest.fixture<def_stmt>trade_dialogue_1 <arrow>Dict[str Any]<block_start><return>build_trade_dialogue(dialogue_id="dummy_1" turns=[# turn 1 # activate a domain without constraint, the plan should call "Find" with "EqualityConstraint" # we intentionally put only two "none" slots in the belief state to match the MultiWoZ annotation style ("" "i want to book a hotel" {"hotel-name":"none" "hotel-type":"none"} ) # turn 2 # add constraints, the plan should call "Revise" with "EqualityConstraint" ("ok what type" "guest house and cheap, probably hilton" {"hotel-name":"hilton" "hotel-pricerange":"cheap" "hotel-type":"guest house" } ) # turn 3 # drop a constraint (but the domain is still active), the plan should call "Revise" with "EqualityConstraint" ("no results" "ok try another hotel" {"hotel-name":"none" "hotel-pricerange":"cheap" "hotel-type":"guest house" } ) # turn 4 # drop the domain ("failed" "ok never mind" {}) # turn 5 # activate the domain again ("sure" "can you find a hotel in west" {"hotel-area":"west"}) # turn 6 # activate a new domain and use a refer call ("how about this" "ok can you find a restaurant in the same area" {"hotel-area":"west" "restaurant-area":"west"} ) # turn 7 # use a refer call to get a value from a dead domain # the salience model should find the first valid refer value (skips "none") ("how about this" "use the same price range as the hotel" {"hotel-area":"west" "restaurant-area":"west" "restaurant-pricerange":"cheap" } ) # turn 8 # do not change belief state ("ok" "give me the address" {"hotel-area":"west" "restaurant-area":"west" "restaurant-pricerange":"cheap" } ) # turn 9 # a new domain ("ok" "book a taxi now" {"hotel-area":"west" "restaurant-area":"west" "restaurant-pricerange":"cheap" "taxi-departure":"none" } ) # turn 10 # do not change belief state (make sure the plan is "Revise" not "Find") ("ok" "ok" {"hotel-area":"west" "restaurant-area":"west" "restaurant-pricerange":"cheap" "taxi-departure":"none" } ) ] )<block_end>
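# A minimal example of the helper above: a single-slot belief dict becomes a one-element belief state. <assert_stmt>convert_belief_dict_to_belief_state({"hotel-area":"west"})<eq>[{"slots":[["hotel-area" "west"]]}]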
<import_stmt>os<import_from_stmt>logging getLogger<import_from_stmt>src.constants CONSTANTS PLATFORM_ENUM<line_sep>logger=getLogger(__name__)<class_stmt>PlatformConfigurations<block_start>platform=os.getenv("PLATFORM" PLATFORM_ENUM.DOCKER.value)<if_stmt><not>PLATFORM_ENUM.has_value(platform)<block_start><raise>ValueError(f"PLATFORM must be one of {[v.value<for>v PLATFORM_ENUM.__members__.values()]}")<block_end><block_end><class_stmt>DBConfigurations<block_start>mysql_username=os.getenv("MYSQL_USER")<line_sep>mysql_password=os.getenv("MYSQL_PASSWORD")<line_sep>mysql_port=int(os.getenv("MYSQL_PORT" 3306))<line_sep>mysql_database=os.getenv("MYSQL_DATABASE" "sample_db")<line_sep>mysql_server=os.getenv("MYSQL_SERVER")<line_sep>sql_alchemy_database_url=(f"mysql://{mysql_username}:{mysql_password}@{mysql_server}:{mysql_port}/{mysql_database}?charset=utf8")<block_end><class_stmt>APIConfigurations<block_start>title=os.getenv("API_TITLE" "ServingPattern")<line_sep>description=os.getenv("API_DESCRIPTION" "machine learning system serving patterns")<line_sep>version=os.getenv("API_VERSION" "0.1")<block_end><class_stmt>ModelConfigurations<block_start>model_filepath=os.getenv("MODEL_FILEPATH")<line_sep>label_filepath=os.getenv("LABEL_FILEPATH")<line_sep>outlier_model_filepath=os.getenv("OUTLIER_MODEL_FILEPATH")<line_sep>outlier_lower_threshold=float(os.getenv("OUTLIER_LOWER_THRESHOLD" 0.0))<block_end>logger.info(f"{PlatformConfigurations.__name__}: {PlatformConfigurations.__dict__}")<line_sep>logger.info(f"{APIConfigurations.__name__}: {APIConfigurations.__dict__}")<line_sep>logger.info(f"{ModelConfigurations.__name__}: {ModelConfigurations.__dict__}")<line_sep>
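# For illustration, with hypothetical values MYSQL_USER=user, MYSQL_PASSWORD=pw, MYSQL_SERVER=db and the defaults above, sql_alchemy_database_url becomes "mysql://user:pw@db:3306/sample_db?charset=utf8".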
<import_stmt>datetime<import_from_stmt>imap_tools EmailAddress<line_sep>DATA=dict(subject='testing' from_='<EMAIL>' to=('<EMAIL>' ) cc=() bcc=() reply_to=() date=datetime.datetime(2005 6 6 22 21 22 tzinfo=datetime.timezone(datetime.timedelta(0 7200))) date_str='Mon, 6 Jun 2005 22:21:22 +0200' text='This is the first part.\r\n' html='' headers={'mime-version':('1.0 (Apple Message framework v730)' ) 'content-type':('multipart/mixed; boundary=Apple-Mail-13-196941151' ) 'message-id':('<9169D984-4E0B-45EF-<EMAIL>-<EMAIL>>' ) 'from':('<EMAIL>' ) 'subject':('testing' ) 'date':('Mon, 6 Jun 2005 22:21:22 +0200' ) 'to':('<EMAIL>' )} attachments=[dict(filename='test.rb' content_id='' content_disposition='attachment' content_type='text/x-ruby-script' payload=b'puts "testing, testing"\r\n' ) dict(filename='test.pdf' content_id='' content_disposition='inline' content_type='application/pdf' payload=b'blah blah blah' ) dict(filename='smime.p7s' content_id='' content_disposition='attachment' content_type='application/pkcs7-signature' payload=b"\x8d\xa9\xa2\xb1*\x86H\x86\xf7\r\x01\x07\x02\xa0\x800\x88\xda\x9a+1\x0b0\t\x06\x05+\x0e\x03\x02\x1a\x05\x000\x80\x06\t*\x86J6\xa6\x8a\xc1\x07\x01\x00\x00\xa0\x82\x05J0\x82\x05F0\x82\x04.\x8d\xa9\xa2\xb1\x02\x02\x04?\xbe\xbaD0\r\x06\t*\x88\xda\x9a+\r\x01\x01\x05\x05\x00011\x0b0\t\x06\x03U\x04\x06\x13\x02F6\xa6\x8a\xc0\n\x06\x03U\x04\n\x13\x03TDC1\x140\x12\x06\x8d\xa9\xa2\xb3\x13\x0bTDC OCES CH\xda\x9a+\r040229115901Z\x17\r06026\xa6\x8a\xc22901Z0\x81\x801\x0b0\t\x06\x03U\x04\x8d\xa9\xa2\xb0K1)0'\x06\x03U\x04\n\x13 H\xda\x9a+. organisatorisk tin6\xa6\x8a\xc4nin" ) ] from_values=EmailAddress('' '<EMAIL>' '<EMAIL>') to_values=(EmailAddress('' '<EMAIL>' '<EMAIL>') ) cc_values=() bcc_values=() reply_to_values=() )<line_sep>
""" Execution environments encapsulate the logic for where your Flow should execute in Prefect Cloud. DEPRECATED: Environment based configuration is deprecated, please transition to configuring `flow.run_config` instead of `flow.environment`. See https://docs.prefect.io/orchestration/flow_config/overview.html for more info. """<import_from_stmt>prefect.environments.execution.base Environment load_and_run_flow<import_from_stmt>prefect.environments.execution.dask DaskKubernetesEnvironment<import_from_stmt>prefect.environments.execution.dask DaskCloudProviderEnvironment<import_from_stmt>prefect.environments.execution.fargate FargateTaskEnvironment<import_from_stmt>prefect.environments.execution.k8s KubernetesJobEnvironment<import_from_stmt>prefect.environments.execution.local LocalEnvironment<line_sep>
<import_from_stmt>collections OrderedDict<import_stmt>pytest<import_from_stmt>common.serializers.serialization serialize_msg_for_signing<def_stmt>test_serialize_int <block_start><assert_stmt>b"1"<eq>serialize_msg_for_signing(1)<block_end><def_stmt>test_serialize_str <block_start><assert_stmt>b"aaa"<eq>serialize_msg_for_signing("aaa")<block_end><def_stmt>test_serialize_none <block_start><assert_stmt>b""<eq>serialize_msg_for_signing(<none>)<block_end><def_stmt>test_serialize_simple_dict <block_start><assert_stmt>b"1:a|2:b"<eq>serialize_msg_for_signing({1:'a' 2:'b'})<assert_stmt>b"1:a|2:b"<eq>serialize_msg_for_signing({"2":'b' "1":'a'})<block_end><def_stmt>test_serialize_array <block_start><assert_stmt>b"1,5,3,4,2"<eq>serialize_msg_for_signing([1 5 3 4 2])<block_end><def_stmt>test_serialize_ordered_dict <block_start>v1=OrderedDict([('1' 'a') ('2' 'b')])<line_sep>v2=OrderedDict([('2' 'b') ('1' 'a')])<assert_stmt>b"1:a|2:b"<eq>serialize_msg_for_signing(v1)<assert_stmt>b"1:a|2:b"<eq>serialize_msg_for_signing(v2)<block_end><def_stmt>test_serialize_dict_with_array <block_start><assert_stmt>b"1:a|2:b|3:1,2:k"<eq>serialize_msg_for_signing({1:'a' 2:'b' 3:[1 {2:'k'}]})<assert_stmt>b"1:a|2:b|3:1,2:k"<eq>serialize_msg_for_signing({'1':'a' '2':'b' '3':['1' {'2':'k'}]})<block_end>@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")<def_stmt>test_serialize_dicts_with_different_keys <block_start>v1=serialize_msg_for_signing({1:'a' 2:{3:'b' 4:{5:{6:'c'}}}})<line_sep>v2=serialize_msg_for_signing({1:'a' 2:{3:'b' } 4:{5:{6:'c'}}})<assert_stmt>v1<eq>v2<block_end>@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")<def_stmt>test_serialize_complex_dict <block_start><assert_stmt>b'1:a|2:3:b|2:4:5:6:c'<eq>serialize_msg_for_signing({1:'a' 2:{3:'b' 4:{5:{6:'c'}}}})<assert_stmt>b'1:a|2:3:b|2:4:5:6:c'<eq>serialize_msg_for_signing({'1':'a' '2':{'3':'b' '4':{'5':{'6':'c'}}}})<line_sep>v=serialize_msg_for_signing({'1':'a' '2':'b' '3':{'4':'c' '5':'d' '6':{'7':{'8':'e' '9':'f'} '10':{'11':'g' '12':'h'}} '13':{'13':{'13':'i' }}}})<assert_stmt>b'1:a|2:b|3:4:c|3:5:d|3:6:7:8:e|3:6:7:9:f|3:6:10:11:g|3:6:10:12:h|3:13:13:13:i'<eq>v<block_end>@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")<def_stmt>test_serialize_complex_ordered_dict <block_start><assert_stmt>b'1:a|2:3:b|4:c'<eq>serialize_msg_for_signing(OrderedDict([('1' 'a') ('2' OrderedDict([('3' 'b') ('4' 'c') ]))]))<assert_stmt>b'1:a|2:3:b|4:c'<eq>serialize_msg_for_signing(OrderedDict([('2' OrderedDict([('4' 'c') ('3' 'b') ])) ('1' 'a') ]))<block_end>
from brownie import *
from brownie.network.contract import InterfaceContainer
import json


def loadConfig():
    global contracts, acct
    thisNetwork = network.show_active()
    if thisNetwork == "development":
        acct = accounts[0]
        configFile = open('./scripts/contractInteraction/testnet_contracts.json')
    elif thisNetwork == "testnet":
        acct = accounts.load("rskdeployer")
        configFile = open('./scripts/contractInteraction/testnet_contracts.json')
    elif thisNetwork == "testnet-ws":
        acct = accounts.load("rskdeployer")
        configFile = open('./scripts/contractInteraction/testnet_contracts.json')
    elif thisNetwork == "rsk-testnet":
        acct = accounts.load("rskdeployer")
        configFile = open('./scripts/contractInteraction/testnet_contracts.json')
    elif thisNetwork == "rsk-mainnet":
        acct = accounts.load("rskdeployer")
        configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
    else:
        raise Exception("Network not supported.")
    contracts = json.load(configFile)
""" Fedora OpenId backend, docs at: https://python-social-auth.readthedocs.io/en/latest/backends/fedora.html """<import_from_stmt>.open_id OpenIdAuth<class_stmt>FedoraOpenId(OpenIdAuth)<block_start>name='fedora'<line_sep>URL='https://id.fedoraproject.org'<line_sep>USERNAME_KEY='nickname'<block_end>
"""Trains a model, saving checkpoints and tensorboard summaries along the way."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>datetime datetime<import_stmt>os<import_stmt>shutil<import_from_stmt>timeit default_timer<as>timer<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>sys<import_from_stmt>free_model Model<import_stmt>cifar10_input<import_stmt>cifar100_input<import_stmt>pdb<import_stmt>config<def_stmt>get_path_dir data_dir dataset **_<block_start>path=os.path.join(data_dir dataset)<if_stmt>os.path.islink(path)<block_start>path=os.readlink(path)<block_end><return>path<block_end><def_stmt>train tf_seed np_seed train_steps out_steps summary_steps checkpoint_steps step_size_schedule weight_decay momentum train_batch_size epsilon replay_m model_dir dataset **kwargs<block_start>tf.set_random_seed(tf_seed)<line_sep>np.random.seed(np_seed)<line_sep>model_dir=model_dir+'%s_m%d_eps%.1f_b%d'%(dataset replay_m epsilon train_batch_size)# TODO Replace with not defaults # Setting up the data and the model data_path=get_path_dir(dataset=dataset **kwargs)<if_stmt>dataset<eq>'cifar10'<block_start>raw_data=cifar10_input.CIFAR10Data(data_path)<block_end><else_stmt><block_start>raw_data=cifar100_input.CIFAR100Data(data_path)<block_end>global_step=tf.contrib.framework.get_or_create_global_step()<line_sep>model=Model(mode='train' dataset=dataset train_batch_size=train_batch_size)<line_sep># Setting up the optimizer boundaries=[int(sss[0])<for>sss step_size_schedule][1:]<line_sep>values=[sss[1]<for>sss step_size_schedule]<line_sep>learning_rate=tf.train.piecewise_constant(tf.cast(global_step tf.int32) boundaries values)<line_sep>optimizer=tf.train.MomentumOptimizer(learning_rate momentum)<line_sep># Optimizing computation total_loss=model.mean_xent+weight_decay<times>model.weight_decay_loss<line_sep>grads=optimizer.compute_gradients(total_loss)<line_sep># Compute new image pert_grad=[g<for>g,v grads<if>'perturbation'<in>v.name]<line_sep>sign_pert_grad=tf.sign(pert_grad[0])<line_sep>new_pert=model.pert+epsilon<times>sign_pert_grad<line_sep>clip_new_pert=tf.clip_by_value(new_pert -epsilon epsilon)<line_sep>assigned=tf.assign(model.pert clip_new_pert)<line_sep># Train no_pert_grad=[(tf.zeros_like(v) v)<if>'perturbation'<in>v.name<else>(g v)<for>g,v grads]<with_stmt>tf.control_dependencies([assigned])<block_start>min_step=optimizer.apply_gradients(no_pert_grad global_step=global_step)<block_end>tf.initialize_variables([model.pert])# TODO: Removed from TF # Setting up the Tensorboard and checkpoint outputs <if_stmt><not>os.path.exists(model_dir)<block_start>os.makedirs(model_dir)<block_end>saver=tf.train.Saver(max_to_keep=1)<line_sep>tf.summary.scalar('accuracy' model.accuracy)<line_sep>tf.summary.scalar('xent' model.xent/train_batch_size)<line_sep>tf.summary.scalar('total loss' total_loss/train_batch_size)<line_sep>merged_summaries=tf.summary.merge_all()<line_sep>gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=1.0)<with_stmt>tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))<as>sess<block_start>print('\n\n********** free training for epsilon=%.1f using m_replay=%d **********\n\n'%(epsilon replay_m))<line_sep>print('important params >>> \n model dir: %s \n dataset: %s \n training batch size: %d \n'%(model_dir dataset train_batch_size))<if_stmt>dataset<eq>'cifar100'<block_start>print('the ride for CIFAR100 is bumpy -- fasten your seatbelts! 
\n \ you will probably see the training and validation accuracy fluctuating a lot early in trainnig \n \ this is natural especially for large replay_m values because we see that mini-batch so many times.')<block_end># initialize data augmentation <if_stmt>dataset<eq>'cifar10'<block_start>data=cifar10_input.AugmentedCIFAR10Data(raw_data sess model)<block_end><else_stmt><block_start>data=cifar100_input.AugmentedCIFAR100Data(raw_data sess model)<block_end># Initialize the summary writer, global variables, and our time counter. summary_writer=tf.summary.FileWriter(model_dir+'/train' sess.graph)<line_sep>eval_summary_writer=tf.summary.FileWriter(model_dir+'/eval')<line_sep>sess.run(tf.global_variables_initializer())<line_sep># Main training loop <for_stmt>ii range(train_steps)<block_start><if_stmt>ii%replay_m<eq>0<block_start>x_batch,y_batch=data.train_data.get_next_batch(train_batch_size multiple_passes=<true>)<line_sep>nat_dict={model.x_input:x_batch model.y_input:y_batch}<block_end>x_eval_batch,y_eval_batch=data.eval_data.get_next_batch(train_batch_size multiple_passes=<true>)<line_sep>eval_dict={model.x_input:x_eval_batch model.y_input:y_eval_batch}<line_sep># Output to stdout <if_stmt>ii%summary_steps<eq>0<block_start>train_acc,summary=sess.run([model.accuracy merged_summaries] feed_dict=nat_dict)<line_sep>summary_writer.add_summary(summary global_step.eval(sess))<line_sep>val_acc,summary=sess.run([model.accuracy merged_summaries] feed_dict=eval_dict)<line_sep>eval_summary_writer.add_summary(summary global_step.eval(sess))<line_sep>print('Step {}: ({})'.format(ii datetime.now()))<line_sep>print(' training nat accuracy {:.4}% -- validation nat accuracy {:.4}%'.format(train_acc<times>100 val_acc<times>100))<line_sep>sys.stdout.flush()<block_end># Tensorboard summaries <elif_stmt>ii%out_steps<eq>0<block_start>nat_acc=sess.run(model.accuracy feed_dict=nat_dict)<line_sep>print('Step {}: ({})'.format(ii datetime.now()))<line_sep>print(' training nat accuracy {:.4}%'.format(nat_acc<times>100))<block_end># Write a checkpoint <if_stmt>(ii+1)%checkpoint_steps<eq>0<block_start>saver.save(sess os.path.join(model_dir 'checkpoint') global_step=global_step)<block_end># Actual training step sess.run(min_step feed_dict=nat_dict)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>args=config.get_args()<line_sep>train(**vars(args))<block_end>
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conformer utilities."""

import copy
from typing import List, Optional

from absl import logging
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import tensorflow.compat.v2 as tf


def generate_conformers(
        molecule: Chem.rdchem.Mol,
        max_num_conformers: int,
        *,
        random_seed: int = -1,
        prune_rms_thresh: float = -1.0,
        max_iter: int = -1,
        fallback_to_random: bool = False,
) -> Chem.rdchem.Mol:
    """Generates conformers for a given molecule.

    Args:
      molecule: molecular representation of the compound.
      max_num_conformers: maximum number of conformers to generate. If pruning is
        done, the returned number of conformers is not guaranteed to match
        max_num_conformers.
      random_seed: random seed to use for conformer generation.
      prune_rms_thresh: RMSD threshold which allows to prune conformers that are
        too similar.
      max_iter: Maximum number of iterations to perform when optimising MMFF
        force field. If set to <= 0, energy optimisation is not performed.
      fallback_to_random: if conformers cannot be obtained, use random
        coordinates to initialise.

    Returns:
      Copy of a `molecule` with added hydrogens. The returned molecule contains
      force field-optimised conformers. The number of conformers is guaranteed
      to be <= max_num_conformers.
    """
    mol = copy.deepcopy(molecule)
    mol = Chem.AddHs(mol)
    mol = _embed_conformers(mol, max_num_conformers, random_seed, prune_rms_thresh,
                            fallback_to_random, use_random=False)

    if max_iter > 0:
        mol_with_conformers = _minimize_by_mmff(mol, max_iter)
        if mol_with_conformers is None:
            mol_with_conformers = _minimize_by_uff(mol, max_iter)
    else:
        mol_with_conformers = mol

    # Aligns conformations in a molecule to each other using the first
    # conformation as the reference.
    AllChem.AlignMolConformers(mol_with_conformers)

    # We remove hydrogens to keep the number of atoms consistent with the graph
    # nodes.
    mol_with_conformers = Chem.RemoveHs(mol_with_conformers)

    return mol_with_conformers


def atom_to_feature_vector(
        atom: rdkit.Chem.rdchem.Atom,
        conformer: Optional[np.ndarray] = None,
) -> List[float]:
    """Converts rdkit atom object to a list of position features.

    Args:
      atom: rdkit atom object.
      conformer: Generated conformer. NaN values are returned if set to None.

    Returns:
      List containing positions (x, y, z) of the atom from the conformer.
    """
    if conformer:
        pos = conformer.GetAtomPosition(atom.GetIdx())
        return [pos.x, pos.y, pos.z]
    return [np.nan, np.nan, np.nan]


def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
    """Computes conformer.

    Args:
      smile: Smile string.
      max_iter: Maximum number of iterations to perform when optimising MMFF
        force field. If set to <= 0, energy optimisation is not performed.

    Returns:
      The per-atom conformer positions as a float32 array.

    Raises:
      RuntimeError: If unable to convert smile string to RDKit mol.
    """
    mol = rdkit.Chem.MolFromSmiles(smile)
    if not mol:
        raise RuntimeError('Unable to convert smile to molecule: %s' % smile)

    conformer_failed = False
    try:
        mol = generate_conformers(
            mol,
            max_num_conformers=1,
            random_seed=45,
            prune_rms_thresh=0.01,
            max_iter=max_iter)
    except IOError as e:
        logging.exception('Failed to generate conformers for %s . IOError %s.', smile, e)
        conformer_failed = True
    except ValueError:
        logging.error('Failed to generate conformers for %s . ValueError', smile)
        conformer_failed = True
    except:  # pylint: disable=bare-except
        logging.error('Failed to generate conformers for %s.', smile)
        conformer_failed = True

    atom_features_list = []
    conformer = None if conformer_failed else list(mol.GetConformers())[0]
    for atom in mol.GetAtoms():
        atom_features_list.append(atom_to_feature_vector(atom, conformer))
    conformer_features = np.array(atom_features_list, dtype=np.float32)
    return conformer_features


def get_random_rotation_matrix(include_mirror_symmetry: bool) -> tf.Tensor:
    """Returns a single random rotation matrix."""
    rotation_matrix = _get_random_rotation_3d()
    if include_mirror_symmetry:
        random_mirror_symmetry = _get_random_mirror_symmetry()
        rotation_matrix = tf.matmul(rotation_matrix, random_mirror_symmetry)
    return rotation_matrix


def rotate(vectors: tf.Tensor, rotation_matrix: tf.Tensor) -> tf.Tensor:
    """Batch of vectors on a single rotation matrix."""
    return tf.matmul(vectors, rotation_matrix)


def _embed_conformers(
        molecule: Chem.rdchem.Mol,
        max_num_conformers: int,
        random_seed: int,
        prune_rms_thresh: float,
        fallback_to_random: bool,
        *,
        use_random: bool = False,
) -> Chem.rdchem.Mol:
    """Embeds conformers into a copy of a molecule.

    If random coordinates are allowed, tries not to use random coordinates at
    first, and uses random only if that fails.

    Args:
      molecule: molecular representation of the compound.
      max_num_conformers: maximum number of conformers to generate. If pruning is
        done, the returned number of conformers is not guaranteed to match
        max_num_conformers.
      random_seed: random seed to use for conformer generation.
      prune_rms_thresh: RMSD threshold which allows to prune conformers that are
        too similar.
      fallback_to_random: if conformers cannot be obtained, use random
        coordinates to initialise.
      use_random: Use random coordinates. Shouldn't be set by any caller except
        this function itself.

    Returns:
      A copy of a molecule with embedded conformers.

    Raises:
      ValueError: if conformers cannot be obtained for a given molecule.
    """
    mol = copy.deepcopy(molecule)
    # Obtains parameters for conformer generation.
    # In particular, ETKDG is experimental-torsion basic knowledge distance
    # geometry, which allows to randomly generate an initial conformation that
    # satisfies various geometric constraints such as lower and upper bounds on
    # the distances between atoms.
    params = AllChem.ETKDGv3()
    params.randomSeed = random_seed
    params.pruneRmsThresh = prune_rms_thresh
    params.numThreads = -1
    params.useRandomCoords = use_random
    conf_ids = AllChem.EmbedMultipleConfs(mol, max_num_conformers, params)

    if not conf_ids:
        if not fallback_to_random or use_random:
            raise ValueError('Cant get conformers')
        return _embed_conformers(
            mol,
            max_num_conformers,
            random_seed,
            prune_rms_thresh,
            fallback_to_random,
            use_random=True)
    return mol


def _minimize_by_mmff(
        molecule: Chem.rdchem.Mol,
        max_iter: int,
) -> Optional[Chem.rdchem.Mol]:
    """Minimizes forcefield for conformers using MMFF algorithm.

    Args:
      molecule: a datastructure containing conformers.
      max_iter: number of maximum iterations to use when optimising force field.

    Returns:
      A copy of a `molecule` containing optimised conformers; or None if MMFF
      cannot be performed.
    """
    molecule_props = AllChem.MMFFGetMoleculeProperties(molecule)
    if molecule_props is None:
        return None

    mol = copy.deepcopy(molecule)
    for conf_id in range(mol.GetNumConformers()):
        ff = AllChem.MMFFGetMoleculeForceField(
            mol, molecule_props, confId=conf_id, ignoreInterfragInteractions=False)
        ff.Initialize()
        # minimises a conformer within a mol in place.
        ff.Minimize(max_iter)
    return mol


def _minimize_by_uff(
        molecule: Chem.rdchem.Mol,
        max_iter: int,
) -> Chem.rdchem.Mol:
    """Minimizes forcefield for conformers using UFF algorithm.

    Args:
      molecule: a datastructure containing conformers.
      max_iter: number of maximum iterations to use when optimising force field.

    Returns:
      A copy of a `molecule` containing optimised conformers.
    """
    mol = copy.deepcopy(molecule)
    conf_ids = range(mol.GetNumConformers())
    for conf_id in conf_ids:
        ff = AllChem.UFFGetMoleculeForceField(mol, confId=conf_id)
        ff.Initialize()
        # minimises a conformer within a mol in place.
        ff.Minimize(max_iter)
    return mol


def _get_symmetry_rotation_matrix(sign: tf.Tensor) -> tf.Tensor:
    """Returns the 2d/3d matrix for mirror symmetry."""
    zero = tf.zeros_like(sign)
    one = tf.ones_like(sign)
    # pylint: disable=bad-whitespace,bad-continuation
    rot = [sign, zero, zero,
           zero,  one, zero,
           zero, zero,  one]
    # pylint: enable=bad-whitespace,bad-continuation
    shape = (3, 3)
    rot = tf.stack(rot, axis=-1)
    rot = tf.reshape(rot, shape)
    return rot


def _quaternion_to_rotation_matrix(quaternion: tf.Tensor) -> tf.Tensor:
    """Converts a batch of quaternions to a batch of rotation matrices."""
    q0 = quaternion[0]
    q1 = quaternion[1]
    q2 = quaternion[2]
    q3 = quaternion[3]

    r00 = 2 * (q0 * q0 + q1 * q1) - 1
    r01 = 2 * (q1 * q2 - q0 * q3)
    r02 = 2 * (q1 * q3 + q0 * q2)
    r10 = 2 * (q1 * q2 + q0 * q3)
    r11 = 2 * (q0 * q0 + q2 * q2) - 1
    r12 = 2 * (q2 * q3 - q0 * q1)
    r20 = 2 * (q1 * q3 - q0 * q2)
    r21 = 2 * (q2 * q3 + q0 * q1)
    r22 = 2 * (q0 * q0 + q3 * q3) - 1

    matrix = tf.stack([r00, r01, r02, r10, r11, r12, r20, r21, r22], axis=-1)
    return tf.reshape(matrix, [3, 3])


def _get_random_rotation_3d() -> tf.Tensor:
    random_quaternions = tf.random.normal(shape=[4], dtype=tf.float32)
    random_quaternions /= tf.linalg.norm(random_quaternions, axis=-1, keepdims=True)
    return _quaternion_to_rotation_matrix(random_quaternions)


def _get_random_mirror_symmetry() -> tf.Tensor:
    random_0_1 = tf.random.uniform(shape=(), minval=0, maxval=2, dtype=tf.int32)
    random_signs = tf.cast((2 * random_0_1) - 1, tf.float32)
    return _get_symmetry_rotation_matrix(random_signs)
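# --- usage sketch (added) ---
# A minimal sketch exercising compute_conformer() from this module on a toy
# molecule; the ethanol SMILES and the iteration count are illustrative only.
if __name__ == '__main__':
    coords = compute_conformer('CCO', max_iter=200)
    # One (x, y, z) row per heavy atom, since hydrogens are stripped above.
    print(coords.shape)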
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, Fraunhofer FKIE/CMS, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of Fraunhofer nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from python_qt_binding.QtCore import QObject, Signal

import fkie_node_manager_daemon.remote as remote

from .file_channel import FileChannel
from .launch_channel import LaunchChannel
from .monitor_channel import MonitorChannel
from .screen_channel import ScreenChannel
from .settings_channel import SettingsChannel
from .version_channel import VersionChannel


class NmdClient(QObject):

    error = Signal(str, str, str, Exception)
    '''
    :ivar str,str,str,Exception error: error is a signal, which is emitted on errors {method, url, path, Exception}.
    '''

    def __init__(self):
        QObject.__init__(self)
        self._channels = []
        self.file = FileChannel()
        self.file.error.connect(self.on_error)
        self._channels.append(self.file)
        self.launch = LaunchChannel()
        self.launch.error.connect(self.on_error)
        self._channels.append(self.launch)
        self.monitor = MonitorChannel()
        self.monitor.error.connect(self.on_error)
        self._channels.append(self.monitor)
        self.screen = ScreenChannel()
        self.screen.error.connect(self.on_error)
        self._channels.append(self.screen)
        self.settings = SettingsChannel()
        self.settings.error.connect(self.on_error)
        self._channels.append(self.settings)
        self.version = VersionChannel()
        self.version.error.connect(self.on_error)
        self._channels.append(self.version)

    def stop(self):
        print("clear grpc channels...")
        for channel in self._channels:
            channel.stop()
        remote.clear_channels()
        print("clear grpc channels...ok")
        self.clear_cache()
        del self._channels[:]

    def clear_cache(self, grpc_path=''):
        for channel in self._channels:
            channel.clear_cache(grpc_path)

    def on_error(self, method, url, path, exception):
        self.error.emit(method, url, path, exception)
class FuzzException(Exception):
    FATAL, SIGCANCEL = range(2)

    def __init__(self, etype, msg):
        self.etype = etype
        self.msg = msg
        Exception.__init__(self, msg)
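# --- usage sketch (added) ---
# Minimal illustration of raising and inspecting the exception defined above;
# the message text is an assumption chosen only for the example.
try:
    raise FuzzException(FuzzException.FATAL, "payload file not found")
except FuzzException as e:
    # The error type and message survive the round trip through Exception.
    assert e.etype == FuzzException.FATAL
    assert e.msg == "payload file not found"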
import asyncio
import multiprocessing

import pytest

from lahja.asyncio import AsyncioEndpoint
from lahja.common import BaseEvent, ConnectionConfig


class EventTest(BaseEvent):
    def __init__(self, value):
        self.value = value


def run_asyncio(coro, *args):
    loop = asyncio.get_event_loop()
    loop.run_until_complete(coro(*args))
    loop.close()


async def _do_asyncio_client_endpoint(name, ipc_path):
    config = ConnectionConfig(name, ipc_path)

    async with AsyncioEndpoint(name + "client").run() as client:
        await client.connect_to_endpoints(config)
        assert client.is_connected_to(name)
        await client.wait_until_endpoint_subscribed_to(config.name, EventTest)
        event = EventTest("test")
        await client.broadcast(event)


@pytest.mark.trio
async def test_trio_endpoint_serving_asyncio_endpoint(endpoint_server, endpoint_server_config):
    name = endpoint_server_config.name
    path = endpoint_server_config.path

    proc = multiprocessing.Process(
        target=run_asyncio,
        args=(_do_asyncio_client_endpoint, name, path))
    proc.start()

    result = await endpoint_server.wait_for(EventTest)

    assert isinstance(result, EventTest)
    assert result.value == "test"

    proc.join()
# pylint: skip-file
# Note: this file is a fragment of a generated module; GcloudCLI and Utils are
# provided by the surrounding generated code when the module is assembled.
import yaml


class GcloudComputeProjectInfoError(Exception):
    '''exception class for projectinfo'''
    pass


# pylint: disable=too-many-instance-attributes
class GcloudComputeProjectInfo(GcloudCLI):
    ''' Class to wrap the gcloud compute projectinfo command'''

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self, metadata=None, metadata_from_file=None, remove_keys=None, verbose=False):
        ''' Constructor for gcloud resource '''
        super(GcloudComputeProjectInfo, self).__init__()
        self._metadata = metadata
        self.metadata_from_file = metadata_from_file
        self.remove_keys = remove_keys
        self._existing_metadata = None
        self.verbose = verbose

    @property
    def metadata(self):
        '''property for metadata'''
        return self._metadata

    @property
    def existing_metadata(self):
        '''property for existing metadata'''
        if self._existing_metadata is None:
            self._existing_metadata = []
            metadata = self.list_metadata()
            metadata = metadata['results']['commonInstanceMetadata']
            if metadata.has_key('items'):
                self._existing_metadata = metadata['items']
        return self._existing_metadata

    def list_metadata(self):
        '''return metadata'''
        results = self._list_metadata('project-info')
        if results['returncode'] == 0:
            results['results'] = yaml.load(results['results'])
        return results

    def exists(self):
        ''' return whether the metadata that we are removing exists '''
        # currently we aren't opening up files for comparison so always return False
        if self.metadata_from_file:
            return False

        for key, val in self.metadata.items():
            for data in self.existing_metadata:
                if key == 'sshKeys' and data['key'] == key:
                    ssh_keys = {}
                    # get all the users and their public keys out of the project
                    for user_pub_key in data['value'].strip().split('\n'):
                        col_index = user_pub_key.find(':')
                        user = user_pub_key[:col_index]
                        pub_key = user_pub_key[col_index + 1:]
                        ssh_keys[user] = pub_key
                    # compare the users that were passed in to see if we need to update
                    for inc_user, inc_pub_key in val.items():
                        if not ssh_keys.has_key(inc_user) or ssh_keys[inc_user] != inc_pub_key:
                            return False
                    # matched all ssh keys
                    break
                elif data['key'] == str(key) and str(data['value']) == str(val):
                    break
            else:
                return False

        return True

    def keys_exist(self):
        ''' return whether the keys exist in the metadata'''
        for key in self.remove_keys:
            for mdata in self.existing_metadata:
                if key == mdata['key']:
                    break
            else:
                # NOT FOUND
                return False
        return True

    def needs_update(self):
        ''' return whether an update is needed '''
        # compare incoming values with metadata returned
        # for each key in user supplied check against returned data
        return not self.exists()

    def delete_metadata(self, remove_all=False):
        ''' attempt to remove metadata '''
        return self._delete_metadata(self.remove_keys, remove_all=remove_all)

    def create_metadata(self):
        '''create metadata'''
        results = None
        if self.metadata and self.metadata.has_key('sshKeys'):
            # create a file and pass it to create
            ssh_strings = ["%s:%s" % (user, pub_key)
                           for user, pub_key in self.metadata['sshKeys'].items()]
            ssh_keys = {'sshKeys': Utils.create_file('ssh_keys', '\n'.join(ssh_strings), 'raw')}

            results = self._create_metadata('project-info', self.metadata, ssh_keys)

            # remove them and continue
            del self.metadata['sshKeys']

            if len(self.metadata.keys()) == 0:
                return results

        new_results = self._create_metadata('project-info', self.metadata, self.metadata_from_file)

        if results:
            return [results, new_results]

        return new_results
from opytimizer.optimizers.swarm import FFOA

# Creates a FFOA optimizer
o = FFOA()
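# --- usage sketch (added) ---
# Sketch of running the optimizer on a toy function. The Opytimizer, Function
# and SearchSpace names follow the opytimizer library's documented API, but the
# exact constructor arguments below are assumptions and may differ by version,
# so the sketch is left commented out.
# from opytimizer import Opytimizer
# from opytimizer.core import Function
# from opytimizer.spaces import SearchSpace
#
# space = SearchSpace(n_agents=20, n_variables=2,
#                     lower_bound=[-10, -10], upper_bound=[10, 10])
# task = Function(lambda x: (x ** 2).sum())  # minimise the sphere function
# Opytimizer(space, o, task).start(n_iterations=100)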
from .element import GraphElement
from .broker import VertexBroker


class Vertex(GraphElement):
    Broker = VertexBroker

    # TODO
    # Edge information is carried in vertexes retrieved from database,
    # as OrientBinaryObject. Can likely optimise these traversals
    # when we know how to parse these.

    def outE(self, *edge_classes):
        g = self._graph
        return g.outE(self._id, *edge_classes) if g else None

    def inE(self, *edge_classes):
        g = self._graph
        return g.inE(self._id, *edge_classes) if g else None

    def bothE(self, *edge_classes):
        g = self._graph
        return g.bothE(self._id, *edge_classes) if g else None

    def out(self, *edge_classes):
        g = self._graph
        return g.out(self._id, *edge_classes) if g else None

    def in_(self, *edge_classes):
        g = self._graph
        return g.in_(self._id, *edge_classes) if g else None

    def both(self, *edge_classes):
        g = self._graph
        return g.both(self._id, *edge_classes) if g else None

    def __call__(self, edge_or_broker):
        """Provides syntactic sugar for creating edges."""
        if hasattr(edge_or_broker, 'broker'):
            edge_or_broker = edge_or_broker.broker.element_cls
        elif hasattr(edge_or_broker, 'element_cls'):
            edge_or_broker = edge_or_broker.element_cls

        if edge_or_broker.decl_type == 1:
            return VertexVector(self, edge_or_broker.objects)


class VertexVector(object):
    def __init__(self, origin, edge_broker, **kwargs):
        self.origin = origin
        self.edge_broker = edge_broker
        self.kwargs = kwargs

    def __gt__(self, target):
        """Syntactic sugar for creating an edge.

        :param target: If a batch variable, return a command for creating an
            edge to this vertex. Otherwise, create the edge.
        """
        if hasattr(target, '_id'):
            if target._id[0] == '$':
                return self.edge_broker.create_command(self.origin, target, **self.kwargs)
            else:
                return self.edge_broker.create(self.origin, target, **self.kwargs)
        return self
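# --- usage sketch (added) ---
# Illustration of the edge-creation sugar implemented by __call__ and __gt__
# above: calling a vertex with an edge class yields a VertexVector, and the
# `>` operator materialises the edge. The graph `g` and the Knows edge class
# are hypothetical names used only for this sketch.
# alice = g.people.create(name='alice')  # hypothetical vertex broker call
# bob = g.people.create(name='bob')
# alice(Knows) > bob                     # creates a Knows edge alice -> bob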
from django.contrib import admin

from external_push.models import GenericPushTarget, BrewersFriendPushTarget, BrewfatherPushTarget, \
    ThingSpeakPushTarget, GrainfatherPushTarget


@admin.register(GenericPushTarget)
class GenericPushTargetAdmin(admin.ModelAdmin):
    list_display = ('name', 'status', 'target_host')


@admin.register(BrewersFriendPushTarget)
class BrewersFriendPushTargetAdmin(admin.ModelAdmin):
    list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')


@admin.register(BrewfatherPushTarget)
class BrewfatherPushTargetAdmin(admin.ModelAdmin):
    list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')


@admin.register(ThingSpeakPushTarget)
class ThingSpeakPushTargetAdmin(admin.ModelAdmin):
    list_display = ('name', 'status')


@admin.register(GrainfatherPushTarget)
class GrainfatherPushTargetAdmin(admin.ModelAdmin):
    list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')
# Convert an operand to characters
#@author b0bb
#@category Pwn
#@keybinding shift r
#@menupath Analysis.Pwn.Utilities.Convert to Char
#@toolbar

import ghidra.app.cmd.equate.SetEquateCmd as SetEquateCmd
import ghidra.program.util.OperandFieldLocation as OperandFieldLocation
import ghidra.program.model.lang.OperandType as OperandType


def run():
    if type(currentLocation) is not OperandFieldLocation:
        return
    addr = currentLocation.getAddress()
    inst = currentProgram.getListing().getInstructionAt(addr)
    opin = currentLocation.getOperandIndex()
    if inst.getOperandType(opin) == OperandType.SCALAR:
        string = ''
        scalar = inst.getScalar(opin)
        bvalue = scalar.byteArrayValue()
        if not currentProgram.getLanguage().isBigEndian():
            bvalue.reverse()
        for value in bvalue:
            if value < 0x20 or value > 0x7e:
                string += '\\x%02x' % value
            else:
                string += chr(value)
        cmd = SetEquateCmd('"%s"' % string, addr, opin, scalar.getValue())
        state.getTool().execute(cmd, currentProgram)


run()
from scipy.stats import multivariate_normal
from scipy.signal import convolve2d

import matplotlib
try:
    matplotlib.pyplot.figure()
    matplotlib.pyplot.close()
except Exception:
    matplotlib.use('Agg')
import matplotlib.pyplot as plt

import numpy as np
import os

# the colormap should assign light colors to low values
TERRAIN_CMAP = 'Greens'
DEFAULT_PATH = '/tmp/mujoco_terrains'
STEP = 0.1


def generate_hills(width, height, nhills):
    '''
    @param width float, terrain width
    @param height float, terrain height
    @param nhills int, #hills to gen. #hills actually generated is sqrt(nhills)^2
    '''
    # setup coordinate grid
    xmin, xmax = -width / 2.0, width / 2.0
    ymin, ymax = -height / 2.0, height / 2.0
    x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
    pos = np.empty(x.shape + (2,))
    pos[:, :, 0] = x
    pos[:, :, 1] = y

    # generate hilltops
    xm, ym = np.mgrid[xmin:xmax:width / np.sqrt(nhills), ymin:ymax:height / np.sqrt(nhills)]
    mu = np.c_[xm.flat, ym.flat]
    sigma = float(width * height) / (nhills * 8)
    for i in range(mu.shape[0]):
        mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)

    # generate hills
    sigma = sigma + sigma * np.random.rand(mu.shape[0])
    rvs = [multivariate_normal(mu[i, :], cov=sigma[i]) for i in range(mu.shape[0])]
    hfield = np.max([rv.pdf(pos) for rv in rvs], axis=0)
    return x, y, hfield


def clear_patch(hfield, box):
    '''
    Clears a patch shaped like box, assuming robot is placed in center of hfield
    @param box: rllab.spaces.Box-like
    '''
    if box.flat_dim > 2:
        raise ValueError("Provide 2dim box")

    # clear patch
    h_center = int(0.5 * hfield.shape[0])
    w_center = int(0.5 * hfield.shape[1])
    fromrow, torow = w_center + int(box.low[0] / STEP), w_center + int(box.high[0] / STEP)
    fromcol, tocol = h_center + int(box.low[1] / STEP), h_center + int(box.high[1] / STEP)
    hfield[fromrow:torow, fromcol:tocol] = 0.0

    # convolve to smoothen edges somewhat, in case hills were cut off
    K = np.ones((10, 10)) / 100.0
    s = convolve2d(hfield[fromrow - 9:torow + 9, fromcol - 9:tocol + 9], K,
                   mode='same', boundary='symm')
    hfield[fromrow - 9:torow + 9, fromcol - 9:tocol + 9] = s
    return hfield


def _checkpath(path_):
    if path_ is None:
        path_ = DEFAULT_PATH
    if not os.path.exists(path_):
        os.makedirs(path_)
    return path_


def save_heightfield(x, y, hfield, fname, path=None):
    '''
    @param path, str (optional). If not provided, DEFAULT_PATH is used.
        Make sure the path + fname match the <file> attribute of the <asset>
        element in the env XML where the height field is defined.
    '''
    path = _checkpath(path)
    plt.figure()
    # terrain_cmap is necessary to make sure tops get light color
    plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
    plt.savefig(os.path.join(path, fname), bbox_inches='tight')
    plt.close()


def save_texture(x, y, hfield, fname, path=None):
    '''
    @param path, str (optional). If not provided, DEFAULT_PATH is used.
        Make sure this matches the <texturedir> of the <compiler> element
        in the env XML.
    '''
    path = _checkpath(path)
    plt.figure()
    plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
    xmin, xmax = x.min(), x.max()
    ymin, ymax = y.min(), y.max()
    # for some reason plt.grid does not work here, so generate gridlines manually
    for i in np.arange(xmin, xmax, 0.5):
        plt.plot([i, i], [ymin, ymax], 'k', linewidth=0.1)
    for i in np.arange(ymin, ymax, 0.5):
        plt.plot([xmin, xmax], [i, i], 'k', linewidth=0.1)
    plt.savefig(os.path.join(path, fname), bbox_inches='tight')
    plt.close()
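# --- usage sketch (added) ---
# Minimal sketch tying the helpers above together: generate a hill field and
# write both the height field image and the texture. The dimensions and file
# names are illustrative only.
if __name__ == '__main__':
    x, y, hfield = generate_hills(width=40.0, height=40.0, nhills=16)
    save_heightfield(x, y, hfield, 'hills.png')        # referenced by the <asset> element
    save_texture(x, y, hfield, 'hills_texture.png')    # referenced by <texturedir>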
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0

import frida
import sys
import os

vid_index = 0
aud_index = 0


def on_message(message, data):
    global vid_index
    global aud_index
    print(message)


session = frida.attach("avconferenced")
code = open('replay.js', 'r').read()
script = session.create_script(code)
script.on("message", on_message)
script.load()

print("Press Ctrl-C to quit")
sys.stdin.read()
import unittest
import td.enums as td_enums

from unittest import TestCase
from configparser import ConfigParser

from td.orders import Order
from td.orders import OrderLeg
from td.client import TDClient
from td.stream import TDStreamerClient


class TDSession(TestCase):
    """Will perform a unit test for the TD session."""

    def setUp(self) -> None:
        """Set up the Robot."""

        # Grab configuration values.
        config = ConfigParser()
        config.read('config/config.ini')

        CLIENT_ID = config.get('main', 'CLIENT_ID')
        REDIRECT_URI = config.get('main', 'REDIRECT_URI')
        JSON_PATH = config.get('main', 'JSON_PATH')
        ACCOUNT_NUMBER = config.get('main', 'ACCOUNT_NUMBER')

        # Initialize the session.
        self.td_session = TDClient(
            client_id=CLIENT_ID,
            redirect_uri=REDIRECT_URI,
            credentials_path=JSON_PATH,
            account_number=ACCOUNT_NUMBER
        )

        self.td_order = Order()
        self.td_order_leg = OrderLeg()

    def test_creates_instance_of_session(self):
        """Create an instance and make sure it's a robot."""

        self.assertIsInstance(self.td_session, TDClient)
        self.assertIsInstance(self.td_order, Order)
        self.assertIsInstance(self.td_order_leg, OrderLeg)

    def test_define_simple_order(self):
        """Test creating a simple order."""

        # Add the Order session.
        self.td_order.order_session(session=td_enums.ORDER_SESSION.NORMAL)

        # Add the Order duration.
        self.td_order.order_duration(duration=td_enums.DURATION.GOOD_TILL_CANCEL)

        # Add the Order Leg Instruction.
        self.td_order_leg.order_leg_instruction(instruction=td_enums.ORDER_INSTRUCTIONS.SELL)

        # Add the Order Leg price.
        self.td_order_leg.order_leg_price(price=112.50)

        # Add the Order Leg quantity.
        self.td_order_leg.order_leg_quantity(quantity=10)

        # Add the Order Leg Asset.
        self.td_order_leg.order_leg_asset(asset_type=td_enums.ORDER_ASSET_TYPE.EQUITY, symbol='MSFT')

        # Add the Order Leg.
        self.td_order.add_order_leg(order_leg=self.td_order_leg)

        correct_dict = {
            "session": "NORMAL",
            "duration": "GOOD_TILL_CANCEL",
            "orderLegCollection": [
                {
                    "instruction": "SELL",
                    "price": 112.5,
                    "quantity": 10,
                    "instrument": {
                        "assetType": "EQUITY",
                        "symbol": "MSFT"
                    }
                }
            ]
        }

        self.assertDictEqual(correct_dict, self.td_order._grab_order())

    def tearDown(self):
        """Clean Up."""
        self.td_session = None
        self.td_order = None
        self.td_order_leg = None


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from polyaxon.polyboard.logging.handler import PolyaxonHandler

EXCLUDE_DEFAULT_LOGGERS = ("polyaxon.client", "polyaxon.cli")


def setup_logging(add_logs, exclude=EXCLUDE_DEFAULT_LOGGERS):
    plx_logger = logging.getLogger()
    plx_logger.setLevel(logging.INFO)
    if logging.StreamHandler not in map(type, plx_logger.handlers):
        plx_logger.addHandler(logging.StreamHandler())
        plx_logger.propagate = False

    if PolyaxonHandler in map(type, plx_logger.handlers):
        for handler in plx_logger.handlers:
            if isinstance(handler, PolyaxonHandler):
                handler.set_add_logs(add_logs=add_logs)
    else:
        handler = PolyaxonHandler(add_logs=add_logs)
        plx_logger.addHandler(handler)

    for logger_name in exclude:
        plx_logger = logging.getLogger(logger_name)
        if logging.StreamHandler not in map(type, plx_logger.handlers):
            plx_logger.addHandler(logging.StreamHandler())
            plx_logger.propagate = False
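# --- usage sketch (added) ---
# Sketch of wiring the handler into a run: `add_logs` is whatever callable the
# tracking client exposes for shipping log lines, so both the callable and the
# `client` object below are assumptions for illustration.
# setup_logging(add_logs=lambda logs: client.log(logs))
# logging.getLogger(__name__).info("forwarded to Polyaxon and echoed to stdout")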
import sys
import sqlite3

import msgpack

from scan import *
from mark import *

if __name__ == '__main__':
    conn = sqlite3.connect('fin.db')
    try:
        conn.execute('CREATE TABLE flowfin (label text primary key,len int,fin blob,hash text);')
        conn.execute('CREATE INDEX index_flowfin_len ON flowfin (len);')
        conn.execute('CREATE INDEX index_flowfin_hash ON flowfin (hash);')
    except sqlite3.OperationalError:
        pass

    if 'gendb' in sys.argv:
        gen_db(conn)
    else:
        filepath = sys.argv[-1]
        exe = EXE(filepath, filepath)
        mark_list = []
        call_loc = set()

        start_pc = exe.elf.header['e_entry']
        call_loc = exe.ScanBlock(exe.GetSection(start_pc))

        main_pc = None
        cur = conn.cursor()
        cur.execute('SELECT * FROM flowfin WHERE label=?;', ('libc-start.o # __libc_start_main',))
        finent = cur.fetchone()
        if finent is not None:
            finb = msgpack.unpackb(finent[2])
            for pos, loc in call_loc:
                fina = exe.FuncFin(loc, set())
                if CmpFin(fina, finb) == 0:
                    ins, _ = Disasm(pos[0], pos[1] - 7)
                    main_pc = ins.operands[1].value.imm
                    break

        if main_pc is not None:
            mark_list.append((exe.GetSection(main_pc), 'main'))
            call_loc.update(exe.ScanBlock(exe.GetSection(main_pc)))

        for pos, loc in call_loc:
            fina = exe.FuncFin(loc, set())
            find_name = None
            for row in conn.execute('SELECT * FROM flowfin WHERE len<=?;', (len(fina),)):
                finb = msgpack.unpackb(row[2])
                dis = CmpFin(fina, finb)
                if dis == 0:
                    find_name = row[0]
                    break
            if find_name is None:
                find_name = '<unknown>'
            else:
                mark_list.append((loc, find_name.split(' # ')[1]))
            print('%016lx - %s' % (loc[0].base + loc[1], find_name))

        mark(exe, mark_list)
"""Defines JobFinder."""<import_from_stmt>datetime datetime timedelta<import_from_stmt>.job HEARTBEAT_VALID_MINUTES HEARTBEATLESS_JOB_VALID_HOURS Job State<class_stmt>JobFinder<block_start>""" Query builder for the `Job` model for a certain `elt_uri`. """<def_stmt>__init__ self job_id:str<block_start>self.job_id=job_id<block_end><def_stmt>latest self session<block_start><return>(session.query(Job).filter(Job.job_id<eq>self.job_id).order_by(Job.started_at.desc()).first())<block_end><def_stmt>successful self session<block_start><return>session.query(Job).filter((Job.job_id<eq>self.job_id)&(Job.state<eq>State.SUCCESS)&Job.ended_at.isnot(<none>))<block_end><def_stmt>running self session<block_start>"""Find jobs in the running state."""<line_sep><return>session.query(Job).filter((Job.job_id<eq>self.job_id)&(Job.state<eq>State.RUNNING))<block_end><def_stmt>latest_success self session<block_start><return>self.successful(session).order_by(Job.ended_at.desc()).first()<block_end><def_stmt>latest_running self session<block_start>"""Find the most recent job in the running state, if any."""<line_sep><return>self.running(session).order_by(Job.started_at.desc()).first()<block_end><def_stmt>with_payload self session flags=0 since=<none><block_start>query=(session.query(Job).filter((Job.job_id<eq>self.job_id)&(Job.payload_flags<ne>0)&(Job.payload_flags.op("&")(flags)<eq>flags)&Job.ended_at.isnot(<none>)).order_by(Job.ended_at.asc()))<if_stmt>since<block_start>query=query.filter(Job.ended_at<g>since)<block_end><return>query<block_end><def_stmt>latest_with_payload self session **kwargs<block_start><return>(self.with_payload(session **kwargs).order_by(<none>)# Reset ascending order .order_by(Job.ended_at.desc()).first())<block_end>@classmethod<def_stmt>all_stale cls session<block_start>"""Return all stale jobs."""<line_sep>now=datetime.utcnow()<line_sep>last_valid_heartbeat_at=now-timedelta(minutes=HEARTBEAT_VALID_MINUTES)<line_sep>last_valid_started_at=now-timedelta(hours=HEARTBEATLESS_JOB_VALID_HOURS)<line_sep><return>session.query(Job).filter((Job.state<eq>State.RUNNING)&((Job.last_heartbeat_at.isnot(<none>)&(Job.last_heartbeat_at<l>last_valid_heartbeat_at))|(Job.last_heartbeat_at.is_(<none>)&(Job.started_at<l>last_valid_started_at))))<block_end><def_stmt>stale self session<block_start>"""Return stale jobs with the instance's job ID."""<line_sep><return>self.all_stale(session).filter(Job.job_id<eq>self.job_id)<block_end><block_end>
from django.contrib import admin

# Register your models here.
from .models import history, toolsscript

admin.site.register(history)
admin.site.register(toolsscript)
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest

from azure.search.documents.models import IndexAction
from azure.search.documents import IndexDocumentsBatch

METHOD_NAMES = [
    "add_upload_actions",
    "add_delete_actions",
    "add_merge_actions",
    "add_merge_or_upload_actions",
]

METHOD_MAP = dict(zip(METHOD_NAMES, ["upload", "delete", "merge", "mergeOrUpload"]))


class TestIndexDocumentsBatch(object):
    def test_init(self):
        batch = IndexDocumentsBatch()
        assert batch.actions == []

    def test_repr(self):
        batch = IndexDocumentsBatch()
        assert repr(batch) == "<IndexDocumentsBatch [0 actions]>"

        batch._actions = [1, 2, 3]
        assert repr(batch) == "<IndexDocumentsBatch [3 actions]>"

        # a strict length test here would require constructing an actions
        # list with a length of ~10**24, so settle for this simple sanity
        # check on an extreme case.
        batch._actions = list(range(2000))
        assert len(repr(batch)) <= 1024

    def test_actions_returns_list_copy(self):
        batch = IndexDocumentsBatch()
        batch.actions.extend([1, 2, 3])
        assert type(batch.actions) is list
        assert batch.actions == []
        assert batch.actions is not batch._actions

    @pytest.mark.parametrize("method_name", METHOD_NAMES)
    def test_add_method(self, method_name):
        batch = IndexDocumentsBatch()

        method = getattr(batch, method_name)
        method("doc1")
        assert len(batch.actions) == 1

        method("doc2", "doc3")
        assert len(batch.actions) == 3

        method(["doc4", "doc5"])
        assert len(batch.actions) == 5

        method(("doc6", "doc7"))
        assert len(batch.actions) == 7

        assert all(action.action_type == METHOD_MAP[method_name] for action in batch.actions)
        assert all(type(action) == IndexAction for action in batch.actions)

        expected = ["doc{}".format(i) for i in range(1, 8)]
        assert [action.additional_properties for action in batch.actions] == expected
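# --- usage sketch (added) ---
# Sketch of the batch API exercised by the tests above, mirroring the add_*
# calls with document dicts instead of plain strings; the document shapes and
# the SearchClient call site are assumptions for illustration.
# batch = IndexDocumentsBatch()
# batch.add_upload_actions({"id": "1", "name": "hotel"})
# batch.add_merge_or_upload_actions([{"id": "2"}, {"id": "3"}])
# search_client.index_documents(batch)  # assumed SearchClient usage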
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: v2/report/status.proto

import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='v2/report/status.proto',
  package='',
  serialized_pb=_b('\n\x16v2/report/status.proto\"i\n\x06status\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05\x12%\n\nattributes\x18\x02 \x03(\x0b\x32\x11.status.attribute\x1a\'\n\tattribute\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x02(\t'))
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


_STATUS_ATTRIBUTE = _descriptor.Descriptor(
  name='attribute',
  full_name='status.attribute',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='status.attribute.key', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='status.attribute.value', index=1,
      number=2, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[],
  nested_types=[],
  enum_types=[],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[],
  serialized_start=92,
  serialized_end=131,
)

_STATUS = _descriptor.Descriptor(
  name='status',
  full_name='status',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='scan_id', full_name='status.scan_id', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='attributes', full_name='status.attributes', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[],
  nested_types=[_STATUS_ATTRIBUTE, ],
  enum_types=[],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[],
  serialized_start=26,
  serialized_end=131,
)

_STATUS_ATTRIBUTE.containing_type = _STATUS
_STATUS.fields_by_name['attributes'].message_type = _STATUS_ATTRIBUTE
DESCRIPTOR.message_types_by_name['status'] = _STATUS

status = _reflection.GeneratedProtocolMessageType('status', (_message.Message,), dict(
  attribute=_reflection.GeneratedProtocolMessageType('attribute', (_message.Message,), dict(
    DESCRIPTOR=_STATUS_ATTRIBUTE,
    __module__='v2.report.status_pb2'
    # @@protoc_insertion_point(class_scope:status.attribute)
    )),
  DESCRIPTOR=_STATUS,
  __module__='v2.report.status_pb2'
  # @@protoc_insertion_point(class_scope:status)
  ))
_sym_db.RegisterMessage(status)
_sym_db.RegisterMessage(status.attribute)

# @@protoc_insertion_point(module_scope)
from ..expr import BV, BVArray, Bool, ITE


class MemoryObj(object):
    def __init__(self, name, bits=64, bvarray=None):
        self.bvarray = BVArray("MEMOBJ_" + name, bits, 8) if bvarray is None else bvarray
        self.name = name
        self.bits = bits

    def __str__(self):
        return "<MemoryObj{bits} {name}>".format(bits=self.bits, name=self.name)

    def __repr__(self):
        return self.__str__()

    def load(self, index: BV):
        return self.bvarray.Select(index)

    def store(self, index: BV, value: BV, condition: Bool = None):
        if condition is None:
            self.bvarray.Store(index, value)
        else:
            # this can be inefficient
            self.bvarray.ConditionalStore(index, value, condition)

    def copy(self):
        return MemoryObj(self.name, self.bits, self.bvarray.copy())

    def merge(self, other, merge_condition: Bool):
        self.bvarray = self.bvarray.merge(other.bvarray, merge_condition)
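# --- usage sketch (added) ---
# Sketch of a load/store round trip. BV construction follows this package's
# expr module, whose exact constructors are assumed here, so the sketch is
# left commented out.
# mem = MemoryObj("stack")
# mem.store(BV(64, 0x1000), BV(8, 0x41))   # write one symbolic byte
# byte = mem.load(BV(64, 0x1000))          # 8-bit read back from the array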
<import_stmt>pytest<import_stmt>time<import_stmt>numpy<as>np<import_from_stmt>spotify_confidence.analysis.frequentist.confidence_computers.z_test_computer sequential_bounds<line_sep>@pytest.mark.skip(reason="Skipping because this test is very slow")<def_stmt>test_many_days <block_start>""" This input (based on a real experiment) is very long, which can cause slow calculation """<line_sep>t=[0.0016169976338740648 0.0057857955498163615 0.012200379088315757 0.020199591701142824 0.02956441064038571 0.04047102718841871 0.052929825413405296 0.06580092295219643 0.07878439818310792 0.09148496950057272 0.1028893343050959 0.1128434997940756 0.12298934256730025 0.13280979910049193 0.14267997977787195 0.15281963941289514 0.16293176212095561 0.17198778455162406 0.17996747917082068 0.18786110540725684 0.1955669737257397 0.20335013690301407 0.21277055903588274 0.22148328777708232 0.2295912740670489 0.23640586948077766 0.2431234831038822 0.24987292468428604 0.2568336065927525 0.2649271880853427 0.27282722271091664 0.2799894816822785 0.2862801096305317 0.2925685639072496 0.2988294699944579 0.3051314956400879 0.3118994077972684 0.31887303037202536 0.32523581745772245 0.3307398353487736 0.33616198578702633 0.34151324975562525 0.3478405485563082 0.3546238566149848 0.36130761502236336 0.36751189302418574 0.3730571543616735 0.37865278180851814 0.38428987795273567 0.3900127609160433 0.3964718089893684 0.40306122104207753 0.40914555292031984 0.41449831480764515 0.4198849769608837 0.4256404199470336 0.4315384355133149 0.43801594290086987 0.4444516211895538 0.45034373518130405 0.4556807858158224 0.4610488197166289 0.46633036852044285 0.4717294082126311 0.47769497653470894 0.48369759863580825 0.4892945325380834 0.49431792124380325 0.49935417177798586 0.5043009639028166 0.5093262559789482 0.5149098888134348 0.5205835093969735 0.5261172491490695 0.5310141031413226 0.5359027242118537 0.540068909216935 0.5451620919252675 0.5506752550043325 0.5562355968920056 0.5614758121490083 0.5660462437469214 0.5706616804819072 0.5750453002157994 0.5795939049979849 0.5861802311128667 0.5913273051077091 0.5958976691303413 0.6001503392324151 0.6042404457337608 0.6082963816680697 0.6124734913435614 0.6174918231657613 0.6223867287374153 0.6268875352709179 0.6308341907134806 0.6348490070893678 0.6388763812049537 0.6430405276890614 0.6476616520101889 0.6525750168960728 0.6570689758011117 0.6610427627189518 0.6649727383296814 0.6689671694958335 0.673019050913289 0.6776959248411508 0.6825336054124376 0.6869984168463193 0.6908780826604262 0.6949984065748767 0.6991746490342636 0.7033415661048878 0.7082721626873987 0.7131064081819068 0.7176506656210218 0.7216193168175142 0.7256178250256133 0.7296113326629264 0.733677461202103 0.7383860054116087 0.7431864069529378 0.7475115177561259 0.7513220765829758 0.7551652404828552 0.7591154774153049 0.7635879699061145 0.76888963361854 0.7740750002725536 0.7788235152607059 0.7829338267710377 0.7870690059847372 0.7912444713283939 0.7954864645360872 0.8002680350991415 0.8051864906561857 0.8097254772233912 0.8137210008565843 0.8175460095309978 0.8214444612731922 0.8256005212486867 0.8302889054993935 0.8351108860804202 0.839542135124793 0.8433705788759852 0.8472835029908369 0.8513248314019267 0.8556693700983707 0.8606610209471658 0.865499591259651 0.8699232042972833 0.8737653545679493 0.8776996212090155 0.8816179062961511 0.8856027192473231 0.8900849425785808 0.8947120585746139 0.8993599427069738 0.9035026227768521 0.9075820073336299 0.9115699850604569 0.9158137239629064 0.9207252417911126 0.925749689176233 
0.9303560370359392 0.9343408161994707 0.9384800274049299 0.9426168396879175 0.9475247422385961 0.9523909621035122 0.9573336433987555 0.9618665256655873 0.9657568345864344 0.9697355995499667 0.973736889607129 0.9778353641807583 0.9828378833872299 0.987703190985854 0.9921586319807856 0.9960384779956415 1.0 ]<line_sep>start_time=time.time()<line_sep>results=sequential_bounds(np.array(t) alpha=0.003333333 sides=2)<line_sep>my_bounds=results.bounds<line_sep>expected=np.array([5.75400023 8.0 5.14701605 4.91478643 4.80691346 4.69004328 4.57921075 4.49683943 4.44452939 4.38899083 4.35683792 4.33289847 4.301461 4.27383028 4.24513591 4.21444005 4.18809224 4.17037988 4.15702106 4.13796352 4.12345883 4.10808648 4.07898394 4.06169498 4.04985422 4.04453139 4.03288177 4.02205301 4.00664024 3.98770613 3.97358123 3.96589571 3.95946059 3.94995533 3.94128534 3.93114789 3.91870273 3.90749163 3.90064315 3.8958719 3.88847126 3.88184277 3.86841705 3.85642932 3.84721152 3.84099201 3.83689676 3.8295672 3.82234648 3.81501541 3.80286989 3.79370807 3.78728177 3.78449351 3.77865864 3.76988501 3.76230126 3.75251025 3.74474277 3.73953663 3.73534961 3.72974059 3.72466752 3.71785112 3.70903202 3.70176221 3.6976847 3.6944938 3.68996741 3.68449851 3.67888767 3.67142884 3.66522708 3.65968721 3.65649679 3.65207508 3.65156885 3.643952 3.63644572 3.63029181 3.62665696 3.62527741 3.62117738 3.61789837 3.6128686 3.59904477 3.5976517 3.59678297 3.59434356 3.59116304 3.58814574 3.5835558 3.57659985 3.5726481 3.56990393 3.56879169 3.56501955 3.56127173 3.55720436 3.55194666 3.54597713 3.5436994 3.54287161 3.53974477 3.53649679 3.53314876 3.52700997 3.52175088 3.51873367 3.51846468 3.51401711 3.5106822 3.50742162 3.50113309 3.49658758 3.49376264 3.49238249 3.48979047 3.48725107 3.48341163 3.47810608 3.47381485 3.47184685 3.47110719 3.46801712 3.46472076 3.45913659 3.45209404 3.4484684 3.44587153 3.44472549 3.44242755 3.43895355 3.43549018 3.43080058 3.42621252 3.42437516 3.42371762 3.42122891 3.41861765 3.41451447 3.40936002 3.4051931 3.40307035 3.40295986 3.40052495 3.39688763 3.39279348 3.38725208 3.38421998 3.38214471 3.38133324 3.37908335 3.37689107 3.37364203 3.36937673 3.36593888 3.36250238 3.36109704 3.35878324 3.35666501 3.35305866 3.34754255 3.34364255 3.34157534 3.34085629 3.33864193 3.33563376 3.33016843 3.32687574 3.32338656 3.32166421 3.32107266 3.31861916 3.31615129 3.31334059 3.30792367 3.30479742 3.30339238 3.30296421 3.30041534 ])<assert_stmt>np.allclose(my_bounds expected)<line_sep># if the calculation with max_nints takes longer than 10 seconds, something is most likely broken <assert_stmt>(time.time()-start_time)<l>15<line_sep># Run a second time but with initial state from last run. start_time=time.time()<line_sep>results=sequential_bounds(np.array(t) alpha=0.003333333 sides=2 state=results.state)<line_sep>my_bounds=results.bounds<assert_stmt>np.allclose(my_bounds expected)<line_sep># if the calculation with max_nints takes longer than 10 seconds, something is most likely broken print(f"Time passed second round: {time.time()-start_time}")<assert_stmt>(time.time()-start_time)<l>0.01<block_end>@pytest.mark.skip(reason="Skipping because this test is very slow")<def_stmt>test_many_days_fast_and_no_crash <block_start>""" This is based on experiment 1735 on 26.11.2020. The calculation of the corresponding bounds takes many minutes without performance tweak. Therefore, this test only checks for absence of crashs and time constraints, but does not compare against the baseline without performance tweak. 
There is a Jupyter notebook making that comparison. """<line_sep>t=[0.011404679673257933 0.02292450819418779 0.0356455988484443 0.04835740420885424 0.05971666577058213 0.06976017458481187 0.07984165086754545 0.09002459314412276 0.10026356929804565 0.11129746744100509 0.1222487922920801 0.13250332796555583 0.1418309168157694 0.15072692856918676 0.15940425274581055 0.16819162796171988 0.17766544268380677 0.18725283769713902 0.19600162922594835 0.20386600701959812 0.21159934032678884 0.21916233120704773 0.22688560894714668 0.23509036348536208 0.24366994698965522 0.2515994198750076 0.25875219123481424 0.2659624389836802 0.2731790169781248 0.28051081384508175 0.28822790138928306 0.2962915558739476 0.3037246366701631 0.31063411372423433 0.31767205835063517 0.32464032826076655 0.3318100596369355 0.3397812253123048 0.3476375502493003 0.3550356746451523 0.3616457394863339 0.3683042335071859 0.375005792804928 0.38175551518794676 0.3891222824602354 0.39652683513644266 0.40347332732118724 0.4098512458112366 0.4163205187081655 0.42263992444151655 0.42899148558161226 0.43464157988476515 0.43858871208254674 0.44192382717460427 0.44482627278235426 0.4474605932759375 0.44957511937869815 0.4509048070694502 0.45222422911858906 0.45333747002744257 0.45426598540713137 0.4551955091445229 0.45605329943533507 0.456895460181754 0.4578387508027823 0.45881449093488524 0.45965707183034693 0.4603621239391219 0.4610501740166303 0.46173166976907054 0.4624475477181825 0.4632872155802805 0.4641010162663083 0.46481571779810027 0.4654194019478082 0.4660207332628762 0.4666458170038323 0.4672646265190821 0.46791675385342846 0.4685898046101078 0.46918687841487516 0.46969451649339183 0.47019581032136176 0.4706811945055765 0.47116992587716583 0.47170379526092326 0.47227291514937425 0.4727852448922026 0.47322669549150526 0.4736554715946826 0.47408022827201673 0.47450655350577753 0.4749737592414058 0.47545756086422586 0.4759381553493523 0.47630259262910407 0.4766609657576709 0.47699441004302984 0.4773518028238301 0.477775327063972 0.4781977729215707 0.47856485714029223 0.47888037506649034 0.47919262983512245 0.47949520717080135 0.47980748994936967 0.4801789017032324 0.4805627078538587 0.48090167009664675 0.4811904245288165 0.48149113920373887 0.4817901452725537 0.4820966860142033 0.48243977972257923 0.4827841618880198 0.48309197708176604 0.4833586316742829 0.4836129058750043 0.4838654994795544 0.4841171547512422 0.48439948090305657 0.48470691796266424 0.4849764575786085 0.4852081697757299 0.48545255646897667 0.4856974893559792 0.48595208567096676 0.48624575584693763 0.4865416528128355 0.4867930840050338 0.4870117575768593 0.4872274340855126 0.4874240218226533 0.4876215198827202 0.4878617751103791 0.488108108494191 0.48831807097586183 0.4884937072807334 0.48866595438332605 0.488852192449045 0.48903411698459087 0.4892522303576926 0.4894829201921431 0.4896802221826566 0.4898457609055321 0.49001188783706756 0.4901847091433521 0.4903469286887892 0.4905345812562857 0.49073597269748276 0.49091467609036693 0.4910691508884479 0.4912115954189357 0.49135658885361677 0.49150574176382184 0.49167835299558493 0.49186735004001847 0.49203167033066975 0.49216849886895175 0.4923075682021289 0.4924506289512129 0.49259525825672346 0.49276396210238826 0.49294465420074185 0.4931019580023778 0.49330306934421303 0.4935200763248353 0.49373208353184794 0.4939721566949216 0.4942334053697541 0.4944958444668745 0.4947262121870588 0.49492469059489225 0.4951192336066912 0.495294323717807 0.4954780829041733 0.4956838158854796 0.49592192835302007 
0.49614550366367866 0.49633301618149417 0.49652995404283723 0.4967104500716375 0.4969174855149766 0.49712443692850716 0.4973541744251272 0.49756258235533957 0.49772464784612763 0.4978989396740621 0.4980669292663541 0.4982378038820735 0.49843929335804726 0.4986487236509305 0.49883442952786183 0.49899118713574214 0.49915640374435144 0.49932506557511197 ]<line_sep>alpha=0.0033333333333333335<line_sep>sides=2<line_sep>start_time=time.time()<line_sep>my_bounds=sequential_bounds(np.array(t) alpha=alpha sides=sides).bounds<line_sep>expected=np.array([5.0536015 4.819334 4.70702194 4.60970036 4.55329219 4.5118919 4.465161 4.42168832 4.37932413 4.33343066 4.29780246 4.26550766 4.2476601 4.22343408 4.20455427 4.1834642 4.15580542 4.13352266 4.1170148 4.10326736 4.08845795 4.07496919 4.05959646 4.0417501 4.02262887 4.01056674 4.00192679 3.98996708 3.97709149 3.96442225 3.95010566 3.93456306 3.92603865 3.91801377 3.90630556 3.8975012 3.88641115 3.87143326 3.85966246 3.85112482 3.84569926 3.83714224 3.82719647 3.81910741 3.80682977 3.79652758 3.78889289 3.78428912 3.77646938 3.76966463 3.76150223 3.75820905 3.76088934 3.76171382 3.76141619 3.76079216 3.76237742 3.76725034 3.76769877 3.7690107 3.7710916 3.77168583 3.76813708 3.7705804 3.76669411 3.76711572 3.76808636 3.76962133 3.76680748 3.76844159 3.76552364 3.76210975 3.76321355 3.76471956 3.76227721 3.76424368 3.76172169 3.75923 3.76099518 3.75829319 3.76028082 3.75824824 3.7562443 3.76013739 3.75818674 3.7560594 3.75379557 3.75757852 3.75582548 3.75412511 3.75244297 3.75075688 3.74891172 3.75280489 3.75090966 3.7494744 3.74806463 3.75254602 3.75114099 3.74947802 3.74782149 3.74638383 3.75092969 3.74970739 3.7485241 3.74730404 3.74585452 3.74435839 3.74303855 3.74191532 3.74074663 3.73958567 3.74415751 3.74282592 3.74149075 3.74029857 3.73926672 3.73828357 3.73730769 3.7363362 3.7352472 3.73406243 3.74020438 3.7393112 3.73836986 3.73742713 3.73644796 3.73531947 3.73418345 3.73321896 3.73238074 3.73155456 3.73080198 3.73004637 3.7291278 3.72818669 3.7273851 3.72671496 3.72605809 3.72534827 3.72465527 3.72382494 3.72294733 3.73077145 3.73014101 3.72950865 3.72885115 3.7282343 3.72752112 3.72675617 3.7260778 3.7254917 3.72495149 3.72440186 3.72383671 3.723183 3.72246763 3.72184599 3.7213286 3.72080295 3.72026245 3.71971626 3.71907946 3.71839777 3.71780463 3.71704671 3.7162294 3.71543144 3.71452847 3.72065881 3.71967136 3.71880523 3.71805949 3.71732896 3.71667185 3.71598258 3.71521135 3.71431933 3.71348235 3.71278081 3.71204444 3.71136994 3.7105967 3.70982427 3.70896735 3.71527887 3.71467395 3.71402372 3.71339733 3.71276051 3.71201001 3.71123041 3.71053954 3.70995666 3.70934263 3.70871611 ])<assert_stmt>np.allclose(my_bounds expected)<line_sep># if the calculation with max_nints takes longer than 30 seconds, something is most likely broken <assert_stmt>(time.time()-start_time)<l>30<block_end>
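# Usage sketch distilled from the two tests above (not part of the library's documented examples):
# sequential_bounds takes an increasing array of information fractions in (0, 1], a two-sided alpha,
# and optionally the state returned by a previous call, which makes recomputation nearly free.
<def_stmt>example_sequential_bounds <block_start>t=np.array([0.25 0.5 0.75 1.0])<line_sep>first=sequential_bounds(t alpha=0.05 sides=2)<line_sep>print(first.bounds)# one z-bound per analysis time <line_sep>second=sequential_bounds(t alpha=0.05 sides=2 state=first.state)# cheap re-run <assert_stmt>np.allclose(first.bounds second.bounds)<block_end>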
# Copyright (c) 2007, <NAME>. All rights reserved. See LICENSING for details. # @implements RFC4566 (SDP) <import_stmt>socket time<class_stmt>attrs(object)<block_start>'''A generic class that allows uniform access to attributes and items, and returns None for an invalid attribute instead of raising an exception.'''<def_stmt>__init__ self **kwargs<block_start><for_stmt>n,v kwargs.items()<block_start>self[n]=v<block_end><block_end># attribute access: use container if not found <def_stmt>__getattr__ self name<block_start><return>self.__getitem__(name)<block_end># container access: use key in __dict__ <def_stmt>__getitem__ self name<block_start><return>self.__dict__.get(name <none>)<block_end><def_stmt>__setitem__ self name value<block_start>self.__dict__[name]=value<block_end><def_stmt>__contains__ self name<block_start><return>name<in>self.__dict__<block_end>#def __repr__(self): return repr(self.__dict__) <block_end># @implements RFC4566 P3L3-P3L21 <class_stmt>SDP(attrs)<block_start>'''An SDP packet with dynamic properties. The header names can be accessed as attributes or items. Accessing an unavailable header gives None instead of an exception. '''<line_sep># header names that can appear multiple times. _multiple='tramb'<def_stmt>__init__ self value=<none><block_start><if_stmt>value<block_start>self._parse(value)<block_end><block_end># @implements RFC4566 P11L1-P12L10 <class_stmt>originator(attrs)<block_start>'''Represents an o= line with attributes username (str), sessionid (long), version (long), nettype (str), addrtype (str), address (str).'''<def_stmt>__init__ self value=<none><block_start><if_stmt>value<block_start>self.username,self.sessionid,self.version,self.nettype,self.addrtype,self.address=value.split(' ')<line_sep>self.sessionid=int(self.sessionid)<line_sep>self.version=int(self.version)<block_end><else_stmt><block_start>hostname=socket.gethostname()<line_sep>self.username,self.sessionid,self.version,self.nettype,self.addrtype,self.address='-' int(time.time()) int(time.time()) 'IN' 'IP4' (hostname.find('.')<g>0<and>hostname<or>socket.gethostbyname(hostname))<block_end><block_end><def_stmt>__repr__ self<block_start><return>' '.join(map(<lambda>x:str(x) [self.username self.sessionid self.version self.nettype self.addrtype self.address]))<block_end><block_end># @implements RFC4566 P14L7-P16L9 <class_stmt>connection(attrs)<block_start>'''Represents a c= line with attributes nettype (str), addrtype (str), address (str) and optionally ttl (int) and count (int).'''<def_stmt>__init__ self value=<none> **kwargs<block_start><if_stmt>value<block_start>self.nettype,self.addrtype,rest=value.split(' ')<line_sep>rest=rest.split('/')<if_stmt>len(rest)<eq>1<block_start>self.address=rest[0]<block_end><elif_stmt>len(rest)<eq>2<block_start>self.address,self.ttl=rest[0] int(rest[1])<block_end><else_stmt><block_start>self.address,self.ttl,self.count=rest[0] int(rest[1]) int(rest[2])<block_end><block_end><elif_stmt>'address'<in>kwargs<block_start>self.address=kwargs.get('address')<line_sep>self.nettype=kwargs.get('nettype' 'IN')<line_sep>self.addrtype=kwargs.get('addrtype' 'IP4')<if_stmt>'ttl'<in>kwargs<block_start>self.ttl=int(kwargs.get('ttl'))<block_end><if_stmt>'count'<in>kwargs<block_start>self.count=int(kwargs.get('count'))<block_end><block_end><block_end><def_stmt>__repr__ self<block_start><return>self.nettype+' '+self.addrtype+' '+self.address+('/'+str(self.ttl)<if>self.ttl<else>'')+('/'+str(self.count)<if>self.count<else>'')<block_end><block_end># @implements RFC4566 P22L17-P24L33
<class_stmt>media(attrs)<block_start>'''Represents an m= line and all subsequent lines until next m= or end. It has attributes such as media (str), port (int), proto (str), fmt (list).'''<def_stmt>__init__ self value=<none> **kwargs<block_start><if_stmt>value<block_start>self.media,self.port,self.proto,rest=value.split(' ' 3)<line_sep>self.port=int(self.port)<line_sep>self.fmt=[]<for_stmt>f rest.split(' ')<block_start>a=attrs()<try_stmt><block_start>a.pt=int(f)# if payload type is numeric <block_end><except_stmt>ValueError<block_start>a.pt=f# otherwise keep it as a string (e.g. a codec name) <block_end>self.fmt.append(a)<block_end><block_end><elif_stmt>'media'<in>kwargs<block_start>self.media=kwargs.get('media')<line_sep>self.port=int(kwargs.get('port' 0))<line_sep>self.proto=kwargs.get('proto' 'RTP/AVP')<line_sep>self.fmt=kwargs.get('fmt' [])<block_end><block_end><def_stmt>__repr__ self<block_start>result=self.media+' '+str(self.port)+' '+self.proto+' '+' '.join(map(<lambda>x:str(x.pt) self.fmt))<for_stmt>k filter(<lambda>x:x<in>self 'icbka')# order is important <block_start><if_stmt>k<not><in>SDP._multiple# single header <block_start>result<augadd>'\r\n'+k+'='+str(self[k])<block_end><else_stmt><block_start><for_stmt>v self[k]<block_start>result<augadd>'\r\n'+k+'='+str(v)<block_end><block_end><block_end><for_stmt>f self.fmt<block_start><if_stmt>f.name<block_start>result<augadd>'\r\n'+'a=rtpmap:'+str(f.pt)+' '+f.name+'/'+str(f.rate)+(f.params<and>('/'+f.params)<or>'')<block_end><block_end><return>result<block_end><def_stmt>dup self# use this method instead of SDP.media(str(m)) to duplicate m. Otherwise, fmt will be incomplete <block_start>result=SDP.media(media=self.media port=self.port proto=self.proto fmt=map(<lambda>f:attrs(pt=f.pt name=f.name rate=f.rate params=f.params) self.fmt))<for_stmt>k filter(<lambda>x:x<in>self 'icbka')<block_start>result[k]=self[k][:]<if>isinstance(self[k] list)<else>self[k]<block_end><return>result<block_end><block_end># @implements RFC4566 P8L17-P10L5 <def_stmt>_parse self text<block_start># obj below tracks whether we are in the global section or in the current per-media section
<for_stmt>line text.replace('\r\n' '\n').split('\n')<block_start>k,sep,v=line.partition('=')<if_stmt>k<eq>'o'<block_start>v=SDP.originator(v)<block_end><elif_stmt>k<eq>'c'<block_start>v=SDP.connection(v)<block_end><elif_stmt>k<eq>'m'<block_start>v=SDP.media(v)<block_end><if_stmt>k<eq>'m'# new m= line <block_start><if_stmt><not>self['m']<block_start>self['m']=[]<block_end>self['m'].append(v)<line_sep>obj=self['m'][-1]<block_end><elif_stmt>self['m']# not in global <block_start>obj=self['m'][-1]<line_sep># @implements RFC4566 P25L41-P27L7 <if_stmt>k<eq>'a'<and>v.startswith('rtpmap:')<block_start>pt,rest=v[7:].split(' ' 1)<line_sep>name,sep,rest=rest.partition('/')<line_sep>rate,sep,params=rest.partition('/')<for_stmt>f filter(<lambda>x:str(x.pt)<eq>str(pt) obj.fmt)<block_start>f.name=name<line_sep>f.rate=int(rate)<line_sep>f.params=params<or><none><block_end><block_end><else_stmt><block_start>obj[k]=(k<in>SDP._multiple<and>((k<in>obj)<and>(obj[k]+[v])<or>[v]))<or>v<block_end><block_end><else_stmt># global <block_start>obj=self<line_sep>obj[k]=((k<in>SDP._multiple)<and>((k<in>obj)<and>(obj[k]+[v])<or>[v]))<or>v<block_end><block_end><block_end><def_stmt>__repr__ self<block_start>result=''<for_stmt>k filter(<lambda>x:x<in>self 'vosiuepcbtam')# order is important <block_start><if_stmt>k<not><in>SDP._multiple# single header <block_start>result<augadd>k+'='+str(self[k])+'\r\n'<block_end><else_stmt><block_start><for_stmt>v self[k]<block_start>result<augadd>k+'='+str(v)+'\r\n'<block_end><block_end><block_end><return>result<block_end><block_end>#--------------------------- Testing -------------------------------------- # @implements RFC4566 P10L7-P10L21 <def_stmt>testSDP <block_start>s='''v=0\r o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\r s=SDP Seminar\r i=A Seminar on the session description protocol\r u=http://www.example.com/seminars/sdp.pdf\r e=<EMAIL> (<NAME>)\r c=IN IP4 172.16.31.10/127\r t=2873397496 2873404696\r a=recvonly\r m=audio 49170 RTP/AVP 0\r m=video 51372 RTP/AVP 99\r a=rtpmap:99 h263-1998/90000\r '''<line_sep>sdp=SDP(s)<assert_stmt>str(sdp)<eq>s<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>doctest<line_sep>doctest.testmod()<line_sep>testSDP()<block_end>
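# A hedged sketch of building an SDP programmatically with the keyword-argument constructors,
# which testSDP above does not exercise; the field values are illustrative and this helper is
# not wired into the main guard above.
<def_stmt>testBuildSDP <block_start>s=SDP()<line_sep>s['v']=0<line_sep>s['o']=SDP.originator()# defaults to this host and the current time <line_sep>s['s']='-'<line_sep>s['c']=SDP.connection(address='192.0.2.1')<line_sep>s['m']=[SDP.media(media='audio' port=49170 fmt=[attrs(pt=0)])]<line_sep>print(s)<block_end>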
<import_from_stmt>Crypto Random<import_from_stmt>Crypto.PublicKey RSA<import_stmt>base64<def_stmt>generate_keys modulus_length=256<times>4<block_start>privatekey=RSA.generate(modulus_length Random.new().read)# 1024 bits by default; 2048+ bits is the usual minimum today <line_sep>publickey=privatekey.publickey()<line_sep><return>privatekey publickey<block_end># NOTE: key.encrypt(msg, 32) / key.decrypt(ct) is the legacy PyCrypto textbook-RSA API; the second argument is ignored for RSA, and PyCryptodome removed this API in favour of padded ciphers such as PKCS1_OAEP (sketched below) <def_stmt>encryptit message publickey<block_start>encrypted_msg=publickey.encrypt(message 32)[0]<line_sep>encoded_encrypted_msg=base64.b64encode(encrypted_msg)<line_sep><return>encoded_encrypted_msg<block_end><def_stmt>decryptit message privatekey<block_start>decoded_encrypted_msg=base64.b64decode(message)<line_sep>decoded_decrypted_msg=privatekey.decrypt(decoded_encrypted_msg)<line_sep><return>decoded_decrypted_msg<block_end><if_stmt>__name__<eq>'__main__'<block_start>message="This is an awesome message!"<line_sep>privatekey,publickey=generate_keys()<line_sep>encrypted_msg=encryptit(message.encode("utf-8") publickey)<line_sep>decrypted_msg=decryptit(encrypted_msg privatekey)<line_sep>print(f'{privatekey.exportKey()} - ({len(privatekey.exportKey())})')<line_sep>print(f'{publickey.exportKey()} - ({len(publickey.exportKey())})')<line_sep>print(f'Original: {message} - ({len(message)})')<line_sep>print(f'Encrypted: {encrypted_msg} - ({len(encrypted_msg)})')<line_sep>print(f'Decrypted: {decrypted_msg} - ({len(decrypted_msg)})')<block_end>
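# A hedged alternative sketch using PKCS1_OAEP, the padded cipher API available in both
# PyCrypto and PyCryptodome; function names here are new and only illustrative.
<import_from_stmt>Crypto.Cipher PKCS1_OAEP<def_stmt>encryptit_oaep message publickey<block_start>cipher=PKCS1_OAEP.new(publickey)# OAEP adds randomized padding before the RSA operation <line_sep><return>base64.b64encode(cipher.encrypt(message))<block_end><def_stmt>decryptit_oaep message privatekey<block_start>cipher=PKCS1_OAEP.new(privatekey)<line_sep><return>cipher.decrypt(base64.b64decode(message))<block_end>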
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """This module contains the Google Cloud Storage to Trino operator."""<import_stmt>csv<import_stmt>json<import_from_stmt>tempfile NamedTemporaryFile<import_from_stmt>typing TYPE_CHECKING Iterable Optional Sequence Union<import_from_stmt>airflow.models BaseOperator<import_from_stmt>airflow.providers.google.cloud.hooks.gcs GCSHook<import_from_stmt>airflow.providers.trino.hooks.trino TrinoHook<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>airflow.utils.context Context<block_end><class_stmt>GCSToTrinoOperator(BaseOperator)<block_start>""" Loads a csv file from Google Cloud Storage into a Trino table. Assumptions: 1. CSV file should not have headers 2. Trino table with requisite columns is already created 3. Optionally, a separate JSON file with headers can be provided :param source_bucket: Source GCS bucket that contains the csv :param source_object: csv file including the path :param trino_table: trino table to upload the data :param trino_conn_id: destination trino connection :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud and interact with the Google Cloud Storage service. :param schema_fields: The names of the columns to fill in the table. If schema_fields is provided, any path provided in the schema object will be ignored. :param schema_object: JSON file with schema fields :param delegate_to: The account to impersonate using domain-wide delegation of authority, if any. For this to work, the service account making the request must have domain-wide delegation enabled. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account.
"""<line_sep>template_fields:Sequence[str]=('source_bucket' 'source_object' 'trino_table' )<def_stmt>__init__ self * source_bucket:str source_object:str trino_table:str trino_conn_id:str="trino_default" gcp_conn_id:str="google_cloud_default" schema_fields:Optional[Iterable[str]]=<none> schema_object:Optional[str]=<none> delegate_to:Optional[str]=<none> impersonation_chain:Optional[Union[str Sequence[str]]]=<none> **kwargs <arrow><none><block_start>super().__init__(**kwargs)<line_sep>self.source_bucket=source_bucket<line_sep>self.source_object=source_object<line_sep>self.trino_table=trino_table<line_sep>self.trino_conn_id=trino_conn_id<line_sep>self.gcp_conn_id=gcp_conn_id<line_sep>self.schema_fields=schema_fields<line_sep>self.schema_object=schema_object<line_sep>self.delegate_to=delegate_to<line_sep>self.impersonation_chain=impersonation_chain<block_end><def_stmt>execute self context:'Context'<arrow><none><block_start>gcs_hook=GCSHook(gcp_conn_id=self.gcp_conn_id delegate_to=self.delegate_to impersonation_chain=self.impersonation_chain )<line_sep>trino_hook=TrinoHook(trino_conn_id=self.trino_conn_id)<with_stmt>NamedTemporaryFile("w+")<as>temp_file<block_start>self.log.info("Downloading data from %s" self.source_object)<line_sep>gcs_hook.download(bucket_name=self.source_bucket object_name=self.source_object filename=temp_file.name )<line_sep>data=csv.reader(temp_file)<line_sep>rows=(tuple(row)<for>row data)<line_sep>self.log.info("Inserting data into %s" self.trino_table)<if_stmt>self.schema_fields<block_start>trino_hook.insert_rows(table=self.trino_table rows=rows target_fields=self.schema_fields)<block_end><elif_stmt>self.schema_object<block_start>blob=gcs_hook.download(bucket_name=self.source_bucket object_name=self.schema_object )<line_sep>schema_fields=json.loads(blob.decode("utf-8"))<line_sep>trino_hook.insert_rows(table=self.trino_table rows=rows target_fields=schema_fields)<block_end><else_stmt><block_start>trino_hook.insert_rows(table=self.trino_table rows=rows)<block_end><block_end><block_end><block_end>
"""Estimator which operates on a pair of images to compute relative pose and verified indices. Authors: <NAME>, <NAME> """<import_stmt>logging<import_from_stmt>typing Dict Optional Tuple<import_stmt>dask<import_stmt>numpy<as>np<import_from_stmt>dask.delayed Delayed<import_from_stmt>gtsam Cal3Bundler Pose3 Rot3 Unit3<import_stmt>gtsfm.utils.geometry_comparisons<as>comp_utils<import_stmt>gtsfm.utils.logger<as>logger_utils<import_stmt>gtsfm.utils.metrics<as>metric_utils<import_from_stmt>gtsfm.common.keypoints Keypoints<import_from_stmt>gtsfm.common.two_view_estimation_report TwoViewEstimationReport<import_from_stmt>gtsfm.frontend.inlier_support_processor InlierSupportProcessor<import_from_stmt>gtsfm.frontend.matcher.matcher_base MatcherBase<import_from_stmt>gtsfm.frontend.verifier.verifier_base VerifierBase<import_from_stmt>gtsfm.evaluation.metrics GtsfmMetric GtsfmMetricsGroup<line_sep>logger=logger_utils.get_logger()<line_sep>mpl_logger=logging.getLogger("matplotlib")<line_sep>mpl_logger.setLevel(logging.WARNING)<line_sep>pil_logger=logging.getLogger("PIL")<line_sep>pil_logger.setLevel(logging.INFO)<line_sep>EPSILON=1e-6<class_stmt>TwoViewEstimator<block_start>"""Wrapper for running two-view relative pose estimation on image pairs in the dataset."""<def_stmt>__init__ self matcher:MatcherBase verifier:VerifierBase inlier_support_processor:InlierSupportProcessor eval_threshold_px:float <arrow><none><block_start>"""Initializes the two-view estimator from matcher and verifier. Args: matcher: matcher to use. verifier: verifier to use. inlier_support_processor: post-processor that uses information about RANSAC support to filter out pairs. eval_threshold_px: distance threshold for marking a correspondence pair as inlier during evaluation (not during estimation). """<line_sep>self._matcher=matcher<line_sep>self._verifier=verifier<line_sep>self.processor=inlier_support_processor<line_sep>self._corr_metric_dist_threshold=eval_threshold_px<block_end><def_stmt>get_corr_metric_dist_threshold self<arrow>float<block_start>"""Getter for the distance threshold used in the metric for correct correspondences."""<line_sep><return>self._corr_metric_dist_threshold<block_end><def_stmt>create_computation_graph self keypoints_i1_graph:Delayed keypoints_i2_graph:Delayed descriptors_i1_graph:Delayed descriptors_i2_graph:Delayed camera_intrinsics_i1_graph:Delayed camera_intrinsics_i2_graph:Delayed im_shape_i1_graph:Delayed im_shape_i2_graph:Delayed i2Ti1_expected_graph:Optional[Delayed]=<none> <arrow>Tuple[Delayed Delayed Delayed Optional[Delayed] Optional[Delayed] Optional[Delayed]]<block_start>"""Create delayed tasks for matching and verification. Args: keypoints_i1_graph: keypoints for image i1. keypoints_i2_graph: keypoints for image i2. descriptors_i1_graph: corr. descriptors for image i1. descriptors_i2_graph: corr. descriptors for image i2. camera_intrinsics_i1_graph: intrinsics for camera i1. camera_intrinsics_i2_graph: intrinsics for camera i2. im_shape_i1_graph: image shape for image i1. im_shape_i2_graph: image shape for image i2. i2Ti1_expected_graph (optional): ground truth relative pose, used for evaluation if available. Defaults to None. Returns: Computed relative rotation wrapped as Delayed. Computed relative translation direction wrapped as Delayed. Indices of verified correspondences wrapped as Delayed. Two view report w/ verifier metrics wrapped as Delayed. Two view report w/ post-processor metrics wrapped as Delayed. 
"""<line_sep># graph for matching to obtain putative correspondences corr_idxs_graph=self._matcher.create_computation_graph(keypoints_i1_graph keypoints_i2_graph descriptors_i1_graph descriptors_i2_graph im_shape_i1_graph im_shape_i2_graph )<line_sep># verification on putative correspondences to obtain relative pose # and verified correspondences # TODO: name this verified_correspondence_idxs (add note: everything here is delayed) (i2Ri1_graph i2Ui1_graph v_corr_idxs_graph inlier_ratio_est_model)=self._verifier.create_computation_graph(keypoints_i1_graph keypoints_i2_graph corr_idxs_graph camera_intrinsics_i1_graph camera_intrinsics_i2_graph )<line_sep># if we have the expected GT data, evaluate the computed relative pose <if_stmt>i2Ti1_expected_graph<is><not><none><block_start>R_error_deg,U_error_deg=dask.delayed(compute_relative_pose_metrics nout=2)(i2Ri1_graph i2Ui1_graph i2Ti1_expected_graph)<line_sep>num_inliers_gt_model,inlier_ratio_gt_model,v_corr_idxs_inlier_mask_gt=dask.delayed(compute_correspondence_metrics nout=3)(keypoints_i1_graph keypoints_i2_graph v_corr_idxs_graph camera_intrinsics_i1_graph camera_intrinsics_i2_graph i2Ti1_expected_graph self._corr_metric_dist_threshold )<block_end><else_stmt><block_start>R_error_deg,U_error_deg=<none> <none><line_sep>num_inliers_gt_model,inlier_ratio_gt_model=<none> <none><line_sep>v_corr_idxs_inlier_mask_gt=<none><block_end>two_view_report_graph=dask.delayed(generate_two_view_report)(inlier_ratio_est_model R_error_deg U_error_deg num_inliers_gt_model inlier_ratio_gt_model v_corr_idxs_inlier_mask_gt v_corr_idxs_graph )<line_sep># Note: We name the output as _pp, as it represents a post-processed quantity. (i2Ri1_pp_graph i2Ui1_pp_graph v_corr_idxs_pp_graph two_view_report_pp_graph )=self.processor.create_computation_graph(i2Ri1_graph i2Ui1_graph v_corr_idxs_graph two_view_report_graph)<line_sep># We provide both, as we will create reports for both. <return>(i2Ri1_pp_graph i2Ui1_pp_graph v_corr_idxs_pp_graph two_view_report_graph two_view_report_pp_graph)<block_end><block_end><def_stmt>generate_two_view_report inlier_ratio_est_model:float R_error_deg:float U_error_deg:float num_inliers_gt_model:int inlier_ratio_gt_model:float v_corr_idxs_inlier_mask_gt:np.ndarray v_corr_idxs:np.ndarray <arrow>TwoViewEstimationReport<block_start>"""Wrapper around class constructor for Dask."""<line_sep>two_view_report=TwoViewEstimationReport(inlier_ratio_est_model=inlier_ratio_est_model num_inliers_est_model=v_corr_idxs.shape[0] num_inliers_gt_model=num_inliers_gt_model inlier_ratio_gt_model=inlier_ratio_gt_model v_corr_idxs_inlier_mask_gt=v_corr_idxs_inlier_mask_gt v_corr_idxs=v_corr_idxs R_error_deg=R_error_deg U_error_deg=U_error_deg )<line_sep><return>two_view_report<block_end><def_stmt>compute_correspondence_metrics keypoints_i1:Keypoints keypoints_i2:Keypoints corr_idxs_i1i2:np.ndarray intrinsics_i1:Cal3Bundler intrinsics_i2:Cal3Bundler i2Ti1:Pose3 epipolar_distance_threshold:float <arrow>Tuple[int float Optional[np.ndarray]]<block_start>"""Compute the metrics for the generated verified correspondence. Args: keypoints_i1: detected keypoints in image i1. keypoints_i2: detected keypoints in image i2. corr_idxs_i1i2: indices of correspondences. intrinsics_i1: intrinsics for i1. intrinsics_i2: intrinsics for i2. i2Ti1: relative pose. epipolar_distance_threshold: max epipolar distance to qualify as a correct match. Returns: Number of inlier correspondences to ground truth epipolar geometry, i.e. #correct correspondences. Inlier Ratio, i.e. 
ratio of correspondences which are correct w.r.t. the given relative pose. Mask of which verified correspondences are classified as correct under Sampson error (using GT epipolar geometry). """<if_stmt>corr_idxs_i1i2.size<eq>0<block_start><return>0 float("Nan") <none><block_end>v_corr_idxs_inlier_mask_gt=metric_utils.count_correct_correspondences(keypoints_i1.extract_indices(corr_idxs_i1i2[: 0]) keypoints_i2.extract_indices(corr_idxs_i1i2[: 1]) intrinsics_i1 intrinsics_i2 i2Ti1 epipolar_distance_threshold )<line_sep>num_inliers_gt_model=np.count_nonzero(v_corr_idxs_inlier_mask_gt)<line_sep>inlier_ratio_gt_model=num_inliers_gt_model/corr_idxs_i1i2.shape[0]<line_sep><return>num_inliers_gt_model inlier_ratio_gt_model v_corr_idxs_inlier_mask_gt<block_end><def_stmt>compute_relative_pose_metrics i2Ri1_computed:Optional[Rot3] i2Ui1_computed:Optional[Unit3] i2Ti1_expected:Pose3<arrow>Tuple[Optional[float] Optional[float]]<block_start>"""Compute the metrics on relative camera pose. Args: i2Ri1_computed: computed relative rotation. i2Ui1_computed: computed relative translation direction. i2Ti1_expected: expected relative pose. Returns: Rotation error, in degrees. Unit translation error, in degrees. """<line_sep>R_error_deg=comp_utils.compute_relative_rotation_angle(i2Ri1_computed i2Ti1_expected.rotation())<line_sep>U_error_deg=comp_utils.compute_relative_unit_translation_angle(i2Ui1_computed Unit3(i2Ti1_expected.translation()))<line_sep><return>(R_error_deg U_error_deg)<block_end><def_stmt>aggregate_frontend_metrics two_view_reports_dict:Dict[Tuple[int int] Optional[TwoViewEstimationReport]] angular_err_threshold_deg:float metric_group_name:str <arrow>GtsfmMetricsGroup<block_start>"""Aggregate the front-end metrics to log summary statistics. We define "pose error" as the maximum of the angular errors in rotation and translation, per: SuperGlue, CVPR 2020 (https://arxiv.org/pdf/1911.11763.pdf); Learning to Find Good Correspondences, CVPR 2018; OA-Net, ICCV 2019; and NG-RANSAC, ICCV 2019. Args: two_view_reports_dict: reports containing front-end metrics for each image pair. angular_err_threshold_deg: threshold for classifying angular error metrics as success. metric_group_name: name we will assign to the GtsfmMetricsGroup returned by this fn. Returns: GtsfmMetricsGroup with the aggregated front-end metrics. """<line_sep>num_image_pairs=len(two_view_reports_dict.keys())<line_sep># all rotational errors in degrees rot3_angular_errors=[]<line_sep>trans_angular_errors=[]<line_sep>inlier_ratio_gt_model_all_pairs=[]<line_sep>inlier_ratio_est_model_all_pairs=[]<line_sep>num_inliers_gt_model_all_pairs=[]<line_sep>num_inliers_est_model_all_pairs=[]<line_sep># populate the distributions <for_stmt>report two_view_reports_dict.values()<block_start><if_stmt>report<is><none><block_start><continue><block_end>rot3_angular_errors.append(report.R_error_deg)<line_sep>trans_angular_errors.append(report.U_error_deg)<line_sep>inlier_ratio_gt_model_all_pairs.append(report.inlier_ratio_gt_model)<line_sep>inlier_ratio_est_model_all_pairs.append(report.inlier_ratio_est_model)<line_sep>num_inliers_gt_model_all_pairs.append(report.num_inliers_gt_model)<line_sep>num_inliers_est_model_all_pairs.append(report.num_inliers_est_model)<block_end>rot3_angular_errors=np.array(rot3_angular_errors dtype=float)<line_sep>trans_angular_errors=np.array(trans_angular_errors dtype=float)<line_sep># count number of rot3 errors which are not None; this count should be the same for rot3 and unit3
num_valid_image_pairs=np.count_nonzero(~np.isnan(rot3_angular_errors))<line_sep># compute pose errors by picking the max error from rot3 and unit3 errors pose_errors=np.maximum(rot3_angular_errors trans_angular_errors)<line_sep># check errors against the threshold success_count_rot3=np.sum(rot3_angular_errors<l>angular_err_threshold_deg)<line_sep>success_count_unit3=np.sum(trans_angular_errors<l>angular_err_threshold_deg)<line_sep>success_count_pose=np.sum(pose_errors<l>angular_err_threshold_deg)<line_sep># count image pair entries where inlier ratio w.r.t. GT model == 1. all_correct=np.count_nonzero([report.inlier_ratio_gt_model<eq>1.0<for>report two_view_reports_dict.values()<if>report<is><not><none>])<line_sep>logger.debug("[Two view optimizer] [Summary] Rotation success: %d/%d/%d" success_count_rot3 num_valid_image_pairs num_image_pairs )<line_sep>logger.debug("[Two view optimizer] [Summary] Translation success: %d/%d/%d" success_count_unit3 num_valid_image_pairs num_image_pairs )<line_sep>logger.debug("[Two view optimizer] [Summary] Pose success: %d/%d/%d" success_count_pose num_valid_image_pairs num_image_pairs )<line_sep>logger.debug("[Two view optimizer] [Summary] # Image pairs with 100%% inlier ratio: %d/%d" all_correct num_image_pairs)<line_sep># TODO(akshay-krishnan): Move angular_err_threshold_deg and num_total_image_pairs to metadata. frontend_metrics=GtsfmMetricsGroup(metric_group_name [GtsfmMetric("angular_err_threshold_deg" angular_err_threshold_deg) GtsfmMetric("num_total_image_pairs" int(num_image_pairs)) GtsfmMetric("num_valid_image_pairs" int(num_valid_image_pairs)) GtsfmMetric("rotation_success_count" int(success_count_rot3)) GtsfmMetric("translation_success_count" int(success_count_unit3)) GtsfmMetric("pose_success_count" int(success_count_pose)) GtsfmMetric("num_all_inlier_correspondences_wrt_gt_model" int(all_correct)) GtsfmMetric("rot3_angular_errors_deg" rot3_angular_errors) GtsfmMetric("trans_angular_errors_deg" trans_angular_errors) GtsfmMetric("pose_errors_deg" pose_errors) GtsfmMetric("inlier_ratio_wrt_gt_model" inlier_ratio_gt_model_all_pairs) GtsfmMetric("inlier_ratio_wrt_est_model" inlier_ratio_est_model_all_pairs) GtsfmMetric("num_inliers_est_model" num_inliers_est_model_all_pairs) GtsfmMetric("num_inliers_gt_model" num_inliers_gt_model_all_pairs) ] )<line_sep><return>frontend_metrics<block_end>
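# Hedged mini-example (toy numbers) of the pose-error convention documented above: the pose
# error of a pair is the elementwise max of its rotation and translation angular errors, and
# NaN entries (pairs without ground truth) never count as successes.
<def_stmt>_example_pose_errors <block_start>rot=np.array([1.0 8.0 np.nan])<line_sep>trans=np.array([3.0 2.0 np.nan])<line_sep>pose=np.maximum(rot trans)# -> [3.0, 8.0, nan] <line_sep>print(np.sum(pose<l>5.0))# 1: only the first pair beats a 5-degree threshold <block_end>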
<import_stmt>unittest<import_from_stmt>filters.mixins FiltersMixin# FiltersMixin is unused below; presumably imported as a smoke test that the package imports cleanly <class_stmt>MyTest(unittest.TestCase)<block_start><def_stmt>test self<block_start>self.assertEqual(4 4)# placeholder assertion <block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# Copyright 2017, Ansible by Red Hat # <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>click<import_from_stmt>click.formatting join_options<import_from_stmt>tower_cli.conf SETTINGS_PARMS<class_stmt>ActionSubcommand(click.Command)<block_start>"""A Command subclass that adds support for the concept that invocation without arguments assumes `--help`. This code is adapted by taking code from click.MultiCommand and placing it here, to get just the --help functionality and nothing else. """<def_stmt>__init__ self name=<none> no_args_is_help=<true> **kwargs<block_start>self.no_args_is_help=no_args_is_help<line_sep>super(ActionSubcommand self).__init__(name=name **kwargs)<block_end><def_stmt>parse_args self ctx args<block_start>"""Parse arguments sent to this command. The code for this method is taken from MultiCommand: https://github.com/mitsuhiko/click/blob/master/click/core.py It is Copyright (c) 2014 by <NAME>. See the license: https://github.com/mitsuhiko/click/blob/master/LICENSE """<if_stmt><not>args<and>self.no_args_is_help<and><not>ctx.resilient_parsing<block_start>click.echo(ctx.get_help())<line_sep>ctx.exit()<block_end><return>super(ActionSubcommand self).parse_args(ctx args)<block_end><def_stmt>format_options self ctx formatter<block_start>"""Monkey-patch click's format_options method to support option categorization. """<line_sep>field_opts=[]<line_sep>global_opts=[]<line_sep>local_opts=[]<line_sep>other_opts=[]<for_stmt>param self.params<block_start><if_stmt>param.name<in>SETTINGS_PARMS<block_start>opts=global_opts<block_end><elif_stmt>getattr(param 'help' <none>)<and>param.help.startswith('[FIELD]')<block_start>opts=field_opts<line_sep>param.help=param.help[len('[FIELD]'):]<block_end><else_stmt><block_start>opts=local_opts<block_end>rv=param.get_help_record(ctx)<if_stmt>rv<is><none><block_start><continue><block_end><else_stmt><block_start>opts.append(rv)<block_end><block_end><if_stmt>self.add_help_option<block_start>help_options=self.get_help_option_names(ctx)<if_stmt>help_options<block_start>other_opts.append([join_options(help_options)[0] 'Show this message and exit.'])<block_end><block_end><if_stmt>field_opts<block_start><with_stmt>formatter.section('Field Options')<block_start>formatter.write_dl(field_opts)<block_end><block_end><if_stmt>local_opts<block_start><with_stmt>formatter.section('Local Options')<block_start>formatter.write_dl(local_opts)<block_end><block_end><if_stmt>global_opts<block_start><with_stmt>formatter.section('Global Options')<block_start>formatter.write_dl(global_opts)<block_end><block_end><if_stmt>other_opts<block_start><with_stmt>formatter.section('Other Options')<block_start>formatter.write_dl(other_opts)<block_end><block_end><block_end><block_end>
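# A hedged usage sketch (the command and option names are illustrative, not from tower_cli):
# pass the class via cls= so that a bare invocation prints help, and prefix an option's help
# text with [FIELD] to route it into the "Field Options" section.
<import_stmt>click<line_sep>@click.command(cls=ActionSubcommand)@click.option('--name' help='[FIELD]Name of the resource.')<def_stmt>show name<block_start>click.echo(name)<block_end>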
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # <import_stmt>os<import_stmt>subprocess<import_stmt>sys<import_stmt>uuid<import_from_stmt>pathlib Path PosixPath<import_from_stmt>subprocess Popen<import_from_stmt>.evosuite_test_runners EvosuiteTestRunner TestRuntimeError CompilationError InvalidTest clean_firejail FIREJAIL_PROFILE <import_from_stmt>...model.src.utils TREE_SITTER_ROOT limit_virtual_memory MAX_VIRTUAL_MEMORY <import_from_stmt>...preprocessing.lang_processors.lang_processor LangProcessor<line_sep>sys.path.append(str(Path(__file__).parents[3]))<line_sep>print("adding to path" str(Path(__file__).parents[3]))<line_sep>python_processor=LangProcessor.processors["python"](root_folder=TREE_SITTER_ROOT)<class_stmt>PythonTestRunner(EvosuiteTestRunner)<block_start><def_stmt>__init__ self tmp_folder=Path(Path.home().joinpath("data/CodeGen/automatic_tests/tmp_tests_folder/python")) timeout=15 <block_start>super().__init__(tmp_folder=tmp_folder timeout=timeout)<block_end><def_stmt>_run_tests self function:str test:str tmp_path:PosixPath classname:str=<none> scaffolding:str=<none> <block_start><assert_stmt>(scaffolding<is><none>) f"Scaffolding should be None for python tests, was {scaffolding}"<if_stmt>"#TOFILL"<not><in>test<block_start><raise>InvalidTest("Missing #TOFILL")<block_end><try_stmt><block_start>f_name=python_processor.get_function_name(function)<block_end><except_stmt>(ValueError IndexError)<block_start><raise>CompilationError("No function definition")<block_end>function=python_processor.detokenize_code(function.replace(f" {f_name.strip()} " " f_filled "))<line_sep>filled_test=test.replace("#TOFILL" function)<line_sep>test_path=self.write_test(filled_test classname tmp_path)<assert_stmt>test_path.is_file()<line_sep>test_cmd=f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; firejail --profile={FIREJAIL_PROFILE} python {test_path}"<line_sep>test_proc=Popen(test_cmd stdout=subprocess.PIPE stderr=subprocess.PIPE shell=<true> executable="/bin/bash" preexec_fn=os.setsid )<line_sep><return>test_proc tmp_path<block_end><def_stmt>_eval_proc_state self out err<block_start>stderr=err.decode("utf-8" errors="replace")<line_sep>stderr=clean_firejail(stderr)<line_sep>res_line=stderr.splitlines()<if_stmt>len(res_line)<le>2<or><not>(res_line[-1].startswith("OK")<or>res_line[-1].startswith("FAILED"))<block_start><raise>TestRuntimeError(stderr)<block_end><assert_stmt>res_line[-3].startswith("Ran ")<line_sep>number_of_tests=int(res_line[-3].replace("Ran " "").split(" ")[0])<line_sep>res_line=res_line[-1]<if_stmt>res_line.startswith("OK")<block_start><return>"success" number_of_tests 0<block_end><else_stmt><block_start><assert_stmt>res_line.startswith("FAILED (errors=")<or>res_line.startswith("FAILED (failures=")<line_sep>number_failures=int(res_line.split("=")[-1].replace(")" ""))<line_sep><return>"failure" number_of_tests number_failures<block_end><block_end>@staticmethod<def_stmt>write_test test classname out_folder<block_start><if_stmt>classname<is><none><block_start>classname="a"<block_end>test_path=out_folder.joinpath(f"python_test_{classname}.py")<with_stmt>open(test_path "w" encoding="utf-8")<as>o<block_start>o.write(test)<block_end><return>test_path<block_end><block_end>
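# Hedged illustration (toy input) of the renaming step inside _run_tests: whatever the
# candidate function is called, its tokenized source is rewritten to use the fixed name
# f_filled that the unit-test template refers to.
<def_stmt>_example_rename <block_start>tokenized="def add_one ( x ) : return x + 1"<line_sep>print(tokenized.replace(" add_one " " f_filled "))# def f_filled ( x ) : return x + 1 <block_end>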
<import_from_stmt>django.shortcuts render_to_response redirect<import_from_stmt>django.template RequestContext<import_from_stmt>django.template.loader render_to_string<import_from_stmt>django.core.mail send_mail mail_managers EmailMessage<import_from_stmt>django.contrib.auth.decorators login_required<import_from_stmt>django.contrib messages<import_from_stmt>OpenDataCatalog.contest.models *<import_from_stmt>datetime datetime<def_stmt>get_entries request contest_id=1<block_start>contest=Contest.objects.get(pk=contest_id)<line_sep>entries=Entry.objects.filter(contest=contest is_visible=<true>)<if_stmt>'sort'<not><in>request.GET<block_start>entries=entries.order_by('-vote_count')<block_end><return>render_to_response('contest/entries.html' {'contest':contest 'entries':entries} context_instance=RequestContext(request))<block_end><def_stmt>get_entries_table request contest_id=1<block_start>contest=Contest.objects.get(pk=contest_id)<line_sep>entries=Entry.objects.filter(contest=contest)<if_stmt>'sort'<not><in>request.GET<block_start>entries=entries.order_by('-vote_count')<block_end><return>render_to_response('contest/entry_table.html' {'contest':contest 'entries':entries} context_instance=RequestContext(request))<block_end><def_stmt>get_winners request contest_id=1<block_start>contest=Contest.objects.get(pk=contest_id)<line_sep>entries=Entry.objects.filter(contest=contest is_visible=<true>).order_by('-vote_count')<line_sep><return>render_to_response('contest/winners.html' {'contest':contest 'entries':entries} context_instance=RequestContext(request))<block_end><def_stmt>get_rules request contest_id=1<block_start>contest=Contest.objects.get(pk=contest_id)<line_sep><return>render_to_response('contest/rules.html' {'contest':contest} context_instance=RequestContext(request))<block_end><def_stmt>get_entry request entry_id<block_start>entry=Entry.objects.get(pk=entry_id)<line_sep><return>render_to_response('contest/entry.html' {'contest':entry.contest 'entry':entry} context_instance=RequestContext(request))<block_end>#@login_required <def_stmt>add_entry request contest_id=1<block_start>contest=Contest.objects.get(pk=contest_id)<if_stmt>request.method<eq>'POST'<block_start>form=EntryForm(request.POST)<line_sep>form.contest=contest_id<if_stmt>form.is_valid()<block_start>data={#"submitter": request.user.username, "submit_date":datetime.now() "org_name":form.cleaned_data.get("org_name") "org_url":form.cleaned_data.get("org_url") "contact_person":form.cleaned_data.get("contact_person") "contact_phone":form.cleaned_data.get("contact_phone") "contact_email":form.cleaned_data.get("contact_email") "data_set":form.cleaned_data.get("data_set") "data_use":form.cleaned_data.get("data_use") "data_mission":form.cleaned_data.get("data_mission")}<line_sep>subject='OpenDataPhilly - Contest Submission'<line_sep>user_email=form.cleaned_data.get("contact_email")<line_sep>text_content=render_to_string('contest/submit_email.txt' data)<line_sep>text_content_copy=render_to_string('contest/submit_email_copy.txt' data)<line_sep>mail_managers(subject text_content)<line_sep>msg=EmailMessage(subject text_content_copy to=[user_email])<line_sep>msg.send()<line_sep><return>render_to_response('contest/thanks.html' {'contest':contest} context_instance=RequestContext(request))<block_end><block_end><else_stmt><block_start>form=EntryForm()<block_end><return>render_to_response('contest/submit_entry.html' {'contest':contest 'form':form} context_instance=RequestContext(request))<block_end>@login_required<def_stmt>add_vote
request entry_id<block_start>entry=Entry.objects.get(pk=entry_id)<line_sep>contest=entry.contest<line_sep>user=User.objects.get(username=request.user)<if_stmt>contest.user_can_vote(user)<block_start>new_vote=Vote(user=user entry=entry)<line_sep>new_vote.save()<line_sep>entry.vote_count=entry.vote_set.count()<line_sep>entry.save()<line_sep>next_vote_date=contest.get_next_vote_date(user)<if_stmt>next_vote_date<g>contest.end_date<block_start>messages.success(request '<div style="font-weight:bold;">Your vote has been recorded.</div>Thank you for your vote! You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')<block_end><else_stmt><block_start>messages.success(request '<div style="font-weight:bold;">Your vote has been recorded.</div>You may vote once per week, so come back and visit us again on '+next_vote_date.strftime('%A, %b %d %Y, %I:%M%p')+'. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')<block_end><block_end><else_stmt><block_start>next_vote_date=contest.get_next_vote_date(user)<if_stmt>next_vote_date<g>contest.end_date<block_start>messages.error(request '<div style="font-weight:bold;">You have already voted.</div>You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')<block_end><else_stmt><block_start>messages.error(request '<div style="font-weight:bold;">You have already voted.</div>You may vote once per week, so come back and visit us again on '+next_vote_date.strftime('%A, %b %d %Y, %I:%M%p')+'. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')<block_end><block_end><return>redirect('/contest/?sort=vote_count')<block_end>
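# A hedged URLconf sketch matching the era of these views (render_to_response and
# RequestContext point at old-style Django); the URL patterns below are assumptions,
# not taken from the project's actual urls.py.
<import_from_stmt>django.conf.urls url<line_sep>urlpatterns=[url(r'^contest/$' get_entries) url(r'^contest/rules/$' get_rules) url(r'^contest/entry/(?P<entry_id>\d+)/$' get_entry) url(r'^contest/entry/(?P<entry_id>\d+)/vote/$' add_vote)]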
<class_stmt>dotBooleanPart_t(object)# no doc <block_start>Boolean=<none><line_sep>OperativePart=<none><line_sep>Type=<none><block_end>
"""Subclass specific to Cisco ASA"""<import_stmt>re<import_from_stmt>netdev.logger logger<import_from_stmt>netdev.vendors.ios_like IOSLikeDevice<class_stmt>CiscoASA(IOSLikeDevice)<block_start>"""Class for working with Cisco ASA"""<def_stmt>__init__ self *args **kwargs<block_start>""" Initialize class for asynchronous working with network devices :param str host: device hostname or ip address for connection :param str username: username for logging to device :param str password: <PASSWORD> for logging to device :param str secret: secret password for privilege mode :param int port: ssh port for connection. Default is 22 :param str device_type: network device type :param known_hosts: file with known hosts. Default is None (no policy). With () it will use default file :param str local_addr: local address for binding source of tcp connection :param client_keys: path for client keys. Default in None. With () it will use default file in OS :param str passphrase: password for encrypted client keys :param float timeout: timeout in second for getting information from channel :param loop: asyncio loop object """<line_sep>super().__init__(*args **kwargs)<line_sep>self._multiple_mode=<false><block_end>_disable_paging_command="terminal pager 0"<line_sep>@property<def_stmt>multiple_mode self<block_start>""" Returning Bool True if ASA in multiple mode"""<line_sep><return>self._multiple_mode<block_end><async_keyword><def_stmt>connect self<block_start>""" Async Connection method Using 5 functions: * _establish_connection() for connecting to device * _set_base_prompt() for finding and setting device prompt * _enable() for getting privilege exec mode * _disable_paging() for non interact output in commands * _check_multiple_mode() for checking multiple mode in ASA """<line_sep>logger.info("Host {}: trying to connect to the device".format(self._host))<line_sep><await>self._establish_connection()<line_sep><await>self._set_base_prompt()<line_sep><await>self.enable_mode()<line_sep><await>self._disable_paging()<line_sep><await>self._check_multiple_mode()<line_sep>logger.info("Host {}: Has connected to the device".format(self._host))<block_end><async_keyword><def_stmt>_set_base_prompt self<block_start>""" Setting two important vars for ASA base_prompt - textual prompt in CLI (usually hostname) base_pattern - regexp for finding the end of command. IT's platform specific parameter For ASA devices base_pattern is "prompt([\/\w]+)?(\(.*?\))?[#|>] """<line_sep>logger.info("Host {}: Setting base prompt".format(self._host))<line_sep>prompt=<await>self._find_prompt()<line_sep># Cut off prompt from "prompt/context/other" if it exists # If not we get all prompt prompt=prompt[:-1].split("/")<line_sep>prompt=prompt[0]<line_sep>self._base_prompt=prompt<line_sep>delimiters=map(re.escape type(self)._delimiter_list)<line_sep>delimiters=r"|".join(delimiters)<line_sep>base_prompt=re.escape(self._base_prompt[:12])<line_sep>pattern=type(self)._pattern<line_sep>self._base_pattern=pattern.format(prompt=base_prompt delimiters=delimiters)<line_sep>logger.debug("Host {}: Base Prompt: {}".format(self._host self._base_prompt))<line_sep>logger.debug("Host {}: Base Pattern: {}".format(self._host self._base_pattern))<line_sep><return>self._base_prompt<block_end><async_keyword><def_stmt>_check_multiple_mode self<block_start>"""Check mode multiple. 
If the mode is multiple, we add info about the contexts"""<line_sep>logger.info("Host {}: Checking multiple mode".format(self._host))<line_sep>out=<await>self.send_command("show mode")<if_stmt>"multiple"<in>out<block_start>self._multiple_mode=<true><block_end>logger.debug("Host {}: Multiple mode: {}".format(self._host self._multiple_mode))<block_end><block_end>
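# A hedged usage sketch (host and credentials are placeholders): send_command comes from the
# IOSLikeDevice base class, as used by _check_multiple_mode above; netdev also offers a
# factory, but direct construction mirrors the constructor documented in __init__.
<import_stmt>asyncio<async_keyword><def_stmt>_example <block_start>asa=CiscoASA(host='192.0.2.1' username='admin' password='secret')<line_sep><await>asa.connect()<line_sep>print(<await>asa.send_command('show version'))<block_end><if_stmt>__name__<eq>'__main__'<block_start>asyncio.run(_example())<block_end>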
<import_stmt>os<import_stmt>zipfile<import_from_stmt>tempfile NamedTemporaryFile<import_from_stmt>usaspending_api.download.filestreaming.zip_file append_files_to_zip_file<def_stmt>test_append_files_to_zip_file <block_start><with_stmt>NamedTemporaryFile()<as>zip_file<block_start><with_stmt>NamedTemporaryFile()<as>include_file_1<block_start><with_stmt>NamedTemporaryFile()<as>include_file_2<block_start>include_file_1.write(b"this is a test")<line_sep>include_file_1.flush()<line_sep>include_file_2.write(b"this is also a test")<line_sep>include_file_2.flush()<line_sep>append_files_to_zip_file([include_file_1.name include_file_2.name] zip_file.name)<with_stmt>zipfile.ZipFile(zip_file.name "r")<as>zf<block_start><assert_stmt>[z.filename<for>z zf.filelist]<eq>[os.path.basename(include_file_1.name) os.path.basename(include_file_2.name) ]<block_end><block_end><block_end><block_end><block_end>