content: string (lengths 0 to 1.55M)
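The rows below are Python source files rewritten with angle-bracket markers in place of Python keywords, operators, and block structure (for example <import_stmt> for import, <line_sep> for a line break, <block_start>/<block_end> for indentation). As a rough reading aid, the sketch below maps only the markers with an obvious one-to-one spelling back to plain Python text. The mapping is inferred by eye from these samples, not taken from any published specification, and the output is still not valid Python: the remaining *_stmt markers, the dropped parentheses and commas, and the block markers would need a real parser to reconstruct.

import re

# Marker spellings inferred from the samples above; this is an assumption,
# not an official mapping, and it deliberately skips structural markers
# (<block_start>, <block_end>, <def_stmt>, <import_from_stmt>, ...) that
# also drop colons, parentheses, and commas.
ONE_TO_ONE = {
    "<line_sep>": "\n",
    "<none>": "None", "<true>": "True", "<false>": "False",
    "<return>": "return ", "<pass>": "pass", "<yield>": "yield ",
    "<eq>": " == ", "<ne>": " != ", "<l>": " < ", "<g>": " > ",
    "<le>": " <= ", "<ge>": " >= ", "<times>": " * ", "<floordiv>": " // ",
    "<augadd>": " += ", "<arrow>": " -> ",
    "<and>": " and ", "<or>": " or ", "<not>": " not ",
    "<in>": " in ", "<is>": " is ", "<as>": " as ",
}

_MARKER_RE = re.compile("|".join(re.escape(k) for k in ONE_TO_ONE))

def rough_decode(row: str) -> str:
    """Substitute the simple markers so a row is easier to skim.

    The result is not runnable Python; block structure and call syntax
    still live in the remaining *_stmt and <block_*> markers.
    """
    return _MARKER_RE.sub(lambda m: ONE_TO_ONE[m.group(0)], row)

# Example:
# rough_decode("<if_stmt>x<eq>1<block_start>y=<true><block_end>")
# -> "<if_stmt>x == 1<block_start>y=True<block_end>"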
#! /usr/bin/env python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides basic testing modes for the remote debugger."""<import_stmt>abc<class_stmt>DebuggingPlugin(object)<block_start>"""Superclass for all debugging plugins."""<line_sep>__metaclass__=abc.ABCMeta<def_stmt>__init__ self inferior name<block_start>self.name=name<line_sep>self.position=<none><line_sep>self.inferior=inferior<line_sep>super(DebuggingPlugin self).__init__()<block_end>@abc.abstractproperty<def_stmt>commands self<block_start><return>[]<block_end><block_end>
<import_stmt>numpy<as>np<class_stmt>AdlClassifier(object)<block_start><def_stmt>init self class_num:int init_params:dict<block_start>self.class_num=class_num<line_sep>self.label_map=list()<line_sep>self.clf_name=<none><line_sep><raise>NotImplementedError<block_end><def_stmt>fit self train_examples_x:np.ndarray train_examples_y:np.ndarray fit_params:dict<block_start><raise>NotImplementedError<block_end><def_stmt>predict_proba self test_examples:np.ndarray predict_prob_params:dict<arrow>np.ndarray<block_start><raise>NotImplementedError<block_end><def_stmt>rebuild_prob_res self input_label_list orig_prob_array<block_start>new_prob_arary=np.zeros((orig_prob_array.shape[0] self.class_num))<for_stmt>i,cls enumerate(input_label_list)<block_start>new_prob_arary[: cls]=orig_prob_array[: i]<block_end>empty_cls_list=list()<for_stmt>i range(self.class_num)<block_start><if_stmt>i<not><in>input_label_list<block_start>empty_cls_list.append(i)<block_end><block_end><for_stmt>sample_i range(orig_prob_array.shape[0])<block_start>np_median_value=np.median(new_prob_arary[sample_i])<for_stmt>empty_cls empty_cls_list<block_start>new_prob_arary[sample_i][empty_cls]=np_median_value<block_end><block_end><return>new_prob_arary<block_end><block_end><class_stmt>AdlOfflineClassifier(AdlClassifier)<block_start><def_stmt>offline_fit self train_examples_x:np.ndarray train_examples_y:np.ndarray fit_params:dict<block_start><raise>NotImplementedError<block_end><block_end><class_stmt>AdlOnlineClassifier(AdlClassifier)<block_start><def_stmt>online_fit self train_examples_x:np.ndarray train_examples_y:np.ndarray fit_params:dict<block_start><raise>NotImplementedError<block_end><block_end>
<import_stmt>os<import_stmt>pathlib<import_from_stmt>io StringIO<import_from_stmt>pkg_resources resource_filename<import_stmt>streamlit<as>st<import_from_stmt>whatlies.language CountVectorLanguage<import_from_stmt>whatlies.transformers Pca Umap<import_from_stmt>whatlies EmbeddingSet Embedding<import_stmt>sentencepiece<as>spm<import_stmt>tensorflow<as>tf<import_stmt>tensorflow_hub<as>hub<import_stmt>tensorflow.compat.v1<as>tf# noqa: F811 tf.disable_v2_behavior()<with_stmt>tf.Session()<as>sess<block_start>module=hub.Module("https://tfhub.dev/google/universal-sentence-encoder-lite/1")<line_sep>spm_path=sess.run(module(signature="spm_path"))<block_end>sp=spm.SentencePieceProcessor()<line_sep>sp.Load(spm_path)<line_sep>input_placeholder=tf.sparse_placeholder(tf.int64 shape=[<none> <none>])<line_sep>encodings=module(inputs=dict(values=input_placeholder.values indices=input_placeholder.indices dense_shape=input_placeholder.dense_shape ))<def_stmt>process_to_IDs_in_sparse_format sp sentences<block_start>ids=[sp.EncodeAsIds(x)<for>x sentences]<line_sep>max_len=max(len(x)<for>x ids)<line_sep>dense_shape=(len(ids) max_len)<line_sep>values=[item<for>sublist ids<for>item sublist]<line_sep>indices=[[row col]<for>row range(len(ids))<for>col range(len(ids[row]))]<line_sep><return>values indices dense_shape<block_end><def_stmt>calculate_embeddings messages encodings<block_start>values,indices,dense_shape=process_to_IDs_in_sparse_format(sp messages)<with_stmt>tf.Session()<as>session<block_start>session.run([tf.global_variables_initializer() tf.tables_initializer()])<line_sep>message_embeddings=session.run(encodings feed_dict={input_placeholder.values:values input_placeholder.indices:indices input_placeholder.dense_shape:dense_shape } )<block_end><return>message_embeddings<block_end>st.sidebar.markdown("Made with love over at [Rasa](https://rasa.com/).")<line_sep>uploaded=st.sidebar.file_uploader("Upload a `.txt` file for clustering. Each utterance should appear on a new line.")<if_stmt><not>uploaded<block_start>filepath=resource_filename("rasalit" os.path.join("data" "nlu.md"))<line_sep>txt=pathlib.Path(filepath).read_text()<line_sep>texts=list(set([t<for>t txt.split("\n")<if>len(t)<g>0]))<block_end><else_stmt><block_start>bytes_data=uploaded.read()<line_sep>stringio=StringIO(bytes_data.decode("utf-8"))<line_sep>string_data=stringio.read()<line_sep>texts=[t.replace(" - " "")<for>t string_data.split("\n")<if>len(t)<g>0<and>t[0]<ne>"#"]<block_end>method=st.sidebar.selectbox("Select Embedding Method" ["Lite Sentence Encoding" "CountVector SVD"])<if_stmt>method<eq>"CountVector SVD"<block_start>n_svd=st.sidebar.slider("Number of SVD components" min_value=2 max_value=100 step=1)<line_sep>min_ngram,max_ngram=st.sidebar.slider("Range of ngrams" min_value=1 max_value=5 step=1 value=(2 3))<block_end>reduction_method=st.sidebar.selectbox("Reduction Method" ("Umap" "Pca"))<if_stmt>reduction_method<eq>"Umap"<block_start>n_neighbors=st.sidebar.slider("Number of UMAP neighbors" min_value=1 max_value=100 value=15 step=1)<line_sep>min_dist=st.sidebar.slider("Minimum Distance for UMAP" min_value=0.01 max_value=0.99 value=0.8 step=0.01 )<line_sep>reduction=Umap(2 n_neighbors=n_neighbors min_dist=min_dist)<block_end><else_stmt><block_start>reduction=Pca(2)<block_end>st.markdown("# Simple Text Clustering")<line_sep>st.markdown("Let's say you've gotten a lot of feedback from clients on different channels. You might like to be able to distill main topics and get an overview. 
It might even inspire some intents that will be used in a virtual assistant!")<line_sep>st.markdown("This tool will help you discover them. This app will attempt to cluster whatever text you give it. The chart will try to clump text together and you can explore underlying patterns.")<if_stmt>method<eq>"CountVector SVD"<block_start>lang=CountVectorLanguage(n_svd ngram_range=(min_ngram max_ngram))<line_sep>embset=lang[texts]<block_end><if_stmt>method<eq>"Lite Sentence Encoding"<block_start>embset=EmbeddingSet(*[Embedding(t v)<for>t,v zip(texts calculate_embeddings(texts encodings=encodings))])<block_end>p=(embset.transform(reduction).plot_interactive(annot=<false>).properties(width=500 height=500 title=""))<line_sep>st.write(p)<line_sep>st.markdown("While the tool helps you in discovering clusters, it doesn't do labelling (yet). We do offer a [jupyter notebook](https://github.com/RasaHQ/rasalit/tree/master/notebooks/bulk-labelling) that might help out though.")<line_sep>
""" Sieve of Eratosthenes : Generate all the primes less than any integer nn """<import_from_stmt>math sqrt<def_stmt>get_primes n<block_start>m=n+1<line_sep># numbers = [True for i in range(m)] numbers=[<true>]<times>m<for_stmt>i range(2 int(sqrt(n)+1))<block_start><if_stmt>numbers[i]<block_start><for_stmt>j range(i<times>i m i)<block_start>numbers[j]=<false><block_end><block_end><block_end>primes=[]<for_stmt>i range(2 m)<block_start><if_stmt>numbers[i]<block_start>primes.append(i)<block_end><block_end><return>primes<block_end>print(get_primes(25))<line_sep>
<import_from_stmt>unittest.mock patch MagicMock<import_stmt>pytest<import_stmt>sciencebeam.pipeline_runners.pipeline_runner_utils<as>pipeline_runner_utils_module<import_from_stmt>sciencebeam.pipeline_runners.pipeline_runner_utils get_remaining_file_list_for_args <line_sep>BASE_TEST_PATH='/tmp/test/conversion-pipeline'<line_sep>BASE_DATA_PATH=BASE_TEST_PATH+'/data'<line_sep>PDF_PATH='*/*.pdf'<line_sep>FILE_LIST_PATH='file-list.csv'<line_sep>FILE_COLUMN='column1'<line_sep>REL_PDF_FILE_WITHOUT_EXT_1='1/file'<line_sep>PDF_FILE_1=BASE_DATA_PATH+'/'+REL_PDF_FILE_WITHOUT_EXT_1+'.pdf'<line_sep>OUTPUT_PATH=BASE_TEST_PATH+'/out'<line_sep>OUTPUT_SUFFIX='.xml'<line_sep>@pytest.fixture(name='load_file_list_mock' autouse=<true>)<def_stmt>_load_file_list_mock <block_start><with_stmt>patch.object(pipeline_runner_utils_module 'load_file_list')<as>mock<block_start><yield>mock<block_end><block_end>@pytest.fixture(name='find_matching_filenames_with_limit_mock' autouse=<true>)<def_stmt>_find_matching_filenames_with_limit_mock <block_start><with_stmt>patch.object(pipeline_runner_utils_module 'find_matching_filenames_with_limit')<as>mock<block_start><yield>mock<block_end><block_end>@pytest.fixture(name='map_file_list_to_file_exists_mock' autouse=<true>)<def_stmt>_map_file_list_to_file_exists_mock <block_start><with_stmt>patch.object(pipeline_runner_utils_module 'map_file_list_to_file_exists')<as>mock<block_start>mock.side_effect=<lambda>file_list:[<false>]<times>len(file_list)<line_sep><yield>mock<block_end><block_end>@pytest.fixture(name='args')<def_stmt>get_default_args <block_start>opt=MagicMock()<line_sep>opt.base_data_path=BASE_DATA_PATH<line_sep>opt.output_path=OUTPUT_PATH<line_sep>opt.output_suffix=OUTPUT_SUFFIX<line_sep>opt.limit=<none><line_sep><return>opt<block_end>@pytest.fixture(name='file_path_args')<def_stmt>get_file_path_args args<block_start>opt=args<line_sep>opt.source_path=PDF_PATH<line_sep>opt.source_file_list=<none><line_sep><return>opt<block_end>@pytest.fixture(name='file_list_args')<def_stmt>get_file_list_args args<block_start>opt=args<line_sep>opt.source_path=<none><line_sep>opt.source_file_list=BASE_DATA_PATH+'/file-list.tsv'<line_sep>opt.source_file_column='url'<line_sep><return>opt<block_end><class_stmt>TestGetRemainingFileListForArgs<block_start><def_stmt>test_should_pass_file_pattern_to_find_files self file_path_args find_matching_filenames_with_limit_mock:MagicMock<block_start>find_matching_filenames_with_limit_mock.return_value=[PDF_FILE_1]<assert_stmt>(get_remaining_file_list_for_args(file_path_args)<eq>find_matching_filenames_with_limit_mock.return_value)<line_sep>find_matching_filenames_with_limit_mock.assert_called_with(BASE_DATA_PATH+'/'+PDF_PATH limit=file_path_args.limit)<block_end><def_stmt>test_should_pass_file_list_and_limit_to_load_file_list self file_list_args load_file_list_mock:MagicMock<block_start>opt=file_list_args<line_sep>opt.limit=100<line_sep>load_file_list_mock.return_value=[PDF_FILE_1]<assert_stmt>(get_remaining_file_list_for_args(opt)<eq>load_file_list_mock.return_value)<line_sep>load_file_list_mock.assert_called_with(opt.source_file_list column=opt.source_file_column limit=opt.limit)<block_end><block_end>
<import_stmt>setuptools<with_stmt>open("README.md" "r" encoding="utf-8")<as>fh<block_start>long_description=fh.read()<block_end>setuptools.setup(name="shazamio" version="0.0.5" author="dotX12" description="Is a FREE asynchronous library from reverse engineered Shazam API written in Python 3.6+ with asyncio and aiohttp. Includes all the methods that Shazam has, including searching for a song by file." long_description=long_description long_description_content_type="text/markdown" url="https://github.com/dotX12/ShazamIO" install_requires=['aiohttp' 'pydub' 'numpy' 'aiofiles' 'dataclass-factory' ] packages=setuptools.find_packages() python_requires='>=3.6' )<line_sep>
<import_stmt>unittest<import_from_stmt>basketball_reference_scraper.teams get_roster get_team_stats get_opp_stats get_roster_stats get_team_misc<class_stmt>TestTeams(unittest.TestCase)<block_start><def_stmt>test_get_roster self<block_start>df=get_roster('GSW' 2019)<line_sep>curry_df=df[df['PLAYER']<eq>'<NAME>']<line_sep>self.assertEqual(len(curry_df) 1)<line_sep>expected_columns=['NUMBER' 'PLAYER' 'POS' 'HEIGHT' 'WEIGHT' 'BIRTH_DATE' 'NATIONALITY' 'EXPERIENCE' 'COLLEGE']<line_sep>self.assertListEqual(list(df.columns) expected_columns)<block_end><def_stmt>test_get_roster_on_missing_nationality self<block_start>df=get_roster('FTW' 1956)<line_sep>expected_columns=['NUMBER' 'PLAYER' 'POS' 'HEIGHT' 'WEIGHT' 'BIRTH_DATE' 'NATIONALITY' 'EXPERIENCE' 'COLLEGE']<line_sep>self.assertListEqual(list(df.columns) expected_columns)<block_end><def_stmt>get_team_stats self<block_start>series=get_team_stats('GSW' 2019)<line_sep>expected_indices=['G' 'MP' 'FG' 'FGA' 'FG%' '3P' '3PA' '3P%' '2P' '2PA' '2P%' 'FT' 'FTA' 'FT%' 'ORB' 'DRB' 'TRB' 'AST' 'STL' 'BLK' 'TOV' 'PF' 'PTS']<line_sep>self.assertCountEqual(list(series.index) expected_indices)<block_end><def_stmt>get_opp_stats self<block_start>series=get_opp_stats('GSW' 2019)<line_sep>expected_indices=['OPP_G' 'OPP_MP' 'OPP_FG' 'OPP_FGA' 'OPP_FG%' 'OPP_3P' 'OPP_3PA' 'OPP_3P%' 'OPP_2P' 'OPP_2PA' 'OPP_2P%' 'OPP_FT' 'OPP_FTA' 'OPP_FT%' 'OPP_ORB' 'OPP_DRB' 'OPP_TRB' 'OPP_AST' 'OPP_STL' 'OPP_BLK' 'OPP_TOV' 'OPP_PF' 'OPP_PTS']<line_sep>self.assertCountEqual(list(series.index) expected_indices)<block_end><def_stmt>test_get_roster_stats self<block_start>df=get_roster_stats('GSW' 2019)<line_sep>expected_columns=['PLAYER' 'POS' 'AGE' 'TEAM' 'G' 'GS' 'MP' 'FG' 'FGA' 'FG%' '3P' '3PA' '3P%' '2P' '2PA' '2P%' 'eFG%' 'FT' 'FTA' 'FT%' 'ORB' 'DRB' 'TRB' 'AST' 'STL' 'BLK' 'TOV' 'PF' 'PTS' 'SEASON']<line_sep>self.assertCountEqual(list(df.columns) expected_columns)<block_end><def_stmt>test_get_team_misc self<block_start>series=get_team_misc('GSW' 2019)<line_sep>expected_indices=['AGE' 'W' 'L' 'PW' 'PL' 'MOV' 'SOS' 'SRS' 'ORtg' 'DRtg' 'NRtg' 'PACE' 'FTr' '3PAr' 'TS%' 'eFG%' 'TOV%' 'ORB%' 'FT/FGA' 'eFG%' 'TOV%' 'DRB%' 'FT/FGA' 'ARENA' 'ATTENDANCE' 'ATTENDANCE/G' 'TEAM' 'SEASON']<line_sep>self.assertCountEqual(list(series.index) expected_indices)<line_sep>series=get_team_misc('CHO' 2019)<line_sep>self.assertCountEqual(list(series.index) expected_indices)<line_sep>series=get_team_misc('NOK' 2007)<line_sep>self.assertCountEqual(list(series.index) expected_indices)<line_sep>series=get_team_misc('TCB' 1951)<line_sep>self.assertCountEqual(list(series.index) expected_indices)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>sqladmin.helpers secure_filename<def_stmt>test_secure_filename monkeypatch<block_start><assert_stmt>secure_filename("My cool movie.mov")<eq>"My_cool_movie.mov"<assert_stmt>secure_filename("../../../etc/passwd")<eq>"etc_passwd"<assert_stmt>(secure_filename("i contain cool \xfcml\xe4uts.txt")<eq>"i_contain_cool_umlauts.txt")<assert_stmt>secure_filename("__filename__")<eq>"filename"<assert_stmt>secure_filename("foo$&^*)bar")<eq>"foobar"<block_end>
# -*- coding: utf-8 -*- """ Created on Thu Nov 23 14:54:35 2017 @author: user """<import_from_future_stmt> unicode_literals<import_from_future_stmt> print_function<import_from_future_stmt> division<import_from_future_stmt> absolute_import<import_from_stmt>typing Any<import_from_stmt>typing Dict<import_from_stmt>typing List<import_from_stmt>typing Text<import_from_stmt>rasa_nlu.config RasaNLUConfig<import_from_stmt>rasa_nlu.tokenizers Tokenizer Token<import_from_stmt>rasa_nlu.components Component<import_from_stmt>rasa_nlu.training_data Message<import_from_stmt>rasa_nlu.training_data TrainingData<import_stmt>sys<import_from_stmt>yaha Cuttor<line_sep>reload(sys)<line_sep>sys.setdefaultencoding('utf-8')<class_stmt>YahaTokenizer(Tokenizer Component)<block_start>name="tokenizer_yaha"<line_sep>provides=["tokens"]<line_sep>cuttor=Cuttor()<def_stmt>__init__ self<block_start><pass><block_end>@classmethod<def_stmt>required_packages cls# type: () -> List[Text] <block_start><return>["yaha"]<block_end><def_stmt>train self training_data config **kwargs# type: (TrainingData, RasaNLUConfig, **Any) -> None <block_start><if_stmt>config['language']<ne>'zh'<block_start><raise>Exception("tokenizer_yaha is only used for Chinese. Check your configure json file.")<block_end><for_stmt>example training_data.training_examples<block_start>example.set("tokens" self.tokenize(example.text))<block_end><block_end><def_stmt>process self message **kwargs# type: (Message, **Any) -> None <block_start>message.set("tokens" self.tokenize(message.text))<block_end><def_stmt>tokenize self text# type: (Text) -> List[Token] <block_start>tokenized=self.cuttor.tokenize(text.decode('utf-8') search=<true>)<line_sep>tokens=[Token(word start)<for>(word start end) tokenized]<line_sep><return>tokens<block_end><block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>aliyunsdkcore.request RpcRequest<import_from_stmt>aliyunsdkvideoenhan.endpoint endpoint_data<class_stmt>ChangeVideoSizeRequest(RpcRequest)<block_start><def_stmt>__init__ self<block_start>RpcRequest.__init__(self 'videoenhan' '2020-03-20' 'ChangeVideoSize' 'videoenhan')<line_sep>self.set_method('POST')<if_stmt>hasattr(self "endpoint_map")<block_start>setattr(self "endpoint_map" endpoint_data.getEndpointMap())<block_end><if_stmt>hasattr(self "endpoint_regional")<block_start>setattr(self "endpoint_regional" endpoint_data.getEndpointRegional())<block_end><block_end><def_stmt>get_Height self<block_start><return>self.get_body_params().get('Height')<block_end><def_stmt>set_Height self Height<block_start>self.add_body_params('Height' Height)<block_end><def_stmt>get_B self<block_start><return>self.get_body_params().get('B')<block_end><def_stmt>set_B self B<block_start>self.add_body_params('B' B)<block_end><def_stmt>get_FillType self<block_start><return>self.get_body_params().get('FillType')<block_end><def_stmt>set_FillType self FillType<block_start>self.add_body_params('FillType' FillType)<block_end><def_stmt>get_G self<block_start><return>self.get_body_params().get('G')<block_end><def_stmt>set_G self G<block_start>self.add_body_params('G' G)<block_end><def_stmt>get_CropType self<block_start><return>self.get_body_params().get('CropType')<block_end><def_stmt>set_CropType self CropType<block_start>self.add_body_params('CropType' CropType)<block_end><def_stmt>get_R self<block_start><return>self.get_body_params().get('R')<block_end><def_stmt>set_R self R<block_start>self.add_body_params('R' R)<block_end><def_stmt>get_VideoUrl self<block_start><return>self.get_body_params().get('VideoUrl')<block_end><def_stmt>set_VideoUrl self VideoUrl<block_start>self.add_body_params('VideoUrl' VideoUrl)<block_end><def_stmt>get_Width self<block_start><return>self.get_body_params().get('Width')<block_end><def_stmt>set_Width self Width<block_start>self.add_body_params('Width' Width)<block_end><def_stmt>get_Tightness self<block_start><return>self.get_body_params().get('Tightness')<block_end><def_stmt>set_Tightness self Tightness<block_start>self.add_body_params('Tightness' Tightness)<block_end><block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>observations.r.bio_chemists bio_chemists<def_stmt>test_bio_chemists <block_start>"""Test module bio_chemists.py by downloading bio_chemists.csv and testing shape of extracted data has 915 rows and 6 columns """<line_sep>test_path=tempfile.mkdtemp()<line_sep>x_train,metadata=bio_chemists(test_path)<try_stmt><block_start><assert_stmt>x_train.shape<eq>(915 6)<block_end><except_stmt><block_start>shutil.rmtree(test_path)<line_sep><raise>()<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>.provider MendeleyCitationsProvider<import_from_stmt>website.citations.views GenericCitationViews<line_sep>mendeley_views=GenericCitationViews('mendeley' MendeleyCitationsProvider)<line_sep>
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>logging<import_stmt>threading<import_from_stmt>google.cloud datastore<import_from_stmt>google.cloud resource_manager<import_from_stmt>googleapiclient discovery<import_from_stmt>googleapiclient errors<import_stmt>httplib2<import_from_stmt>oauth2client client<import_stmt>webapp2<def_stmt>resource_iterator next_page_function<block_start>"""Loop through resources from a Google API. An iterator that returns all of the resources from a Google API 'list' operation paging through each set. Args: next_page_function: A function that when called will return the next page of results. Yields: A list if resources, which are typically dictionaries. """<line_sep>next_page_token=<none><line_sep>more_results=<true><while_stmt>more_results<block_start>resource_response=<none><try_stmt><block_start>resource_response=next_page_function(next_page_token).execute()<block_end><except_stmt>errors.HttpError# Some projects throw a 403. (compute engine isn't enabled) # just ignore those resources. <block_start>logging.debug('skipping resources.' exc_info=<true>)<line_sep><return><block_end><for_stmt>items_field ['items' 'rrsets' 'managedZones']<block_start>items=resource_response.get(items_field {})<if_stmt>items<and>(type(items)<eq>dict)<block_start><for_stmt>item items.iteritems()<block_start><yield>item<block_end><block_end><if_stmt>items<and>(type(items)<eq>list)<block_start><for_stmt>item items<block_start><yield>item<block_end><block_end><block_end><if_stmt>'nextPageToken'<in>resource_response<block_start>next_page_token=resource_response['nextPageToken']<block_end><else_stmt><block_start>more_results=<false><block_end><block_end><block_end><class_stmt>ThreadsafeClientLocal(object)<block_start>"""A thread local Google API client descriptor. Httplib2 is not threadsafe so each request thread requires it's own threadlocal client object which this creates. Attributes: service: String name of the API to create the client for. version: String version of the API client. """<line_sep>_class_thread_local=threading.local()<def_stmt>__init__ self service version<block_start>"""Create a thread local API client. Will create the underlying httplib2.Http object on construction, but the underlying API client is lazy constructed. Args: service: Name of API. version: Version of the api. """<line_sep>self.service=service<line_sep>self.version=version<line_sep>self.http=httplib2.Http(timeout=60)<line_sep>self.cache_discovery=<true><block_end><def_stmt>__get__ self instance instance_type<block_start>"""Construct the API client."""<if_stmt>instance<is><none><block_start><return>self<block_end>thread_local=<none><try_stmt><block_start>app=webapp2.get_app()<line_sep># Python Google API clients aren't threadsafe as they use httplib2 # which isn't threadsafe. 
thread_local=app.registry.get(self)<if_stmt>thread_local<is><none><block_start>thread_local=threading.local()<line_sep>app.registry[self]=thread_local<block_end><block_end><except_stmt>AssertionError# When not in a request context, use class thread local. <block_start>thread_local=ThreadsafeClientLocal._class_thread_local<block_end>cached_client=getattr(thread_local 'api' <none>)<if_stmt>cached_client<is><none><block_start>credentials=client.GoogleCredentials.get_application_default()<if_stmt>credentials.create_scoped_required()<block_start>credentials=credentials.create_scoped('https://www.googleapis.com/auth/cloud-platform')<block_end>cached_client=discovery.build(self.service self.version http=credentials.authorize(self.http) cache_discovery=self.cache_discovery)<line_sep>thread_local.api=cached_client<block_end><return>cached_client<block_end><block_end><class_stmt>Clients(object)<block_start>"""Holds API clients. For Google API clients, we use thread local descriptors which creates the client on first access. The "google.cloud" clients are threadsafe and are simple properties. """<line_sep>metrics=ThreadsafeClientLocal('monitoring' 'v3')<line_sep>compute=ThreadsafeClientLocal('compute' 'v1')<line_sep>dns=ThreadsafeClientLocal('dns' 'v1')<line_sep>iam=ThreadsafeClientLocal('cloudresourcemanager' 'v1')<def_stmt>__init__ self<block_start>self.datastore=datastore.Client()<line_sep>self.crm=resource_manager.Client()<block_end><block_end>CLIENTS=Clients()<line_sep>
''' hooks for using tensorRT with the object detection program. names and parameters are defined as required by the detect.py infrastructure. '''<import_stmt>tensorflow<as>tf<import_stmt>tensorflow.contrib.tensorrt<as>trt<def_stmt>load_graph_tensorrt params<block_start>graph_def=tf.compat.v1.GraphDef()<with_stmt>tf.compat.v1.gfile.GFile(params["FROZEN_GRAPH"] 'rb')<as>f<block_start>graph_def.ParseFromString(f.read())<line_sep>tf.import_graph_def(graph_def name='')<block_end>trt_graph=trt.create_inference_graph(input_graph_def=graph_def outputs=['detection_boxes:0' 'detection_scores:0' 'detection_classes:0' 'num_detections:0'] max_batch_size=params["BATCH_SIZE"] max_workspace_size_bytes=4000000000 is_dynamic_op=<true><if>params["TENSORRT_DYNAMIC"]<eq>1<else><false> precision_mode=params["TENSORRT_PRECISION"])<line_sep>tf.import_graph_def(trt_graph return_elements=['detection_boxes:0' 'detection_scores:0' 'detection_classes:0' 'num_detections:0'])<block_end>##no more needed <def_stmt>convert_from_tensorrt tmp_output_dict<block_start><return>tmp_output_dict<block_end>### names of tensors are different from normal TF names, but can be retrieved and a dict with the same shape of the original one can be formed, thus avoiding the conversion after the postprocessing. # note that for the tf session, the names are enough and there is no real need to get the tensors. <def_stmt>get_handles_to_tensors_RT <block_start>graph=tf.get_default_graph()<line_sep>tensor_dict={}<line_sep>tensor_dict['num_detections']=graph.get_tensor_by_name('import/num_detections:0')<line_sep>tensor_dict['detection_classes']=graph.get_tensor_by_name('import/detection_classes:0')<line_sep>tensor_dict['detection_boxes']=graph.get_tensor_by_name('import/detection_boxes:0')<line_sep>tensor_dict['detection_scores']=graph.get_tensor_by_name('import/detection_scores:0')<line_sep>image_tensor=graph.get_tensor_by_name('import/image_tensor:0')<line_sep><return>tensor_dict image_tensor<block_end>
<import_stmt>sys<import_stmt>os<line_sep>current_path=os.path.dirname(os.path.realpath(__file__))<line_sep>root_path=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))<line_sep>sys.path.append(root_path)<import_stmt>json<import_stmt>signal<import_stmt>threading<import_stmt>torch<import_from_stmt>flownmt.data NMTDataSet<import_stmt>experiments.options<as>options<import_from_stmt>experiments.nmt main<as>single_process_main<def_stmt>create_dataset args<block_start>model_path=args.model_path<if_stmt><not>os.path.exists(model_path)<block_start>os.makedirs(model_path)<block_end>result_path=os.path.join(model_path 'translations')<if_stmt><not>os.path.exists(result_path)<block_start>os.makedirs(result_path)<block_end>vocab_path=os.path.join(model_path 'vocab')<if_stmt><not>os.path.exists(vocab_path)<block_start>os.makedirs(vocab_path)<block_end>data_path=args.data_path<line_sep>src_lang=args.src<line_sep>tgt_lang=args.tgt<line_sep>src_vocab_path=os.path.join(vocab_path '{}.vocab'.format(src_lang))<line_sep>tgt_vocab_path=os.path.join(vocab_path '{}.vocab'.format(tgt_lang))<line_sep>params=json.load(open(args.config 'r'))<line_sep>src_max_vocab=params['{}_vocab_size'.format(src_lang)]<line_sep>tgt_max_vocab=params['{}_vocab_size'.format(tgt_lang)]<line_sep>NMTDataSet(data_path src_lang tgt_lang src_vocab_path tgt_vocab_path src_max_vocab tgt_max_vocab subword=args.subword create_vocab=<true>)<block_end><def_stmt>main <block_start>args=options.parse_distributed_args()<line_sep>args_dict=vars(args)<line_sep>nproc_per_node=args_dict.pop('nproc_per_node')<line_sep>nnodes=args_dict.pop('nnodes')<line_sep>node_rank=args_dict.pop('node_rank')<line_sep># world size in terms of number of processes dist_world_size=nproc_per_node<times>nnodes<line_sep># set PyTorch distributed related environmental variables current_env=os.environ<line_sep>current_env["MASTER_ADDR"]=args_dict.pop('master_addr')<line_sep>current_env["MASTER_PORT"]=str(args_dict.pop('master_port'))<line_sep>current_env["WORLD_SIZE"]=str(dist_world_size)<line_sep>create_vocab=args_dict.pop('create_vocab')<if_stmt>create_vocab<block_start>create_dataset(args)<block_end>args.create_vocab=<false><line_sep>batch_size=args.batch_size<floordiv>dist_world_size<line_sep>args.batch_size=batch_size<line_sep>mp=torch.multiprocessing.get_context('spawn')<line_sep># Create a thread to listen for errors in the child processes. 
error_queue=mp.SimpleQueue()<line_sep>error_handler=ErrorHandler(error_queue)<line_sep>processes=[]<for_stmt>local_rank range(0 nproc_per_node)# each process's rank <block_start>dist_rank=nproc_per_node<times>node_rank+local_rank<line_sep>args.rank=dist_rank<line_sep>args.local_rank=local_rank<line_sep>process=mp.Process(target=run args=(args error_queue ) daemon=<true>)<line_sep>process.start()<line_sep>error_handler.add_child(process.pid)<line_sep>processes.append(process)<block_end><for_stmt>process processes<block_start>process.join()<block_end><block_end><def_stmt>run args error_queue<block_start><try_stmt><block_start>single_process_main(args)<block_end><except_stmt>KeyboardInterrupt<block_start><pass># killed by parent, do nothing <block_end><except_stmt>Exception# propagate exception to parent process, keeping original traceback <block_start><import_stmt>traceback<line_sep>error_queue.put((args.rank traceback.format_exc()))<block_end><block_end><class_stmt>ErrorHandler(object)<block_start>"""A class that listens for exceptions in children processes and propagates the tracebacks to the parent process."""<def_stmt>__init__ self error_queue<block_start>self.error_queue=error_queue<line_sep>self.children_pids=[]<line_sep>self.error_thread=threading.Thread(target=self.error_listener daemon=<true>)<line_sep>self.error_thread.start()<line_sep>signal.signal(signal.SIGUSR1 self.signal_handler)<block_end><def_stmt>add_child self pid<block_start>self.children_pids.append(pid)<block_end><def_stmt>error_listener self<block_start>(rank original_trace)=self.error_queue.get()<line_sep>self.error_queue.put((rank original_trace))<line_sep>os.kill(os.getpid() signal.SIGUSR1)<block_end><def_stmt>signal_handler self signalnum stackframe<block_start><for_stmt>pid self.children_pids<block_start>os.kill(pid signal.SIGINT)# kill children processes <block_end>(rank original_trace)=self.error_queue.get()<line_sep>msg="\n\n-- Tracebacks above this line can probably be ignored --\n\n"<line_sep>msg<augadd>original_trace<line_sep><raise>Exception(msg)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>pcbnew<import_stmt>os<import_from_stmt>.pcbnew2boardview convert<class_stmt>Pcbnew2Boardview(pcbnew.ActionPlugin)<block_start><def_stmt>defaults self<block_start>self.name="Pcbnew to Boardview"<line_sep>self.category="Read PCB"<line_sep>self.description="Generate Boardview file from KiCad pcb."<block_end><def_stmt>Run self<block_start>kicad_pcb=pcbnew.GetBoard()<with_stmt>open(kicad_pcb.GetFileName().replace('.kicad_pcb' '.brd') 'wt')<as>brd_file<block_start>convert(kicad_pcb brd_file)<block_end><block_end><block_end>plugin=Pcbnew2Boardview()<line_sep>plugin.register()<line_sep>
SECRET_KEY='secret'<line_sep>ROOT_URLCONF='jsonrpc.tests.test_backend_django.urls'<line_sep>ALLOWED_HOSTS=['testserver']<line_sep>DATABASE_ENGINE='django.db.backends.sqlite3'<line_sep>DATABASES={'default':{'ENGINE':'django.db.backends.sqlite3' 'NAME':':memory:' }}<line_sep>JSONRPC_MAP_VIEW_ENABLED=<true><line_sep>
<import_from_stmt>.anchor *# noqa: F401,F403 <import_from_stmt>.bbox *# noqa: F401,F403 <import_from_stmt>.post_processing *# noqa: F401,F403
<import_from_stmt>typing Union<import_stmt>re<import_from_stmt>phonemizer.phonemize phonemize<import_from_stmt>data.text.symbols all_phonemes _punctuations<class_stmt>Tokenizer<block_start><def_stmt>__init__ self start_token='>' end_token='<' pad_token='/' add_start_end=<true> alphabet=<none> model_breathing=<true><block_start><if_stmt><not>alphabet<block_start>self.alphabet=all_phonemes<block_end><else_stmt><block_start>self.alphabet=sorted(list(set(alphabet)))# for testing <block_end>self.idx_to_token={i:s<for>i,s enumerate(self.alphabet start=1)}<line_sep>self.idx_to_token[0]=pad_token<line_sep>self.token_to_idx={s:[i]<for>i,s self.idx_to_token.items()}<line_sep>self.vocab_size=len(self.alphabet)+1<line_sep>self.add_start_end=add_start_end<if_stmt>add_start_end<block_start>self.start_token_index=len(self.alphabet)+1<line_sep>self.end_token_index=len(self.alphabet)+2<line_sep>self.vocab_size<augadd>2<line_sep>self.idx_to_token[self.start_token_index]=start_token<line_sep>self.idx_to_token[self.end_token_index]=end_token<block_end>self.model_breathing=model_breathing<if_stmt>model_breathing<block_start>self.breathing_token_index=self.vocab_size<line_sep>self.token_to_idx[' ']=self.token_to_idx[' ']+[self.breathing_token_index]<line_sep>self.vocab_size<augadd>1<line_sep>self.breathing_token='@'<line_sep>self.idx_to_token[self.breathing_token_index]=self.breathing_token<line_sep>self.token_to_idx[self.breathing_token]=[self.breathing_token_index]<block_end><block_end><def_stmt>__call__ self sentence:str<arrow>list<block_start>sequence=[self.token_to_idx[c]<for>c sentence]# No filtering: text should only contain known chars. sequence=[item<for>items sequence<for>item items]<if_stmt>self.model_breathing<block_start>sequence=[self.breathing_token_index]+sequence<block_end><if_stmt>self.add_start_end<block_start>sequence=[self.start_token_index]+sequence+[self.end_token_index]<block_end><return>sequence<block_end><def_stmt>decode self sequence:list<arrow>str<block_start><return>''.join([self.idx_to_token[int(t)]<for>t sequence])<block_end><block_end><class_stmt>Phonemizer<block_start><def_stmt>__init__ self language:str with_stress:bool njobs=4<block_start>self.language=language<line_sep>self.njobs=njobs<line_sep>self.with_stress=with_stress<line_sep>self.special_hyphen='—'<line_sep>self.punctuation=';:,.!?¡¿—…"«»“”'<line_sep>self._whitespace_re=re.compile(r'\s+')<line_sep>self._whitespace_punctuation_re=re.compile(f'\s*([{_punctuations}])\s*')<block_end><def_stmt>__call__ self text:Union[str list] with_stress=<none> njobs=<none> language=<none><arrow>Union[str list]<block_start>language=language<or>self.language<line_sep>njobs=njobs<or>self.njobs<line_sep>with_stress=with_stress<or>self.with_stress<line_sep># phonemizer does not like hyphens. 
text=self._preprocess(text)<line_sep>phonemes=phonemize(text language=language backend='espeak' strip=<true> preserve_punctuation=<true> with_stress=with_stress punctuation_marks=self.punctuation njobs=njobs language_switch='remove-flags')<line_sep><return>self._postprocess(phonemes)<block_end><def_stmt>_preprocess_string self text:str<block_start>text=text.replace('-' self.special_hyphen)<line_sep><return>text<block_end><def_stmt>_preprocess self text:Union[str list]<arrow>Union[str list]<block_start><if_stmt>isinstance(text list)<block_start><return>[self._preprocess_string(t)<for>t text]<block_end><elif_stmt>isinstance(text str)<block_start><return>self._preprocess_string(text)<block_end><else_stmt><block_start><raise>TypeError(f'{self} input must be list or str, not {type(text)}')<block_end><block_end><def_stmt>_collapse_whitespace self text:str<arrow>str<block_start>text=re.sub(self._whitespace_re ' ' text)<line_sep><return>re.sub(self._whitespace_punctuation_re r'\1' text)<block_end><def_stmt>_postprocess_string self text:str<arrow>str<block_start>text=text.replace(self.special_hyphen '-')<line_sep>text=''.join([c<for>c text<if>c<in>all_phonemes])<line_sep>text=self._collapse_whitespace(text)<line_sep>text=text.strip()<line_sep><return>text<block_end><def_stmt>_postprocess self text:Union[str list]<arrow>Union[str list]<block_start><if_stmt>isinstance(text list)<block_start><return>[self._postprocess_string(t)<for>t text]<block_end><elif_stmt>isinstance(text str)<block_start><return>self._postprocess_string(text)<block_end><else_stmt><block_start><raise>TypeError(f'{self} input must be list or str, not {type(text)}')<block_end><block_end><block_end>
<import_from_stmt>unittest TestCase<import_from_stmt>mock Mock<import_from_stmt>openelex.base.transform registry<class_stmt>TestTransformRegistry(TestCase)<block_start><def_stmt>test_register_with_validators self<block_start>mock_transform=Mock(return_value=<none>)<line_sep>mock_transform.__name__='mock_transform'<line_sep>mock_validator1=Mock(return_value=<none>)<line_sep>mock_validator1.__name__='mock_validator1'<line_sep>mock_validator2=Mock(return_value=<none>)<line_sep>mock_validator2.__name__='mock_validator2'<line_sep>validators=[mock_validator1 mock_validator2]<line_sep>registry.register("XX" mock_transform validators)<line_sep>transform=registry.get("XX" "mock_transform")<line_sep>self.assertEqual(list(transform.validators.values()) validators)<line_sep>transform()<line_sep>mock_transform.assert_called_once_with()<block_end><def_stmt>test_register_raw self<block_start>mock_transform=Mock(return_value=<none>)<line_sep>mock_transform.__name__='mock_transform'<line_sep>registry.register("XX" mock_transform raw=<true>)<line_sep>transform=registry.get("XX" "mock_transform" raw=<true>)<line_sep>transform()<line_sep>mock_transform.assert_called_once_with()<block_end><block_end>
<import_from_stmt>django.db models<class_stmt>OverallTotals(models.Model)<block_start>id=models.AutoField(primary_key=<true>)<line_sep>create_date=models.DateTimeField(auto_now_add=<true> blank=<true> null=<true>)<line_sep>update_date=models.DateTimeField(auto_now=<true> null=<true>)<line_sep>fiscal_year=models.IntegerField(blank=<true> null=<true>)<line_sep>total_budget_authority=models.DecimalField(max_digits=23 decimal_places=2 blank=<true> null=<true>)<class_stmt>Meta<block_start>managed=<true><line_sep>db_table="overall_totals"<block_end><block_end>
<import_from_future_stmt> print_function<line_sep># The following comments couldn't be translated into the new config version: #! /bin/env cmsRun <import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("validation")<import_stmt>FWCore.ParameterSet.VarParsing<as>VarParsing<line_sep>options=VarParsing.VarParsing('analysis')<line_sep># load the full reconstraction configuration, to make sure we're getting all needed dependencies process.load("Configuration.StandardSequences.MagneticField_cff")<line_sep>process.load("Configuration.StandardSequences.GeometryRecoDB_cff")<line_sep>process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<line_sep>process.load("Configuration.StandardSequences.Reconstruction_cff")<line_sep>options.register('jets' "ak4PFJetsCHS" # default value, examples : "ak4PFJets", "ak4PFJetsCHS" VarParsing.VarParsing.multiplicity.singleton VarParsing.VarParsing.varType.string "jet collection to use")<line_sep>options.parseArguments()<line_sep>whichJets=options.jets<line_sep>applyJEC=<true><line_sep>corrLabel="ak4PFCHS"<import_from_stmt>Configuration.AlCa.GlobalTag GlobalTag<line_sep>tag=GlobalTag(process.GlobalTag 'auto:run2_mc' '')<line_sep>useTrigger=<false><line_sep>triggerPath="HLT_PFJet80_v*"<line_sep>runOnMC=<true><line_sep>#Flavour plots for MC: "all" = plots for all jets ; "dusg" = plots for d, u, s, dus, g independently ; not mandatory and any combinations are possible #b, c, light (dusg), non-identified (NI), PU jets plots are always produced flavPlots="allbcldusg"<line_sep>###prints### print("jet collcetion asked : " whichJets)<line_sep>print("JEC applied?" applyJEC ", correction:" corrLabel)<line_sep>print("trigger will be used ? : " useTrigger ", Trigger paths:" triggerPath)<line_sep>print("is it MC ? 
: " runOnMC ", Flavours:" flavPlots)<line_sep>print("Global Tag : " tag.globaltag)<line_sep>############ process.load("DQMServices.Components.DQMEnvironment_cfi")<line_sep>process.load("DQMServices.Core.DQM_cfg")<line_sep>process.load("JetMETCorrections.Configuration.JetCorrectors_cff")<line_sep>process.load("CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi")<line_sep>process.load("RecoJets.JetAssociationProducers.ak4JTA_cff")<line_sep>process.load("RecoBTag.Configuration.RecoBTag_cff")<line_sep>process.load("PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi")<line_sep>process.load("PhysicsTools.JetMCAlgos.AK4PFJetsMCFlavourInfos_cfi")<line_sep>process.load("PhysicsTools.JetMCAlgos.CaloJetsMCFlavour_cfi")<line_sep>process.load("PhysicsTools.PatAlgos.mcMatchLayer0.jetMatch_cfi")<line_sep>process.JECseq=cms.Sequence(getattr(process corrLabel+"L1FastL2L3CorrectorChain"))<line_sep>newjetID=cms.InputTag(whichJets)<line_sep>process.ak4JetFlavourInfos.jets=newjetID<line_sep>process.ak4JetFlavourInfos.hadronFlavourHasPriority=cms.bool(<true>)<line_sep>process.AK4byRef.jets=newjetID<if_stmt><not>"ak4PFJetsCHS"<in>whichJets<block_start>process.ak4JetTracksAssociatorAtVertexPF.jets=newjetID<line_sep>process.pfImpactParameterTagInfos.jets=newjetID<line_sep>process.softPFMuonsTagInfos.jets=newjetID<line_sep>process.softPFElectronsTagInfos.jets=newjetID<line_sep>process.patJetGenJetMatch.src=newjetID<block_end>process.btagSequence=cms.Sequence(process.ak4JetTracksAssociatorAtVertexPF<times>process.btagging)<line_sep>process.jetSequences=cms.Sequence(process.goodOfflinePrimaryVertices<times>process.btagSequence)<line_sep>### print("inputTag : " process.ak4JetTracksAssociatorAtVertexPF.jets)<line_sep>### <if_stmt>runOnMC<block_start>process.flavourSeq=cms.Sequence(process.selectedHadronsAndPartons<times>process.ak4JetFlavourInfos)<line_sep>process.load("Validation.RecoB.bTagAnalysis_cfi")<line_sep>process.bTagValidation.jetMCSrc='ak4JetFlavourInfos'<if_stmt>"Calo"<in>whichJets<block_start>process.bTagValidation.caloJetMCSrc='AK4byValAlgo'<line_sep>process.bTagValidation.useOldFlavourTool=<true><line_sep>process.flavourSeq=cms.Sequence(process.myPartons<times>process.AK4Flavour)<block_end>process.bTagValidation.applyPtHatWeight=<false><line_sep>process.bTagValidation.doJetID=<true><line_sep>process.bTagValidation.doJEC=applyJEC<line_sep>process.bTagValidation.JECsourceMC=cms.InputTag(corrLabel+"L1FastL2L3Corrector")<line_sep>process.bTagValidation.flavPlots=flavPlots<line_sep>process.bTagHarvestMC.flavPlots=flavPlots<line_sep>#process.bTagValidation.ptRecJetMin = cms.double(20.) 
process.bTagValidation.genJetsMatched=cms.InputTag("patJetGenJetMatch")<line_sep>process.bTagValidation.doPUid=cms.bool(<true>)<line_sep>process.ak4GenJetsForPUid=cms.EDFilter("GenJetSelector" src=cms.InputTag("ak4GenJets") cut=cms.string('pt > 8.') filter=cms.bool(<false>))<line_sep>process.patJetGenJetMatch.matched=cms.InputTag("ak4GenJetsForPUid")<line_sep>process.patJetGenJetMatch.maxDeltaR=cms.double(0.25)<line_sep>process.patJetGenJetMatch.resolveAmbiguities=cms.bool(<true>)<block_end><else_stmt><block_start>process.load("DQMOffline.RecoB.bTagAnalysisData_cfi")<line_sep>process.bTagAnalysis.doJEC=applyJEC<line_sep>process.bTagAnalysis.JECsourceData=cms.InputTag(corrLabel+"L1FastL2L3ResidualCorrector")<line_sep>process.JECseq<augmul>(getattr(process corrLabel+"ResidualCorrector")<times>getattr(process corrLabel+"L1FastL2L3ResidualCorrector"))<block_end>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(-1))<line_sep>process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring())<import_from_stmt>HLTrigger.HLTfilters.hltHighLevel_cfi *<if_stmt>useTrigger<block_start>process.bTagHLT=hltHighLevel.clone(TriggerResultsTag="TriggerResults::HLT" HLTPaths=["HLT_PFJet40_v*"])<line_sep>process.bTagHLT.HLTPaths=[triggerPath]<block_end><if_stmt>runOnMC<block_start>process.dqmSeq=cms.Sequence(process.ak4GenJetsForPUid<times>process.patJetGenJetMatch<times>process.flavourSeq<times>process.bTagValidation<times>process.bTagHarvestMC<times>process.dqmSaver)<block_end><else_stmt><block_start>process.dqmSeq=cms.Sequence(process.bTagAnalysis<times>process.bTagHarvest<times>process.dqmSaver)<block_end><if_stmt>useTrigger<block_start>process.plots=cms.Path(process.bTagHLT<times>process.JECseq<times>process.jetSequences<times>process.dqmSeq)<block_end><else_stmt><block_start>process.plots=cms.Path(process.JECseq<times>process.jetSequences<times>process.dqmSeq)<block_end>process.dqmEnv.subSystemFolder='BTAG'<line_sep>process.dqmSaver.producer='DQM'<line_sep>process.dqmSaver.workflow='/POG/BTAG/BJET'<line_sep>process.dqmSaver.convention='Offline'<line_sep>process.dqmSaver.saveByRun=cms.untracked.int32(-1)<line_sep>process.dqmSaver.saveAtJobEnd=cms.untracked.bool(<true>)<line_sep>process.dqmSaver.forceRunNumber=cms.untracked.int32(1)<line_sep>process.PoolSource.fileNames=[]<line_sep>#keep the logging output to a nice level process.load("FWCore.MessageLogger.MessageLogger_cfi")<line_sep>process.MessageLogger.cerr.FwkReport.reportEvery=100<line_sep>process.GlobalTag=tag<line_sep>
<import_from_stmt>dependencies *<line_sep>IMAGE_HEIGHT,IMAGE_WIDTH=101 101<line_sep>HEIGHT,WIDTH=128 128<line_sep>DY0,DY1,DX0,DX1=compute_center_pad(IMAGE_HEIGHT IMAGE_WIDTH factor=32)<line_sep>#---------------------------------------- <def_stmt>null_augment image label index<block_start>cache=Struct(image=image.copy() mask=mask.copy())<line_sep><return>image label index cache<block_end><def_stmt>null_collate batch<block_start>batch_size=len(batch)<line_sep>cache=[]<line_sep>input=[]<line_sep>truth=[]<line_sep>index=[]<for_stmt>b range(batch_size)<block_start>input.append(batch[b][0])<line_sep>truth.append(batch[b][1])<line_sep>index.append(batch[b][2])<line_sep>cache.append(batch[b][3])<block_end>input=torch.from_numpy(np.array(input)).float().unsqueeze(1)<if_stmt>truth[0]<ne>[]<block_start>truth=torch.from_numpy(np.array(truth)).float().unsqueeze(1)<block_end><return>input truth index cache<block_end>#---------------------------------------- <class_stmt>TGSDataset(Dataset)<block_start><def_stmt>__init__ self split augment=null_augment mode='train'<block_start>super(TGSDataset self).__init__()<line_sep>self.split=split<line_sep>self.mode=mode<line_sep>self.augment=augment<line_sep>split_file=CODE+'/datasets/TGS_salt/splits/'+split<line_sep>lines=read_list_from_file(split_file)<line_sep>self.ids=[]<line_sep>self.images=[]<for_stmt>l lines<block_start>folder,name=l.split('/')<line_sep>image_file=DATA+'/'+folder+'/images/'+name+'.png'<line_sep>image=cv2.imread(image_file cv2.IMREAD_GRAYSCALE).astype(np.float32)/255<line_sep>self.images.append(image)<line_sep>self.ids.append(name)<line_sep>#print(image.shape) <block_end>self.masks=[]<if_stmt>self.mode<in>['train' 'valid']<block_start><for_stmt>l lines<block_start>folder,file=l.split('/')<line_sep>mask_file=DATA+'/'+folder+'/masks/'+file+'.png'<line_sep>mask=cv2.imread(mask_file cv2.IMREAD_GRAYSCALE).astype(np.float32)/255<line_sep>self.masks.append(mask)<block_end><block_end><elif_stmt>self.mode<in>['test']<block_start>self.masks=[[]<for>l lines]<block_end>#------- df=pd.read_csv(DATA+'/depths.csv')<line_sep>df=df.set_index('id')<line_sep>self.zs=df.loc[self.ids].z.values<line_sep>#------- print('\tTGSDataset')<line_sep>print('\tsplit = %s'%split)<line_sep>print('\tlen(self.images) = %d'%len(self.images))<line_sep>print('')<block_end><def_stmt>__getitem__ self index<block_start>image=self.images[index]<line_sep>mask=self.masks[index]<line_sep><return>self.augment(image mask index)<block_end><def_stmt>__len__ self<block_start><return>len(self.images)<block_end><block_end><def_stmt>run_check_data <block_start>dataset=TGSDataset('list_train0_3600' mode='train')# #-- zz=0<line_sep>zero=np.zeros((101 101) np.uint8)<line_sep>save_dir=CODE+'/datasets/TGS_salt/demo'<line_sep>num=len(dataset)<for_stmt>m [3 5 6 7 8 9 10 11 12]<block_start>image=dataset.images[m]<line_sep>mask=dataset.masks[m]<line_sep>cv2.imshow('image' image)<line_sep>#image_show_norm('image',image,1, 2) #image_show_norm('mask', mask,1, 2) <for_stmt>i range(5)#image1, mask1 = do_random_pad_to_factor2(image, mask, limit=(-4,4), factor=32) #image1, mask1 = do_horizontal_flip2(image, mask) <block_start>mask1=mask<line_sep>#image1 = do_invert_intensity(image) #image1 = do_brightness_shift(image, np.random.uniform(-0.125,0.125)) #image1 = do_brightness_multiply(image, np.random.uniform(1-0.125,1+0.125)) image1=do_gamma(image np.random.uniform(1-0.25 1+0.25))<line_sep>#----------------------------------------------- 
image1=(image1<times>255).astype(np.uint8)<line_sep>image1=np.dstack([image1 image1 image1])<line_sep>#overlay1 = draw_mask_overlay(mask1, image1, color=[0,0,255]) #image_show('overlay1',overlay1,2) #image_show('image1',image1,2) #image_show_norm('mask1',mask1,1, 2) #cv2.waitKey(0) <block_end><block_end><block_end># main ################################################################# <if_stmt>__name__<eq>'__main__'<block_start>print('%s: calling main function ... '%os.path.basename(__file__))<line_sep>run_check_data()<block_end>
# -*- coding: utf-8 -*- # Generated by Django 1.9.11 on 2016-11-14 18:50 <import_from_stmt>django.db migrations<def_stmt>user_model_content_type apps schema_editor<block_start><import_from_stmt>...core.conf settings<if_stmt><not>hasattr(settings 'AUTH_USER_MODEL')<block_start><return><block_end>user=apps.get_model(settings.AUTH_USER_MODEL)<if_stmt>user._meta.db_table<eq>'spirit_user_user'<block_start>app_label,model=settings.AUTH_USER_MODEL.split('.')<line_sep>content_types=apps.get_model('contenttypes.ContentType')<line_sep>(content_types.objects.filter(app_label='spirit_user' model='User'.lower()).update(app_label=app_label model=model.lower()))<block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('spirit_user' '0008_auto_20161114_1707') ]<line_sep>operations=[migrations.RunPython(user_model_content_type) ]<block_end>
<import_from_future_stmt> print_function<import_stmt>pyxb.bundles.opengis.gml<as>gml<line_sep>dv=gml.DegreesType(32 direction='N')<line_sep>print(dv.toDOM(element_name='degrees').toxml("utf-8"))<line_sep>
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Name: streamStatus.py # Purpose: functionality for reporting on the notational status of streams # # Authors: <NAME> # # Copyright: Copyright © 2013 <NAME> and the music21 # Project # License: BSD, see license.txt # ----------------------------------------------------------------------------- <import_stmt>unittest<import_from_stmt>music21 environment<import_from_stmt>music21 common<import_from_stmt>music21.common.objects SlottedObjectMixin<line_sep>environLocal=environment.Environment(__file__)<line_sep># ----------------------------------------------------------------------------- <class_stmt>StreamStatus(SlottedObjectMixin)<block_start>''' An object that stores the current notation state for the client stream. Separates out tasks such as whether notation has been made, etc. >>> s = stream.Stream() >>> ss = s.streamStatus >>> ss <music21.stream.streamStatus.StreamStatus object at 0x...> >>> s.streamStatus.client is s True Copying of StreamStatus and surrounding Streams >>> import copy >>> ss2 = copy.deepcopy(ss) >>> ss2.client is None True >>> s2 = copy.deepcopy(s) >>> s2.streamStatus <music21.stream.streamStatus.StreamStatus object at 0x...> >>> s2.streamStatus is ss False >>> s2.streamStatus.client is s2 True '''<line_sep># CLASS VARIABLES # __slots__=('_accidentals' '_beams' '_client' '_concertPitch' '_dirty' '_enharmonics' '_measures' '_ornaments' '_rests' '_ties' '_tuplets' )<line_sep># INITIALIZER # <def_stmt>__init__ self client=<none><block_start>self._client=<none><line_sep>self._accidentals=<none><line_sep>self._beams=<none><line_sep>self._concertPitch=<none><line_sep>self._dirty=<none><line_sep>self._enharmonics=<none><line_sep>self._measures=<none><line_sep>self._ornaments=<none><line_sep>self._rests=<none><line_sep>self._ties=<none><line_sep>self._tuplets=<none><line_sep>self.client=client<block_end># SPECIAL METHODS # <def_stmt>__deepcopy__ self memo=<none><block_start>''' Manage deepcopying by creating a new reference to the same object. leaving out the client '''<line_sep>new=type(self)()<for_stmt>x self.__slots__<block_start><if_stmt>x<eq>'_client'<block_start>new._client=<none><block_end><else_stmt><block_start>setattr(new x getattr(self x))<block_end><block_end><return>new<block_end># unwrap weakref for pickling <def_stmt>__getstate__ self<block_start>self._client=common.unwrapWeakref(self._client)<line_sep><return>SlottedObjectMixin.__getstate__(self)<block_end><def_stmt>__setstate__ self state<block_start>SlottedObjectMixin.__setstate__(self state)<line_sep>self._client=common.wrapWeakref(self._client)<block_end># PUBLIC METHODS # <def_stmt>haveAccidentalsBeenMade self<block_start>''' If Accidentals.displayStatus is None for all contained pitches, it as assumed that accidentals have not been set for display and/or makeAccidentals has not been run. If any Accidental has displayStatus other than None, this method returns True, regardless of if makeAccidentals has actually been run. '''<for_stmt>p self.client.pitches<block_start><if_stmt>p.accidental<is><not><none><block_start><if_stmt>p.accidental.displayStatus<is><not><none><block_start><return><true><block_end><block_end><block_end><return><false><block_end><def_stmt>haveBeamsBeenMade self<block_start>''' If any Note in this Stream has .beams defined, it as assumed that Beams have not been set and/or makeBeams has not been run. 
If any Beams exist, this method returns True, regardless of if makeBeams has actually been run. '''<for_stmt>n self.client.recurse(classFilter=('NotRest' ) restoreActiveSites=<false>)<block_start><if_stmt>n.beams<is><not><none><and>n.beams.beamsList<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>haveTupletBracketsBeenMade self<block_start>''' If any GeneralNote in this Stream is a tuplet, then check to see if any of them have a first Tuplet with type besides None return True. Otherwise return False if there is a tuplet. Return None if no Tuplets. >>> s = stream.Stream() >>> s.streamStatus.haveTupletBracketsBeenMade() is None True >>> s.append(note.Note()) >>> s.streamStatus.haveTupletBracketsBeenMade() is None True >>> n = note.Note(quarterLength=1/3) >>> s.append(n) >>> s.streamStatus.haveTupletBracketsBeenMade() False >>> n.duration.tuplets[0].type = 'start' >>> s.streamStatus.haveTupletBracketsBeenMade() True '''<line_sep>foundTuplet=<false><for_stmt>n self.client.recurse(classFilter='GeneralNote' restoreActiveSites=<false>)<block_start><if_stmt>n.duration.tuplets<block_start>foundTuplet=<true><if_stmt>n.duration.tuplets[0].type<is><not><none><block_start><return><true><block_end><block_end><block_end><if_stmt>foundTuplet<block_start><return><false><block_end><else_stmt><block_start><return><none><block_end><block_end># PUBLIC PROPERTIES # @property<def_stmt>client self<block_start><return>common.unwrapWeakref(self._client)<block_end>@client.setter<def_stmt>client self client# client is the Stream that this status lives on <block_start>self._client=common.wrapWeakref(client)<block_end>@property<def_stmt>accidentals self<block_start><if_stmt>self._accidentals<is><none><block_start>self._accidentals=self.haveAccidentalsBeenMade()<block_end><return>self._accidentals<block_end>@accidentals.setter<def_stmt>accidentals self expr<block_start><if_stmt>expr<is><not><none><block_start>self._accidentals=bool(expr)<block_end><else_stmt><block_start>self._accidentals=<none><block_end><block_end>@property<def_stmt>beams self<block_start><if_stmt>self._beams<is><none><block_start>self._beams=self.haveBeamsBeenMade()<block_end><return>self._beams<block_end>@beams.setter<def_stmt>beams self expr<block_start><if_stmt>expr<is><not><none><block_start>self._beams=bool(expr)<block_end><else_stmt><block_start>self._beams=<none><block_end><block_end>@property<def_stmt>tuplets self<block_start><if_stmt>self._tuplets<is><none><block_start>self._tuplets=self.haveTupletBracketsBeenMade()<line_sep># If there were no tuplet durations, # tuplet brackets don't need to be made. 
<if_stmt>self._tuplets<is><none><block_start>self._tuplets=<true><block_end><block_end><return>self._tuplets<block_end>@tuplets.setter<def_stmt>tuplets self expr<block_start><if_stmt>expr<is><not><none><block_start>self._tuplets=bool(expr)<block_end><else_stmt><block_start>self._tuplets=<none><block_end><block_end><block_end># ----------------------------------------------------------------------------- <class_stmt>Test(unittest.TestCase)<block_start>''' Note: most Stream tests are found in stream.tests '''<def_stmt>testHaveBeamsBeenMadeAfterDeepcopy self<block_start><import_stmt>copy<import_from_stmt>music21 stream<import_from_stmt>music21 note<line_sep>m=stream.Measure()<line_sep>c=note.Note('C4' type='quarter')<line_sep>m.append(c)<line_sep>d1=note.Note('D4' type='eighth')<line_sep>d2=note.Note('D4' type='eighth')<line_sep>m.append([d1 d2])<line_sep>e3=note.Note('E4' type='eighth')<line_sep>e4=note.Note('E4' type='eighth')<line_sep>m.append([e3 e4])<line_sep>d1.beams.append('start')<line_sep>d2.beams.append('stop')<line_sep>self.assertTrue(m.streamStatus.haveBeamsBeenMade())<line_sep>mm=copy.deepcopy(m)<line_sep>self.assertTrue(mm.streamStatus.haveBeamsBeenMade())<line_sep>mm.streamStatus.beams=<false><line_sep>mmm=copy.deepcopy(mm)<line_sep>self.assertFalse(mmm.streamStatus.beams)<line_sep># m.show() <block_end><block_end># ----------------------------------------------------------------------------- <if_stmt>__name__<eq>'__main__'<block_start><import_stmt>music21<line_sep>music21.mainTest(Test)<block_end>
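A minimal usage sketch for the StreamStatus checks above, written in ordinary Python syntax rather than the transformed notation of this corpus. It assumes music21 is installed and simply mirrors the deepcopy test shown above.

# Minimal sketch, assuming music21 is installed; mirrors the test above.
import copy
from music21 import note, stream

m = stream.Measure()
d1 = note.Note('D4', type='eighth')
d2 = note.Note('D4', type='eighth')
m.append([d1, d2])

print(m.streamStatus.haveBeamsBeenMade())   # False: no beams defined yet

d1.beams.append('start')
d2.beams.append('stop')
print(m.streamStatus.haveBeamsBeenMade())   # True: beams now exist

m2 = copy.deepcopy(m)
print(m2.streamStatus.client is m2)         # True: client re-wired on copy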
<class_stmt>Solution(object)# def isPerfectSquare(self, num): # """ # :type num: int # :rtype: bool # """ # i = 1 # while num > 0: # num -= i # i += 2 # return num == 0 <block_start><def_stmt>isPerfectSquare self num<block_start>low,high=1 num<while_stmt>low<le>high<block_start>mid=(low+high)//2<line_sep>mid_square=mid<times>mid<if_stmt>mid_square<eq>num<block_start><return><true><block_end><elif_stmt>mid_square<l>num<block_start>low=mid+1<block_end><else_stmt><block_start>high=mid-1<block_end><block_end><return><false><block_end># def isPerfectSquare(self, num): # x = num # while x * x > num: # x = (x + num / x) / 2 # return x * x == num <block_end>
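A quick sanity check of the binary-search version above (plain Python; the floor division keeps `mid` an integer on both Python 2 and Python 3):

# Quick sanity check of the binary-search solution.
s = Solution()
print(s.isPerfectSquare(1))    # True  (1 * 1)
print(s.isPerfectSquare(16))   # True  (4 * 4)
print(s.isPerfectSquare(14))   # False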
<import_stmt>os<import_stmt>sys<import_from_stmt>collections defaultdict<import_from_stmt>django.core.management.base BaseCommand<import_stmt>csv<import_from_stmt>corehq.util.log with_progress_bar<import_from_stmt>...models PhoneNumber SQLMobileBackend<import_from_stmt>...util clean_phone_number<class_stmt>Command(BaseCommand)<block_start>help="Reassign phone numbers with old backend id to new backend id"<def_stmt>add_arguments self parser<block_start>parser.add_argument("old_backend" help="Old backend ID")<line_sep>parser.add_argument("--new-backend" help=("New backend ID. Dry-run if this option is absent. Use 'None' "<concat>"to clear the old backend without specifying a new backend; "<concat>"the phone number will use the domain/system default backend."))<line_sep>parser.add_argument("--domain" help="Limit to phone numbers in domain.")<line_sep>parser.add_argument("--dump-csv" help="Dump phone numbers to CSV file path "<concat>"(the path is the value given for this option).")<block_end><def_stmt>handle self old_backend new_backend=<none> domain=<none> **options<block_start>query=PhoneNumber.objects.filter(backend_id=old_backend)<if_stmt>domain<is><not><none><block_start>query=query.filter(domain=domain)<block_end><if_stmt>options["dump_csv"]<block_start>dump_csv(query options["dump_csv"])<block_end>print_counts_by_default_backend(query)<line_sep>print("Total assigned to {}: {}".format(old_backend len(query)))<if_stmt>new_backend<block_start>reassign(query new_backend)<block_end><block_end><block_end><def_stmt>dump_csv query path<block_start>path=os.path.expanduser(path)<line_sep>print("dumping to CSV: {}".format(path))<with_stmt>open(path "w" encoding="utf-8")<as>output<block_start>csvfile=csv.writer(output)<line_sep>csvfile.writerow(["domain" "couch_id" "phonenumber"])<for_stmt>phone query<block_start>csvfile.writerow([phone.domain phone.couch_id phone.phone_number ])<block_end><block_end><block_end><def_stmt>print_counts_by_default_backend query<block_start>counts=defaultdict(int)<for_stmt>phone with_progress_bar(query len(query) oneline=<true>)<block_start>default_backend=SQLMobileBackend.load_default_by_phone_and_domain(SQLMobileBackend.SMS clean_phone_number(phone.phone_number) domain=phone.domain)<line_sep>counts[default_backend.name]<augadd>1<block_end>print("Counts by default backend")<for_stmt>default,count sorted(counts.items())<block_start>print("{:<25}{:>4}".format(default count))<block_end><block_end><def_stmt>reassign query new_backend<block_start><if_stmt>new_backend<eq>"None"<block_start>new_backend=<none><block_end>ok=confirm("Reassign to {}".format(new_backend))<if_stmt>ok<block_start>updated=query.update(backend_id=new_backend)<line_sep>print("{} phone numbers updated".format(updated))<block_end><else_stmt><block_start>print("abort")<line_sep>sys.exit(1)<block_end><block_end><def_stmt>confirm msg<block_start><return>input(msg+" (y/N) ").lower()<eq>'y'<block_end>
<import_from_future_stmt> unicode_literals<import_stmt>json<import_stmt>time<class_stmt>Event(object)<block_start>"""Base class for a websocket 'event'."""<line_sep>__slots__=['received_time']<def_stmt>__init__ self<block_start>self.received_time=time.time()<block_end><def_stmt>__repr__ self<block_start><return>"{}()".format(self.__class__.__name__)<block_end>@classmethod<def_stmt>_summarize_bytes cls data max_len=24<block_start>"""Avoid spamming logs by truncating byte strings in repr."""<if_stmt>len(data)<g>max_len<block_start><return>"{!r} + {} bytes".format(data[:max_len] len(data)-max_len)<block_end><return>repr(data)<block_end>@classmethod<def_stmt>_summarize_text cls text max_len=24<block_start>"""Avoid spamming logs by truncating text."""<if_stmt>len(text)<g>max_len<block_start><return>"{!r} + {} chars".format(text[:max_len] len(text)-max_len)<block_end><return>repr(text)<block_end><block_end><class_stmt>Poll(Event)<block_start>"""A generated poll event."""<line_sep>name='poll'<block_end><class_stmt>Connecting(Event)<block_start>""" Generated prior to establishing a websocket connection to a server. :param url: The websocket URL the websocket is connecting to. """<line_sep>__slots__=['url']<line_sep>name='connecting'<def_stmt>__init__ self url<block_start>self.url=url<line_sep>super(Connecting self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(url='{}')".format(self.__class__.__name__ self.url)<block_end><block_end><class_stmt>ConnectFail(Event)<block_start>""" Generate when Lomond was unable to connect to a Websocket server. :param reason: A short description of the reason for the failure. :type reason: str """<line_sep>__slots__=['reason']<line_sep>name='connect_fail'<def_stmt>__init__ self reason<block_start>self.reason=reason<line_sep>super(ConnectFail self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(reason='{}')".format(self.__class__.__name__ self.reason )<block_end><block_end><class_stmt>Connected(Event)<block_start>"""Generated when Lomond has connected to a server but not yet negotiated the websocket upgrade. :param str url: The websocket URL connected to. :param str proxy: The proxy URL connected to (or None). """<line_sep>__slots__=['url' 'proxy']<line_sep>name='connected'<def_stmt>__init__ self url proxy=<none><block_start>self.url=url<line_sep>self.proxy=proxy<line_sep>super(Connected self).__init__()<block_end><def_stmt>__repr__ self<block_start>_class=self.__class__.__name__<line_sep><return>("{}(url='{}')".format(_class self.url)<if>self.proxy<is><none><else>"{}(url='{}', proxy='{}')".format(_class self.url self.proxy))<block_end><block_end><class_stmt>Rejected(Event)<block_start>"""Server rejected WS connection."""<line_sep>__slots__=['response' 'reason']<line_sep>name='rejected'<def_stmt>__init__ self response reason<block_start>""" Generated when Lomond is connected to the server, but the websocket upgrade failed. :param response: The response returned by the server. :param str reason: A description of why the connection was rejects. """<line_sep>self.response=response<line_sep>self.reason=reason<line_sep>super(Rejected self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(response={!r}, reason='{}')".format(self.__class__.__name__ self.response self.reason)<block_end><block_end><class_stmt>Ready(Event)<block_start>"""Generated when Lomond has connected to the server, and successfully negotiated the websocket upgrade. :param response: A :class:`~lomond.response.Response` object. 
:param str protocol: A websocket protocol or ``None`` if no protocol was supplied. :param set extensions: A set of negotiated websocket extensions. Currently only the ``'permessage-deflate'`` extension is supported. """<line_sep>__slots__=['response' 'protocol' 'extensions']<line_sep>name='ready'<def_stmt>__init__ self response protocol extensions<block_start>self.response=response<line_sep>self.protocol=protocol<line_sep>self.extensions=extensions<line_sep>super(Ready self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>'{}(response={!r}, protocol={!r}, extensions={!r})'.format(self.__class__.__name__ self.response self.protocol self.extensions)<block_end><block_end><class_stmt>ProtocolError(Event)<block_start>"""Generated when the server deviates from the protocol. :param str error: A description of the error. :param bool critical: Indicates if the error is considered 'critical'. If ``True``, Lomond will disconnect immediately. If ``False``, Lomond will send a close message to the server. """<line_sep>__slots__=['error' 'critical']<line_sep>name='protocol_error'<def_stmt>__init__ self error critical<block_start>self.error=error<line_sep>self.critical=critical<line_sep>super(ProtocolError self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(error='{}', critical={!r})".format(self.__class__.__name__ self.error self.critical)<block_end><block_end><class_stmt>Unresponsive(Event)<block_start>"""The server has not responding to pings within `ping_timeout` seconds. Will be followed by a :class:`~lomond.events.Disconnected` event. """<line_sep>name='unresponsive'<block_end><class_stmt>Disconnected(Event)<block_start>"""Generated when a websocket connection has been dropped. :param str reason: A description of why the websocket was closed. :param bool graceful: Flag indicating if the connection was dropped gracefully (`True`), or disconnected due to a socket failure (`False`) or other problem. """<line_sep>__slots__=['graceful' 'reason']<line_sep>name='disconnected'<def_stmt>__init__ self reason='closed' graceful=<false><block_start>self.reason=reason<line_sep>self.graceful=graceful<line_sep>super(Disconnected self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(reason='{}', graceful={!r})".format(self.__class__.__name__ self.reason self.graceful)<block_end><block_end><class_stmt>Closed(Event)<block_start>"""Generated when the websocket was closed. The websocket may no longer send packets after this event has been received. This event will be followed by :class:`~lomond.events.Disconnected`. :param code: The closed code returned from the server. :param str reason: An optional description why the websocket was closed, as returned from the server. """<line_sep>__slots__=['code' 'reason']<line_sep>name='closed'<def_stmt>__init__ self code reason<block_start>self.code=code<line_sep>self.reason=reason<line_sep>super(Closed self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>'{}(code={!r}, reason={!r})'.format(self.__class__.__name__ self.code self.reason )<block_end><block_end><class_stmt>Closing(Event)<block_start>"""Generated when the server is closing the connection. No more messages will be received from the server, but you may still send messages while handling this event. A :class:`~lomond.events.Disconnected` event should be generated shortly after this event. :param code: The closed code returned from the server. :param str reason: An optional description why the websocket was closed, as returned from the server. 
"""<line_sep>__slots__=['code' 'reason']<line_sep>name='closing'<def_stmt>__init__ self code reason<block_start>self.code=code<line_sep>self.reason=reason<line_sep>super(Closing self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>'{}(code={!r}, reason={!r})'.format(self.__class__.__name__ self.code self.reason )<block_end><block_end><class_stmt>UnknownMessage(Event)<block_start>""" An application message was received, with an unknown opcode. """<line_sep>__slots__=['message']<line_sep>name='unknown'<def_stmt>__init__ self message<block_start>self.message=message<line_sep>super(UnknownMessage self).__init__()<block_end><block_end><class_stmt>Ping(Event)<block_start>"""Generated when Lomond received a ping packet from the server. :param bytes data: Ping payload data. """<line_sep>__slots__=['data']<line_sep>name='ping'<def_stmt>__init__ self data<block_start>self.data=data<line_sep>super(Ping self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(data={!r})".format(self.__class__.__name__ self.data)<block_end><block_end><class_stmt>Pong(Event)<block_start>"""Generated when Lomond receives a pong packet from the server. :param bytes data: The pong payload data. """<line_sep>__slots__=['data']<line_sep>name='pong'<def_stmt>__init__ self data<block_start>self.data=data<line_sep>super(Pong self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(data={!r})".format(self.__class__.__name__ self.data)<block_end><block_end><class_stmt>Text(Event)<block_start>"""Generated when Lomond receives a text message from the server. :param str text: The text payload. """<line_sep>__slots__=['text' '_json']<line_sep>name='text'<def_stmt>__init__ self text<block_start>self.text=text<line_sep>self._json=<none><line_sep>super(Text self).__init__()<block_end>@property<def_stmt>json self<block_start>"""Text decoded as JSON. Calls ``json.loads`` to decode the ``text`` attribute, and may throw the same exceptions if the text is not valid json. """<if_stmt>self._json<is><none><block_start>self._json=json.loads(self.text)<block_end><return>self._json<block_end><def_stmt>__repr__ self<block_start><return>"{}(text={})".format(self.__class__.__name__ self._summarize_text(self.text))<block_end><block_end><class_stmt>Binary(Event)<block_start>"""Generated when Lomond receives a binary message from the server. :param bytes data: The binary payload. """<line_sep>__slots__=['data']<line_sep>name='binary'<def_stmt>__init__ self data<block_start>self.data=data<line_sep>super(Binary self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(data={})".format(self.__class__.__name__ self._summarize_bytes(self.data))<block_end><block_end><class_stmt>BackOff(Event)<block_start>"""Generated when a persistent connection has to wait before re- attempting a connection. :param float delay: The delay (in seconds) before Lomond will re- attempt to connect. """<line_sep>__slots__=['delay']<line_sep>name='back_off'<def_stmt>__init__ self delay<block_start>self.delay=delay<line_sep>super(BackOff self).__init__()<block_end><def_stmt>__repr__ self<block_start><return>"{}(delay={:0.1f})".format(self.__class__.__name__ self.delay)<block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("L1SKIM")<line_sep>process.load("FWCore.MessageService.MessageLogger_cfi")<line_sep>process.MessageLogger.cerr.FwkReport.reportEvery=100000<line_sep>process.options=cms.untracked.PSet(wantSummary=cms.untracked.bool(<true>))<line_sep>####################### configure pool source ############################# process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring('/store/data/Run2010A/MinimumBias/RECO/Apr21ReReco-v1/0000/08275F4A-5270-E011-9DC3-003048635E02.root') skipEvents=cms.untracked.uint32(0))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(100))<line_sep>##################### digi-2-raw plus L1 emulation ######################### process.load("Configuration.StandardSequences.Services_cff")<line_sep>process.load('Configuration.StandardSequences.GeometryRecoDB_cff')<line_sep>process.load('Configuration.StandardSequences.MagneticField_cff')<line_sep>#################### Conditions and L1 menu ################################ process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<import_from_stmt>Configuration.AlCa.autoCond autoCond<line_sep>process.GlobalTag.globaltag=autoCond['run1_data']<line_sep>############ Skim the events according to the L1 seeds #################### #select on HLT_HcalNZS_8E29 trigger <import_stmt>HLTrigger.HLTfilters.hltLevel1GTSeed_cfi<line_sep>process.skimL1Seeds=HLTrigger.HLTfilters.hltLevel1GTSeed_cfi.hltLevel1GTSeed.clone()<line_sep>process.skimL1Seeds.L1GtReadoutRecordTag=cms.InputTag("gtDigis")<line_sep>process.skimL1Seeds.L1GtObjectMapTag=cms.InputTag("hltL1GtObjectMap")<line_sep>process.skimL1Seeds.L1CollectionsTag=cms.InputTag("l1extraParticles")<line_sep>process.skimL1Seeds.L1MuonCollectionTag=cms.InputTag("l1extraParticles")<line_sep>process.skimL1Seeds.L1SeedsLogicalExpression="L1_SingleEG2 OR L1_SingleEG5 OR L1_SingleEG8 OR L1_SingleEG10 OR L1_SingleEG12 OR L1_SingleEG15 OR L1_SingleEG20 OR L1_SingleIsoEG5 OR L1_SingleIsoEG8 OR L1_SingleIsoEG10 OR L1_SingleIsoEG12 OR L1_SingleIsoEG15 OR L1_SingleJet6U OR L1_SingleJet10U OR L1_SingleJet20U OR L1_SingleJet30U OR L1_SingleJet40U OR L1_SingleJet50U OR L1_SingleJet60U OR L1_SingleTauJet10U OR L1_SingleTauJet20U OR L1_SingleTauJet30U OR L1_SingleTauJet50U OR L1_SingleMuOpen OR L1_SingleMu0 OR L1_SingleMu3 OR L1_SingleMu5 OR L1_SingleMu7 OR L1_SingleMu10 OR L1_SingleMu14 OR L1_SingleMu20 OR L1_ZeroBias"<line_sep># select on HLT_HcalPhiSym trigger process.load("HLTrigger.HLTfilters.hltLevel1Activity_cfi")<line_sep>process.hltLevel1Activity.L1GtReadoutRecordTag=cms.InputTag('gtDigis')<line_sep>######################## Configure Analyzer ############################### process.load("RecoLocalCalo.EcalRecAlgos.EcalSeverityLevelESProducer_cfi")<line_sep>process.load("Calibration.IsolatedParticles.isolatedTracksNxN_cfi")<line_sep>process.isolatedTracksNxN.Verbosity=cms.untracked.int32(0)<line_sep>process.isolatedTracksNxN.HBHERecHitSource=cms.InputTag("hbhereco")<line_sep>process.isolatedTracksNxN.L1TriggerAlgoInfo=<true><line_sep>#process.isolatedTracksNxN.DebugL1Info = True process.isolatedTracksNxN_NZS=process.isolatedTracksNxN.clone(Verbosity=cms.untracked.int32(0) HBHERecHitSource=cms.InputTag("hbherecoMB") L1TriggerAlgoInfo=<true>)<line_sep>process.TFileService=cms.Service("TFileService" fileName=cms.string('IsolatedTracksNxNData.root'))<line_sep># configure Technical Bits to ensure collision and remove BeamHalo 
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')<line_sep>process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')<line_sep>process.hltLevel1GTSeed.L1TechTriggerSeeding=cms.bool(<true>)<line_sep>process.hltLevel1GTSeed.L1SeedsLogicalExpression=cms.string('0 AND NOT (36 OR 37 OR 38 OR 39)')<line_sep># filter out scrapping events process.noScraping=cms.EDFilter("FilterOutScraping" applyfilter=cms.untracked.bool(<true>) debugOn=cms.untracked.bool(<false>) ## Or 'True' to get some per-event info numtrack=cms.untracked.uint32(10) thresh=cms.untracked.double(0.25))<line_sep># select on primary vertex process.primaryVertexFilter=cms.EDFilter("GoodVertexFilter" vertexCollection=cms.InputTag('offlinePrimaryVertices') minimumNDOF=cms.uint32(4) maxAbsZ=cms.double(25.0) maxd0=cms.double(5.0))<line_sep>#============================================================================= # define an EndPath to analyze all other path results process.hltTrigReport=cms.EDAnalyzer('HLTrigReport' HLTriggerResults=cms.InputTag('TriggerResults' '' 'HLT'))<line_sep>process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi")<line_sep>process.l1GtTrigReport.L1GtRecordInputTag='gtDigis'<line_sep>process.l1GtTrigReport.PrintVerbosity=1<line_sep>#============================================================================= #### by Benedikt process.p1=cms.Path(process.primaryVertexFilter<times>process.hltLevel1GTSeed<times>process.noScraping<times>process.skimL1Seeds<times>process.isolatedTracksNxN<times>process.isolatedTracksNxN_NZS)<line_sep>process.e=cms.EndPath(process.l1GtTrigReport+process.hltTrigReport)<line_sep>
<import_from_stmt>build.plugins.lib.nots.package_manager.base PackageJson<import_from_stmt>build.plugins.lib.nots.package_manager.pnpm.workspace PnpmWorkspace<def_stmt>test_workspace_get_paths <block_start>ws=PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")<line_sep>ws.packages=set(["." "../bar" "../../another/baz"])<assert_stmt>sorted(ws.get_paths())<eq>["/another/baz" "/packages/bar" "/packages/foo" ]<block_end><def_stmt>test_workspace_set_from_package_json <block_start>ws=PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")<line_sep>pj=PackageJson(path="/packages/foo/package.json")<line_sep>pj.data={"dependencies":{"@a/bar":"workspace:../bar" } "devDependencies":{"@a/baz":"workspace:../../another/baz" } "peerDependencies":{"@a/qux":"workspace:../../another/qux" } "optionalDependencies":{"@a/quux":"workspace:../../another/quux" }}<line_sep>ws.set_from_package_json(pj)<assert_stmt>sorted(ws.get_paths())<eq>["/another/baz" "/another/quux" "/another/qux" "/packages/bar" "/packages/foo" ]<block_end><def_stmt>test_workspace_merge <block_start>ws1=PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")<line_sep>ws1.packages=set(["." "../bar" "../../another/baz"])<line_sep>ws2=PnpmWorkspace(path="/another/baz/pnpm-workspace.yaml")<line_sep>ws2.packages=set(["." "../qux"])<line_sep>ws1.merge(ws2)<assert_stmt>sorted(ws1.get_paths())<eq>["/another/baz" "/another/qux" "/packages/bar" "/packages/foo" ]<block_end>
<import_stmt>unittest<import_stmt>mock<import_from_stmt>jiracli.interface build_parser cli<class_stmt>AddCommandTests(unittest.TestCase)<block_start><def_stmt>test_issue_type_parsing self<block_start>"Previously, calling this would raise an exception on python3"<with_stmt>mock.patch("jiracli.interface.print_output")<block_start><with_stmt>mock.patch("jiracli.interface.prompt")<as>prompt<block_start><with_stmt>mock.patch("jiracli.interface.initialize")<as>init<block_start>init().get_issue_types.return_value={'story':1}<line_sep>cli("new title --type story --project FOO --description bar".split(" "))<block_end><block_end><block_end><block_end><block_end>
# Copyright (c) Facebook, Inc. and its affiliates. # TODO: Once internal torchvision transforms become stable either in torchvision # or in pytorchvideo, move to use those transforms. <import_stmt>random<import_stmt>mmf.datasets.processors.functional<as>F<import_stmt>torch<import_from_stmt>mmf.common.registry registry<import_from_stmt>mmf.datasets.processors BaseProcessor<line_sep>@registry.register_processor("video_random_crop")<class_stmt>VideoRandomCrop(BaseProcessor)<block_start><def_stmt>__init__ self *args size=<none> **kwargs<block_start>super().__init__()<if_stmt>size<is><none><block_start><raise>TypeError("Parameter 'size' is required")<block_end>self.size=size<block_end>@staticmethod<def_stmt>get_params vid output_size<block_start>"""Get parameters for ``crop`` for a random crop. """<line_sep>h,w=vid.shape[-2:]<line_sep>th,tw=output_size<if_stmt>w<eq>tw<and>h<eq>th<block_start><return>0 0 h w<block_end>i=random.randint(0 h-th)<line_sep>j=random.randint(0 w-tw)<line_sep><return>i j th tw<block_end><def_stmt>__call__ self vid<block_start>i,j,h,w=self.get_params(vid self.size)<line_sep><return>F.video_crop(vid i j h w)<block_end><block_end>@registry.register_processor("video_center_crop")<class_stmt>VideoCenterCrop(BaseProcessor)<block_start><def_stmt>__init__ self *args size=<none> **kwargs<block_start>super().__init__()<if_stmt>size<is><none><block_start><raise>TypeError("Parameter 'size' is required")<block_end>self.size=size<block_end><def_stmt>__call__ self vid<block_start><return>F.video_center_crop(vid self.size)<block_end><block_end>@registry.register_processor("video_resize")<class_stmt>VideoResize(BaseProcessor)<block_start><def_stmt>__init__ self *args size=<none> **kwargs<block_start><if_stmt>size<is><none><block_start><raise>TypeError("Parameter 'size' is required")<block_end>self.size=size<block_end><def_stmt>__call__ self vid<block_start><return>F.video_resize(vid self.size)<block_end><block_end>@registry.register_processor("video_to_tensor")<class_stmt>VideoToTensor(BaseProcessor)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__()<line_sep><pass><block_end><def_stmt>__call__ self vid<block_start><return>F.video_to_normalized_float_tensor(vid)<block_end><block_end>@registry.register_processor("video_normalize")<class_stmt>VideoNormalize(BaseProcessor)<block_start><def_stmt>__init__ self mean=<none> std=<none> **kwargs<block_start>super().__init__()<if_stmt>mean<is><none><and>std<is><none><block_start><raise>TypeError("'mean' and 'std' params are required")<block_end>self.mean=mean<line_sep>self.std=std<block_end><def_stmt>__call__ self vid<block_start><return>F.video_normalize(vid self.mean self.std)<block_end><block_end>@registry.register_processor("video_random_horizontal_flip")<class_stmt>VideoRandomHorizontalFlip(BaseProcessor)<block_start><def_stmt>__init__ self p=0.5 **kwargs<block_start>super().__init__()<line_sep>self.p=p<block_end><def_stmt>__call__ self vid<block_start><if_stmt>random.random()<l>self.p<block_start><return>F.video_hflip(vid)<block_end><return>vid<block_end><block_end>@registry.register_processor("video_pad")<class_stmt>Pad(BaseProcessor)<block_start><def_stmt>__init__ self padding=<none> fill=0 **kwargs<block_start>super().__init__()<if_stmt>padding<is><none><block_start><raise>TypeError("Parameter 'padding' is required")<block_end>self.padding=padding<line_sep>self.fill=fill<block_end><def_stmt>__call__ self vid<block_start><return>F.video_pad(vid self.padding 
self.fill)<block_end><block_end>@registry.register_processor("truncate_or_pad")<class_stmt>TruncateOrPad(BaseProcessor)# truncate or add 0 until the desired output size <block_start><def_stmt>__init__ self output_size=<none> **kwargs<block_start>super().__init__()<if_stmt>output_size<is><none><block_start><raise>TypeError("Parameter 'output_size' is required")<block_end><assert_stmt>isinstance(output_size (int tuple))<line_sep>self.output_size=output_size<block_end><def_stmt>__call__ self sample<block_start><if_stmt>sample.shape[1]<ge>self.output_size<block_start><return>sample[0 :self.output_size]<block_end><else_stmt><block_start><return>torch.cat((sample[0 :] torch.zeros(1 self.output_size-sample.shape[1])) axis=1 )<block_end><block_end><block_end>
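A hedged sketch of chaining two of the registered processors on a dummy clip. The import path is an assumption, and the `F.*` helpers are assumed to operate on the trailing (H, W) dimensions, as the cropping code above does.

# Hedged sketch; import path and tensor layout are assumptions.
import torch
from mmf.datasets.processors.video_processors import (   # path is an assumption
    VideoCenterCrop,
    VideoRandomHorizontalFlip,
)

vid = torch.rand(3, 16, 128, 171)             # dummy (C, T, H, W) clip
crop = VideoCenterCrop(size=(112, 112))
flip = VideoRandomHorizontalFlip(p=0.5)
out = flip(crop(vid))
print(out.shape)                              # expected: (3, 16, 112, 112)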
<import_from_stmt>kanmail.settings.hidden get_hidden_value<line_sep>OAUTH_PROVIDERS={'gmail':{'auth_endpoint':'https://accounts.google.com/o/oauth2/auth' 'token_endpoint':'https://accounts.google.com/o/oauth2/token' 'profile_endpoint':'https://www.googleapis.com/userinfo/v2/me' 'scope':'https://mail.google.com https://www.googleapis.com/auth/userinfo.email' 'client_id':get_hidden_value('GOOGLE_OAUTH_CLIENT_ID') 'client_secret':get_hidden_value('GOOGLE_OAUTH_CLIENT_SECRET') } }<line_sep>
# pylint: disable=C0111,R0903 """Display HTTP status code Parameters: * http__status.label: Prefix label (optional) * http__status.target: Target to retrieve the HTTP status from * http__status.expect: Expected HTTP status contributed by `valkheim <https://github.com/valkheim>`_ - many thanks! """<import_from_stmt>requests head<import_stmt>psutil<import_stmt>core.module<import_stmt>core.widget<import_stmt>core.decorators<class_stmt>Module(core.module.Module)<block_start>UNK="UNK"<line_sep>@core.decorators.every(seconds=30)<def_stmt>__init__ self config theme<block_start>super().__init__(config theme core.widget.Widget(self.output))<line_sep>self.__label=self.parameter("label")<line_sep>self.__target=self.parameter("target")<line_sep>self.__expect=self.parameter("expect" "200")<block_end><def_stmt>labelize self s<block_start><if_stmt>self.__label<is><none><block_start><return>s<block_end><return>"{}: {}".format(self.__label s)<block_end><def_stmt>getStatus self<block_start><try_stmt><block_start>res=head(self.__target)<block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep><return>self.UNK<block_end><else_stmt><block_start>status=str(res.status_code)<line_sep><return>status<block_end><block_end><def_stmt>getOutput self<block_start><if_stmt>self.__status<eq>self.__expect<block_start><return>self.labelize(self.__status)<block_end><else_stmt><block_start>reason=" != {}".format(self.__expect)<line_sep><return>self.labelize("{}{}".format(self.__status reason))<block_end><block_end><def_stmt>output self widget<block_start><return>self.__output<block_end><def_stmt>update self<block_start>self.__status=self.getStatus()<line_sep>self.__output=self.getOutput()<block_end><def_stmt>state self widget<block_start><if_stmt>self.__status<eq>self.UNK<block_start><return>"warning"<block_end><if_stmt>self.__status<ne>self.__expect<block_start><return>"critical"<block_end><return>self.__output<block_end><block_end># vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
########################################################################## # # Copyright (c) 2015, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of <NAME> nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## <import_stmt>itertools<import_stmt>Gaffer<import_stmt>GafferImage<line_sep>Gaffer.Metadata.registerNode(GafferImage.Resample "description" """ Utility node used internally within GafferImage, but not intended to be used directly by end users. """ plugs={"matrix":["description" """ The transform to be applied to the input image. This must contain only translation and scaling. """ ] "filter":["description" """ The filter used to perform the resampling. The name of any OIIO filter may be specified. The default automatically picks an appropriate high-quality filter based on whether or not the image is being enlarged or reduced. """ "plugValueWidget:type" "GafferUI.PresetsPlugValueWidget" "preset:Default" "" ]+list(itertools.chain(*[("preset:"+x.title() x)<for>x GafferImage.FilterAlgo.filterNames()])) "filterScale":["description" """ A multiplier for the scale of the filter used. Scaling up gives a softer result, scaling down gives a sharper result ( likely to alias or even create black patches where no pixels can be found ). Less than 1 is not recommended unless you have a special technical reason. """ ] "boundingMode":["description" """ The method used when a filter references pixels outside the input data window. """ "preset:Black" GafferImage.Sampler.BoundingMode.Black "preset:Clamp" GafferImage.Sampler.BoundingMode.Clamp "plugValueWidget:type" "GafferUI.PresetsPlugValueWidget" ] "expandDataWindow":["description" """ Expands the data window by the filter radius, to include the external pixels affected by the filter. """ ] "debug":["description" """ Enables debug output. The HorizontalPass setting outputs an intermediate image filtered just in the horizontal direction - this is an internal optimisation used when filtering with a separable filter. 
The SinglePass setting forces all filtering to be done in a single pass (as if the filter were non-separable) and can be used for validating the results of the two-pass (default) approach. """ "preset:Off" GafferImage.Resample.Debug.Off "preset:HorizontalPass" GafferImage.Resample.Debug.HorizontalPass "preset:SinglePass" GafferImage.Resample.Debug.SinglePass "plugValueWidget:type" "GafferUI.PresetsPlugValueWidget" ] })<line_sep>
<import_stmt>pytest<import_from_stmt>hypothesis assume given<import_from_stmt>pfun compose identity<import_from_stmt>pfun.aio_trampoline Done<import_from_stmt>pfun.hypothesis_strategies aio_trampolines anything unaries<import_from_stmt>.monad_test MonadTest<class_stmt>TestTrampoline(MonadTest)<block_start>@pytest.mark.asyncio@given(aio_trampolines(anything()))<async_keyword><def_stmt>test_right_identity_law self trampoline<block_start><assert_stmt>(<await>trampoline.and_then(Done).run())<eq>(<await>trampoline.run())<block_end>@pytest.mark.asyncio@given(anything() unaries(aio_trampolines(anything())))<async_keyword><def_stmt>test_left_identity_law self value f<block_start><assert_stmt>(<await>Done(value).and_then(f).run())<eq>(<await>f(value).run())<block_end>@pytest.mark.asyncio@given(aio_trampolines(anything()) unaries(aio_trampolines(anything())) unaries(aio_trampolines(anything())))<async_keyword><def_stmt>test_associativity_law self trampoline f g<block_start><assert_stmt>(<await>trampoline.and_then(f).and_then(g).run())<eq>(<await>trampoline.and_then(<lambda>x:f(x).and_then(g)).run())<block_end>@given(anything())<def_stmt>test_equality self value<block_start><assert_stmt>Done(value)<eq>Done(value)<block_end>@given(anything() anything())<def_stmt>test_inequality self first second<block_start>assume(first<ne>second)<assert_stmt>Done(first)<ne>Done(second)<block_end>@pytest.mark.asyncio@given(anything())<async_keyword><def_stmt>test_identity_law self value<block_start><assert_stmt>(<await>Done(value).map(identity).run())<eq>(<await>Done(value).run())<block_end>@pytest.mark.asyncio@given(unaries(anything()) unaries(anything()) anything())<async_keyword><def_stmt>test_composition_law self f g value<block_start>h=compose(f g)<assert_stmt>(<await>Done(value).map(g).map(f).run())<eq>(<await>Done(value).map(h).run())<block_end><block_end>
# SPDX-License-Identifier: Apache-2.0 """ Tests scikit-normalizer converter. """<import_stmt>unittest<import_stmt>numpy<import_from_stmt>sklearn.preprocessing Normalizer<import_from_stmt>skl2onnx convert_sklearn<import_from_stmt>skl2onnx.common.data_types Int64TensorType FloatTensorType DoubleTensorType <import_from_stmt>test_utils dump_data_and_model TARGET_OPSET<class_stmt>TestSklearnNormalizerConverter(unittest.TestCase)<block_start><def_stmt>test_model_normalizer self<block_start>model=Normalizer(norm="l2")<line_sep>model_onnx=convert_sklearn(model "scikit-learn normalizer" [("input" Int64TensorType([<none> 1]))] target_opset=TARGET_OPSET)<line_sep>self.assertTrue(model_onnx<is><not><none>)<line_sep>self.assertTrue(len(model_onnx.graph.node)<eq>1)<block_end><def_stmt>test_model_normalizer_blackop self<block_start>model=Normalizer(norm="l2")<line_sep>model_onnx=convert_sklearn(model "scikit-learn normalizer" [("input" FloatTensorType([<none> 3]))] target_opset=TARGET_OPSET black_op={"Normalizer"})<line_sep>self.assertNotIn('op_type: "Normalizer' str(model_onnx))<line_sep>dump_data_and_model(numpy.array([[1 -1 3] [3 1 2]] dtype=numpy.float32) model model_onnx basename="SklearnNormalizerL1BlackOp-SkipDim1")<block_end><def_stmt>test_model_normalizer_float_l1 self<block_start>model=Normalizer(norm="l1")<line_sep>model_onnx=convert_sklearn(model "scikit-learn normalizer" [("input" FloatTensorType([<none> 3]))] target_opset=TARGET_OPSET)<line_sep>self.assertTrue(model_onnx<is><not><none>)<line_sep>self.assertTrue(len(model_onnx.graph.node)<eq>1)<line_sep>dump_data_and_model(numpy.array([[1 -1 3] [3 1 2]] dtype=numpy.float32) model model_onnx basename="SklearnNormalizerL1-SkipDim1")<block_end><def_stmt>test_model_normalizer_float_l2 self<block_start>model=Normalizer(norm="l2")<line_sep>model_onnx=convert_sklearn(model "scikit-learn normalizer" [("input" FloatTensorType([<none> 3]))] target_opset=TARGET_OPSET)<line_sep>self.assertTrue(model_onnx<is><not><none>)<line_sep>self.assertTrue(len(model_onnx.graph.node)<eq>1)<line_sep>dump_data_and_model(numpy.array([[1 -1 3] [3 1 2]] dtype=numpy.float32) model model_onnx basename="SklearnNormalizerL2-SkipDim1")<block_end><def_stmt>test_model_normalizer_double_l1 self<block_start>model=Normalizer(norm="l1")<line_sep>model_onnx=convert_sklearn(model "scikit-learn normalizer" [("input" DoubleTensorType([<none> 3]))] target_opset=TARGET_OPSET)<line_sep>self.assertTrue(model_onnx<is><not><none>)<line_sep>dump_data_and_model(numpy.array([[1 -1 3] [3 1 2]] dtype=numpy.float64) model model_onnx basename="SklearnNormalizerL1Double-SkipDim1")<block_end><def_stmt>test_model_normalizer_double_l2 self<block_start>model=Normalizer(norm="l2")<line_sep>model_onnx=convert_sklearn(model "scikit-learn normalizer" [("input" DoubleTensorType([<none> 3]))] target_opset=TARGET_OPSET)<line_sep>self.assertTrue(model_onnx<is><not><none>)<line_sep>dump_data_and_model(numpy.array([[1 -1 3] [3 1 2]] dtype=numpy.float64) model model_onnx basename="SklearnNormalizerL2Double-SkipDim1")<block_end><def_stmt>test_model_normalizer_float_noshape self<block_start>model=Normalizer(norm="l2")<line_sep>model_onnx=convert_sklearn(model "scikit-learn normalizer" [("input" FloatTensorType([]))] target_opset=TARGET_OPSET)<line_sep>self.assertTrue(model_onnx<is><not><none>)<line_sep>self.assertTrue(len(model_onnx.graph.node)<eq>1)<line_sep>dump_data_and_model(numpy.array([[1 -1 3] [3 1 2]] dtype=numpy.float32) model model_onnx 
basename="SklearnNormalizerL2NoShape-SkipDim1")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<import_from_stmt>universe.wrappers.experimental.action_space SafeActionSpace SoftmaxClickMouse<import_from_stmt>universe.wrappers.experimental.observation CropObservations<import_from_stmt>universe.wrappers.experimental.random_env RandomEnv<line_sep>
<import_stmt>urllib.request<import_stmt>tensorflow<as>tf<import_stmt>itertools<line_sep>URL='http://download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001'<line_sep>DST_ORIG='fsns-00000-of-00001.orig'<line_sep>DST='fsns-00000-of-00001'<line_sep>KEEP_NUM_RECORDS=5<line_sep>print('Downloading %s ...'%URL)<line_sep>urllib.request.urlretrieve(URL DST_ORIG)<line_sep>print('Writing %d records from %s to %s ...'%(KEEP_NUM_RECORDS DST_ORIG DST))<with_stmt>tf.io.TFRecordWriter(DST)<as>writer<block_start><for_stmt>raw_record itertools.islice(tf.compat.v1.python_io.tf_record_iterator(DST_ORIG) KEEP_NUM_RECORDS)<block_start>writer.write(raw_record)<block_end><block_end>
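To confirm the truncation worked, the same record iterator used above can count the records in the new file; this snippet is meant to be appended to the script and reuses its `tf` import and `DST` constant.

# Optional check: count records in the truncated file.
num_records = sum(
    1 for _ in tf.compat.v1.python_io.tf_record_iterator(DST))
print('%s contains %d records' % (DST, num_records))   # expected: 5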
<import_from_stmt>pipeline_monitor prometheus_monitor<as>monitor<line_sep>_labels={'a_label_key':'a_label_value'}<line_sep>@monitor(labels=_labels name="test_monitor")<def_stmt>test_log_inputs_and_outputs arg1:int arg2:int<block_start><return>arg1+arg2<block_end>test_log_inputs_and_outputs(4 5)<line_sep>
<import_from_stmt>.present XPoweredByPresentChecker<line_sep>__all__=['XPoweredByPresentChecker']<line_sep>
<import_stmt>types<import_from_stmt>collections OrderedDict<import_stmt>apiclient<import_stmt>pandas<as>pd<import_from_stmt>datasheets exceptions helpers<class_stmt>Tab(object)<block_start><def_stmt>__init__ self tabname workbook drive_svc sheets_svc<block_start>"""Create a datasheets.Tab instance of an existing Google Sheets tab. This class in not intended to be directly instantiated; it is created by datasheets.Workbook.fetch_tab(). Args: tabname (str): The name of the tab workbook (datasheets.Workbook): The workbook instance that instantiated this tab drive_svc (googleapiclient.discovery.Resource): An instance of Google Drive sheets_svc (googleapiclient.discovery.Resource): An instance of Google Sheets """<line_sep>self.tabname=tabname<line_sep>self._workbook=workbook<line_sep>self.drive_svc=drive_svc<line_sep>self.sheets_svc=sheets_svc<line_sep># Get basic properties of the tab. We do this here partly # to force failures early if tab can't be found <try_stmt><block_start>self._update_tab_properties()<block_end><except_stmt>apiclient.errors.HttpError<as>e<block_start><if_stmt>'Unable to parse range'.encode()<in>e.content<block_start><raise>exceptions.TabNotFound('The given tab could not be found. Error generated: {}'.format(e))<block_end><else_stmt><block_start><raise><block_end><block_end>self.url='https://docs.google.com/spreadsheets/d/{}#gid={}'.format(self.workbook.file_id self.tab_id)<block_end><def_stmt>__getattribute__ self attr<block_start>"""Get an attribute (variable or method) of this instance of this class For client OAuth, before each user-facing method call this method will verify that the access token is not expired and refresh it if it is. We only refresh on user-facing method calls since otherwise we'd be refreshing multiple times per user action (once for the user call, possibly multiple times for the private method calls invoked by it). """<line_sep>requested_attr=super(Tab self).__getattribute__(attr)<if_stmt>isinstance(requested_attr types.MethodType)<and><not>attr.startswith('_')<block_start>self.workbook.client._refresh_token_if_needed()<block_end><return>requested_attr<block_end><def_stmt>__repr__ self<block_start>msg="<{module}.{name}(filename='{filename}', tabname='{tabname}')>"<line_sep><return>msg.format(module=self.__class__.__module__ name=self.__class__.__name__ filename=self.workbook.filename tabname=self.tabname)<block_end>@staticmethod<def_stmt>_process_rows raw_data<block_start>"""Prepare a tab's raw data so that a pandas.DataFrame can be produced from it Args: raw_data (dict): The raw data from a tab Returns: list: A list of lists representing the raw_data, with one list per row in the tab """<line_sep>raw_rows=raw_data['sheets'][0]['data'][0].get('rowData' {})<line_sep>rows=[]<for_stmt>row_num,row enumerate(raw_rows)<block_start>row_values=[]<for_stmt>col_num,cell enumerate(row.get('values' {}))# If the cell is empty, use None <block_start>value=cell.get('effectiveValue' {<none>:<none>})<line_sep># If a cell has an error in it (e.g. someone divides by zero, adds a number to # text, etc.), then we raise an exception. <if_stmt>'errorValue'<in>value.keys()<block_start>cell_label=helpers.convert_cell_index_to_label(row_num+1 col_num+1)<line_sep>error_type=value['errorValue'].get('type' 'unknown type')<line_sep>error_message=value['errorValue'].get('message' 'unknown error message')<line_sep>msg='Error of type "{}" within cell {} prevents fetching data. 
Message: "{}"'<line_sep><raise>exceptions.FetchDataError(msg.format(error_type cell_label error_message))<block_end># value is a dict with only 1 key so this next(iter()) is safe base_fmt,cell_value=next(iter(value.items()))<line_sep>num_fmt=cell.get('effectiveFormat' {}).get('numberFormat')<if_stmt>num_fmt<block_start>cell_format=num_fmt['type']<block_end><else_stmt><block_start>cell_format=base_fmt<block_end>formatting_fn=helpers._TYPE_CONVERSIONS[cell_format]<if_stmt>cell_value<block_start><try_stmt><block_start>cell_value=formatting_fn(cell_value)<block_end><except_stmt>ValueError<block_start><pass><block_end><except_stmt>TypeError<block_start><raise>TypeError("Mismatch exists in expected and actual data types for cell with "<concat>"value '{value}'. Cell format is '{cell_format}' but cell value type "<concat>"is '{value_type}'. To correct this, in Google Sheets set the "<concat>"appropriate cell format or set it to Automatic".format(value=cell_value cell_format=cell_format value_type=type(cell_value)))<block_end><block_end>row_values.append(cell_value)<block_end>rows.append(row_values)<block_end><return>rows<block_end>@property<def_stmt>ncols self<block_start>""" Property for the number (int) of columns in the tab """<line_sep><return>self.properties['gridProperties']['columnCount']<block_end>@property<def_stmt>nrows self<block_start>""" Property for the number (int) of rows in the tab """<line_sep><return>self.properties['gridProperties']['rowCount']<block_end>@property<def_stmt>tab_id self<block_start>""" Property that gives the ID for the tab """<line_sep><return>self.properties['sheetId']<block_end>@property<def_stmt>workbook self<block_start>""" Property for the workbook instance that this tab belongs to """<line_sep><return>self._workbook<block_end><def_stmt>_add_rows_or_columns self kind n<block_start>request_body={'appendDimension':{'sheetId':self.tab_id 'dimension':kind 'length':n}}<line_sep>body={'requests':[request_body]}<line_sep>self.workbook.batch_update(body)<line_sep>self._update_tab_properties()<block_end><def_stmt>_update_tab_properties self<block_start>raw_properties=self.sheets_svc.get(spreadsheetId=self.workbook.file_id ranges=self.tabname+'!A1' fields='sheets/properties').execute()<line_sep>self.properties=raw_properties['sheets'][0]['properties']<block_end><def_stmt>add_rows self n<block_start>"""Add n rows to the given tab Args: n (int): The number of rows to add Returns: None """<line_sep>self._add_rows_or_columns(kind='ROWS' n=n)<block_end><def_stmt>add_columns self n<block_start>"""Add n columns to the given tab Args: n (int): The number of columns to add Returns: None """<line_sep>self._add_rows_or_columns(kind='COLUMNS' n=n)<block_end><def_stmt>align_cells self horizontal='LEFT' vertical='MIDDLE'<block_start>"""Align all cells in the tab Args: horizontal (str): The horizontal alignment for cells. May be one of 'LEFT', 'CENTER', or 'RIGHT' vertical (str): The vertical alignment for cells. May be one of 'TOP', 'MIDDLE', 'BOTTOM' Returns: None """<line_sep>request_body={'repeatCell':{'range':{'sheetId':self.tab_id 'startRowIndex':0 'endRowIndex':self.nrows} 'cell':{'userEnteredFormat':{'horizontalAlignment':horizontal 'verticalAlignment':vertical }} 'fields':'userEnteredFormat(horizontalAlignment,verticalAlignment)'}}<line_sep>body={'requests':[request_body]}<line_sep>self.workbook.batch_update(body)<block_end><def_stmt>alter_dimensions self nrows=<none> ncols=<none><block_start>"""Alter the dimensions of the current tab. 
If either dimension is left to None, that dimension will not be altered. Note that it is possible to set nrows or ncols to smaller than the current tab dimensions, in which case that data will be eliminated. Args: nrows (int): The number of rows for the tab to have ncols (int): The number of columns for the tab to have Returns: None """<line_sep>request_body={'updateSheetProperties':{'properties':{'sheetId':self.tab_id 'gridProperties':{'columnCount':ncols<or>self.ncols 'rowCount':nrows<or>self.nrows}} 'fields':'gridProperties(columnCount, rowCount)'}}<line_sep>body={'requests':[request_body]}<line_sep>self.workbook.batch_update(body)<line_sep>self._update_tab_properties()<block_end><def_stmt>append_data self data index=<true> autoformat=<true><block_start>"""Append data to the existing data in this tab. If the new data exceeds the tab's current dimensions the tab will be resized to accommodate it. Data headers will not be included among the appended data as they are assumed to already be among the existing tab data. If the dimensions of `data` are larger than the tab's current dimensions, the tab will automatically be resized to fit it. Args: data (pandas.DataFrame or dict or list): The data to be uploaded, formatted as a pandas.DataFrame, a dict of lists, or a list of lists index (bool): If `data` is a pandas.DataFrame, whether to upload the index as well Returns: None """<line_sep># Convert everything to lists of lists, which Google Sheets requires headers,values=helpers._make_list_of_lists(data index)<line_sep>values=helpers._convert_nan_and_datelike_values(values)<line_sep>body={'values':values}<line_sep>self.sheets_svc.values().append(spreadsheetId=self.workbook.file_id range=self.tabname valueInputOption='USER_ENTERED' body=body).execute()<if_stmt>autoformat<block_start>self.autoformat(len(headers))<block_end>self._update_tab_properties()<block_end><def_stmt>autoformat self n_header_rows<block_start>"""Apply default stylings to the tab This will apply the following stylings to the tab: - Header rows will be formatted to a dark gray background and off-white text - Font for all cells will be set to size 10 Proxima Nova - Cells will be horizontally left-aligned and vertically middle-aligned - Columns will be resized to display their largest entry - Empty columns and rows will be trimmed from the tab Args: n_header_rows (int): The number of header rows (i.e. 
row of labels / metadata) Returns: None """<line_sep>self.format_headers(nrows=n_header_rows)<line_sep>self.format_font()<line_sep>self.align_cells()<line_sep>self.autosize_columns()<line_sep>populated_cells=self.sheets_svc.values().get(spreadsheetId=self.workbook.file_id range=self.tabname).execute()<line_sep>nrows=len(populated_cells['values'])<line_sep>ncols=max(map(len populated_cells['values']))<line_sep>self.alter_dimensions(nrows=nrows ncols=ncols)<line_sep>self._update_tab_properties()<block_end><def_stmt>autosize_columns self<block_start>"""Resize the widths of all columns in the tab to fit their data Returns: None """<line_sep>request_body={'autoResizeDimensions':{'dimensions':{'sheetId':self.tab_id 'dimension':'COLUMNS' 'startIndex':0 'endIndex':self.ncols}}}<line_sep>body={'requests':[request_body]}<line_sep>self.workbook.batch_update(body)<block_end><def_stmt>clear_data self<block_start>"""Clear all data from the tab while leaving formatting intact Returns: None """<line_sep>self.sheets_svc.values().clear(spreadsheetId=self.workbook.file_id range=self.tabname body={}).execute()<block_end><def_stmt>format_font self font='Proxima Nova' size=10<block_start>"""Set the font and size for all cells in the tab Args: font (str): The name of the font to use size (int): The size to set the font to Returns: None """<line_sep>request_body={'repeatCell':{'range':{'sheetId':self.tab_id} 'cell':{'userEnteredFormat':{'textFormat':{'fontSize':size 'fontFamily':font}}} 'fields':'userEnteredFormat(textFormat(fontSize,fontFamily))'}}<line_sep>body={'requests':[request_body]}<line_sep>self.workbook.batch_update(body)<block_end><def_stmt>format_headers self nrows<block_start>"""Format the first n rows of a tab. The following stylings will be applied to these rows: - Background will be set to dark gray with off-white text - Font will be set to size 10 Proxima Nova - Text will be horizontally left-aligned and vertically middle-aligned - Rows will be made "frozen" so that when the user scrolls these rows stay visible Args: nrows (int): The number of rows of headers in the tab Returns: None """<line_sep>body={'requests':[{'repeatCell':{'range':{'sheetId':self.tab_id 'startRowIndex':0 'endRowIndex':nrows} 'cell':{'userEnteredFormat':{'backgroundColor':{'red':0.26274511 'green':0.26274511 'blue':0.26274511} 'horizontalAlignment':'LEFT' 'textFormat':{'foregroundColor':{'red':0.95294118 'green':0.95294118 'blue':0.95294118} 'fontSize':10 'fontFamily':'Proxima Nova' 'bold':<false>}}} 'fields':'userEnteredFormat(backgroundColor,textFormat,horizontalAlignment)'}} {'updateSheetProperties':{'properties':{'sheetId':self.tab_id 'gridProperties':{'frozenRowCount':nrows}} 'fields':'gridProperties(frozenRowCount)'}}]}<line_sep>self.workbook.batch_update(body)<block_end><def_stmt>fetch_data self headers=<true> fmt='df'<block_start>"""Retrieve the data within this tab. Efforts are taken to ensure that returned rows are always the same length. If headers=True, this length will be equal to the length of the headers. If headers=False, this length will be equal to the longest row. In either case, shorter rows will be padded with Nones and longer rows will be truncated (i.e. if there are 3 headers then all rows will have 3 entries regardless of the amount of populated cells they have). Args: headers (bool): If True, the first row will be used as the column names for the pandas.DataFrame. Otherwise, a 0-indexed range will be used instead fmt (str): The format in which to return the data. 
Accepted values: 'df', 'dict', 'list' Returns: When fmt='df' --> pandas.DataFrame When fmt='dict' --> list of dicts, e.g.:: [{header1: row1cell1, header2: row1cell2}, {header1: row2cell1, header2: row2cell2}, ...] When fmt='list' --> tuple of header names, list of lists with row data, e.g.:: ([header1, header2, ...], [[row1cell1, row1cell2, ...], [row2cell1, row2cell2, ...], ...]) """<if_stmt>fmt<not><in>('df' 'dict' 'list')<block_start><raise>ValueError("Unexpected value '{}' for parameter `fmt`. "<concat>"Accepted values are 'df', 'dict', and 'list'".format(fmt))<block_end>fields='sheets/data/rowData/values(effectiveValue,effectiveFormat/numberFormat/type)'<line_sep>raw_data=self.sheets_svc.get(spreadsheetId=self.workbook.file_id ranges=self.tabname includeGridData=<true> fields=fields).execute()<line_sep>processed_rows=self._process_rows(raw_data)<line_sep># filter out empty rows max_idx=helpers._find_max_nonempty_row(processed_rows)<if_stmt>max_idx<is><none><block_start><if_stmt>fmt<eq>'df'<block_start><return>pd.DataFrame([])<block_end><elif_stmt>fmt<eq>'dict'<block_start><return>[]<block_end><else_stmt><block_start><return>([] [])<block_end><block_end>processed_rows=processed_rows[:max_idx+1]<line_sep># remove trailing Nones on rows processed_rows=list(map(helpers._remove_trailing_nones processed_rows))<if_stmt>headers<block_start>header_names=processed_rows.pop(0)<line_sep>max_width=len(header_names)<block_end><else_stmt># Iterate through rows to find widest one <block_start>max_width=max(map(len processed_rows))<line_sep>header_names=list(range(max_width))<block_end># resize the rows to match the number of column headers processed_rows=[helpers._resize_row(row max_width)<for>row processed_rows]<if_stmt>fmt<eq>'df'<block_start>df=pd.DataFrame(data=processed_rows columns=header_names)<line_sep><return>df<block_end><elif_stmt>fmt<eq>'dict'<block_start>make_row_dict=<lambda>row:OrderedDict(zip(header_names row))<line_sep><return>list(map(make_row_dict processed_rows))<block_end><else_stmt><block_start><return>header_names processed_rows<block_end><block_end><def_stmt>insert_data self data index=<true> autoformat=<true><block_start>"""Overwrite all data in this tab with the provided data. All existing data in the tab will be removed, even if it might not have been overwritten (for example, if there is 4x2 data already in the tab and only 2x2 data is being inserted). If the dimensions of `data` are larger than the tab's current dimensions, the tab will automatically be resized to fit it. Args: data (pandas.DataFrame or dict or list): The data to be uploaded, formatted as a pandas.DataFrame, a dict of lists, or a list of lists index (bool): If `data` is a pandas.DataFrame, whether to upload the index as well Returns: None """<line_sep># Convert everything to lists of lists, which Google Sheets requires headers,values=helpers._make_list_of_lists(data index)<line_sep>values=headers+values# Include headers for inserts but not for appends self.clear_data()<line_sep>values=helpers._convert_nan_and_datelike_values(values)<line_sep>body={'values':values}<line_sep>self.sheets_svc.values().update(spreadsheetId=self.workbook.file_id range=self.tabname valueInputOption='USER_ENTERED' body=body).execute()<if_stmt>autoformat<block_start>self.autoformat(len(headers))<block_end>self._update_tab_properties()<block_end><block_end>
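A hedged round-trip sketch for the Tab API above. `fetch_tab`, `insert_data`, and `fetch_data` are confirmed by the code shown; `datasheets.Client` and `fetch_workbook` are assumptions about the rest of the library, and the workbook/tab names are placeholders.

# Hedged sketch; Client/fetch_workbook are assumptions, names are placeholders.
import pandas as pd
import datasheets

client = datasheets.Client()                     # assumption
workbook = client.fetch_workbook('my-report')    # assumption
tab = workbook.fetch_tab('Sheet1')

df = pd.DataFrame({'city': ['Oslo', 'Lima'], 'population': [700000, 9700000]})
tab.insert_data(df, index=False)                 # clears, writes, autoformats
print(tab.fetch_data(headers=True, fmt='df'))    # read it back as a DataFrame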
# # File: # streamline1.py # # Synopsis: # Draws streamlines on a map over water only. # # Category: # Streamlines on a map. # # Author: # <NAME> # # Date of original publication: # December, 2004 # # Description: # This example draws streamlines over water on a map using a # Cylindrical Equidistant map projection. The "add_cyclic" # function is illustrated graphically. # # Effects illustrated: # o Streamlines over maps. # o Adding cyclic points. # o Specifying colors by name. # o Polylines. # o Masking land areas. # # Output: # This example produces two visualizations: # 1.) Streamlines on a Cylindrical Equidistant map over water # only highlighting missing cyclic points. # 2.) Same as 1.) with the cyclic points added. # # Notes: # # # Import Nio for reading netCDF files. # <import_from_future_stmt> print_function<import_stmt>Nio<line_sep># # Import Ngl support functions. # <import_stmt>Ngl<import_stmt>os<line_sep># # Open the netCDF file. # file=Nio.open_file(os.path.join(Ngl.pynglpath("data") "cdf" "pop.nc"))<line_sep># # Open a workstation. # wks_type="png"<line_sep>wks=Ngl.open_wks(wks_type "streamline1")<line_sep># # Get the u/v and lat/lon variables. # urot=file.variables["urot"]<line_sep>vrot=file.variables["vrot"]<line_sep>lat2d=file.variables["lat2d"]<line_sep>lon2d=file.variables["lon2d"]<line_sep># # Set up resource list. # resources=Ngl.Resources()<line_sep># # Don't advance frame, because we want to draw a couple of lines on # plot later. # resources.nglFrame=<false><line_sep># # Coordinate arrays for data # resources.vfXArray=lon2d[::4 ::4]<line_sep>resources.vfYArray=lat2d[::4 ::4]<line_sep>resources.mpProjection="CylindricalEquidistant"<line_sep>resources.mpFillOn=<true><line_sep>resources.mpLandFillColor="Tan1"<line_sep>resources.mpOceanFillColor="SkyBlue"<line_sep>resources.mpInlandWaterFillColor="SkyBlue"<line_sep>resources.mpGridAndLimbOn=<false><line_sep>resources.tiMainString="Streamline plot without cyclic point added"<line_sep>plot=Ngl.streamline_map(wks urot[::4 ::4] vrot[::4 ::4] resources)<line_sep># # Add a couple of lines showing the area where there's a gap in the # data because of lack of a cyclic point. (It should be obvious even # without the lines.) # line_res=Ngl.Resources()# line resources line_res.gsLineColor="Red"# line color line_res.gsLineThicknessF=1.5# line thickness scale line_res.gsLineDashPattern=2# dashed lines Ngl.polyline(wks plot lon2d[::4 0] lat2d[::4 0] line_res)<line_sep>Ngl.polyline(wks plot lon2d[::4 -1] lat2d[::4 -1] line_res)<line_sep># # Add a text string explaining the lines. # text_res=Ngl.Resources()# text resources text_res.txFontHeightF=0.03# font height text_res.txFontColor="Red"<line_sep>Ngl.text_ndc(wks "dashed red line shows area with no data" 0.5 0.17 text_res)<line_sep>Ngl.frame(wks)# Now advance frame. # # Add cyclic points. Since lat2d/lon2d are 2D arrays, make them # cyclic the same way you do the 2D data array. # u=Ngl.add_cyclic(urot[::4 ::4])<line_sep>v=Ngl.add_cyclic(vrot[::4 ::4])<line_sep>lon=Ngl.add_cyclic(lon2d[::4 ::4])<line_sep>lat=Ngl.add_cyclic(lat2d[::4 ::4])<line_sep># # Specify new coordinate arrays for data. # resources.vfXArray=lon<line_sep>resources.vfYArray=lat<line_sep>resources.tiMainString="Streamline plot with cyclic point added"<line_sep>plot=Ngl.streamline_map(wks u v resources)<line_sep># # Add a couple of lines showing the area where the missing data were. # Make the lines solid so we can see them. 
# line_res.gsLineDashPattern=0<line_sep>Ngl.polyline(wks plot lon2d[::4 0] lat2d[::4 0] line_res)<line_sep>Ngl.polyline(wks plot lon2d[::4 -1] lat2d[::4 -1] line_res)<line_sep># # Add a text string explaining the lines. # Ngl.text_ndc(wks "red line shows area that previously had no data" 0.5 0.17 text_res)<line_sep>Ngl.frame(wks)<line_sep>Ngl.end()<line_sep>
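For 2-D lon/lat coordinate arrays like the ones above, adding a cyclic point amounts to appending a copy of the first column after the last one so the streamlines close the seam at the dateline. A minimal NumPy sketch of that idea (an illustration of what `Ngl.add_cyclic` is used for here, not PyNGL's implementation):

import numpy as np

def add_cyclic_2d(a):
    """Append the first column of a 2-D array after its last column."""
    return np.concatenate([a, a[:, :1]], axis=1)

lon = np.array([[0.0, 120.0, 240.0],
                [0.0, 120.0, 240.0]])
print(add_cyclic_2d(lon))
# [[  0. 120. 240.   0.]
#  [  0. 120. 240.   0.]]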
<import_from_stmt>swampdragon.serializers.model_serializer ModelSerializer<import_from_stmt>swampdragon.testing.dragon_testcase DragonTestCase<import_from_stmt>.models TextModel SDModel<import_from_stmt>datetime datetime<import_from_stmt>django.db models<line_sep># to make sure none of the ModelSerializer variables are clobbering the data MODEL_KEYWORDS=('data' )<line_sep># TODO: support the rest of these field names # MODEL_KEYWORDS = ('data', 'opts', 'initial', 'base_fields', 'm2m_fields', 'related_fields', 'errors') <class_stmt>KeywordModel(SDModel)<block_start>data=models.TextField()<line_sep># TODO: support the rest of these field names # opts = models.TextField() # initial = models.TextField() # base_fields = models.TextField() # m2m_fields = models.TextField() # related_fields = models.TextField() # errors = models.TextField() <block_end><class_stmt>KeywordModelSerializer(ModelSerializer)<block_start><class_stmt>Meta<block_start>model=KeywordModel<line_sep>publish_fields=MODEL_KEYWORDS<line_sep>update_fields=MODEL_KEYWORDS<block_end><block_end><class_stmt>DateModel(SDModel)<block_start>date=models.DateTimeField()<block_end><class_stmt>DateModelSerializer(ModelSerializer)<block_start><class_stmt>Meta<block_start>model=DateModel<line_sep>publish_fields=('date')<line_sep>update_fields=('date')<block_end><block_end><class_stmt>TextModelSerializer(ModelSerializer)<block_start><class_stmt>Meta<block_start>model=TextModel<line_sep>publish_fields=('text')<line_sep>update_fields=('text')<block_end><block_end><class_stmt>TestModelSerializer(DragonTestCase)<block_start><def_stmt>test_deserialize_model self<block_start>data={'text':'foo'}<line_sep>serializer=TextModelSerializer(data)<line_sep>model_instance=serializer.save()<line_sep>self.assertEqual(model_instance.text data['text'])<block_end><def_stmt>test_passing_invalid_data self<block_start>foo='text'<with_stmt>self.assertRaises(Exception)<block_start>TextModelSerializer(foo)<block_end><block_end><def_stmt>test_ignore_non_model_fields self<block_start>data={'text':'foo' 'random_field':'val'}<line_sep>serializer=TextModelSerializer(data)<line_sep>model_instance=serializer.deserialize()<line_sep>self.assertEqual(model_instance.text data['text'])<block_end><def_stmt>test_deserialize_field self<block_start>date=datetime.now()<line_sep>data={'date':str(date)}<line_sep>serializer=DateModelSerializer(data)<line_sep>object=serializer.save()<line_sep>self.assertEqual(object.date date)<block_end><def_stmt>test_deserialize_keyword_field self<block_start>data=dict(zip(MODEL_KEYWORDS MODEL_KEYWORDS))<line_sep>serializer=KeywordModelSerializer(data)<line_sep>object=serializer.save()<for_stmt>attr MODEL_KEYWORDS<block_start>self.assertEqual(getattr(object attr) attr)<block_end><block_end><block_end>
<import_from_stmt>..registry_tools iso_register<import_from_stmt>.core UnitedStates<line_sep>@iso_register('US-PA')<class_stmt>Pennsylvania(UnitedStates)<block_start>"""Pennsylvania"""<line_sep>include_good_friday=<true><line_sep>include_thanksgiving_friday=<true><line_sep>include_election_day_every_year=<true><block_end>
# Copyright (c) 2016-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## ## @package sampling_train # Module caffe2.python.layers.sampling_train <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_stmt>caffe2.python schema<import_from_stmt>caffe2.python.layers.layers ModelLayer get_layer_class<import_from_stmt>caffe2.python.layers.sampling_trainable_mixin SamplingTrainableMixin<class_stmt>SamplingTrain(ModelLayer)<block_start><def_stmt>__init__ self model input_record prediction_layer output_dims subtract_log_odd=<true> name='sampling_train' **kwargs<block_start>super(SamplingTrain self).__init__(model name input_record **kwargs)<line_sep>layer_class=get_layer_class(prediction_layer)<assert_stmt>issubclass(layer_class SamplingTrainableMixin)<assert_stmt>'indices'<in>input_record<assert_stmt>isinstance(input_record.indices schema.Scalar) "input_record.indices is expected to be a schema.Scalar"<assert_stmt>'input'<in>input_record<line_sep>self.subtract_log_odd=subtract_log_odd<if_stmt>self.subtract_log_odd<block_start><assert_stmt>'sampling_prob'<in>input_record<block_end>self._prediction_layer=layer_class(model input_record.input output_dims=output_dims **kwargs)<line_sep>self._prediction_layer.train_param_blobs=[model.net.NextBlob(str(blob)+'_sampled')<for>blob self._prediction_layer.param_blobs]<line_sep>self.params=self._prediction_layer.params<line_sep>self.output_schema=self._prediction_layer.output_schema<block_end><def_stmt>add_ops self net<block_start>self._prediction_layer.add_ops(net)<block_end><def_stmt>add_train_ops self net<block_start><for_stmt>full_blob,sampled_blob zip(self._prediction_layer.param_blobs self._prediction_layer.train_param_blobs)<block_start>net.Gather([full_blob self.input_record.indices()] sampled_blob)<block_end>self._prediction_layer.add_train_ops(net)<if_stmt><not>self.subtract_log_odd<block_start><return><block_end>log_q=net.Log(self.input_record.sampling_prob() net.NextScopedBlob("log_q"))<line_sep>net.Sub([self.output_schema() log_q] self.output_schema() broadcast=1 use_grad_hack=1)<block_end><block_end>
""" Mr Greenguest puzzle (a.k.a fancy dress problem) in cpmpy. Problem (and LPL) code in http://diuflx71.unifr.ch/lpl/GetModel?name=/demo/demo2 ''' Mr. Greenfan wants to give a dress party where the male guests must wear green dresses. The following rules are given: 1 If someone wears a green tie he has to wear a green shirt. 2 A guest may only wear green socks and a green shirt if he wears a green tie or a green hat. 3 A guest wearing a green shirt or a green hat or who does not wear green socks must wear a green tie. 4 A guest who is not dressed according to rules 1-3 must pay a $11 entrance fee. Mr Greenguest wants to participate but owns only a green shirt (otherwise he would have to pay one for $9). He could buy a green tie for $10, a green hat (used) for $2 and green socks for $12. What is the cheapest solution for Mr Greenguest to participate? ''' Model created by <NAME>, <EMAIL> See also my cpmpy page: http://www.hakank.org/cpmpy/ """<import_stmt>sys<import_stmt>numpy<as>np<import_from_stmt>cpmpy *<import_from_stmt>cpmpy.solvers *<import_from_stmt>cpmpy_hakank *<def_stmt>fancy # variables # t: tie # h: hat # r: shirt # s: socks # n: entrance fee <block_start>t=boolvar(name="t")<line_sep>h=boolvar(name="h")<line_sep>r=boolvar(name="r")<line_sep>s=boolvar(name="s")<line_sep>n=boolvar(name="n")<line_sep>cost=intvar(0 100 name="cost")<line_sep>model=Model(minimize=cost)<line_sep># constraints # This is a straight translation from the LPL code # ( (t->r) \/ n) model<augadd>[t.implies(r)|n]<line_sep># ( ((s \/ r) -> (t \/ h)) \/ n ) model<augadd>[((s|r).implies(t|h))|n]<line_sep># ( ((r \/ h \/ not s) -> t) \/ n ) model<augadd>[(r|h|~(s)).implies(t|n)]<line_sep>model<augadd>[cost<eq>10<times>t+2<times>h+12<times>s+11<times>n]<line_sep>ss=CPM_ortools(model)<line_sep>num_solutions=0<if_stmt>ss.solve()<block_start>num_solutions<augadd>1<line_sep>print("cost:" cost.value())<line_sep>print("t:" t.value() "h:" h.value() "r:" r.value() "s:" s.value() "n:" n.value())<block_end>print("num_solutions:" num_solutions)<block_end>fancy()<line_sep>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>json<import_stmt>matplotlib.pyplot<as>plt<import_stmt>os<import_stmt>plot_tools<import_stmt>settings<import_from_stmt>pythonapi anno_tools<def_stmt>plt_print_text *args<block_start>print('plot_tools.print_text' *args[:-1])<with_stmt>plt.style.context({'pdf.fonttype':42 })<block_start>plot_tools.print_text(*args)<block_end><block_end>plt_print_text.concurrent=<false><line_sep>print_text=plt_print_text<def_stmt>main <block_start><with_stmt>open(settings.DATA_LIST)<as>f<block_start>data_list=json.load(f)<block_end>lines=[]<with_stmt>open(settings.TRAIN)<as>f<block_start>lines<augadd>f.read().splitlines()<block_end><with_stmt>open(settings.VAL)<as>f<block_start>lines<augadd>f.read().splitlines()<block_end><with_stmt>open(settings.TEST_DETECTION_GT)<as>f<block_start>lines<augadd>f.read().splitlines()<block_end><def_stmt>gt2array gt<block_start>color='#0f0'<line_sep>a=list()<for_stmt>char anno_tools.each_char(gt)<block_start><if_stmt>char['is_chinese']<block_start>a.append({'polygon':char['polygon'] 'text':char['text'] 'color':color 'fontsize':10})<block_end><block_end><for_stmt>char gt['ignore']<block_start>a.append({'polygon':char['polygon'] 'text':'' 'color':'#ff0' 'fontsize':10})<block_end><return>a<block_end>selected=[('0000507' 0 0 2048 2048) ('1023899' 0 0 2048 2048) ('1031755' 0 0 2048 2048) ('1044721' 0 0 2048 2048) ('1046905' 0 0 2048 2048) ('2000215' 0 0 2048 2048) ('2004154' 0 0 2048 2048) ('2005679' 0 0 2048 2048) ('2024003' 0 0 2048 2048) ('3005669' 0 0 2048 2048) ('3029319' 0 0 2048 2048) ('3040629' 0 0 2048 2048) ('3001838' 0 650 700 550) ('1041797' 530 740 700 550) ]<if_stmt><not>os.path.isdir(settings.PRINTTEXT_DRAWING_DIR)<block_start>os.makedirs(settings.PRINTTEXT_DRAWING_DIR)<block_end>tasks=[]<for_stmt>image_id,x,y,w,h selected<block_start>i=[o['image_id']<for>o data_list['train']+data_list['val']+data_list['test_det']].index(image_id)<line_sep>gt=json.loads(lines[i])<line_sep>crop=(x y w h)<line_sep>file_name=os.path.join(settings.TRAINVAL_IMAGE_DIR<if>i<l>len(data_list['train']+data_list['val'])<else>settings.TEST_IMAGE_DIR gt['file_name'])<line_sep>output=os.path.join(settings.PRINTTEXT_DRAWING_DIR 'gt_{}_{}_{}_{}_{}.pdf'.format(image_id x y w h))<line_sep>print_text(file_name output {'boxes':gt2array(gt) 'crop':crop })<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>unittest<import_stmt>torch.utils.data<import_from_stmt>nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils get_test_nuplan_scenario<import_from_stmt>nuplan.planning.simulation.trajectory.trajectory_sampling TrajectorySampling<import_from_stmt>nuplan.planning.training.data_loader.scenario_dataset ScenarioDataset<import_from_stmt>nuplan.planning.training.preprocessing.feature_builders.raster_feature_builder RasterFeatureBuilder<import_from_stmt>nuplan.planning.training.preprocessing.feature_builders.vector_map_feature_builder VectorMapFeatureBuilder<import_from_stmt>nuplan.planning.training.preprocessing.feature_collate FeatureCollate<import_from_stmt>nuplan.planning.training.preprocessing.feature_preprocessor FeaturePreprocessor<import_from_stmt>nuplan.planning.training.preprocessing.features.vector_map VectorMap<import_from_stmt>nuplan.planning.training.preprocessing.target_builders.ego_trajectory_target_builder EgoTrajectoryTargetBuilder <line_sep>NUM_BATCHES=20<class_stmt>TestCollateDataLoader(unittest.TestCase)<block_start>""" Tests data loading functionality """<def_stmt>setUp self<arrow><none><block_start>"""Set up the test case."""<line_sep>self.batch_size=4<line_sep>feature_preprocessor=FeaturePreprocessor(cache_path=<none> feature_builders=[RasterFeatureBuilder(map_features={'LANE':1.0 'INTERSECTION':1.0 'STOP_LINE':0.5 'CROSSWALK':0.5} num_input_channels=4 target_width=224 target_height=224 target_pixel_size=0.5 ego_width=2.297 ego_front_length=4.049 ego_rear_length=1.127 ego_longitudinal_offset=0.0 baseline_path_thickness=1 ) VectorMapFeatureBuilder(radius=20) ] target_builders=[EgoTrajectoryTargetBuilder(TrajectorySampling(time_horizon=6.0 num_poses=12))] force_feature_computation=<false> )<line_sep># Keep only a few scenarios instead of testing the whole extraction scenario=get_test_nuplan_scenario()<line_sep>scenarios=[scenario]<times>3<line_sep>dataset=ScenarioDataset(scenarios=scenarios feature_preprocessor=feature_preprocessor)<line_sep>self.dataloader=torch.utils.data.DataLoader(dataset=dataset batch_size=self.batch_size num_workers=2 pin_memory=<false> drop_last=<true> collate_fn=FeatureCollate() )<block_end><def_stmt>test_dataloader self<arrow><none><block_start>""" Tests that the training dataloader can be iterated without errors """<line_sep>dataloader=self.dataloader<line_sep>dataloader_iter=iter(dataloader)<line_sep>iterations=min(len(dataloader) NUM_BATCHES)<for_stmt>_ range(iterations)<block_start>features,targets=next(dataloader_iter)<line_sep>self.assertTrue("vector_map"<in>features.keys())<line_sep>vector_map:VectorMap=features["vector_map"]<line_sep>self.assertEqual(vector_map.num_of_batches self.batch_size)<line_sep>self.assertEqual(len(vector_map.coords) self.batch_size)<line_sep>self.assertEqual(len(vector_map.multi_scale_connections) self.batch_size)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>re<line_sep>ASCII_IS_DEFAULT_ENCODING=<false><line_sep>cookie_re=re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")<line_sep>BOM_UTF8='\xef\xbb\xbf'<def_stmt>_prepare_source fn<block_start>"""Read the source code for re-writing."""<try_stmt><block_start>stat=fn.stat()<line_sep>source=fn.read("rb")<block_end><except_stmt>EnvironmentError<block_start><return><none> <none><block_end><if_stmt>ASCII_IS_DEFAULT_ENCODING# ASCII is the default encoding in Python 2. Without a coding # declaration, Python 2 will complain about any bytes in the file # outside the ASCII range. Sadly, this behavior does not extend to # compile() or ast.parse(), which prefer to interpret the bytes as # latin-1. (At least they properly handle explicit coding cookies.) To # preserve this error behavior, we could force ast.parse() to use ASCII # as the encoding by inserting a coding cookie. Unfortunately, that # messes up line numbers. Thus, we have to check ourselves if anything # is outside the ASCII range in the case no encoding is explicitly # declared. For more context, see issue #269. Yay for Python 3 which # gets this right. <block_start>end1=source.find("\n")<line_sep>end2=source.find("\n" end1+1)<if_stmt>(<not>source.startswith(BOM_UTF8)<and>cookie_re.match(source[0:end1])<is><none><and>cookie_re.match(source[end1+1:end2])<is><none>)<block_start><try_stmt><block_start>source.decode("ascii")<block_end><except_stmt>UnicodeDecodeError# Let it fail in real import. <block_start><return><none> <none><block_end><block_end><block_end># On Python versions which are not 2.7 and less than or equal to 3.1, the # parser expects *nix newlines. <return>stat source<block_end>
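The encoding check above hinges on `cookie_re` matching a PEP 263 coding declaration on one of the first two lines. A minimal sketch of what that regex accepts (plain strings here for brevity; the function above works on the raw bytes it read from disk):

import re

cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")

print(bool(cookie_re.match("# -*- coding: utf-8 -*-")))      # True
print(bool(cookie_re.match("#coding=latin-1")))              # True
print(bool(cookie_re.match("import os  # no declaration")))  # False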
<import_stmt>unittest<import_from_stmt>main *<class_stmt>FunctionDocumentationStringsTests(unittest.TestCase)<block_start><def_stmt>test_main self<block_start>self.assertIsNone(docstring_function())<line_sep>self.assertIsNotNone(docstring_function.__doc__)<line_sep>self.assertIsInstance(docstring_function.__doc__ str)<block_end><block_end>
<import_stmt>asyncio<import_from_stmt>aiohttp web<async_keyword><def_stmt>handle request<block_start>index=open("index.html" 'rb')<line_sep>content=index.read()<line_sep><return>web.Response(body=content content_type='text/html')<block_end><async_keyword><def_stmt>wshandler request<block_start>app=request.app<line_sep>ws=web.WebSocketResponse()<line_sep><await>ws.prepare(request)<if_stmt>app["game_loop"]<is><none><or>app["game_loop"].cancelled()<block_start>app["game_loop"]=asyncio.ensure_future(game_loop(app))<line_sep># this is required to propagate exceptions app["game_loop"].add_done_callback(<lambda>t:t.result()<if><not>t.cancelled()<else><none>)<block_end>app["sockets"].append(ws)<while_stmt>1<block_start>msg=<await>ws.receive()<if_stmt>msg.tp<eq>web.MsgType.text<block_start>ws.send_str("Pressed key code: {}".format(msg.data))<line_sep>print("Got message %s"%msg.data)<block_end><elif_stmt>msg.tp<eq>web.MsgType.close<or>msg.tp<eq>web.MsgType.error<block_start><break><block_end><block_end>app["sockets"].remove(ws)<if_stmt>len(app["sockets"])<eq>0<block_start>print("Stopping game loop")<line_sep>app["game_loop"].cancel()<block_end>print("Closed connection")<line_sep><return>ws<block_end><async_keyword><def_stmt>game_loop app<block_start>print("Game loop started")<while_stmt>1<block_start><for_stmt>ws app["sockets"]<block_start>ws.send_str("game loop passed")<block_end><await>asyncio.sleep(2)<block_end><block_end>app=web.Application()<line_sep>app["sockets"]=[]<line_sep>app["game_loop"]=<none><line_sep>app.router.add_route('GET' '/connect' wshandler)<line_sep>app.router.add_route('GET' '/' handle)<line_sep>web.run_app(app)<line_sep>
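A minimal client sketch for exercising the `/connect` endpoint above (it assumes the server is running on `web.run_app`'s default port 8080; the key-code payload is purely illustrative):

import asyncio
import aiohttp

async def client():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect("http://localhost:8080/connect") as ws:
            await ws.send_str("38")        # pretend the Up arrow was pressed
            msg = await ws.receive()       # e.g. "Pressed key code: 38"
            print(msg.data)
            msg = await ws.receive()       # periodic "game loop passed"
            print(msg.data)

asyncio.get_event_loop().run_until_complete(client())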
""" Game fix for Eterium """<line_sep>#pylint: disable=C0103 <import_from_stmt>protonfixes util<def_stmt>main <block_start>""" Install xna40 """<line_sep>util.protontricks('xna40')<block_end>
<import_stmt>JeevesLib<line_sep># import fast.AST # from collections import defaultdict <class_stmt>WritePolicyEnv<block_start><def_stmt>__init__ self<block_start>self.writers={}<block_end><def_stmt>mapPrimaryContext self ivar ctxt<block_start>self.writers[ivar]=ctxt<block_end># This function associates a new set of write policies with a label. <def_stmt>addWritePolicy self label policy newWriter# If the label is associated with a writer, then associate it with the # new write policies. <block_start><if_stmt>self.writers.has_key(label)<block_start>ictxt=self.writers[label]<line_sep># Make a new label mapped to the same writer. newLabel=JeevesLib.mkLabel(label.name)<line_sep>self.mapPrimaryContext(newLabel ictxt)<line_sep># Associate the new policies with this new label. JeevesLib.restrict(newLabel <lambda>oc:JeevesLib.jand(<lambda>:label <lambda>:JeevesLib.jand(<lambda>:policy(ictxt)(oc) <lambda>:policy(newWriter)(oc))))<line_sep><return>newLabel<block_end># Otherwise return the label as is. <else_stmt><block_start><return>label<block_end><block_end><block_end>
""" Usage playmany.py Uses media_player to play a sequence of samples and record debug info A configuration must be active, see command configure.py If the active configuration has disallowed dbg overwrites it will do nothing. If a playlist was provided at session creation, then only the samples in the playlist will be played, otherwise all files in samples_dir. """<import_stmt>os<import_stmt>subprocess<import_stmt>sys<import_stmt>fs<import_stmt>mpexceptions<def_stmt>main <block_start><try_stmt><block_start>pathserv=fs.get_path_info_for_active_session()<block_end><except_stmt>mpexceptions.ExceptionUndefinedSamplesDir<block_start>print("The env var 'pyglet_mp_samples_dir' is not defined.")<line_sep><return>1<block_end><except_stmt>mpexceptions.ExceptionNoSessionIsActive<block_start>print("*** Error, no session active.")<line_sep><return>1<block_end><try_stmt><block_start>play_many(pathserv timeout=120)<block_end><except_stmt>mpexceptions.ExceptionAttemptToBreakRawDataProtection<block_start>print("*** Error, attempt to overwrite raw data when protect_raw_data is True.")<line_sep><return>1<block_end><return>0<block_end><def_stmt>play_many pathserv timeout=120<block_start>"""plays the samples in the session playlist for the current active session timeout: max time allowed to play a sample, default is 120 seconds """<line_sep>conf=fs.get_session_configuration(pathserv)<if_stmt>conf["dev_debug"]<block_start><pass><block_end><else_stmt><block_start><if_stmt>conf["protect_raw_data"]<block_start><raise>mpexceptions.ExceptionAttemptToBreakRawDataProtection()<block_end><block_end>playlist_gen=pathserv.session_playlist_generator()<line_sep>core_play_many(pathserv playlist_gen timeout=timeout)<block_end><def_stmt>core_play_many pathserv playlist_gen timeout=120<block_start><for_stmt>sample,filename playlist_gen<block_start>dbg_file=pathserv.dbg_filename(sample)<line_sep>print("playmany playing:" filename)<line_sep>cmdline=[os.path.join(fs.get_media_player_path() "media_player.py") "--debug" "--outfile="+dbg_file filename]<line_sep>killed,returncode=cmd__py3(cmdline timeout=timeout)<if_stmt>killed<block_start>print("WARNING: killed by timeout, file: %s"%filename)<block_end><block_end><block_end><def_stmt>cmd__py3 cmdline bufsize=-1 cwd=<none> timeout=60<block_start>"""runs a .py script as a subprocess with the same python as the caller cmdline: list [<scriptname>, arg1, ...] timeout: time in seconds; subprocess wil be killed if it is still running at that time. """<line_sep># use the same python as the caller to run the script cmdline.insert(0 "-u")<line_sep>cmdline.insert(0 sys.executable)<line_sep>p=subprocess.Popen(cmdline bufsize=bufsize shell=<false> stdout=subprocess.PIPE stderr=subprocess.PIPE cwd=cwd)<line_sep>killed=<true><try_stmt><block_start>out,err=p.communicate(timeout=timeout)<line_sep>killed=<false><block_end><except_stmt>subprocess.TimeoutExpired<block_start>p.kill()<line_sep>out,err=p.communicate()<block_end>## print("out:", out) ## print("err:", err) returncode=p.returncode<line_sep><return>killed returncode<block_end><def_stmt>sysargs_to_mainargs <block_start>"""builds main args from sys.argv"""<if_stmt>len(sys.argv)<g>1<and>sys.argv[1].startswith("--help")<block_start>print(__doc__)<line_sep>sys.exit(1)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>sysargs_to_mainargs()<line_sep>main()<block_end>
<import_stmt>trimesh<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>copy<import_stmt>pickle<import_stmt>torch<import_stmt>pdb<def_stmt>depth2normal depth f_pix_x f_pix_y=<none><block_start>''' To compute a normal map from the depth map Input: - depth: torch.Tensor (H, W) - f_pix_x: K[0, 0] - f_pix_y: K[1, 1] Return: - normal: torch.Tensor (H, W, 3) '''<if_stmt>f_pix_y<is><none><block_start>f_pix_y=f_pix_x<block_end>h,w=depth.shape<line_sep>eps=1e-12<line_sep>bg_flag=(depth<g>1e5)|(depth<eq>0)<line_sep>depth[bg_flag]=0.0<line_sep>depth_left,depth_right,depth_up,depth_down=torch.zeros(h w) torch.zeros(h w) torch.zeros(h w) torch.zeros(h w)<if_stmt>depth.get_device()<ne>-1<block_start>device_id=depth.get_device()<line_sep>depth_left,depth_right,depth_up,depth_down=depth_left.to(device_id) depth_right.to(device_id) depth_up.to(device_id) depth_down.to(device_id)<block_end>depth_left[: 1:w-1]=depth[: :w-2].clone()<line_sep>depth_right[: 1:w-1]=depth[: 2:].clone()<line_sep>depth_up[1:h-1 :]=depth[:h-2 :].clone()<line_sep>depth_down[1:h-1 :]=depth[2: :].clone()<line_sep>dzdx=(depth_right-depth_left)<times>f_pix_x/2.0<line_sep>dzdy=(depth_down-depth_up)<times>f_pix_y/2.0<line_sep>normal=torch.stack([dzdx dzdy -torch.ones_like(dzdx)]).permute(1 2 0)<line_sep>normal_length=torch.norm(normal p=2 dim=2)<line_sep>normal=normal/(normal_length+1e-12)[: : <none>]<line_sep>normal[bg_flag]=0.0<line_sep><return>normal<block_end><def_stmt>quad2rotation quad<block_start>''' input: torch.Tensor (4) '''<line_sep>bs=quad.shape[0]<line_sep>qr,qi,qj,qk=quad[: 0] quad[: 1] quad[: 2] quad[: 3]<line_sep>rot_mat=torch.zeros(bs 3 3).to(quad.get_device())<line_sep>rot_mat[: 0 0]=1-2<times>(qj<power>2+qk<power>2)<line_sep>rot_mat[: 0 1]=2<times>(qi<times>qj-qk<times>qr)<line_sep>rot_mat[: 0 2]=2<times>(qi<times>qk+qj<times>qr)<line_sep>rot_mat[: 1 0]=2<times>(qi<times>qj+qk<times>qr)<line_sep>rot_mat[: 1 1]=1-2<times>(qi<power>2+qk<power>2)<line_sep>rot_mat[: 1 2]=2<times>(qj<times>qk-qi<times>qr)<line_sep>rot_mat[: 2 0]=2<times>(qi<times>qk-qj<times>qr)<line_sep>rot_mat[: 2 1]=2<times>(qj<times>qk+qi<times>qr)<line_sep>rot_mat[: 2 2]=1-2<times>(qi<power>2+qj<power>2)<line_sep><return>rot_mat<block_end><def_stmt>get_camera_from_tensor inputs<block_start>N=len(inputs.shape)<if_stmt>N<eq>1<block_start>inputs=inputs.unsqueeze(0)<block_end>quad,T=inputs[: :4] inputs[: 4:]<line_sep>R=quad2rotation(quad)<line_sep>RT=torch.cat([R T[: : <none>]] 2)<if_stmt>N<eq>1<block_start>RT=RT[0]<block_end><return>RT<block_end><def_stmt>get_tensor_from_camera RT<block_start>gpu_id=-1<if_stmt>type(RT)<eq>torch.Tensor<block_start><if_stmt>RT.get_device()<ne>-1<block_start>RT=RT.detach().cpu()<line_sep>gpu_id=RT.get_device()<block_end>RT=RT.numpy()<block_end><import_from_stmt>mathutils Matrix<line_sep>R,T=RT[: :3] RT[: 3]<line_sep>rot=Matrix(R)<line_sep>quad=rot.to_quaternion()<line_sep>tensor=np.concatenate([quad T] 0)<line_sep>tensor=torch.from_numpy(tensor).float()<if_stmt>gpu_id<ne>-1<block_start>tensor=tensor.to(gpu_id)<block_end><return>tensor<block_end><def_stmt>downsize_camera_intrinsic intrinsic factor<block_start>''' Input: - intrinsic type: np.array (3,3) - factor int '''<line_sep>img_h,img_w=int(2<times>intrinsic[1 2]) int(2<times>intrinsic[0 2])<line_sep>img_h_new,img_w_new=img_h/factor img_w/factor<if_stmt>(img_h_new-round(img_h_new))<g>1e-12<or>(img_w_new-round(img_w_new))<g>1e-12<block_start><raise>ValueError('The image size {0} should be divisible by the factor {1}.'.format((img_h img_w) 
factor))<block_end>intrinsic_new=copy.deepcopy(intrinsic)<line_sep>intrinsic_new[0 :]=intrinsic[0 :]/factor<line_sep>intrinsic_new[1 :]=intrinsic[1 :]/factor<line_sep><return>intrinsic_new<block_end><def_stmt>sample_points_from_mesh mesh N=30000<block_start>''' Return: -- points: np.array (N, 3) '''<line_sep>points=trimesh.sample.sample_surface(mesh N)[0]<line_sep><return>points<block_end><def_stmt>transform_point_cloud points<block_start>''' solve the mismatch between the point cloud coordinate and the mesh obj. '''<line_sep>points_new=copy.deepcopy(points)<line_sep>points_new[: 1]=-points[: 2]<line_sep>points_new[: 2]=points[: 1]<line_sep><return>points_new<block_end><def_stmt>read_pickle fname<block_start><with_stmt>open(fname 'rb')<as>f<block_start>data=pickle.load(f encoding='latin1')<block_end><return>data<block_end><def_stmt>save_render_output render_output fname<block_start>depth_rendered,normal_rendered,valid_mask_rendered,_=render_output<line_sep>output={}<line_sep>output['depth']=depth_rendered.detach().cpu().numpy()<line_sep>output['normal']=normal_rendered.detach().cpu().numpy()<line_sep>output['valid_mask']=valid_mask_rendered.detach().cpu().numpy()<line_sep>save_pkl(output fname)<block_end><def_stmt>save_pkl data fname<block_start><with_stmt>open(fname 'wb')<as>f<block_start>pickle.dump(data f)<block_end><block_end>
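The core of `depth2normal` above is a central difference in x and y scaled by the focal lengths, stacked with a constant -1 z-component and normalised. A small self-contained NumPy analogue of that step on a synthetic tilted-plane depth map (an illustration only, not a replacement for the torch routine):

import numpy as np

f_pix = 500.0
h, w = 8, 8
depth = 5.0 + 0.01 * np.arange(w)[None, :].repeat(h, axis=0)  # plane tilted along x

dzdx = np.gradient(depth, axis=1) * f_pix   # ~ (right - left) * f_pix / 2 in the interior
dzdy = np.gradient(depth, axis=0) * f_pix
normal = np.stack([dzdx, dzdy, -np.ones_like(dzdx)], axis=-1)
normal /= np.linalg.norm(normal, axis=-1, keepdims=True) + 1e-12

print(normal[4, 4])   # roughly the same direction at every pixel of a plane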
<import_from_future_stmt> annotations <import_from_stmt>abc ABC abstractmethod <import_from_stmt>datetime datetime <import_from_stmt>typing AsyncIterator Optional <import_from_stmt>uuid UUID <import_from_stmt>minos.common Inject Injectable Lock LockPool NotProvidedException PoolFactory SetupMixin <import_from_stmt>...exceptions TransactionNotFoundException <import_from_stmt>..entries TransactionEntry TransactionStatus <line_sep>@Injectable("transaction_repository")<class_stmt>TransactionRepository(ABC SetupMixin)<block_start>"""Transaction Repository base class."""<line_sep>@Inject()<def_stmt>__init__ self lock_pool:Optional[LockPool]=<none> pool_factory:Optional[PoolFactory]=<none> *args **kwargs<block_start>super().__init__(*args **kwargs)<if_stmt>lock_pool<is><none><and>pool_factory<is><not><none><block_start>lock_pool=pool_factory.get_pool("lock")<block_end><if_stmt>lock_pool<is><none><block_start><raise>NotProvidedException("A lock pool instance is required.")<block_end>self._lock_pool=lock_pool<block_end><async_keyword><def_stmt>submit self transaction:TransactionEntry<arrow>TransactionEntry<block_start>"""Submit a new or updated transaction to store it on the repository. :param transaction: The transaction to be stored. :return: This method does not return anything. """<line_sep><return><await>self._submit(transaction)<block_end>@abstractmethod<async_keyword><def_stmt>_submit self transaction:TransactionEntry<arrow>TransactionEntry<block_start><raise>NotImplementedError<block_end># noinspection PyUnusedLocal <async_keyword><def_stmt>get self uuid:UUID **kwargs<arrow>TransactionEntry<block_start>"""Get a ``TransactionEntry`` from its identifier. :param uuid: Identifier of the ``RootEntity``. :param kwargs: Additional named arguments. :return: The ``TransactionEntry`` instance. """<try_stmt><block_start><return><await>self.select(uuid=uuid).__anext__()<block_end><except_stmt>StopAsyncIteration<block_start><raise>TransactionNotFoundException(f"Transaction identified by {uuid!r} does not exist.")<block_end><block_end><async_keyword><def_stmt>select self uuid:Optional[UUID]=<none> uuid_ne:Optional[UUID]=<none> uuid_in:Optional[tuple[UUID <ellipsis>]]=<none> destination_uuid:Optional[UUID]=<none> status:Optional[TransactionStatus]=<none> status_in:Optional[tuple[str <ellipsis>]]=<none> event_offset:Optional[int]=<none> event_offset_lt:Optional[int]=<none> event_offset_gt:Optional[int]=<none> event_offset_le:Optional[int]=<none> event_offset_ge:Optional[int]=<none> updated_at:Optional[datetime]=<none> updated_at_lt:Optional[datetime]=<none> updated_at_gt:Optional[datetime]=<none> updated_at_le:Optional[datetime]=<none> updated_at_ge:Optional[datetime]=<none> **kwargs <arrow>AsyncIterator[TransactionEntry]<block_start>"""Get a transaction from the repository. :param uuid: Transaction identifier equal to the given value. :param uuid_ne: Transaction identifier not equal to the given value :param uuid_in: Transaction identifier within the given values. :param destination_uuid: Destination Transaction identifier equal to the given value. :param status: Transaction status equal to the given value. :param status_in: Transaction status within the given values :param event_offset: Event offset equal to the given value. 
:param event_offset_lt: Event Offset lower than the given value :param event_offset_gt: Event Offset greater than the given value :param event_offset_le: Event Offset lower or equal to the given value :param event_offset_ge: Event Offset greater or equal to the given value :param updated_at: Updated at equal to the given value. :param updated_at_lt: Updated at lower than the given value. :param updated_at_gt: Updated at greater than the given value. :param updated_at_le: Updated at lower or equal to the given value. :param updated_at_ge: Updated at greater or equal to the given value. :param kwargs: Additional named arguments. :return: An asynchronous iterator. """<line_sep>generator=self._select(uuid=uuid uuid_ne=uuid_ne uuid_in=uuid_in destination_uuid=destination_uuid status=status status_in=status_in event_offset=event_offset event_offset_lt=event_offset_lt event_offset_gt=event_offset_gt event_offset_le=event_offset_le event_offset_ge=event_offset_ge updated_at=updated_at updated_at_lt=updated_at_lt updated_at_gt=updated_at_gt updated_at_le=updated_at_le updated_at_ge=updated_at_ge **kwargs )<line_sep># noinspection PyTypeChecker <async_keyword><for_stmt>entry generator<block_start><yield>entry<block_end><block_end>@abstractmethod<async_keyword><def_stmt>_select self **kwargs<arrow>AsyncIterator[TransactionEntry]<block_start><raise>NotImplementedError<block_end><def_stmt>write_lock self<arrow>Lock<block_start>"""Get write lock. :return: An asynchronous context manager. """<line_sep><return>self._lock_pool.acquire("aggregate_transaction_write_lock")<block_end><block_end>
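The many optional keyword filters documented in `select` follow a simple `<field>` / `<field>_<op>` naming scheme. A hypothetical, self-contained sketch of those semantics applied to plain objects (the helper and names below are illustrative; they are not minos internals):

import operator

_OPS = {"_ne": operator.ne, "_lt": operator.lt, "_gt": operator.gt,
        "_le": operator.le, "_ge": operator.ge,
        "_in": lambda value, allowed: value in allowed}

def entry_matches(entry, **filters):
    """Return True when `entry` satisfies every filter that is not None."""
    for name, expected in filters.items():
        if expected is None:
            continue
        for suffix, op in _OPS.items():
            if name.endswith(suffix):
                field, check = name[: -len(suffix)], op
                break
        else:
            field, check = name, operator.eq
        if not check(getattr(entry, field), expected):
            return False
    return True

class Entry:
    def __init__(self, status, event_offset):
        self.status, self.event_offset = status, event_offset

print(entry_matches(Entry("COMMITTED", 7),
                    status_in=("COMMITTED", "RESERVED"),
                    event_offset_ge=5))   # True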
""" Package-related classes and methods are in pkg.package module. All constructing arguments are accessible via property. """<import_stmt>ctypes<import_stmt>glob<import_stmt>json<import_stmt>os<import_stmt>random<import_stmt>runpy<import_stmt>shutil<import_stmt>sys<import_stmt>traceback<import_stmt>zipfile<import_stmt>ida_kernwin<import_stmt>ida_loader<import_stmt>ida_diskio<import_from_stmt>.config g<import_from_stmt>.env ea<as>current_ea os<as>current_os<import_from_stmt>.internal_api invalidate_proccache get_extlangs idausr_remove idausr_add<import_from_stmt>.logger getLogger<import_from_stmt>.vendor.semantic_version Version Spec<import_from_stmt>.virtualenv_utils FixInterpreter<line_sep>__all__=["LocalPackage" "InstallablePackage"]<line_sep>log=getLogger(__name__)<def_stmt>rename old new<block_start><if_stmt>sys.platform<eq>'win32'<block_start><if_stmt><not>ctypes.windll.kernel32.MoveFileExA(str(old) str(new) 0)<block_start><raise>WindowsError(ctypes.windll.kernel32.GetLastError())<block_end><block_end><else_stmt><block_start><return>os.rename(old new)<block_end><block_end><def_stmt>_get_native_suffix <block_start><if_stmt>current_os<eq>'win'<block_start>suffix='.dll'<block_end><elif_stmt>current_os<eq>'linux'<block_start>suffix='.so'<block_end><elif_stmt>current_os<eq>'mac'<block_start>suffix='.dylib'<block_end><else_stmt><block_start><raise>Exception("unknown os: %r"%current_os)<block_end><return>suffix<block_end><class_stmt>LocalPackage(object)<block_start><def_stmt>__init__ self id path version<block_start>self.id=str(id)<line_sep>self.version=str(version)<line_sep>self.path=os.path.normpath(path)<block_end><def_stmt>remove self<block_start>""" Removes a package. """<line_sep>idausr_remove(self.path)<with_stmt>FixInterpreter()<block_start><for_stmt>script self.info().get('uninstallers' [])<block_start>script=os.path.join(self.path script)<try_stmt><block_start>runpy.run_path(script)<block_end><except_stmt>Exception# XXX: How can I rollback this? <block_start>traceback.print_exc()<line_sep>log.warn("Uninstallation script %r exited with exception!" script)<block_end><block_end><block_end><if_stmt><not>LocalPackage._remove_package_dir(self.path)<block_start>log.error("Package directory is in use and will be removed after restart.")<line_sep># If not modified, the only case this fails is, custom ld.so or windows. # Latter case is common. new_path=self.path.rstrip('/\\')+'-removed'<if_stmt>os.path.exists(new_path)<block_start>new_path<augadd>'-%x'%random.getrandbits(64)<block_end>rename(self.path new_path)<line_sep># XXX: is it good to mutate this object? self.path=new_path<block_end>log.info("Done!")<block_end><def_stmt>install self remove_on_fail=<false><block_start>""" Run python scripts specified by :code:`installers` field in `info.json`. :returns: None """<line_sep>orig_cwd=os.getcwd()<try_stmt><block_start>os.chdir(self.path)<line_sep>info=self.info()<line_sep>scripts=info.get('installers' [])<if_stmt><not>isinstance(scripts list)<block_start><raise>Exception('%r: Corrupted package: installers key is not list'%self.id)<block_end><with_stmt>FixInterpreter()<block_start><for_stmt>script scripts<block_start>log.info('Executing installer path %r...' 
script)<line_sep>script=os.path.join(self.path script)<line_sep>runpy.run_path(script)<block_end><block_end><block_end><except_stmt>Exception<block_start>log.info('Installer failed!')<if_stmt>remove_on_fail<block_start>self.remove()<block_end><raise><block_end><finally_stmt><block_start>os.chdir(orig_cwd)<block_end><block_end><def_stmt>load self force=<false><block_start>""" Actually does :code:`ida_loaders.load_plugin(paths)`, and updates IDAUSR variable. """<if_stmt><not>force<and>self.path<in>ida_diskio.get_ida_subdirs('')# Already loaded, just update sys.path for python imports <block_start><if_stmt>self.path<not><in>sys.path<block_start>sys.path.append(self.path)<block_end><return><block_end># XXX: find a more efficient way to ensure dependencies errors=[]<for_stmt>dependency self.info().get('dependencies' {}).keys()<block_start>dep=LocalPackage.by_name(dependency)<if_stmt><not>dep<block_start>errors.append('Dependency not found: %r'%dependency)<line_sep><continue><block_end>dep.load()<block_end><if_stmt>errors<block_start><for_stmt>error errors<block_start>log.error(error)<block_end><return><block_end><def_stmt>handler # Load plugins immediately # processors / loaders will be loaded on demand <block_start><if_stmt>self.path<not><in>sys.path<block_start>sys.path.append(self.path)<block_end># Update IDAUSR variable idausr_add(self.path)<line_sep># Immediately load compatible plugins self._find_loadable_modules('plugins' ida_loader.load_plugin)<line_sep># Find loadable processor modules, and if exists, invalidate cached process list (proccache). invalidates=[]<line_sep>self._find_loadable_modules('procs' invalidates.append)<if_stmt>invalidates<block_start>invalidate_proccache()<block_end><block_end># Run in main thread ida_kernwin.execute_sync(handler ida_kernwin.MFF_FAST)<block_end><def_stmt>populate_env self<block_start>""" A passive version of load; it only populates IDAUSR variable. It's called at :code:`idapythonrc.py`. """<line_sep>errors=[]<for_stmt>dependency self.info().get('dependencies' {}).keys()<block_start>dep=LocalPackage.by_name(dependency)<if_stmt><not>dep<block_start>errors.append('Dependency not found: %r'%dependency)<line_sep><continue><block_end>dep.populate_env()<block_end><if_stmt>errors<block_start><for_stmt>error errors<block_start>log.error(error)<block_end><return><block_end>idausr_add(self.path)<if_stmt>self.path<not><in>sys.path<block_start>sys.path.append(self.path)<block_end><block_end><def_stmt>plugins self<block_start><return>self._collect_modules('plugins')<block_end><def_stmt>loaders self<block_start><return>self._collect_modules('loaders')<block_end><def_stmt>procs self<block_start><return>self._collect_modules('procs')<block_end><def_stmt>_collect_modules self category<block_start>result=[]<line_sep>self._find_loadable_modules(category result.append)<line_sep><return>result<block_end><def_stmt>_find_loadable_modules self subdir callback# Load modules in external languages (.py, .idc, ...) 
<block_start><for_stmt>suffix ['.'+x.fileext<for>x get_extlangs()]<block_start>expr=os.path.join(self.path subdir '*'+suffix)<for_stmt>path glob.glob(expr)<block_start>callback(str(path))<block_end><block_end># Load native modules <for_stmt>suffix (_get_native_suffix() )<block_start>expr=os.path.join(self.path subdir '*'+suffix)<for_stmt>path glob.glob(expr)<block_start>is64=path[:-len(suffix)][-2:]<eq>'64'<if_stmt>is64<eq>(current_ea<eq>64)<block_start>callback(str(path))<block_end><block_end><block_end><block_end><def_stmt>info self<block_start>""" Loads :code:`info.json` and returns a parsed JSON object. :rtype: dict """<with_stmt>open(os.path.join(self.path 'info.json') 'rb')<as>_file<block_start><return>json.load(_file)<block_end><block_end>@staticmethod<def_stmt>by_name name prefix=<none><block_start>""" Returns a package with specified `name`. :rtype: LocalPackage """<if_stmt>prefix<is><none><block_start>prefix=g['path']['packages']<block_end>path=os.path.join(prefix name)<line_sep># check if the folder exists <if_stmt><not>os.path.isdir(path)<block_start><return><none><block_end># filter removed package removed=os.path.join(path '.removed')<if_stmt>os.path.isfile(removed)<block_start>LocalPackage._remove_package_dir(path)<line_sep><return><none><block_end>info_json=os.path.join(path 'info.json')<if_stmt><not>os.path.isfile(info_json)<block_start>log.warn('Warning: info.json is not found at %r' path)<line_sep><return><none><block_end><with_stmt>open(info_json 'rb')<as>_file<block_start><try_stmt><block_start>info=json.load(_file)<block_end><except_stmt>Exception<block_start>traceback.print_exc()<line_sep>log.warn('Warning: info.json is not valid at %r' path)<line_sep><return><none><block_end><block_end>result=LocalPackage(id=info['_id'] path=path version=info['version'])<line_sep><return>result<block_end>@staticmethod<def_stmt>all disabled=<false><block_start>""" List all packages installed at :code:`g['path']['packages']`. :rtype: list(LocalPackage) """<line_sep>prefix=g['path']['packages']<line_sep>res=os.listdir(prefix)<line_sep>res=(x<for>x res<if>os.path.isdir(os.path.join(prefix x)))<line_sep>res=(LocalPackage.by_name(x)<for>x res)<line_sep>res=(x<for>x res<if>x)<line_sep>res=[x<for>x res<if>(x.id<in>g['ignored_packages'])<eq>disabled]<line_sep><return>res<block_end>@staticmethod<def_stmt>_remove_package_dir path<block_start>errors=[]<def_stmt>onerror _listdir _path exc_info<block_start>log.error("%s: %s" _path str(exc_info[1]))<line_sep>errors.append(exc_info[1])<block_end>shutil.rmtree(path onerror=onerror)<if_stmt>errors# Mark for later removal <block_start>open(os.path.join(path '.removed') 'wb').close()<block_end><return><not>errors<block_end><def_stmt>__repr__ self<block_start><return>'<LocalPackage id=%r path=%r version=%r>'%(self.id self.path self.version)<block_end><block_end><class_stmt>InstallablePackage(object)<block_start><def_stmt>__init__ self id name version description author repo<block_start>self.id=str(id)<line_sep>self.name=name<line_sep>self.version=str(version)<line_sep>self.description=description<line_sep>self.repo=repo<line_sep>self.author=author<block_end><def_stmt>install self upgrade=<false><block_start>""" Just calls :code:`InstallablePackage.install_from_repo(self.repo, self.id, upgrade)`. 
"""<line_sep>install_from_repo(self.repo self.id allow_upgrade=upgrade)<block_end><def_stmt>__repr__ self<block_start><return>'<InstallablePackage id=%r version=%r repo=%r>'%(self.id self.version self.repo)<block_end><block_end><def_stmt>install_from_repo repo name version_spec='*' allow_upgrade=<false> _visited=<none><block_start>""" This method downloads a package satisfying spec. .. note :: The function waits until all of dependencies are installed. Run it as separate thread if possible. """<line_sep>top_level=_visited<is><none><line_sep>_visited=_visited<or>{}<if_stmt>name<in>_visited<block_start>log.warn("Cyclic dependency found when installing %r <-> %r" name _visited)<line_sep><return><block_end>prev=LocalPackage.by_name(name)<line_sep>_version_spec=Spec(version_spec)<line_sep>satisfies_local=prev<and>Version(prev.version)<in>_version_spec<if_stmt>allow_upgrade<or><not>satisfies_local<block_start>log.debug("Fetching releases for %r from %r..." name repo)<line_sep>releases=repo.releases(name)<if_stmt><not>releases<block_start>error="Release not found on remote repository: %r on %r (error: %r)"%(name repo releases['error'])<line_sep><raise>Exception(error)<block_end>releases=[release<for>release releases<if>Version(release['version'])<in>_version_spec]<if_stmt><not>releases<block_start>error="Release satisfying the condition %r %r not found on remote repository %r"%(name version_spec repo)<line_sep><raise>Exception(error)<block_end>downloading=<none><if>(prev<and>releases[-1]['version']<eq>prev.version)<else>releases[-1]['version']<block_end><else_stmt><block_start>downloading=<none><block_end><if_stmt>downloading<block_start>log.info('Collecting %s...' name)<line_sep>data=repo.download(name downloading)<line_sep>f=zipfile.ZipFile(data 'r')<line_sep># No /: topmost files # One /: topmost folders topmost_files=[path<for>path f.namelist()<if>path.count('/')<eq>0]<line_sep># From ZipInfo.is_dir() in Python 3.x topmost_folders=[path<for>path f.namelist()<if>path.endswith('/')]<line_sep>common_prefix=topmost_folders[0]<if>len(topmost_files)<eq>0<and>len(topmost_folders)<eq>1<else>""<line_sep>info=json.load(f.open(common_prefix+'info.json'))<line_sep>packages_path=g['path']['packages']<line_sep>install_path=os.path.join(packages_path info["_id"])<line_sep># this ensures os.path.exists(install_path) == False # TODO: should we unload a already-loaded plugin? <if_stmt>prev<block_start>prev.remove()<assert_stmt><not>os.path.exists(install_path)<block_end># XXX: edge case? removed=os.path.join(install_path '.removed')<if_stmt>os.path.isfile(removed)<block_start>os.unlink(removed)<block_end>log.info('Extracting into %r...' install_path)<if_stmt>common_prefix<block_start>f.extractall(packages_path)<line_sep>os.rename(os.path.join(packages_path common_prefix) install_path)<block_end><else_stmt><block_start>f.extractall(install_path)<block_end># Initiate LocalPackage object pkg=LocalPackage(info['_id'] install_path info['version'])<block_end><else_stmt><block_start>pkg=prev<line_sep>log.info("Requirement already satisfied: %s%s" name ''<if>version_spec<eq>'*'<else>version_spec)<block_end>restart_required=pkg.info().get('restart_required' <false>)<line_sep>_visited[name]=(pkg.version restart_required)<line_sep># First, install dependencies # TODO: add version check <for_stmt>dep_name,dep_version_spec pkg.info().get('dependencies' {}).items()<block_start>install_from_repo(repo dep_name dep_version_spec allow_upgrade _visited)<block_end># Then, install this package. 
<if_stmt>downloading<block_start>pkg.install()<block_end><if_stmt><not>restart_required<block_start>pkg.load()<block_end><if_stmt>top_level<block_start>log.info("Successfully installed %s" ' '.join('%s-%s'%(key value[0])<for>key,value _visited.items()))<line_sep>delayed=[(key value)<for>key,value _visited.items()<if>value[1]]<if_stmt>delayed<block_start>log.info("Plugins in the following packages will be loaded after restarting IDA.")<line_sep>log.info(" %s" " ".join('%s-%s'%(key value[0])<for>key,value delayed))<block_end><block_end><return>pkg<block_end>
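The `satisfies_local` test above leans on the vendored `semantic_version` package: a release satisfies the request when its `Version` is contained in the `Spec`. A minimal sketch of that check, assuming the standalone `semantic_version` distribution exposes the same `Spec`/`Version` API as the vendored copy:

from semantic_version import Spec, Version

spec = Spec(">=1.2.0,<2.0.0")
print(Version("1.4.1") in spec)       # True  -> an installed copy at 1.4.1 would be kept
print(Version("2.0.0") in spec)       # False -> a matching release must be downloaded
print(Version("1.9.0") in Spec("*"))  # '*' is the default spec and accepts any version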
# # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # <try_stmt><block_start><import_stmt>sionna<block_end><except_stmt>ImportError<as>e<block_start><import_stmt>sys<line_sep>sys.path.append("../")<block_end><import_stmt>tensorflow<as>tf<line_sep>gpus=tf.config.list_physical_devices('GPU')<line_sep>print('Number of GPUs available :' len(gpus))<if_stmt>gpus<block_start>gpu_num=0# Number of the GPU to be used <try_stmt><block_start>tf.config.set_visible_devices(gpus[gpu_num] 'GPU')<line_sep>print('Only GPU number' gpu_num 'used.')<line_sep>tf.config.experimental.set_memory_growth(gpus[gpu_num] <true>)<block_end><except_stmt>RuntimeError<as>e<block_start>print(e)<block_end><block_end><import_stmt>unittest<import_stmt>pytest# for pytest filterwarnings <import_stmt>numpy<as>np<import_from_stmt>sionna.fec.polar.encoding PolarEncoder Polar5GEncoder<import_from_stmt>sionna.fec.polar.decoding PolarSCDecoder PolarSCLDecoder PolarBPDecoder<import_from_stmt>sionna.fec.polar.decoding Polar5GDecoder<import_from_stmt>sionna.fec.crc CRCEncoder<import_from_stmt>sionna.fec.utils GaussianPriorSource<import_from_stmt>sionna.utils BinarySource<import_from_stmt>sionna.fec.polar.utils generate_5g_ranking<class_stmt>TestPolarDecodingSC(unittest.TestCase)<block_start><def_stmt>test_invalid_inputs self<block_start>"""Test against invalid values of n and frozen_pos."""<line_sep># frozen vec to long n=32<line_sep>frozen_pos=np.arange(n+1)<with_stmt>self.assertRaises(AssertionError)<block_start>PolarSCDecoder(frozen_pos n)<block_end># n not a pow of 2 # frozen vec to long n=32<line_sep>k=12<line_sep>frozen_pos,_=generate_5g_ranking(k n)<with_stmt>self.assertRaises(AssertionError)<block_start>PolarSCDecoder(frozen_pos n+1)<block_end># test valid shapes # (k, n) param_valid=[[0 32] [10 32] [32 32] [100 256] [123 1024] [1024 1024]]<for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>PolarSCDecoder(frozen_pos p[1])<block_end># no complex-valued input allowed <with_stmt>self.assertRaises(ValueError)<block_start>frozen_pos,_=generate_5g_ranking(32 64)<line_sep>PolarSCDecoder(frozen_pos 64 output_dtype=tf.complex64)<block_end><block_end><def_stmt>test_output_dim self<block_start>"""Test that output dims are correct (=n) and output equals all-zero codeword."""<line_sep>bs=10<line_sep># (k, n) param_valid=[[1 32] [10 32] [32 32] [100 256] [123 1024] [1024 1024]]<for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>dec=PolarSCDecoder(frozen_pos p[1])<line_sep>c=-10.<times>np.ones([bs p[1]])# all-zero with BPSK (no noise);logits u=dec(c).numpy()<line_sep>self.assertTrue(u.shape[-1]<eq>p[0])<line_sep># also check that all-zero input yields all-zero output u_hat=np.zeros([bs p[0]])<line_sep>self.assertTrue(np.array_equal(u u_hat))<block_end><block_end><def_stmt>test_numerical_stab self<block_start>"""Test for numerical stability (no nan or infty as output)."""<line_sep>bs=10<line_sep># (k,n) param_valid=[[1 32] [10 32] [32 32] [100 256]]<line_sep>source=GaussianPriorSource()<for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>dec=PolarSCDecoder(frozen_pos p[1])<line_sep># case 1: extremely large inputs c=source([[bs p[1]] 0.0001])<line_sep># llrs u1=dec(c).numpy()<line_sep># no nan self.assertFalse(np.any(np.isnan(u1)))<line_sep>#no inftfy 
self.assertFalse(np.any(np.isinf(u1)))<line_sep>self.assertFalse(np.any(np.isneginf(u1)))<line_sep># case 2: zero llr input c=tf.zeros([bs p[1]])<line_sep># llrs u2=dec(c).numpy()<line_sep># no nan self.assertFalse(np.any(np.isnan(u2)))<line_sep>#no inftfy self.assertFalse(np.any(np.isinf(u2)))<line_sep>self.assertFalse(np.any(np.isneginf(u2)))<block_end><block_end><def_stmt>test_identity self<block_start>"""test that info bits can be recovered if no noise is added."""<line_sep>bs=10<line_sep># (k, n) param_valid=[[1 32] [10 32] [32 32] [100 256] [123 1024] [1024 1024]]<for_stmt>p param_valid<block_start>source=BinarySource()<line_sep>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>enc=PolarEncoder(frozen_pos p[1])<line_sep>dec=PolarSCDecoder(frozen_pos p[1])<line_sep>u=source([bs p[0]])<line_sep>c=enc(u)<line_sep>llr_ch=20.<times>(2.<times>c-1)# demod BPSK witout noise u_hat=dec(llr_ch)<line_sep>self.assertTrue(np.array_equal(u.numpy() u_hat.numpy()))<block_end><block_end><def_stmt>test_keras self<block_start>"""Test that Keras model can be compiled (supports dynamic shapes)."""<line_sep>bs=10<line_sep>k=100<line_sep>n=128<line_sep>source=BinarySource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>inputs=tf.keras.Input(shape=(n) dtype=tf.float32)<line_sep>x=PolarSCDecoder(frozen_pos n)(inputs)<line_sep>model=tf.keras.Model(inputs=inputs outputs=x)<line_sep>b=source([bs n])<line_sep>model(b)<line_sep># call twice to see that bs can change b2=source([bs+1 n])<line_sep>model(b2)<line_sep>model.summary()<block_end><def_stmt>test_multi_dimensional self<block_start>"""Test against arbitrary shapes. """<line_sep>k=120<line_sep>n=256<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>source=BinarySource()<line_sep>dec=PolarSCDecoder(frozen_pos n)<line_sep>b=source([100 n])<line_sep>b_res=tf.reshape(b [4 5 5 n])<line_sep># encode 2D Tensor c=dec(b).numpy()<line_sep># encode 4D Tensor c_res=dec(b_res).numpy()<line_sep># and reshape to 2D shape c_res=tf.reshape(c_res [100 k])<line_sep># both version should yield same result self.assertTrue(np.array_equal(c c_res))<block_end><def_stmt>test_batch self<block_start>"""Test that all samples in batch yield same output (for same input). 
"""<line_sep>bs=100<line_sep>k=120<line_sep>n=256<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>source=BinarySource()<line_sep>dec=PolarSCDecoder(frozen_pos n)<line_sep>b=source([1 15 n])<line_sep>b_rep=tf.tile(b [bs 1 1])<line_sep># and run tf version (to be tested) c=dec(b_rep).numpy()<for_stmt>i range(bs)<block_start>self.assertTrue(np.array_equal(c[0 : :] c[i : :]))<block_end><block_end><def_stmt>test_tf_fun self<block_start>"""Test that graph mode works and xla is supported."""<line_sep>@tf.function<def_stmt>run_graph u<block_start><return>dec(u)<block_end>@tf.function(jit_compile=<true>)<def_stmt>run_graph_xla u<block_start><return>dec(u)<block_end>bs=10<line_sep>k=100<line_sep>n=128<line_sep>source=BinarySource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>dec=PolarSCDecoder(frozen_pos n)<line_sep>u=source([bs n])<line_sep>x=run_graph(u).numpy()<line_sep># execute the graph twice x=run_graph(u).numpy()<line_sep># and change batch_size u=source([bs+1 n])<line_sep>x=run_graph(u).numpy()<line_sep># run same test for XLA (jit_compile=True) u=source([bs n])<line_sep>x=run_graph_xla(u).numpy()<line_sep>x=run_graph_xla(u).numpy()<line_sep>u=source([bs+1 n])<line_sep>x=run_graph_xla(u).numpy()<block_end><def_stmt>test_ref_implementation self<block_start>"""Test against pre-calculated results from internal implementation. """<line_sep>ref_path='../test/codes/polar/'<line_sep>filename=["P_128_37" "P_128_110" "P_256_128"]<for_stmt>f filename<block_start>A=np.load(ref_path+f+"_Avec.npy")<line_sep>llr_ch=np.load(ref_path+f+"_Lch.npy")<line_sep>u_hat=np.load(ref_path+f+"_uhat.npy")<line_sep>frozen_pos=np.array(np.where(A<eq>0)[0])<line_sep>info_pos=np.array(np.where(A<eq>1)[0])<line_sep>n=len(frozen_pos)+len(info_pos)<line_sep>k=len(info_pos)<line_sep>dec=PolarSCDecoder(frozen_pos n)<line_sep>l_in=-1.<times>llr_ch# logits u_hat_tf=dec(l_in).numpy()<line_sep># the output should be equal to the reference self.assertTrue(np.array_equal(u_hat_tf u_hat))<block_end><block_end><def_stmt>test_dtype_flexible self<block_start>"""Test that output_dtype can be flexible."""<line_sep>batch_size=100<line_sep>k=30<line_sep>n=64<line_sep>source=GaussianPriorSource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>dtypes_supported=(tf.float16 tf.float32 tf.float64)<for_stmt>dt_in dtypes_supported<block_start><for_stmt>dt_out dtypes_supported<block_start>llr=source([[batch_size n] 0.5])<line_sep>llr=tf.cast(llr dt_in)<line_sep>dec=PolarSCDecoder(frozen_pos n output_dtype=dt_out)<line_sep>x=dec(llr)<line_sep>self.assertTrue(x.dtype<eq>dt_out)<block_end><block_end># test that complex-valued inputs raise error llr=source([[batch_size n] 0.5])<line_sep>llr_c=tf.complex(llr tf.zeros_like(llr))<line_sep>dec=PolarSCDecoder(frozen_pos n output_dtype=tf.float32)<with_stmt>self.assertRaises(TypeError)<block_start>x=dec(llr_c)<block_end><block_end><block_end><class_stmt>TestPolarDecodingSCL(unittest.TestCase)# Filter warnings related to large ressource allocation <block_start>@pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_invalid_inputs self<block_start>"""Test against invalid values of n and frozen_pos."""<line_sep># frozen vec to long n=32<line_sep>frozen_pos=np.arange(n+1)<with_stmt>self.assertRaises(AssertionError)<block_start>PolarSCLDecoder(frozen_pos n)<block_end># n not a pow of 2 # frozen vec to long n=32<line_sep>k=12<line_sep>frozen_pos,_=generate_5g_ranking(k n)<with_stmt>self.assertRaises(AssertionError)<block_start>PolarSCLDecoder(frozen_pos 
n+1)<block_end># also test valid shapes # (k, n) param_valid=[[0 32] [10 32] [32 32] [100 256] [123 1024] [1024 1024]]<for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>PolarSCLDecoder(frozen_pos p[1])<block_end># no complex-valued input allowed <with_stmt>self.assertRaises(ValueError)<block_start>frozen_pos,_=generate_5g_ranking(32 64)<line_sep>PolarSCLDecoder(frozen_pos 64 output_dtype=tf.complex64)<block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_output_dim self<block_start>"""Test that output dims are correct (=n) and output is the all-zero codeword."""<line_sep>bs=10<line_sep># (k, n) param_valid=[[1 32] [10 32] [32 32] [100 256] [123 1024] [1024 1024]]<line_sep># use_hybrid, use_fast_scl, cpu_only, use_scatter <for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>dec=PolarSCLDecoder(frozen_pos p[1] use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter)<line_sep># all-zero with BPSK (no noise);logits c=-10.<times>np.ones([bs p[1]])<line_sep>u=dec(c).numpy()<line_sep># check shape self.assertTrue(u.shape[-1]<eq>p[0])<line_sep># also check that all-zero input yields all-zero u_hat=np.zeros([bs p[0]])<line_sep>self.assertTrue(np.array_equal(u u_hat))<block_end><block_end><block_end><block_end># also test different list sizes n=32<line_sep>k=16<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>list_sizes=[1 2 8 32]<for_stmt>list_size list_sizes<block_start><for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>dec=PolarSCLDecoder(frozen_pos n list_size=list_size use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter)<line_sep># all-zero with BPSK (no noise);logits c=-10.<times>np.ones([bs n])<line_sep>u=dec(c).numpy()<line_sep>self.assertTrue(u.shape[-1]<eq>k)<line_sep># also check that all-zero input yields all-zero u_hat=np.zeros([bs k])<line_sep>self.assertTrue(np.array_equal(u u_hat))<block_end><block_end><block_end><block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_numerical_stab self<block_start>"""Test for numerical stability (no nan or infty as output)"""<line_sep>bs=10<line_sep># (k, n) param_valid=[[1 32] [10 32] [32 32] [100 256]]<line_sep>source=GaussianPriorSource()<for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>dec=PolarSCLDecoder(frozen_pos p[1] use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter)<line_sep># case 1: extremely large inputs c=source([[bs p[1]] 0.0001])<line_sep># llrs u1=dec(c).numpy()<line_sep># no nan self.assertFalse(np.any(np.isnan(u1)))<line_sep>#no inftfy self.assertFalse(np.any(np.isinf(u1)))<line_sep>self.assertFalse(np.any(np.isneginf(u1)))<line_sep># case 2: zero input c=tf.zeros([bs p[1]])<line_sep># llrs u2=dec(c).numpy()<line_sep># no nan self.assertFalse(np.any(np.isnan(u2)))<line_sep>#no inftfy 
self.assertFalse(np.any(np.isinf(u2)))<line_sep>self.assertFalse(np.any(np.isneginf(u2)))<block_end><block_end><block_end><block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_identity self<block_start>"""Test that info bits can be recovered if no noise is added."""<line_sep>bs=10<line_sep># (k,n) param_valid=[[1 32] [10 32] [32 32] [100 256]]<line_sep>source=BinarySource()<line_sep># use_hybrid, use_fast_scl, cpu_only, use_scatter <for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>enc=PolarEncoder(frozen_pos p[1])<line_sep>u=source([bs p[0]])<line_sep>c=enc(u)<line_sep>llr_ch=200.<times>(2.<times>c-1)# demod BPSK witout noise <for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>dec=PolarSCLDecoder(frozen_pos p[1] use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter)<line_sep>u_hat=dec(llr_ch)<line_sep>self.assertTrue(np.array_equal(u.numpy() u_hat.numpy()))<block_end><block_end><block_end><block_end># also test different list sizes n=32<line_sep>k=16<line_sep>crc_degree="CRC11"<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>enc=PolarEncoder(frozen_pos n)<line_sep>enc_crc=CRCEncoder(crc_degree)<line_sep>u=source([bs k-enc_crc.crc_length])<line_sep>u_crc=enc_crc(u)<line_sep>c=enc(u_crc)<line_sep>llr_ch=200.<times>(2.<times>c-1)# demod BPSK witout noise list_sizes=[1 2 8 32]<for_stmt>list_size list_sizes<block_start><for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>dec=PolarSCLDecoder(frozen_pos n list_size=list_size use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter crc_degree=crc_degree)<line_sep>u_hat=dec(llr_ch)<line_sep>self.assertTrue(np.array_equal(u_crc.numpy() u_hat.numpy()))<block_end><block_end><block_end><block_end><block_end><def_stmt>test_keras self<block_start>"""Test that Keras model can be compiled (supports dynamic shapes)."""<line_sep>bs=10<line_sep>k=16<line_sep>n=32<for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>source=BinarySource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>inputs=tf.keras.Input(shape=(n) dtype=tf.float32)<line_sep>x=PolarSCLDecoder(frozen_pos n use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter)(inputs)<line_sep>model=tf.keras.Model(inputs=inputs outputs=x)<line_sep>b=source([bs n])<line_sep>model(b)<line_sep># call twice to see that bs can change b2=source([bs+1 n])<line_sep>model(b2)<line_sep>model.summary()<block_end><block_end><block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_multi_dimensional self<block_start>"""Test against multi-dimensional input shapes. As reshaping is done before calling the actual decoder, no exhaustive testing against all decoder options is required. 
"""<line_sep>k=120<line_sep>n=256<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>source=BinarySource()<line_sep>dec=PolarSCLDecoder(frozen_pos n)<line_sep>b=source([100 n])<line_sep>b_res=tf.reshape(b [4 5 5 n])<line_sep># encode 2D Tensor c=dec(b).numpy()<line_sep># encode 4D Tensor c_res=dec(b_res).numpy()<line_sep># and reshape to 2D shape c_res=tf.reshape(c_res [100 k])<line_sep># both version should yield same result self.assertTrue(np.array_equal(c c_res))<block_end><def_stmt>test_batch self<block_start>"""Test that all samples in batch yield same output (for same input). """<line_sep>bs=100<line_sep>k=78<line_sep>n=128<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>source=BinarySource()<for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>dec=PolarSCLDecoder(frozen_pos n use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter)<line_sep>b=source([1 15 n])<line_sep>b_rep=tf.tile(b [bs 1 1])<line_sep># and run tf version (to be tested) c=dec(b_rep).numpy()<for_stmt>i range(bs)<block_start>self.assertTrue(np.array_equal(c[0 : :] c[i : :]))<block_end><block_end><block_end><block_end><block_end><def_stmt>test_tf_fun self<block_start>"""Test that graph mode works and XLA is supported."""<line_sep>bs=10<line_sep>k=16<line_sep>n=32<line_sep>source=BinarySource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>crc_degrees=[<none> "CRC11"]<for_stmt>crc_degree crc_degrees<block_start><for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>@tf.function<def_stmt>run_graph u<block_start><return>dec(u)<block_end>@tf.function(jit_compile=<true>)<def_stmt>run_graph_xla u<block_start><return>dec(u)<block_end>dec=PolarSCLDecoder(frozen_pos n use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter crc_degree=crc_degree)<line_sep># test that for arbitrary input only binary values are # returned u=source([bs n])<line_sep>x=run_graph(u).numpy()<line_sep># execute the graph twice x=run_graph(u).numpy()<line_sep># and change batch_size u=source([bs+1 n])<line_sep>x=run_graph(u).numpy()<if_stmt><not>cpu_only# cpu only does not support XLA # run same test for XLA (jit_compile=True) <block_start>u=source([bs n])<line_sep>x=run_graph_xla(u).numpy()<line_sep>x=run_graph_xla(u).numpy()<line_sep>u=source([bs+1 n])<line_sep>x=run_graph_xla(u).numpy()<block_end><block_end><block_end><block_end><block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_ref_implementation self<block_start>"""Test against pre-calculated results from internal implementation. Also verifies that all decoding options yield same results. Remark: results are for SC only, i.e., list_size=1. 
"""<line_sep>ref_path='../test/codes/polar/'<line_sep>filename=["P_128_37" "P_128_110" "P_256_128"]<for_stmt>f filename<block_start>A=np.load(ref_path+f+"_Avec.npy")<line_sep>llr_ch=np.load(ref_path+f+"_Lch.npy")<line_sep>u_hat=np.load(ref_path+f+"_uhat.npy")<line_sep>frozen_pos=np.array(np.where(A<eq>0)[0])<line_sep>info_pos=np.array(np.where(A<eq>1)[0])<line_sep>n=len(frozen_pos)+len(info_pos)<line_sep>k=len(info_pos)<for_stmt>use_fast_scl [<false> <true>]<block_start><for_stmt>cpu_only [<false> <true>]<block_start><for_stmt>use_scatter [<false> <true>]<block_start>dec=PolarSCLDecoder(frozen_pos n list_size=1 use_fast_scl=use_fast_scl cpu_only=cpu_only use_scatter=use_scatter)<line_sep>l_in=-1.<times>llr_ch# logits u_hat_tf=dec(l_in).numpy()<line_sep># the output should be equal to the reference self.assertTrue(np.array_equal(u_hat_tf u_hat))<block_end><block_end><block_end><block_end><block_end><def_stmt>test_hybrid_scl self<block_start>"""Verify hybrid SC decoding option. Remark: XLA is currently not supported. """<line_sep>bs=10<line_sep>n=32<line_sep>k=16<line_sep>crc_degree="CRC11"<line_sep>list_sizes=[1 2 8 32]<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>source=BinarySource()<line_sep>enc=PolarEncoder(frozen_pos n)<line_sep>enc_crc=CRCEncoder(crc_degree)<line_sep>k_crc=enc_crc.crc_length<line_sep>u=source([bs k-k_crc])<line_sep>u_crc=enc_crc(u)<line_sep>c=enc(u_crc)<line_sep>llr_ch=20.<times>(2.<times>c-1)# demod BPSK witout noise <for_stmt>list_size list_sizes<block_start>dec=PolarSCLDecoder(frozen_pos n list_size=list_size use_hybrid_sc=<true> crc_degree=crc_degree)<line_sep>u_hat=dec(llr_ch)<line_sep>self.assertTrue(np.array_equal(u_crc.numpy() u_hat.numpy()))<line_sep># verify that graph can be executed @tf.function<def_stmt>run_graph u<block_start><return>dec(u)<block_end>u=source([bs n])<line_sep># execute the graph twice x=run_graph(u).numpy()<line_sep>x=run_graph(u).numpy()<line_sep># and change batch_size u=source([bs+1 n])<line_sep>x=run_graph(u).numpy()<block_end><block_end><def_stmt>test_dtype_flexible self<block_start>"""Test that output_dtype is variable."""<line_sep>batch_size=100<line_sep>k=30<line_sep>n=64<line_sep>source=GaussianPriorSource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>dtypes_supported=(tf.float16 tf.float32 tf.float64)<for_stmt>dt_in dtypes_supported<block_start><for_stmt>dt_out dtypes_supported<block_start>llr=source([[batch_size n] 0.5])<line_sep>llr=tf.cast(llr dt_in)<line_sep>dec=PolarSCLDecoder(frozen_pos n output_dtype=dt_out)<line_sep>x=dec(llr)<line_sep>self.assertTrue(x.dtype<eq>dt_out)<block_end><block_end># test that complex-valued inputs raise error llr=source([[batch_size n] 0.5])<line_sep>llr_c=tf.complex(llr tf.zeros_like(llr))<line_sep>dec=PolarSCLDecoder(frozen_pos n output_dtype=tf.float32)<with_stmt>self.assertRaises(TypeError)<block_start>x=dec(llr_c)<block_end><block_end><block_end><class_stmt>TestPolarDecodingBP(unittest.TestCase)<block_start>"""Test Polar BP decoder."""<def_stmt>test_invalid_inputs self<block_start>"""Test against invalid values of n and frozen_pos."""<line_sep># frozen vec to long n=32<line_sep>frozen_pos=np.arange(n+1)<with_stmt>self.assertRaises(AssertionError)<block_start>PolarBPDecoder(frozen_pos n)<block_end># n not a pow of 2 # frozen vec to long n=32<line_sep>k=12<line_sep>frozen_pos,_=generate_5g_ranking(k n)<with_stmt>self.assertRaises(AssertionError)<block_start>PolarBPDecoder(frozen_pos n+1)<block_end># test also valid shapes # (k, n) param_valid=[[0 32] [10 32] [32 32] 
[100 256] [123 1024] [1024 1024]]<for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>PolarBPDecoder(frozen_pos p[1])<block_end># no complex-valued input allowed <with_stmt>self.assertRaises(ValueError)<block_start>frozen_pos,_=generate_5g_ranking(32 64)<line_sep>PolarBPDecoder(frozen_pos 64 output_dtype=tf.complex64)<block_end><block_end><def_stmt>test_output_dim self<block_start>"""Test that output dims are correct (=n) and output is all-zero codeword."""<line_sep># batch size bs=10<line_sep># (k, n) param_valid=[[1 32] [10 32] [32 32] [100 256] [123 1024] [1024 1024]]<for_stmt>hard_out [<true> <false>]<block_start><for_stmt>p param_valid<block_start>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>dec=PolarBPDecoder(frozen_pos p[1] hard_out=hard_out)<line_sep># all-zero with BPSK (no noise);logits c=-10.<times>np.ones([bs p[1]])<line_sep>u=dec(c).numpy()<line_sep>self.assertTrue(u.shape[-1]<eq>p[0])<if_stmt>hard_out# also check that all-zero input yields all-zero output <block_start>u_hat=np.zeros([bs p[0]])<line_sep>self.assertTrue(np.array_equal(u u_hat))<block_end><block_end><block_end><block_end><def_stmt>test_identity self<block_start>"""Test that info bits can be recovered if no noise is added."""<line_sep>bs=10<line_sep># (k, n) param_valid=[[1 32] [10 32] [32 32] [100 256] [123 1024] [1024 1024]]<for_stmt>p param_valid<block_start>source=BinarySource()<line_sep>frozen_pos,_=generate_5g_ranking(p[0] p[1])<line_sep>enc=PolarEncoder(frozen_pos p[1])<line_sep>dec=PolarBPDecoder(frozen_pos p[1])<line_sep>u=source([bs p[0]])<line_sep>c=enc(u)<line_sep>llr_ch=20.<times>(2.<times>c-1)# demod BPSK witout noise u_hat=dec(llr_ch)<line_sep>self.assertTrue(np.array_equal(u.numpy() u_hat.numpy()))<block_end><block_end><def_stmt>test_keras self<block_start>"""Test that Keras model can be compiled (supports dynamic shapes)."""<line_sep>bs=10<line_sep>k=100<line_sep>n=128<line_sep>source=BinarySource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>inputs=tf.keras.Input(shape=(n) dtype=tf.float32)<line_sep>x=PolarBPDecoder(frozen_pos n)(inputs)<line_sep>model=tf.keras.Model(inputs=inputs outputs=x)<line_sep>b=source([bs n])<line_sep>model(b)<line_sep># call twice to see that bs can change b2=source([bs+1 n])<line_sep>model(b2)<line_sep>model.summary()<block_end><def_stmt>test_multi_dimensional self<block_start>"""Test against arbitrary shapes."""<line_sep>k=120<line_sep>n=256<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>source=BinarySource()<line_sep>dec=PolarBPDecoder(frozen_pos n)<line_sep>b=source([100 n])<line_sep>b_res=tf.reshape(b [4 5 5 n])<line_sep># encode 2D Tensor c=dec(b).numpy()<line_sep># encode 4D Tensor c_res=dec(b_res).numpy()<line_sep># and reshape to 2D shape c_res=tf.reshape(c_res [100 k])<line_sep># both version should yield same result self.assertTrue(np.array_equal(c c_res))<block_end><def_stmt>test_batch self<block_start>"""Test that all samples in batch yield same output (for same input). """<line_sep>bs=100<line_sep>k=120<line_sep>n=256<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>source=BinarySource()<line_sep>dec=PolarBPDecoder(frozen_pos n)<line_sep>b=source([1 15 n])<line_sep>b_rep=tf.tile(b [bs 1 1])<line_sep># and run tf version (to be tested) c=dec(b_rep).numpy()<for_stmt>i range(bs)<block_start>self.assertTrue(np.array_equal(c[0 : :] c[i : :]))<block_end><block_end><def_stmt>test_numerics self<block_start>"""Test for numerical stability with large llrs and many iterations. 
"""<line_sep>bs=100<line_sep>k=120<line_sep>n=256<line_sep>num_iter=200<for_stmt>hard_out [<false> <true>]<block_start>frozen_pos,_=generate_5g_ranking(k n)<line_sep>source=GaussianPriorSource()<line_sep>dec=PolarBPDecoder(frozen_pos n hard_out=hard_out num_iter=num_iter)<line_sep>b=source([[bs n] 0.001])# very large llrs c=dec(b).numpy()<line_sep># all values are finite (not nan and not inf) self.assertTrue(np.sum(np.abs(1-np.isfinite(c)))<eq>0)<block_end><block_end><def_stmt>test_tf_fun self<block_start>"""Test that graph mode works and XLA is supported."""<line_sep>@tf.function<def_stmt>run_graph u<block_start><return>dec(u)<block_end>@tf.function(jit_compile=<true>)<def_stmt>run_graph_xla u<block_start><return>dec(u)<block_end>bs=10<line_sep>k=32<line_sep>n=64<line_sep>num_iter=10<line_sep>source=BinarySource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>dec=PolarBPDecoder(frozen_pos n num_iter=num_iter)<line_sep># test that for arbitrary input only 0,1 values are returned u=source([bs n])<line_sep>x=run_graph(u).numpy()<line_sep># execute the graph twice x=run_graph(u).numpy()<line_sep># and change batch_size u=source([bs+1 n])<line_sep>x=run_graph(u).numpy()<line_sep>x=run_graph(u).numpy()<line_sep># Currently not supported # run same test for XLA (jit_compile=True) #u = source([bs, n]) #x = run_graph_xla(u).numpy() #x = run_graph_xla(u).numpy() #u = source([bs+1, n]) #x = run_graph_xla(u).numpy() <block_end><def_stmt>test_ref_implementation self<block_start>"""Test against Numpy reference implementation. Test hard and soft output. """<def_stmt>boxplus_np x y<block_start>"""Check node update (boxplus) for LLRs in numpy. See [Stimming_LLR]_ and [Hashemi_SSCL]_ for detailed equations. """<line_sep>x_in=np.maximum(np.minimum(x llr_max) -llr_max)<line_sep>y_in=np.maximum(np.minimum(y llr_max) -llr_max)<line_sep># avoid division for numerical stability llr_out=np.log(1+np.exp(x_in+y_in))<line_sep>llr_out<augsub>np.log(np.exp(x_in)+np.exp(y_in))<line_sep><return>llr_out<block_end><def_stmt>decode_bp llr_ch n_iter frozen_pos info_pos<block_start>n=llr_ch.shape[-1]<line_sep>bs=llr_ch.shape[0]<line_sep>n_stages=int(np.log2(n))<line_sep>msg_r=np.zeros([bs n_stages+1 n])<line_sep>msg_l=np.zeros([bs n_stages+1 n])<line_sep># init llr_ch msg_l[: n_stages :]=-1<times>llr_ch.numpy()<line_sep># init frozen positions with infty msg_r[: 0 frozen_pos]=llr_max<line_sep># and decode <for_stmt>iter range(n_iter)# update r messages <block_start><for_stmt>s range(n_stages)# calc indices <block_start>ind_range=np.arange(int(n/2))<line_sep>ind_1=ind_range<times>2-np.mod(ind_range 2<power>(s))<line_sep>ind_2=ind_1+2<power>s<line_sep># load messages l1_in=msg_l[: s+1 ind_1]<line_sep>l2_in=msg_l[: s+1 ind_2]<line_sep>r1_in=msg_r[: s ind_1]<line_sep>r2_in=msg_r[: s ind_2]<line_sep># r1_out msg_r[: s+1 ind_1]=boxplus_np(r1_in l2_in+r2_in)<line_sep># r2_out msg_r[: s+1 ind_2]=boxplus_np(r1_in l1_in)+r2_in<block_end># update l messages <for_stmt>s range(n_stages-1 -1 -1)<block_start>ind_range=np.arange(int(n/2))<line_sep>ind_1=ind_range<times>2-np.mod(ind_range 2<power>(s))<line_sep>ind_2=ind_1+2<power>s<line_sep>l1_in=msg_l[: s+1 ind_1]<line_sep>l2_in=msg_l[: s+1 ind_2]<line_sep>r1_in=msg_r[: s ind_1]<line_sep>r2_in=msg_r[: s ind_2]<line_sep># l1_out msg_l[: s ind_1]=boxplus_np(l1_in l2_in+r2_in)<line_sep># l2_out msg_l[: s ind_2]=boxplus_np(r1_in l1_in)+l2_in<block_end><block_end># recover u_hat u_hat_soft=msg_l[: 0 info_pos]<line_sep>u_hat=0.5<times>(1-np.sign(u_hat_soft))<line_sep><return>u_hat 
u_hat_soft<block_end># generate llr_ch noise_var=0.3<line_sep>num_iters=[5 10 20 40]<line_sep>llr_max=19.3<line_sep>bs=100<line_sep>n=128<line_sep>k=64<line_sep>frozen_pos,info_pos=generate_5g_ranking(k n)<for_stmt>num_iter num_iters<block_start>source=GaussianPriorSource()<line_sep>llr_ch=source([[bs n] noise_var])<line_sep># and decode dec_bp=PolarBPDecoder(frozen_pos n hard_out=<true> num_iter=num_iter)<line_sep>dec_bp_soft=PolarBPDecoder(frozen_pos n hard_out=<false> num_iter=num_iter)<line_sep>u_hat_bp=dec_bp(llr_ch).numpy()<line_sep>u_hat_bp_soft=dec_bp_soft(llr_ch ).numpy()<line_sep># and run BP decoder u_hat_ref,u_hat_ref_soft=decode_bp(llr_ch num_iter frozen_pos info_pos)<line_sep># the output should be equal to the reference self.assertTrue(np.array_equal(u_hat_bp u_hat_ref))<line_sep>self.assertTrue(np.allclose(-u_hat_bp_soft u_hat_ref_soft rtol=5e-2 atol=5e-3))<block_end><block_end><def_stmt>test_dtype_flexible self<block_start>"""Test that output dtype is variable."""<line_sep>batch_size=100<line_sep>k=30<line_sep>n=64<line_sep>source=GaussianPriorSource()<line_sep>frozen_pos,_=generate_5g_ranking(k n)<line_sep>dtypes_supported=(tf.float16 tf.float32 tf.float64)<for_stmt>dt_in dtypes_supported<block_start><for_stmt>dt_out dtypes_supported<block_start>llr=source([[batch_size n] 0.5])<line_sep>llr=tf.cast(llr dt_in)<line_sep>dec=PolarBPDecoder(frozen_pos n output_dtype=dt_out)<line_sep>x=dec(llr)<line_sep>self.assertTrue(x.dtype<eq>dt_out)<block_end><block_end># test that complex inputs raise error llr=source([[batch_size n] 0.5])<line_sep>llr_c=tf.complex(llr tf.zeros_like(llr))<line_sep>dec=PolarBPDecoder(frozen_pos n output_dtype=tf.float32)<with_stmt>self.assertRaises(TypeError)<block_start>x=dec(llr_c)<block_end><block_end><block_end><class_stmt>TestPolarDecoding5G(unittest.TestCase)<block_start><def_stmt>test_invalid_inputs self<block_start>"""Test against invalid input values. Note: consistency of code parameters is already checked by the encoder. 
"""<line_sep>enc=Polar5GEncoder(40 60)<with_stmt>self.assertRaises(AssertionError)<block_start>Polar5GDecoder(enc dec_type=1)<block_end><with_stmt>self.assertRaises(ValueError)<block_start>Polar5GDecoder(enc dec_type="ABC")<block_end><with_stmt>self.assertRaises(AssertionError)<block_start>Polar5GDecoder("SC")<block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_identity_de_ratematching self<block_start>"""Test that info bits can be recovered if no noise is added and dimensions are correct."""<line_sep>bs=10<line_sep># (k,n) param_valid=[[12 32] [20 32] [100 257] [123 897] [1013 1088]]<line_sep>dec_types=["SC" "SCL" "hybSCL" "BP"]<for_stmt>p param_valid<block_start><for_stmt>dec_type dec_types<block_start>source=BinarySource()<line_sep>enc=Polar5GEncoder(p[0] p[1])<line_sep>dec=Polar5GDecoder(enc dec_type=dec_type)<line_sep>u=source([bs p[0]])<line_sep>c=enc(u)<line_sep>self.assertTrue(c.numpy().shape[-1]<eq>p[1])<line_sep>llr_ch=20.<times>(2.<times>c-1)# demod BPSK witout noise u_hat=dec(llr_ch)<line_sep>self.assertTrue(np.array_equal(u.numpy() u_hat.numpy()))<block_end><block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_keras self<block_start>"""Test that Keras model can be compiled (supports dynamic shapes)."""<line_sep>bs=10<line_sep>k=100<line_sep>n=145<line_sep>source=BinarySource()<line_sep>enc=Polar5GEncoder(k n)<line_sep>dec_types=["SC" "SCL" "hybSCL" "BP"]<for_stmt>dec_type dec_types<block_start>inputs=tf.keras.Input(shape=(n) dtype=tf.float32)<line_sep>x=Polar5GDecoder(enc dec_type=dec_type)(inputs)<line_sep>model=tf.keras.Model(inputs=inputs outputs=x)<line_sep>b=source([bs n])<line_sep>model(b)<line_sep># call twice to see that bs can change b2=source([bs+1 n])<line_sep>model(b2)<line_sep>model.summary()<block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_multi_dimensional self<block_start>"""Test against arbitrary shapes."""<line_sep>k=120<line_sep>n=237<line_sep>enc=Polar5GEncoder(k n)<line_sep>source=BinarySource()<line_sep>dec_types=["SC" "SCL" "hybSCL" "BP"]<for_stmt>dec_type dec_types<block_start>dec=Polar5GDecoder(enc dec_type=dec_type)<line_sep>b=source([100 n])<line_sep>b_res=tf.reshape(b [4 5 5 n])<line_sep># encode 2D Tensor c=dec(b).numpy()<line_sep># encode 4D Tensor c_res=dec(b_res).numpy()<line_sep># and reshape to 2D shape c_res=tf.reshape(c_res [100 k])<line_sep># both version should yield same result self.assertTrue(np.array_equal(c c_res))<block_end><block_end># Filter warnings related to large ressource allocation @pytest.mark.filterwarnings("ignore: Required ressource allocation")<def_stmt>test_batch self<block_start>"""Test that all samples in batch yield same output (for same input). 
"""<line_sep>bs=100<line_sep>k=95<line_sep>n=145<line_sep>enc=Polar5GEncoder(k n)<line_sep>source=GaussianPriorSource()<line_sep>dec_types=["SC" "SCL" "hybSCL" "BP"]<for_stmt>dec_type dec_types<block_start>dec=Polar5GDecoder(enc dec_type=dec_type)<line_sep>llr=source([[1 4 n] 0.5])<line_sep>llr_rep=tf.tile(llr [bs 1 1])<line_sep># and run tf version (to be tested) c=dec(llr_rep).numpy()<for_stmt>i range(bs)<block_start>self.assertTrue(np.array_equal(c[0 : :] c[i : :]))<block_end><block_end><block_end><def_stmt>test_tf_fun self<block_start>"""Test that tf.function decorator works include xla compiler test."""<line_sep>bs=10<line_sep>k=45<line_sep>n=67<line_sep>enc=Polar5GEncoder(k n)<line_sep>source=GaussianPriorSource()<line_sep># hybSCL does not support graph mode! dec_types=["SC" "SCL" "BP"]<for_stmt>dec_type dec_types<block_start>print(dec_type)<line_sep>dec=Polar5GDecoder(enc dec_type=dec_type)<line_sep>@tf.function<def_stmt>run_graph u<block_start><return>dec(u)<block_end>@tf.function(jit_compile=<true>)<def_stmt>run_graph_xla u<block_start><return>dec(u)<block_end># test that for arbitrary input only binary values are returned u=source([[bs n] 0.5])<line_sep>x=run_graph(u).numpy()<line_sep># execute the graph twice x=run_graph(u).numpy()<line_sep># and change batch_size u=source([[bs+1 n] 0.5])<line_sep>x=run_graph(u).numpy()<line_sep># run same test for XLA (jit_compile=True) # BP does currently not support XLA <if_stmt>dec_type<ne>"BP"<block_start>u=source([[bs n] 0.5])<line_sep>x=run_graph_xla(u).numpy()<line_sep>x=run_graph_xla(u).numpy()<line_sep>u=source([[bs+1 n] 0.5])<line_sep>x=run_graph_xla(u).numpy()<block_end><block_end><block_end><def_stmt>test_dtype_flexible self<block_start>"""Test that output dtype can be variable."""<line_sep>batch_size=100<line_sep>k=30<line_sep>n=64<line_sep>source=GaussianPriorSource()<line_sep>enc=Polar5GEncoder(k n)<line_sep>dtypes_supported=(tf.float16 tf.float32 tf.float64)<for_stmt>dt_in dtypes_supported<block_start><for_stmt>dt_out dtypes_supported<block_start>llr=source([[batch_size n] 0.5])<line_sep>llr=tf.cast(llr dt_in)<line_sep>dec=Polar5GDecoder(enc output_dtype=dt_out)<line_sep>x=dec(llr)<line_sep>self.assertTrue(x.dtype<eq>dt_out)<block_end><block_end># test that complex inputs raise error llr=source([[batch_size n] 0.5])<line_sep>llr_c=tf.complex(llr tf.zeros_like(llr))<line_sep>dec=Polar5GDecoder(enc output_dtype=tf.float32)<with_stmt>self.assertRaises(TypeError)<block_start>x=dec(llr_c)<block_end><block_end><block_end>
# IMPORTATION STANDARD # IMPORTATION THIRDPARTY <import_stmt>pytest<line_sep># IMPORTATION INTERNAL <import_from_stmt>openbb_terminal.stocks.due_diligence csimarket_model<line_sep>@pytest.mark.vcr<def_stmt>test_get_suppliers recorder<block_start>result_txt=csimarket_model.get_suppliers(ticker="TSLA")<line_sep>recorder.capture(result_txt)<block_end>@pytest.mark.vcr<def_stmt>test_get_suppliers_invalid recorder<block_start>result_txt=csimarket_model.get_suppliers(ticker="INVALID_TICKER")<line_sep>recorder.capture(result_txt)<block_end>@pytest.mark.vcr<def_stmt>test_get_customers recorder<block_start>result_txt=csimarket_model.get_customers(ticker="TSLA")<line_sep>recorder.capture(result_txt)<block_end>@pytest.mark.vcr<def_stmt>test_get_customers_invalid recorder<block_start>result_txt=csimarket_model.get_customers(ticker="INVALID_TICKER")<line_sep>recorder.capture(result_txt)<block_end>
<def_stmt>extractSharramycatsTranslations item<block_start>""" 'Sharramycats Translations' """<line_sep>vol,chp,frag,postfix=extractVolChapterFragmentPostfix(item['title'])<if_stmt><not>(chp<or>vol<or>frag)<or>'preview'<in>item['title'].lower()<block_start><return><none><block_end>tagmap=[('11 Ways to Forget Your Ex-Boyfriend' '11 Ways to Forget Your Ex-Boyfriend' 'translated') ('The Monster Inside Of My Bed' 'The Monster Inside Of My Bed' 'translated') ('The Peculiars\' Tale' 'The Peculiars\' Tale' 'translated') ('ARG' '<NAME>.' 'translated') ('Legend of Gemini' 'Legend of Gemini' 'translated') ('Kaliskis' 'Kaliskis' 'translated') ('She Died' 'She Died' 'translated') ('Ice Goddess' 'Ice Goddess' 'translated') ('The Friendly Wedding' 'The Friendly Wedding' 'translated') ('Forlorn Madness' 'Forlorn Madness' 'translated') ('Hidden Inside The Academy' 'Hidden Inside The Academy' 'translated') ('The Señorita' 'The Señorita' 'translated') ('School Of Myths' 'School of Myths' 'translated') ('The Guys Inside of My Bed' 'The Guys Inside of My Bed' 'translated') ('The Guy Inside Of My Bed' 'The Guys Inside of My Bed' 'translated') ('Titan Academy Of Special Abilities' 'Titan Academy Of Special Abilities' 'oel') ]<for_stmt>tagname,name,tl_type tagmap<block_start><if_stmt>tagname<in>item['tags']<block_start><return>buildReleaseMessageWithType(item name vol chp frag=frag postfix=postfix tl_type=tl_type)<block_end><block_end><return><false><block_end>
# Copyright (c) Glow Contributors. See CONTRIBUTORS file. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>torch<import_from_stmt>tests utils<class_stmt>CopyModel(torch.nn.Module)<block_start><def_stmt>__init__ self shape<block_start>super(CopyModel self).__init__()<line_sep>self.other=torch.randn(shape)<block_end><def_stmt>forward self a<block_start>b=a.copy_(self.other)<line_sep><return>a+b<block_end><block_end><class_stmt>TestCopy(utils.TorchGlowTestCase)<block_start>@utils.deterministic_expand([<lambda>:("1x1 => 1x3" [1 1] [1 3]) <lambda>:("1x3x5 => 1x3x5" [1 3 5] [1 3 5]) <lambda>:("1x3 => 4x4x3" [1 3] [4 4 3]) ])<def_stmt>test_copy_ self _ other_shape tensor_shape<block_start>"""Test of the PyTorch copy_ method on Glow."""<line_sep>utils.compare_tracing_methods(CopyModel(other_shape) torch.randn(tensor_shape) fusible_ops={"aten::copy_"} )<block_end>@utils.deterministic_expand([<lambda>:("1x1x1 => 1x3" [1 1 1] [1 3]) <lambda>:("1x4 => 4x4x3" [1 4] [4 4 3]) <lambda>:("4x4x3 => 1x3" [4 4 3] [1 3]) ])<def_stmt>test_copy_broadcast_failure self _ other_shape tensor_shape<block_start>"""Test of the PyTorch copy_ method on Glow."""<with_stmt>self.assertRaises(RuntimeError)<block_start>utils.compare_tracing_methods(CopyModel(other_shape) torch.randn(tensor_shape) fusible_ops={"aten::copy_"} )<block_end><block_end><block_end>
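# Editor's note (assumed illustration, plain PyTorch without Glow): the two test groups
# above hinge on torch's broadcast rule for copy_. The source tensor must be broadcastable
# to the destination's shape, otherwise a RuntimeError is raised, which is exactly what
# test_copy_broadcast_failure expects.
import torch

dst = torch.randn(4, 4, 3)
dst.copy_(torch.randn(1, 3))        # ok: (1, 3) broadcasts to (4, 4, 3)

try:
    dst.copy_(torch.randn(1, 4))    # fails: trailing dims 4 vs 3 do not match
except RuntimeError as err:
    print("expected broadcast failure:", err)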
<import_from_stmt>unittest.mock MagicMock<import_stmt>pytest<import_from_stmt>pyspark.sql SparkSession<import_from_stmt>prefect.tasks.sodaspark SodaSparkScan<class_stmt>TestSodaSparkScan<block_start><def_stmt>test_construction_provide_scan_and_df self<block_start>expected_scan_def="/foo/bar.yaml"<line_sep>expected_df=SparkSession.builder.getOrCreate().createDataFrame([{"id":123 "value":"foo"} {"id":456 "value":"bar"}])<line_sep>soda_spark_scan_task=SodaSparkScan(scan_def=expected_scan_def df=expected_df)<assert_stmt>soda_spark_scan_task.scan_def<eq>expected_scan_def<assert_stmt>soda_spark_scan_task.df<eq>expected_df<block_end><def_stmt>test_construction_no_scan_and_df self<block_start>soda_spark_scan_task=SodaSparkScan()<assert_stmt>soda_spark_scan_task.scan_def<is><none><assert_stmt>soda_spark_scan_task.df<is><none><block_end># @pytest.mark.skip(reason="Requires PySpark and Java to be installed") <def_stmt>test_run_no_scan self<block_start>df=SparkSession.builder.getOrCreate().createDataFrame([{"id":123 "value":"foo"} {"id":456 "value":"bar"}])<line_sep>soda_spark_scan_task=SodaSparkScan(df=df)<with_stmt>pytest.raises(ValueError)<as>exc<block_start>soda_spark_scan_task.run()<block_end><assert_stmt>"scan_def cannot be None"<in>str(exc)<block_end><def_stmt>test_run_no_df self<block_start>soda_spark_scan_task=SodaSparkScan(scan_def="/foo/bar.yaml")<with_stmt>pytest.raises(ValueError)<as>exc<block_start>soda_spark_scan_task.run()<block_end><assert_stmt>"df cannot be None"<in>str(exc)<block_end># @pytest.mark.skip(reason="Requires PySpark and Java to be installed") <def_stmt>test_run_invalid_scan self monkeypatch<block_start>scan_def="invalid scan definition"<line_sep>df=SparkSession.builder.getOrCreate().createDataFrame([{"id":123 "value":"foo"} {"id":456 "value":"bar"}])<line_sep>soda_spark_scan_task=SodaSparkScan(scan_def=scan_def df=df)<with_stmt>pytest.raises(AttributeError)<block_start>soda_spark_scan_task.run()<block_end><block_end><def_stmt>test_run_invalid_df self monkeypatch<block_start>scan_def=""" table_name: demodata metrics: - row_count - max - min_length tests: - row_count > 0 """<line_sep>df="not a valid df"<line_sep>soda_spark_scan_task=SodaSparkScan(scan_def=scan_def df=df)<with_stmt>pytest.raises(AttributeError)<block_start>soda_spark_scan_task.run()<block_end><block_end># @pytest.mark.skip(reason="Requires PySpark and Java to be installed") <def_stmt>test_run_valid_scan_and_df_with_measurements self<block_start>scan_def=""" table_name: demodata metrics: - row_count - max - min_length tests: - row_count > 0 """<line_sep>df=SparkSession.builder.getOrCreate().createDataFrame([{"id":123 "value":"foo"} {"id":456 "value":"bar"}])<line_sep>soda_spark_scan_task=SodaSparkScan(scan_def=scan_def df=df)<line_sep>res=soda_spark_scan_task.run()<assert_stmt>hasattr(res "measurements")<block_end># @pytest.mark.skip(reason="Requires PySpark and Java to be installed") <def_stmt>test_run_valid_scan_and_df_with_errors self<block_start>scan_def=""" table_name: demodata metrics: - row_count - max - min_length tests: - row_count == 0 """<line_sep>df=SparkSession.builder.getOrCreate().createDataFrame([{"id":123 "value":"foo"} {"id":456 "value":"bar"}])<line_sep>soda_spark_scan_task=SodaSparkScan(scan_def=scan_def df=df)<line_sep>res=soda_spark_scan_task.run()<assert_stmt>hasattr(res "errors")<block_end><block_end>
<import_from_stmt>.acl_list acl_list<import_from_stmt>.eig2_nL eig2_nL eig2nL_subgraph<import_from_stmt>.fista_dinput_dense fista_dinput_dense<import_from_stmt>.sweepcut sweepcut<line_sep>
# Author: <NAME> # License: BSD <import_stmt>warnings<import_from_stmt>nilearn.input_data NiftiMasker<line_sep>warnings.filterwarnings("ignore" category=DeprecationWarning)<import_stmt>os<import_from_stmt>os.path expanduser join<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>seaborn<as>sns<import_from_stmt>joblib Memory dump<import_from_stmt>joblib Parallel delayed<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>sklearn.utils check_random_state<import_from_stmt>modl.datasets fetch_adhd<import_from_stmt>modl.decomposition.fmri fMRIDictFact<import_from_stmt>modl.decomposition.stability mean_amari_discrepency<import_from_stmt>modl.plotting.fmri display_maps<import_from_stmt>nilearn.datasets fetch_atlas_smith_2009<import_from_stmt>modl.utils.system get_cache_dirs<line_sep>batch_size=200<line_sep>learning_rate=.92<line_sep>method='masked'<line_sep>step_size=0.01<line_sep>reduction_=8<line_sep>alpha=1e-3<line_sep>n_epochs=4<line_sep>verbose=15<line_sep>n_jobs=70<line_sep>smoothing_fwhm=6<line_sep>components_list=[20 40 80 120 200 300 500]<line_sep>n_runs=20<line_sep>dict_init=fetch_atlas_smith_2009().rsn20<line_sep>dataset=fetch_adhd(n_subjects=40)<line_sep>data=dataset.rest.values<line_sep>train_data,test_data=train_test_split(data test_size=2 random_state=0)<line_sep>train_imgs,train_confounds=zip(*train_data)<line_sep>test_imgs,test_confounds=zip(*test_data)<line_sep>mask=dataset.mask<line_sep>mem=Memory(location=get_cache_dirs()[0])<line_sep>masker=NiftiMasker(mask_img=mask).fit()<def_stmt>fit_single train_imgs test_imgs n_components random_state<block_start>dict_fact=fMRIDictFact(smoothing_fwhm=smoothing_fwhm method=method step_size=step_size mask=mask memory=mem memory_level=2 verbose=verbose n_epochs=n_epochs n_jobs=1 random_state=random_state n_components=n_components positive=<true> learning_rate=learning_rate batch_size=batch_size reduction=reduction_ alpha=alpha callback=<none> )<line_sep>dict_fact.fit(train_imgs confounds=train_confounds)<line_sep>score=dict_fact.score(test_imgs)<line_sep><return>dict_fact.components_ score<block_end><def_stmt>fit_many_runs train_imgs test_imgs components_list n_runs=10 n_jobs=1<block_start>random_states=check_random_state(0).randint(0 int(1e7) size=n_runs)<line_sep>cached_fit=mem.cache(fit_single)<line_sep>res=Parallel(n_jobs=n_jobs)(delayed(cached_fit)(train_imgs test_imgs n_components random_state)<for>n_components components_list<for>random_state random_states)<line_sep>components,scores=zip(*res)<line_sep>shape=(len(components_list) len(random_states))<line_sep>components=np.array(components).reshape(shape).tolist()<line_sep>scores=np.array(scores).reshape(shape).tolist()<line_sep>discrepencies=[]<line_sep>var_discrepencies=[]<line_sep>best_components=[]<for_stmt>n_components,these_components,these_scores zip(components_list components scores)<block_start>discrepency,var_discrepency=mean_amari_discrepency(these_components)<line_sep>best_estimator=these_components[np.argmin(these_scores)]<line_sep>discrepencies.append(var_discrepency)<line_sep>var_discrepencies.append(var_discrepency)<line_sep>best_components.append(best_estimator)<block_end>discrepencies=np.array(discrepencies)<line_sep>var_discrepencies=np.array(var_discrepencies)<line_sep>best_components=np.array(best_components)<line_sep>components=best_components[np.argmin(discrepencies)]<line_sep><return>discrepencies var_discrepencies 
components<block_end>output_dir=expanduser('~/output_drago4/modl/fmri_stability2')<if_stmt><not>os.path.exists(output_dir)<block_start>os.makedirs(output_dir)<block_end>discrepencies,var_discrepencies,components=fit_many_runs(train_imgs test_imgs components_list n_jobs=n_jobs n_runs=n_runs)<line_sep>components_img=masker.inverse_transform(components)<line_sep>components_img.to_filename(join(output_dir 'components.nii.gz'))<line_sep>dump((components_list discrepencies var_discrepencies) join(output_dir 'discrepencies.pkl'))<line_sep>fig=plt.figure()<line_sep>display_maps(fig components_img)<line_sep>plt.savefig(join(output_dir 'components.pdf'))<line_sep>fig,ax=plt.subplots(1 1)<line_sep>ax.fill_between(components_list discrepencies-var_discrepencies discrepencies+var_discrepencies alpha=0.5)<line_sep>ax.plot(components_list discrepencies marker='o')<line_sep>ax.set_xlabel('Number of components')<line_sep>ax.set_ylabel('Mean Amari discrepency')<line_sep>sns.despine(fig)<line_sep>fig.suptitle('Stability selection using DL')<line_sep>plt.savefig(join(output_dir 'discrepencies.pdf'))<line_sep>
<import_stmt>string<import_stmt>random<line_sep>#Characters List to Generate Password characters=list(string.ascii_letters+string.digits+"!@#$%^&*()")<def_stmt>password_gen #Length of Password from the User <block_start>length=int(input("Password length: "))<line_sep>#Shuffling the Characters random.shuffle(characters)<line_sep>#Picking random Characters from the given List password=[]<for_stmt>i range(length)<block_start>password.append(random.choice(characters))<block_end>#Shuffling the Resultant Password random.shuffle(password)<line_sep>#Converting the List to String #Printing the List print("".join(password))<block_end>#Invoking the function password_gen()<line_sep>
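# Editor's note (variant sketch, not part of the original script): for real passwords the
# standard-library `secrets` module is preferable to `random`, since `random` is not
# cryptographically secure. Same character set as the script above.
import secrets
import string

def secure_password_gen(length: int) -> str:
    alphabet = string.ascii_letters + string.digits + "!@#$%^&*()"
    # secrets.choice draws from a CSPRNG, so no shuffling is needed
    return "".join(secrets.choice(alphabet) for _ in range(length))

print(secure_password_gen(16))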
__all__=["resnet50_caffe_fpn_1x" "resnet50_fpn_1x" "resnet50_fpn_2x" "resnet101_caffe_fpn_1x" "resnet101_fpn_1x" "resnet101_fpn_2x" "resnext101_32x4d_fpn_1x" "resnext101_32x4d_fpn_2x" "resnext101_64x4d_fpn_1x" "resnext101_64x4d_fpn_2x" ]<import_from_stmt>icevision.imports *<import_from_stmt>icevision.models.mmdet.utils *<class_stmt>MMDetRetinanetBackboneConfig(MMDetBackboneConfig)<block_start><def_stmt>__init__ self **kwargs<block_start>super().__init__(model_name="retinanet" **kwargs)<block_end><block_end>base_config_path=mmdet_configs_path/"retinanet"<line_sep>base_weights_url="http://download.openmmlab.com/mmdetection/v2.0/retinanet"<line_sep>resnet50_caffe_fpn_1x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_r50_caffe_fpn_1x_coco.py" weights_url=f"{base_weights_url}/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth" )<line_sep>resnet50_fpn_1x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_r50_fpn_1x_coco.py" weights_url=f"{base_weights_url}/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth" )<line_sep>resnet50_fpn_2x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_r50_fpn_2x_coco.py" weights_url=f"{base_weights_url}/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth" )<line_sep>resnet101_caffe_fpn_1x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_r101_caffe_fpn_1x_coco.py" weights_url=f"{base_weights_url}/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth" )<line_sep>resnet101_fpn_1x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_r101_fpn_1x_coco.py" weights_url=f"{base_weights_url}/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth" )<line_sep>resnet101_fpn_2x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_r101_fpn_2x_coco.py" weights_url=f"{base_weights_url}/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth" )<line_sep>resnext101_32x4d_fpn_1x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_x101_32x4d_fpn_1x_coco.py" weights_url=f"{base_weights_url}/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth" )<line_sep>resnext101_32x4d_fpn_2x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_x101_32x4d_fpn_2x_coco.py" weights_url=f"{base_weights_url}/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth" )<line_sep>resnext101_64x4d_fpn_1x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_x101_64x4d_fpn_1x_coco.py" weights_url=f"{base_weights_url}/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth" )<line_sep>resnext101_64x4d_fpn_2x=MMDetRetinanetBackboneConfig(config_path=base_config_path/"retinanet_x101_64x4d_fpn_2x_coco.py" weights_url=f"{base_weights_url}/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth" )<line_sep>
"""Test code snippets embedded in the docs. Reference: https://sybil.readthedocs.io/en/latest/use.html#pytest """<import_from_stmt>doctest NORMALIZE_WHITESPACE<import_from_stmt>os chdir getcwd<import_from_stmt>shutil rmtree<import_from_stmt>tempfile mkdtemp<import_stmt>pytest<import_from_stmt>sybil Sybil<import_from_stmt>sybil.parsers.doctest DocTestParser<import_from_stmt>sybil.parsers.skip skip<line_sep>@pytest.fixture(scope="module")<def_stmt>tempdir <block_start>path=mkdtemp()<line_sep>cwd=getcwd()<try_stmt><block_start>chdir(path)<line_sep><yield>path<block_end><finally_stmt><block_start>chdir(cwd)<line_sep>rmtree(path)<block_end><block_end>pytest_collect_file=Sybil(parsers=[DocTestParser(optionflags=NORMALIZE_WHITESPACE) skip] pattern="*.rst" fixtures=["tempdir"] ).pytest()<line_sep>
<import_stmt>math<import_from_stmt>queue Queue<import_stmt>numpy<as>np<def_stmt>is_connected A<block_start>""" :param A:np.array the adjacency matrix :return:bool whether the graph is connected or not """<for_stmt>_ range(int(1+math.ceil(math.log2(A.shape[0]))))<block_start>A=np.dot(A A)<block_end><return>np.min(A)<g>0<block_end><def_stmt>identity A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return:F """<line_sep><return>F<block_end><def_stmt>first_neighbours A<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, the number of nodes reachable in 1 hop """<line_sep><return>np.sum(A<g>0 axis=0)<block_end><def_stmt>second_neighbours A<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, the number of nodes reachable in no more than 2 hops """<line_sep>A=A<g>0.0<line_sep>A=A+np.dot(A A)<line_sep>np.fill_diagonal(A 0)<line_sep><return>np.sum(A<g>0 axis=0)<block_end><def_stmt>kth_neighbours A k<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, the number of nodes reachable in k hops """<line_sep>A=A<g>0.0<line_sep>R=np.zeros(A.shape)<for_stmt>_ range(k)<block_start>R=np.dot(R A)+A<block_end>np.fill_diagonal(R 0)<line_sep><return>np.sum(R<g>0 axis=0)<block_end><def_stmt>map_reduce_neighbourhood A F f_reduce f_map=<none> hops=1 consider_itself=<false><block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, map its neighbourhood with f_map, and reduce it with f_reduce """<if_stmt>f_map<is><not><none><block_start>F=f_map(F)<block_end>A=np.array(A)<line_sep>A=A<g>0<line_sep>R=np.zeros(A.shape)<for_stmt>_ range(hops)<block_start>R=np.dot(R A)+A<block_end>np.fill_diagonal(R 1<if>consider_itself<else>0)<line_sep>R=R<g>0<line_sep><return>np.array([f_reduce(F[R[i]])<for>i range(A.shape[0])])<block_end><def_stmt>max_neighbourhood A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, the maximum in its neighbourhood """<line_sep><return>map_reduce_neighbourhood(A F np.max consider_itself=<true>)<block_end><def_stmt>min_neighbourhood A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, the minimum in its neighbourhood """<line_sep><return>map_reduce_neighbourhood(A F np.min consider_itself=<true>)<block_end><def_stmt>std_neighbourhood A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, the standard deviation of its neighbourhood """<line_sep><return>map_reduce_neighbourhood(A F np.std consider_itself=<true>)<block_end><def_stmt>mean_neighbourhood A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, the mean of its neighbourhood """<line_sep><return>map_reduce_neighbourhood(A F np.mean consider_itself=<true>)<block_end><def_stmt>local_maxima A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, whether it is the maximum in its neighbourhood """<line_sep><return>F<eq>map_reduce_neighbourhood(A F np.max consider_itself=<true>)<block_end><def_stmt>graph_laplacian A<block_start>""" :param A:np.array the adjacency matrix :return: the laplacian of the adjacency matrix 
"""<line_sep>L=(A<g>0)<times>-1<line_sep>np.fill_diagonal(L np.sum(A<g>0 axis=0))<line_sep><return>L<block_end><def_stmt>graph_laplacian_features A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: the laplacian of the adjacency matrix multiplied by the features """<line_sep><return>np.matmul(graph_laplacian(A) F)<block_end><def_stmt>isomorphism A1 A2 F1=<none> F2=<none><block_start>""" Takes two adjacency matrices (A1,A2) and (optionally) two lists of features. It uses Weisfeiler-Lehman algorithms, so false positives might arise :param A1: adj_matrix, N*N numpy matrix :param A2: adj_matrix, N*N numpy matrix :param F1: node_values, numpy array of size N :param F1: node_values, numpy array of size N :return: isomorphic: boolean which is false when the two graphs are not isomorphic, true when they probably are. """<line_sep>N=A1.shape[0]<if_stmt>(F1<is><none>)^(F2<is><none>)<block_start><raise>ValueError("either both or none between F1,F2 must be defined.")<block_end><if_stmt>F1<is><none># Assign same initial value to each node <block_start>F1=np.ones(N int)<line_sep>F2=np.ones(N int)<block_end><else_stmt><block_start><if_stmt><not>np.array_equal(np.sort(F1) np.sort(F2))<block_start><return><false><block_end><if_stmt>F1.dtype()<ne>int<block_start><raise>NotImplementedError('Still have to implement this')<block_end><block_end>p=1000000007<def_stmt>mapping F<block_start><return>(F<times>234+133)%1000000007<block_end><def_stmt>adjacency_hash F<block_start>F=np.sort(F)<line_sep>b=257<line_sep>h=0<for_stmt>f F<block_start>h=(b<times>h+f)%1000000007<block_end><return>h<block_end><for_stmt>i range(N)<block_start>F1=map_reduce_neighbourhood(A1 F1 adjacency_hash f_map=mapping consider_itself=<true> hops=1)<line_sep>F2=map_reduce_neighbourhood(A2 F2 adjacency_hash f_map=mapping consider_itself=<true> hops=1)<if_stmt><not>np.array_equal(np.sort(F1) np.sort(F2))<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>count_edges A<block_start>""" :param A:np.array the adjacency matrix :return: the number of edges in the graph """<line_sep><return>np.sum(A)/2<block_end><def_stmt>is_eulerian_cyclable A<block_start>""" :param A:np.array the adjacency matrix :return: whether the graph has an eulerian cycle """<line_sep><return>is_connected(A)<and>np.count_nonzero(first_neighbours(A)%2<eq>1)<eq>0<block_end><def_stmt>is_eulerian_percorrible A<block_start>""" :param A:np.array the adjacency matrix :return: whether the graph has an eulerian path """<line_sep><return>is_connected(A)<and>np.count_nonzero(first_neighbours(A)%2<eq>1)<in>[0 2]<block_end><def_stmt>map_reduce_graph A F f_reduce<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: the features of the nodes reduced by f_reduce """<line_sep><return>f_reduce(F)<block_end><def_stmt>mean_graph A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: the mean of the features """<line_sep><return>map_reduce_graph(A F np.mean)<block_end><def_stmt>max_graph A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: the maximum of the features """<line_sep><return>map_reduce_graph(A F np.max)<block_end><def_stmt>min_graph A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: the minimum of the features """<line_sep><return>map_reduce_graph(A F np.min)<block_end><def_stmt>std_graph A 
F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: the standard deviation of the features """<line_sep><return>map_reduce_graph(A F np.std)<block_end><def_stmt>has_hamiltonian_cycle A<block_start>""" :param A:np.array the adjacency matrix :return:bool whether the graph has an hamiltonian cycle """<line_sep>A<augadd>np.transpose(A)<line_sep>A=A<g>0<line_sep>V=A.shape[0]<def_stmt>ham_cycle_loop pos<block_start><if_stmt>pos<eq>V<block_start><if_stmt>A[path[pos-1]][path[0]]<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><for_stmt>v range(1 V)<block_start><if_stmt>A[path[pos-1]][v]<and><not>used[v]<block_start>path[pos]=v<line_sep>used[v]=<true><if_stmt>ham_cycle_loop(pos+1)<block_start><return><true><block_end>path[pos]=-1<line_sep>used[v]=<false><block_end><block_end><return><false><block_end>used=[<false>]<times>V<line_sep>path=[-1]<times>V<line_sep>path[0]=0<line_sep><return>ham_cycle_loop(1)<block_end><def_stmt>all_pairs_shortest_paths A inf_sub=math.inf<block_start>""" :param A:np.array the adjacency matrix :param inf_sub: the placeholder value to use for pairs which are not connected :return:np.array all pairs shortest paths """<line_sep>A=np.array(A)<line_sep>N=A.shape[0]<for_stmt>i range(N)<block_start><for_stmt>j range(N)<block_start><if_stmt>A[i][j]<eq>0<block_start>A[i][j]=math.inf<block_end><if_stmt>i<eq>j<block_start>A[i][j]=0<block_end><block_end><block_end><for_stmt>k range(N)<block_start><for_stmt>i range(N)<block_start><for_stmt>j range(N)<block_start>A[i][j]=min(A[i][j] A[i][k]+A[k][j])<block_end><block_end><block_end>A=np.where(A<eq>math.inf inf_sub A)<line_sep><return>A<block_end><def_stmt>diameter A<block_start>""" :param A:np.array the adjacency matrix :return: the diameter of the gra[h """<line_sep>sum=np.sum(A)<line_sep>apsp=all_pairs_shortest_paths(A)<line_sep>apsp=np.where(apsp<l>sum+1 apsp -1)<line_sep><return>np.max(apsp)<block_end><def_stmt>eccentricity A<block_start>""" :param A:np.array the adjacency matrix :return: the eccentricity of the gra[h """<line_sep>sum=np.sum(A)<line_sep>apsp=all_pairs_shortest_paths(A)<line_sep>apsp=np.where(apsp<l>sum+1 apsp -1)<line_sep><return>np.max(apsp axis=0)<block_end><def_stmt>sssp_predecessor A F<block_start>""" :param A:np.array the adjacency matrix :param F:np.array the nodes features :return: for each node, the best next step to reach the designated source """<assert_stmt>(np.sum(F)<eq>1)<assert_stmt>(np.max(F)<eq>1)<line_sep>s=np.argmax(F)<line_sep>N=A.shape[0]<line_sep>P=np.zeros(A.shape)<line_sep>V=np.zeros(N)<line_sep>bfs=Queue()<line_sep>bfs.put(s)<line_sep>V[s]=1<while_stmt><not>bfs.empty()<block_start>u=bfs.get()<for_stmt>v range(N)<block_start><if_stmt>A[u][v]<g>0<and>V[v]<eq>0<block_start>V[v]=1<line_sep>P[v][u]=1<line_sep>bfs.put(v)<block_end><block_end><block_end><return>P<block_end><def_stmt>max_eigenvalue A<block_start>""" :param A:np.array the adjacency matrix :return: the maximum eigenvalue of A since A is positive symmetric, all the eigenvalues are guaranteed to be real """<line_sep>[W _]=np.linalg.eig(A)<line_sep><return>W[np.argmax(np.absolute(W))].real<block_end><def_stmt>max_eigenvalues A k<block_start>""" :param A:np.array the adjacency matrix :param k:int the number of eigenvalues to be selected :return: the k greatest (by absolute value) eigenvalues of A """<line_sep>[W _]=np.linalg.eig(A)<line_sep>values=W[sorted(range(len(W)) 
key=<lambda>x:-np.absolute(W[x]))[:k]]<line_sep><return>values.real<block_end><def_stmt>max_absolute_eigenvalues A k<block_start>""" :param A:np.array the adjacency matrix :param k:int the number of eigenvalues to be selected :return: the absolute value of the k greatest (by absolute value) eigenvalues of A """<line_sep><return>np.absolute(max_eigenvalues(A k))<block_end><def_stmt>max_absolute_eigenvalues_laplacian A n<block_start>""" :param A:np.array the adjacency matrix :param n:int the number of eigenvalues to be selected :return: the absolute value of the n greatest (by absolute value) eigenvalues of the laplacian of A """<line_sep>A=graph_laplacian(A)<line_sep><return>np.absolute(max_eigenvalues(A n))<block_end><def_stmt>max_eigenvector A<block_start>""" :param A:np.array the adjacency matrix :return: the eigenvector associated with the maximum (by absolute value) eigenvalue of A since A is positive symmetric, all the eigenvectors are guaranteed to be real """<line_sep>[W V]=np.linalg.eig(A)<line_sep><return>V[: np.argmax(np.absolute(W))].real<block_end><def_stmt>spectral_radius A<block_start>""" :param A:np.array the adjacency matrix :return: the spectral radius of A, i.e. the largest absolute value of its eigenvalues since A is positive symmetric, all the eigenvalues are guaranteed to be real """<line_sep><return>np.abs(max_eigenvalue(A))<block_end><def_stmt>page_rank A F=<none> iter=64<block_start>""" :param A:np.array the adjacency matrix :param F:np.array with initial weights. If None, uniform initialization will happen. :param iter: log2 of the length of the power iteration :return: for each node, its pagerank """<line_sep># normalize A rows A=np.array(A)<line_sep>A<augdiv>A.sum(axis=1)[: np.newaxis]<line_sep># power iteration <for_stmt>_ range(iter)<block_start>A=np.matmul(A A)<block_end># generate prior distribution <if_stmt>F<is><none><block_start>F=np.ones(A.shape[-1])<block_end><else_stmt><block_start>F=np.array(F)<block_end># normalize prior F<augdiv>np.sum(F)<line_sep># compute limit distribution <return>np.matmul(F A)<block_end><def_stmt>tsp_length A F=<none><block_start>""" :param A:np.array the adjacency matrix :param F:np.array determining which nodes are to be visited. If None, all of them are. 
:return: the length of the Traveling Salesman Problem shortest solution """<line_sep>A=all_pairs_shortest_paths(A)<line_sep>N=A.shape[0]<if_stmt>F<is><none><block_start>F=np.ones(N)<block_end>targets=np.nonzero(F)[0]<line_sep>T=targets.shape[0]<line_sep>S=(1<lshift>T)<line_sep>dp=np.zeros((S T))<def_stmt>popcount x<block_start>b=0<while_stmt>x<g>0<block_start>x<augand>x-1<line_sep>b<augadd>1<block_end><return>b<block_end>msks=np.argsort(np.vectorize(popcount)(np.arange(S)))<for_stmt>i range(T+1)<block_start><for_stmt>j range(T)<block_start><if_stmt>(1<lshift>j)&msks[i]<eq>0<block_start>dp[msks[i]][j]=math.inf<block_end><block_end><block_end><for_stmt>i range(T+1 S)<block_start>msk=msks[i]<for_stmt>u range(T)<block_start><if_stmt>(1<lshift>u)&msk<eq>0<block_start>dp[msk][u]=math.inf<line_sep><continue><block_end>cost=math.inf<for_stmt>v range(T)<block_start><if_stmt>v<eq>u<or>(1<lshift>v)&msk<eq>0<block_start><continue><block_end>cost=min(cost dp[msk^(1<lshift>u)][v]+A[targets[v]][targets[u]])<block_end>dp[msk][u]=cost<block_end><block_end><return>np.min(dp[S-1])<block_end><def_stmt>get_nodes_labels A F<block_start>""" Takes the adjacency matrix and the list of nodes features (and a list of algorithms) and returns a set of labels for each node :param A: adj_matrix, N*N numpy matrix :param F: node_values, numpy array of size N :return: labels: KxN numpy matrix where K is the number of labels for each node """<line_sep>labels=[identity(A F) map_reduce_neighbourhood(A F np.mean consider_itself=<true>) map_reduce_neighbourhood(A F np.max consider_itself=<true>) map_reduce_neighbourhood(A F np.std consider_itself=<true>) first_neighbours(A) second_neighbours(A) eccentricity(A)]<line_sep><return>np.swapaxes(np.stack(labels) 0 1)<block_end><def_stmt>get_graph_labels A F<block_start>""" Takes the adjacency matrix and the list of nodes features (and a list of algorithms) and returns a set of labels for the whole graph :param A: adj_matrix, N*N numpy matrix :param F: node_values, numpy array of size N :return: labels: numpy array of size K where K is the number of labels for the graph """<line_sep>labels=[diameter(A)]<line_sep><return>np.asarray(labels)<block_end>
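# Editor's sketch (hypothetical 4-node path graph): how the helpers above are meant to be
# called. Assumes get_nodes_labels and get_graph_labels are in scope from the module above;
# numpy is re-imported here only to keep the snippet self-contained.
import numpy as np

A = np.array([[0., 1., 0., 0.],
              [1., 0., 1., 0.],
              [0., 1., 0., 1.],
              [0., 0., 1., 0.]])
F = np.array([1., 2., 3., 4.])

node_labels = get_nodes_labels(A, F)     # shape (4, 7): one feature row per node
graph_labels = get_graph_labels(A, F)    # array([3.]): the diameter of the path
print(node_labels.shape, graph_labels)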
<import_stmt>pytest<import_from_stmt>galaxy config<import_from_stmt>galaxy.config BaseAppConfiguration<import_from_stmt>galaxy.config reload_config_options<import_from_stmt>galaxy.config.schema AppSchema<line_sep>R1,R2,N1,N2='reloadable1' 'reloadable2' 'nonrelodable1' 'nonreloadable2'# config options MOCK_SCHEMA={R1:{'reloadable':<true> 'default':1} R2:{'reloadable':<true> 'default':2} N1:{'default':3} N2:{'default':4} }<def_stmt>get_schema app_mapping<block_start><return>{'mapping':{'_':{'mapping':app_mapping}}}<block_end>@pytest.fixture<def_stmt>mock_init monkeypatch<block_start>monkeypatch.setattr(BaseAppConfiguration '_load_schema' <lambda>a:AppSchema(<none> '_'))<line_sep>monkeypatch.setattr(AppSchema '_read_schema' <lambda>a b:get_schema(MOCK_SCHEMA))<block_end><def_stmt>test_update_property mock_init monkeypatch# This also covers adding a property. When a config file does not set a property, # that property is set to its default value. Thus, if we add a reloadable property # to the config file, it's the same as modifying that property's value. # edits to config file: R2, N1 modified <block_start>monkeypatch.setattr(config 'read_properties_from_file' <lambda>_:{R1:1 R2:42 N1:99})<line_sep>appconfig=BaseAppConfiguration()<assert_stmt>getattr(appconfig R1)<eq>1<assert_stmt>getattr(appconfig R2)<eq>2<assert_stmt>getattr(appconfig N1)<eq>3<line_sep>reload_config_options(appconfig)<assert_stmt>getattr(appconfig R1)<eq>1# no change <assert_stmt>getattr(appconfig R2)<eq>42# change: reloadable option modified <assert_stmt>getattr(appconfig N1)<eq>3<block_end># no change: option modified but is non-relodable <def_stmt>test_overwrite_reloadable_attribute mock_init monkeypatch# This is similar to test_update_property, but here we overwrite the attribute before reloading. # This can happen if a config property is modified AFTER it has been loaded from schema or kwargs. # For example: load `foo` (from schema or kwargs), but then, in a # subsequent step while initializing # GalaxyAppConfiguration, do something like this: `foo = resove_path(foo, bar)`. Now the value of `foo` # is not what was initially loaded, and if `foo` is reloadable, it will be reset to its default as soon # as the config file is modified. To prevent this, we compare the values read from the modified file # to the `_raw_config` dict. This test ensures this works correctly. # edits to config file: R2 modified <block_start>monkeypatch.setattr(config 'read_properties_from_file' <lambda>_:{R1:1 R2:42})<line_sep>appconfig=BaseAppConfiguration()<assert_stmt>getattr(appconfig R1)<eq>1<assert_stmt>getattr(appconfig R2)<eq>2<line_sep># overwrite R1 setattr(appconfig R1 99)<assert_stmt>getattr(appconfig R1)<eq>99<line_sep># then reload reload_config_options(appconfig)<assert_stmt>getattr(appconfig R1)<eq>99# no change; should remain overwritten <assert_stmt>getattr(appconfig R2)<eq>42<block_end># change: reloadable option modified <def_stmt>test_cant_delete_property mock_init monkeypatch# A property should not be deleted: we don't know whether it was initially # set to a default, loaded from a config file, env var, etc. Therefore, if a property # is removed from the config file, it will not be modified or deleted. 
# edits to config file: R2, N2 deleted <block_start>monkeypatch.setattr(config 'read_properties_from_file' <lambda>_:{R1:1 N1:3})<line_sep>appconfig=BaseAppConfiguration()<assert_stmt>getattr(appconfig R1)<eq>1<assert_stmt>getattr(appconfig R2)<eq>2<assert_stmt>getattr(appconfig N1)<eq>3<assert_stmt>getattr(appconfig N2)<eq>4<line_sep>reload_config_options(appconfig)<assert_stmt>getattr(appconfig R1)<eq>1# no change <assert_stmt>getattr(appconfig R2)<eq>2# no change: option cannot be deleted <assert_stmt>getattr(appconfig N1)<eq>3# no change <assert_stmt>getattr(appconfig N2)<eq>4<block_end># no change: option cannot be deleted
<import_stmt>logging<import_stmt>sys<line_sep>log=logging.getLogger(__name__)<line_sep>log.propagate=<false><line_sep>out_hdlr=logging.StreamHandler(sys.stdout)<line_sep>out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))<line_sep>out_hdlr.setLevel(logging.INFO)<line_sep>log.addHandler(out_hdlr)<line_sep>log.setLevel(logging.INFO)<line_sep>root=logging.getLogger()<line_sep>root.addHandler(out_hdlr)<line_sep>root.propagate=<false><line_sep>root.setLevel(logging.INFO)<line_sep>
""" An IPython FileContentsManager that uses Postgres for checkpoints. """<import_from_future_stmt> unicode_literals<import_from_stmt>.api_utils _decode_unknown_from_base64 outside_root_to_404 reads_base64 to_b64 writes_base64 <import_from_stmt>.managerbase PostgresManagerMixin<import_from_stmt>.query delete_remote_checkpoints delete_single_remote_checkpoint get_remote_checkpoint list_remote_checkpoints move_remote_checkpoints purge_remote_checkpoints save_remote_checkpoint <import_from_stmt>.utils.ipycompat Checkpoints GenericCheckpointsMixin<class_stmt>PostgresCheckpoints(PostgresManagerMixin GenericCheckpointsMixin Checkpoints)<block_start>""" A Checkpoints implementation that saves checkpoints to a remote database. """<line_sep>@outside_root_to_404<def_stmt>create_notebook_checkpoint self nb path<block_start>"""Create a checkpoint of the current state of a notebook Returns a checkpoint_id for the new checkpoint. """<line_sep>b64_content=writes_base64(nb)<with_stmt>self.engine.begin()<as>db<block_start><return>save_remote_checkpoint(db self.user_id path b64_content self.crypto.encrypt self.max_file_size_bytes )<block_end><block_end>@outside_root_to_404<def_stmt>create_file_checkpoint self content format path<block_start>"""Create a checkpoint of the current state of a file Returns a checkpoint_id for the new checkpoint. """<try_stmt><block_start>b64_content=to_b64(content format)<block_end><except_stmt>ValueError<as>e<block_start>self.do_400(str(e))<block_end><with_stmt>self.engine.begin()<as>db<block_start><return>save_remote_checkpoint(db self.user_id path b64_content self.crypto.encrypt self.max_file_size_bytes )<block_end><block_end>@outside_root_to_404<def_stmt>delete_checkpoint self checkpoint_id path<block_start>"""delete a checkpoint for a file"""<with_stmt>self.engine.begin()<as>db<block_start><return>delete_single_remote_checkpoint(db self.user_id path checkpoint_id )<block_end><block_end><def_stmt>get_checkpoint_content self checkpoint_id path<block_start>"""Get the content of a checkpoint."""<with_stmt>self.engine.begin()<as>db<block_start><return>get_remote_checkpoint(db self.user_id path checkpoint_id self.crypto.decrypt )['content']<block_end><block_end>@outside_root_to_404<def_stmt>get_notebook_checkpoint self checkpoint_id path<block_start>b64_content=self.get_checkpoint_content(checkpoint_id path)<line_sep><return>{'type':'notebook' 'content':reads_base64(b64_content) }<block_end>@outside_root_to_404<def_stmt>get_file_checkpoint self checkpoint_id path<block_start>b64_content=self.get_checkpoint_content(checkpoint_id path)<line_sep>content,format=_decode_unknown_from_base64(path b64_content)<line_sep><return>{'type':'file' 'content':content 'format':format }<block_end>@outside_root_to_404<def_stmt>list_checkpoints self path<block_start>"""Return a list of checkpoints for a given file"""<with_stmt>self.engine.begin()<as>db<block_start><return>list_remote_checkpoints(db self.user_id path)<block_end><block_end>@outside_root_to_404<def_stmt>rename_all_checkpoints self old_path new_path<block_start>"""Rename all checkpoints for old_path to new_path."""<with_stmt>self.engine.begin()<as>db<block_start><return>move_remote_checkpoints(db self.user_id old_path new_path )<block_end><block_end>@outside_root_to_404<def_stmt>delete_all_checkpoints self path<block_start>"""Delete all checkpoints for the given path."""<with_stmt>self.engine.begin()<as>db<block_start>delete_remote_checkpoints(db self.user_id path)<block_end><block_end><def_stmt>purge_db self<block_start>""" Purge all 
database records for the current user. """<with_stmt>self.engine.begin()<as>db<block_start>purge_remote_checkpoints(db self.user_id)<block_end><block_end><block_end>
__all__=['Item' 'Info' 'Doc' 'apply_schema' 'info' 'doc']<import_from_stmt>typing Optional List<import_from_stmt>.rt_struct Struct<import_from_stmt>.rt_fastarray FastArray<import_from_stmt>.rt_display DisplayText<line_sep>META_DICT='_meta'<line_sep>DOC_KEY='Doc'<line_sep>DESCRIPTION_KEY='Description'<line_sep>STEWARD_KEY='Steward'<line_sep>TYPE_KEY='Type'<line_sep>DETAIL_KEY='Detail'<line_sep>CONTENTS_KEY='Contents'<line_sep>NO_DESCRIPTION='<no description>'<line_sep>NO_STEWARD='<no steward>'<line_sep>NO_TYPE='<none>'<line_sep>NAME_DEFAULT_WIDTH=4<line_sep>DESCRIPTION_DEFAULT_WIDTH=50<line_sep>STEWARD_DEFAULT_WIDTH=12<line_sep>TYPE_STR_DEFAULT_WIDTH=4<line_sep># ERROR KEYS TYPE_MISMATCH='Type Mismatch'<line_sep>EXTRA_COLUMN='Extra Column'<line_sep>MISSING_COLUMN='Missing Column'<class_stmt>Item<block_start>"""Descriptive information for a data object. Parameters ---------- name : str The name of the data object. type : str The type of the data object. description : str A description of the data object. steward : str The steward of the data object. """<line_sep>name:str<line_sep>"""str: The name of the data object."""<line_sep>type:str<line_sep>"""str: The type of the data object."""<line_sep>description:str<line_sep>"""str: A description of the data object."""<line_sep>steward:str<line_sep>"""steward: The steward of the data object."""<def_stmt>__init__ self name:str type:str description:str steward:str<block_start>self.name=name<line_sep>self.type=type<line_sep>self.description=description<line_sep>self.steward=steward<block_end><block_end><class_stmt>Info<block_start>"""A hierarchically structured container of descriptive information for a data object. """<line_sep>title=[]<line_sep>"""list: The title of the data object"""<line_sep>description:Optional[str]=<none><line_sep>"""str: The description of the data object."""<line_sep>steward:Optional[str]=<none><line_sep>"""str: The steward of the data object."""<line_sep>type:Optional[str]=<none><line_sep>"""str: The type of the data object."""<line_sep>detail=<none><line_sep>"""str: Detail about the data object."""<line_sep>items:Optional[List[Item]]=<none><line_sep>"""list of `Item`: For a :class:`~.rt_struct.Struct` or :class:`~.rt_dataset.Dataset`, the items contained within it."""<def_stmt>__init__ self<block_start><pass><block_end><def_stmt>_make_text self<block_start>title_format=DisplayText.title_format<line_sep>header_format=DisplayText.header_format<line_sep>rows=[]<if_stmt>self.title<block_start>rows<augadd>[title_format('{}'.format(self.title))]<line_sep>rows<augadd>[title_format('='<times>len(self.title))]<block_end><if_stmt>self.description<block_start>rows<augadd>[header_format('Description: ')+self.description]<block_end><if_stmt>self.steward<block_start>rows<augadd>[header_format('Steward: ')+self.steward]<block_end><if_stmt>self.type<block_start>rows<augadd>[header_format('Type: ')+self.type]<block_end><if_stmt>self.detail<block_start>rows<augadd>[header_format('Detail: ')+self.detail]<block_end><if_stmt>self.items<block_start>rows<augadd>[header_format('Contents:') '']<line_sep># Set column widths name_width=max(NAME_DEFAULT_WIDTH max(len(item.name)<for>item self.items))<line_sep>descrip_width=DESCRIPTION_DEFAULT_WIDTH<line_sep>steward_width=STEWARD_DEFAULT_WIDTH<line_sep>stype_width=max(TYPE_STR_DEFAULT_WIDTH max(len(item.type)<for>item self.items))<line_sep># Add list header rows<augadd>[header_format("{: <{}} {: <{}} {: <{}} {: <{}}".format("Type" stype_width "Name" name_width "Description" descrip_width "Steward" 
steward_width))]<line_sep>rows<augadd>[header_format("{} {} {} {}".format("-"<times>stype_width "-"<times>name_width "-"<times>descrip_width "-"<times>steward_width))]<line_sep># Add item rows <for_stmt>item self.items<block_start>rows<augadd>["{: <{}} {} {: <{}} {: <{}}".format(item.type stype_width title_format('{: <{}}'.format(item.name name_width)) item.description descrip_width item.steward steward_width)]<block_end><block_end># Add a newline at the end if there is a title on top <if_stmt>self.title<block_start>rows<augadd>['']<block_end><return>"\n".join(rows)<block_end><def_stmt>__str__ self<block_start><return>DisplayText(self._make_text()).__str__()<block_end><def_stmt>__repr__ self<block_start><return>DisplayText(self._make_text()).__repr__()<block_end><def_stmt>_repr_html_ self<block_start><return>DisplayText(self._make_text())._repr_html_()<block_end><block_end><class_stmt>Doc(Struct)<block_start>"""A document object containing metadata about a data object. Parameters ---------- schema : dict See :meth:`apply_schema` for more information on the format of the dictionary. """<line_sep>_type=NO_TYPE<line_sep>_descrip=NO_DESCRIPTION<line_sep>_steward=NO_STEWARD<line_sep>_detail=<none><def_stmt>__init__ self schema<block_start>super().__init__()<line_sep>self._type=schema.get(TYPE_KEY)<line_sep>self._descrip=schema.get(DESCRIPTION_KEY NO_DESCRIPTION)<line_sep>self._steward=schema.get(STEWARD_KEY NO_STEWARD)<line_sep>self._detail=schema.get(DETAIL_KEY <none>)<line_sep>schema_contents=schema.get(CONTENTS_KEY)<if_stmt>schema_contents<block_start><for_stmt>key schema_contents.keys()<block_start><if_stmt>self.is_valid_colname(key)<block_start>self[key]=Doc(schema_contents[key])<block_end><block_end><block_end><block_end><def_stmt>_as_info self<block_start>info=Info()<line_sep>info.title=<none><line_sep>info.description=self._descrip<line_sep>info.steward=self._steward<line_sep>info.type=self._type<line_sep>info.detail=self._detail<line_sep>info.items=[]<for_stmt>name self.keys()<block_start>elem=self[name]<line_sep>info.items.append(Item(name elem._type elem._descrip elem._steward))<block_end><return>info<block_end><def_stmt>__str__ self<block_start><return>self._as_info().__str__()<block_end><def_stmt>__repr__ self<block_start><return>self._as_info().__repr__()<block_end><def_stmt>_repr_html_ self<block_start><return>self._as_info()._repr_html_()<block_end><block_end><def_stmt>apply_schema obj schema:dict doc:bool=<true><block_start>""" Apply a schema containing descriptive information recursively to the input data object. The schema should be in the form of a hierarchical dictionary, where for the data object, and recursively for each element it may contain, there is a descriptive dictionary with the following keys and values: * Type: 'Struct', 'Dataset', 'Multiset', 'FastArray', etc. * Description: a brief description of the data object * Steward: the name of the steward for that data object * Detail: any additional descriptive information * Contents: if the data object is a :class:`~.rt_struct.Struct`, :class:`~.rt_dataset.Dataset`, or :class:`~.rt_multiset.Multiset`, a recursively formed dictionary where there is a descriptive dictionary of this form associated with the name of each element contained by the data object. 
When the schema is applied to the data object, key/value pairs are set within the ``_meta`` dictionary attribute of the object and all of its elements, to enable subsequent retrieval of the descriptive information using the :meth:`.rt_struct.Struct.info` method or :meth:`.rt_struct.Struct.doc` property. In addition, during the schema application process, the contents and type of each data object is compared to the expectation of the schema, with any differences returned in the form of a dictionary. Parameters ---------- obj : Struct or FastArray The data object to apply the schema information to. schema : dict A descriptive dictionary defining the schema that should apply to the data object and any elements it may contain. doc : bool Indicates whether to create and attach a :class:`Doc` to the object, so that the :meth:`doc` method may be run on the object. Returns ------- res : dict Dictionary of deviations from the schema See Also -------- :meth:`.rt_struct.Struct.apply_schema` """<line_sep>res={}<if_stmt>isinstance(obj (Struct FastArray))<block_start><if_stmt><not>hasattr(obj META_DICT)<block_start>obj._meta={}<block_end><if_stmt>doc<block_start>obj._meta[DOC_KEY]=Doc(schema)<block_end>obj._meta[DESCRIPTION_KEY]=schema.get(DESCRIPTION_KEY NO_DESCRIPTION)<line_sep>obj._meta[STEWARD_KEY]=schema.get(STEWARD_KEY NO_STEWARD)<line_sep>obj._meta[DETAIL_KEY]=schema.get(DETAIL_KEY <none>)<line_sep>stype=schema.get(TYPE_KEY)<if_stmt>stype<and>_type_str(obj)<ne>stype<block_start>res[TYPE_MISMATCH]="Type {} does not match schema type {}".format(_type_str(obj) stype)<block_end>schema_contents=schema.get(CONTENTS_KEY)<if_stmt>schema_contents<block_start><for_stmt>key obj.keys()<block_start>elem_schema=schema_contents.get(key)<if_stmt>elem_schema<block_start>elem_res=apply_schema(obj[key] elem_schema <false>)<if_stmt>elem_res<block_start>res[key]=elem_res<block_end><block_end><else_stmt><block_start>res[EXTRA_COLUMN]=key<block_end><block_end><for_stmt>key schema_contents.keys()<block_start><if_stmt>key<not><in>obj.keys()<block_start>res[MISSING_COLUMN]=key<block_end><block_end><block_end><block_end><return>res<block_end><def_stmt>_type_str obj<arrow>str<block_start>""" Return the string representation of an object's type. Parameters ---------- obj : Any An object Returns ------- str : str String representation of an object's type. """<if_stmt>isinstance(obj FastArray)<block_start>stype=obj.dtype.name<block_end><else_stmt><block_start>stype=type(obj).__name__<block_end><return>stype<block_end><def_stmt>info obj title=<none><arrow>Info<block_start>""" Return the :class:`Info` for the object, describing its contents. Parameters ---------- obj : Any The object title : str The title to give the object, defaults to None. Returns ------- info : Info Information about `obj`. 
"""<line_sep>info=Info()<line_sep>info.title=title<line_sep>info.description=NO_DESCRIPTION<line_sep>info.steward=NO_STEWARD<line_sep>info.detail=<none><line_sep>info.type=_type_str(obj)<if_stmt>hasattr(obj META_DICT)<block_start>info.description=obj._meta.get(DESCRIPTION_KEY info.description)<line_sep>info.steward=obj._meta.get(STEWARD_KEY info.steward)<line_sep>info.detail=obj._meta.get(DETAIL_KEY <none>)<block_end><if_stmt>isinstance(obj Struct)<block_start>info.items=[]<for_stmt>name obj.keys()<block_start>descrip=NO_DESCRIPTION<line_sep>steward=NO_STEWARD<if_stmt>hasattr(obj[name] META_DICT)<block_start>descrip=obj[name]._meta.get(DESCRIPTION_KEY descrip)<line_sep>steward=obj[name]._meta.get(STEWARD_KEY steward)<block_end>info.items.append(Item(name _type_str(obj[name]) descrip steward))<block_end><block_end><return>info<block_end><def_stmt>doc obj<arrow>Optional[Doc]<block_start>""" Return the :class:`Doc` for the object, describing its contents. Parameters ---------- obj : Any The object. Returns ------- doc : Doc Returns a :class:`Doc` instance if the object contains documentation metadata, otherwise None. """<if_stmt>hasattr(obj META_DICT)<block_start><if_stmt>DOC_KEY<in>obj._meta<block_start><return>obj._meta[DOC_KEY]<block_end><block_end><return><none><block_end>
<import_stmt>torch<import_from_stmt>torch sin cos atan2 acos<def_stmt>rot_z gamma<block_start><return>torch.tensor([[cos(gamma) -sin(gamma) 0] [sin(gamma) cos(gamma) 0] [0 0 1]] dtype=gamma.dtype)<block_end><def_stmt>rot_y beta<block_start><return>torch.tensor([[cos(beta) 0 sin(beta)] [0 1 0] [-sin(beta) 0 cos(beta)]] dtype=beta.dtype)<block_end><def_stmt>rot alpha beta gamma<block_start><return>rot_z(alpha)@rot_y(beta)@rot_z(gamma)<block_end>
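# Illustrative usage sketch (not part of the original file): compose a Z-Y-Z rotation from three arbitrary example angles and rotate a basis vector with it.
<if_stmt>__name__<eq>'__main__'<block_start>alpha,beta,gamma=torch.tensor(0.3) torch.tensor(0.2) torch.tensor(0.1)<line_sep>R=rot(alpha beta gamma)<line_sep>print(R@torch.tensor([1.0 0.0 0.0]))<block_end>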
<import_from_stmt>django.urls path re_path<import_from_stmt>recruiter api_views<line_sep>app_name="api_recruiter"<line_sep>urlpatterns=[path("login/" api_views.login_view name="api_login") path("out/" api_views.getout name="getout") path("change-password/" api_views.change_password name="api_change_password") path("profile/" api_views.user_profile name="api_user_profile") path("job/list/" api_views.jobs_list name="api_list") path("skill/list/" api_views.skill_list) path("industry/list/" api_views.industry_list) path("city/list/" api_views.city_list) path("state/list/" api_views.state_list) path("company/list/" api_views.company_list) path("functional-area/list/" api_views.functional_area_list) path("job/inactive/list/" api_views.inactive_jobs name="api_inactive_jobs") path("profile/edit/" api_views.edit_profile name="edit_profile") path("company-profile/" api_views.view_company name="view_company") re_path(r"^job/(?P<job_type>[-\w]+)/new/$" api_views.new_job name="api_new_job") re_path(r"^job/edit/(?P<job_post_id>[a-zA-Z0-9]+)/$" api_views.edit_job name="api_edit_job" ) re_path(r"^job/delete/(?P<job_post_id>[a-zA-Z0-9]+)/$" api_views.delete_job name="api_delete_job" ) ]<line_sep>
# Python Security Project (PySec) and its related class files. # # PySec is a set of tools for secure application development under Linux # # Copyright 2014 PySec development team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # -*- coding: ascii -*- <import_stmt>sys<import_from_stmt>pysec.core Object<line_sep>__all__='Context' <line_sep>CONTEXTS={'file' 'cmd' 'html' 'js'}<class_stmt>Context(Object)<block_start><def_stmt>__init__ self name='none' info=<none> locs=<none><block_start>name=str(name)<line_sep>self.name=name<line_sep>self.info={}<if>info<is><none><else>dict(info)<line_sep>CONTEXTS.add(name)<block_end><def_stmt>__enter__ self<block_start>frame=sys._getframe().f_back<line_sep>contexts=frame.f_locals.setdefault('__ctx__' [])<line_sep>contexts.append(self)<block_end><def_stmt>__exit__ self exc_type exc_value exc_tb<block_start>sys._getframe().f_back.f_locals['__ctx__'].pop()<line_sep><return>0<block_end><def_stmt>contexts self<block_start>frame=sys._getframe().f_back<while_stmt>frame<block_start>ls=frame.f_locals.get('__ctx__' <none>)<if_stmt>ls<block_start><for_stmt>ctx ls<block_start><yield>ctx<block_end><block_end>frame=frame.f_back<block_end><block_end><block_end>
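# Illustrative usage sketch (not part of the original module): at module scope, entering a Context pushes it onto the caller's '__ctx__' list and leaving the block pops it again.
<with_stmt>Context('html' info={'escape':<true>})<block_start><pass><block_end>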
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT license. <import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.utils.data<as>data<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>torchvision.transforms<as>transforms<import_stmt>os<import_stmt>time<import_stmt>argparse<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_stmt>cv2<import_from_stmt>data.choose_config cfg<line_sep>cfg=cfg.cfg<import_from_stmt>utils.augmentations to_chw_bgr<import_from_stmt>importlib import_module<def_stmt>str2bool v<block_start><return>v.lower()<in>("yes" "true" "t" "1")<block_end>parser=argparse.ArgumentParser(description='face detection demo')<line_sep>parser.add_argument('--save_dir' type=str default='results/' help='Directory for detect result')<line_sep>parser.add_argument('--model' type=str default='weights/rpool_face_c.pth' help='trained model')<line_sep>parser.add_argument('--thresh' default=0.17 type=float help='Final confidence threshold')<line_sep>parser.add_argument('--multigpu' default=<false> type=str2bool help='Specify whether model was trained with multigpu')<line_sep>parser.add_argument('--model_arch' default='RPool_Face_C' type=str choices=['RPool_Face_C' 'RPool_Face_Quant' 'RPool_Face_QVGA_monochrome' 'RPool_Face_M4'] help='choose architecture among rpool variants')<line_sep>parser.add_argument('--image_folder' default=<none> type=str help='folder containing images')<line_sep>parser.add_argument('--save_traces' default=<false> type=str2bool help='Specify whether to save input output traces')<line_sep>args=parser.parse_args()<if_stmt><not>os.path.exists(args.save_dir)<block_start>os.makedirs(args.save_dir)<block_end>use_cuda=torch.cuda.is_available()<if_stmt>use_cuda<block_start>torch.set_default_tensor_type('torch.cuda.FloatTensor')<block_end><else_stmt><block_start>torch.set_default_tensor_type('torch.FloatTensor')<block_end><def_stmt>detect net img_path thresh save_traces<block_start>img=Image.open(img_path)<line_sep>img=img.convert('RGB')<line_sep>img=np.array(img)<line_sep>height,width,_=img.shape<if_stmt>os.environ['IS_QVGA_MONO']<eq>'1'<block_start>max_im_shrink=np.sqrt(320<times>240/(img.shape[0]<times>img.shape[1]))<block_end><else_stmt><block_start>max_im_shrink=np.sqrt(640<times>480/(img.shape[0]<times>img.shape[1]))<block_end><if_stmt>save_traces<eq><true><and>os.environ['IS_QVGA_MONO']<eq>'1'<block_start>image=cv2.resize(img (320 240))<block_end><elif_stmt>save_traces<eq><true><block_start>image=cv2.resize(img (640 480))<block_end><else_stmt><block_start>image=cv2.resize(img <none> <none> fx=max_im_shrink fy=max_im_shrink interpolation=cv2.INTER_LINEAR)<block_end>x=to_chw_bgr(image)<line_sep>x=x.astype('float32')<line_sep>x<augsub>cfg.img_mean<line_sep>x=x[[2 1 0] : :]<if_stmt>cfg.IS_MONOCHROME<eq><true><block_start>x=0.299<times>x[0]+0.587<times>x[1]+0.114<times>x[2]<line_sep>x=torch.from_numpy(x).unsqueeze(0).unsqueeze(0)<block_end><else_stmt><block_start>x=torch.from_numpy(x).unsqueeze(0)<block_end><if_stmt>use_cuda<block_start>x=x.cuda()<block_end>t1=time.time()<line_sep>y,loc,conf=net(x)<line_sep>detections=y.data<line_sep>scale=torch.Tensor([img.shape[1] img.shape[0] img.shape[1] img.shape[0]])<line_sep>img=cv2.imread(img_path cv2.IMREAD_COLOR)<for_stmt>i range(detections.size(1))<block_start>j=0<while_stmt>detections[0 i j 0]<ge>thresh<block_start>score=detections[0 i j 0]<line_sep>pt=(detections[0 i j 1:]<times>scale).cpu().numpy()<line_sep>left_up,right_bottom=(pt[0] pt[1]) (pt[2] 
pt[3])<line_sep>j<augadd>1<line_sep>cv2.rectangle(img left_up right_bottom (0 0 255) 2)<line_sep>conf_score="{:.3f}".format(score)<line_sep>point=(int(left_up[0]) int(left_up[1]-5))<line_sep>cv2.putText(img conf_score point cv2.FONT_HERSHEY_COMPLEX 0.6 (0 255 0) 1)<block_end><block_end>t2=time.time()<line_sep>print('detect:{} timer:{}'.format(img_path t2-t1))<line_sep>cv2.imwrite(os.path.join(args.save_dir os.path.basename(img_path)) img)<if_stmt>save_traces<eq><true><block_start><return>x loc conf<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>module=import_module('models.'+args.model_arch)<line_sep>net=module.build_s3fd('test' cfg.NUM_CLASSES)<if_stmt>args.multigpu<eq><true><block_start>net=torch.nn.DataParallel(net)<block_end>checkpoint_dict=torch.load(args.model)<line_sep>model_dict=net.state_dict()<line_sep>model_dict.update(checkpoint_dict)<line_sep>net.load_state_dict(model_dict)<line_sep>net.eval()<if_stmt>use_cuda<block_start>net.cuda()<line_sep>cudnn.benchmark=<true><block_end>img_path=args.image_folder<line_sep>img_list=[os.path.join(img_path x)<for>x os.listdir(img_path)]<line_sep>x=[]<line_sep>loc=[]<line_sep>conf=[]<for_stmt>path img_list<block_start><if_stmt>args.save_traces<eq><true><block_start>x_temp,loc_temp,conf_temp=detect(net path args.thresh args.save_traces)<line_sep>x.append(x_temp)<line_sep>loc.append(loc_temp)<line_sep>conf.append(conf_temp)<block_end><else_stmt><block_start>detect(net path args.thresh args.save_traces)<block_end><block_end><if_stmt>args.save_traces<eq><true><block_start>np.save('trace_inputs.npy' torch.cat(x).cpu().detach().numpy())<line_sep>np.save('trace_outputs.npy' torch.cat([torch.cat(conf) torch.cat(loc)] dim=1).cpu().detach().numpy())<block_end><block_end>
<import_from_stmt>tkinter Tk Label<line_sep>root=Tk()<line_sep>a=Label(root text='Live de Python' font=('Arial' 30))<line_sep>a.pack()<line_sep>root.mainloop()<line_sep>
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>matplotlib.gridspec<as>gridspec<def_stmt>test_model env model render=<false># <block_start>fig=plt.figure(figsize=(18 12) tight_layout=<true>)<line_sep>gs=gridspec.GridSpec(5 12)<line_sep># ax_x=fig.add_subplot(gs[0 0:4])<line_sep>ax_y=fig.add_subplot(gs[0 4:8])<line_sep>ax_z=fig.add_subplot(gs[0 8:12])<line_sep># ax_dx=fig.add_subplot(gs[1 0:4])<line_sep>ax_dy=fig.add_subplot(gs[1 4:8])<line_sep>ax_dz=fig.add_subplot(gs[1 8:12])<line_sep># ax_euler_x=fig.add_subplot(gs[2 0:4])<line_sep>ax_euler_y=fig.add_subplot(gs[2 4:8])<line_sep>ax_euler_z=fig.add_subplot(gs[2 8:12])<line_sep># ax_euler_vx=fig.add_subplot(gs[3 0:4])<line_sep>ax_euler_vy=fig.add_subplot(gs[3 4:8])<line_sep>ax_euler_vz=fig.add_subplot(gs[3 8:12])<line_sep># ax_action0=fig.add_subplot(gs[4 0:3])<line_sep>ax_action1=fig.add_subplot(gs[4 3:6])<line_sep>ax_action2=fig.add_subplot(gs[4 6:9])<line_sep>ax_action3=fig.add_subplot(gs[4 9:12])<line_sep>max_ep_length=env.max_episode_steps<line_sep>num_rollouts=5<if_stmt>render<block_start>env.connectUnity()<block_end><for_stmt>n_roll range(num_rollouts)<block_start>pos,euler,dpos,deuler=[] [] [] []<line_sep>actions=[]<line_sep>obs,done,ep_len=env.reset() <false> 0<while_stmt><not>(done<or>(ep_len<ge>max_ep_length))<block_start>act,_=model.predict(obs deterministic=<true>)<line_sep>obs,rew,done,infos=env.step(act)<line_sep># ep_len<augadd>1<line_sep># pos.append(obs[0 0:3].tolist())<line_sep>dpos.append(obs[0 6:9].tolist())<line_sep>euler.append(obs[0 3:6].tolist())<line_sep>deuler.append(obs[0 9:12].tolist())<line_sep># actions.append(act[0 :].tolist())<block_end>pos=np.asarray(pos)<line_sep>dpos=np.asarray(dpos)<line_sep>euler=np.asarray(euler)<line_sep>deuler=np.asarray(deuler)<line_sep>actions=np.asarray(actions)<line_sep># t=np.arange(0 pos.shape[0])<line_sep>ax_x.step(t pos[: 0] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_y.step(t pos[: 1] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_z.step(t pos[: 2] color="C{0}".format(n_roll) label="pos [x, y, z] -- trail: {0}".format(n_roll))<line_sep># ax_dx.step(t dpos[: 0] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_dy.step(t dpos[: 1] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_dz.step(t dpos[: 2] color="C{0}".format(n_roll) label="vel [x, y, z] -- trail: {0}".format(n_roll))<line_sep># ax_euler_x.step(t euler[: -1] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_euler_y.step(t euler[: 0] color="C{0}".format(n_roll) label="trail :{0}".format(n_roll))<line_sep>ax_euler_z.step(t euler[: 1] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep># ax_euler_vx.step(t deuler[: -1] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_euler_vy.step(t deuler[: 0] color="C{0}".format(n_roll) label="trail :{0}".format(n_roll))<line_sep>ax_euler_vz.step(t deuler[: 1] color="C{0}".format(n_roll) label=r"$\theta$ [x, y, z] -- trail: {0}".format(n_roll))<line_sep># ax_action0.step(t actions[: 0] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_action1.step(t actions[: 1] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_action2.step(t actions[: 2] color="C{0}".format(n_roll) label="trail: {0}".format(n_roll))<line_sep>ax_action3.step(t actions[: 3] color="C{0}".format(n_roll) label="act [0, 1, 2, 3] -- trail: {0}".format(n_roll))<block_end># 
<if_stmt>render<block_start>env.disconnectUnity()<block_end>ax_z.legend()<line_sep>ax_dz.legend()<line_sep>ax_euler_z.legend()<line_sep>ax_euler_vz.legend()<line_sep>ax_action3.legend()<line_sep># plt.tight_layout()<line_sep>plt.show()<block_end>
<import_stmt>theano<import_stmt>numpy<as>np<import_from_stmt>dcnn WordEmbeddingLayer<import_from_stmt>dcnn_train WordEmbeddingLayer<as>TheanoWordEmbeddingLayer<import_from_stmt>test_util assert_matrix_eq<line_sep>########### NUMPY ########### vocab_size,embed_dm=10 5<line_sep>embeddings=np.random.rand(vocab_size embed_dm)<line_sep>sents=np.asarray(np.random.randint(10 size=(3 6)) dtype=np.int32)<line_sep>np_l=WordEmbeddingLayer(embeddings)<line_sep>actual=np_l.output(sents)<line_sep>########### THEANO ########### x_symbol=theano.tensor.imatrix('x')# the word indices matrix th_l=TheanoWordEmbeddingLayer(rng=np.random.RandomState(1234) input=x_symbol vocab_size=vocab_size embed_dm=embed_dm embeddings=theano.shared(value=embeddings name="embeddings"))<line_sep>f=theano.function(inputs=[x_symbol] outputs=th_l.output)<line_sep>expected=f(sents)<line_sep>assert_matrix_eq(actual expected "Embedding")<line_sep>
<import_stmt>os<import_from_stmt>conans ConanFile tools<import_from_stmt>conans.errors ConanInvalidConfiguration<class_stmt>StructoptConan(ConanFile)<block_start>name="structopt"<line_sep>homepage="https://github.com/p-ranav/structopt"<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>description="Parse command line arguments by defining a struct+"<line_sep>license="MIT"<line_sep>settings="compiler" "os"<line_sep>topics=("conan" "structopt" "argument-parser" "cpp17" "header-only" "single-header-lib" "header-library" "command-line" "arguments" "mit-license" "modern-cpp" "structopt" "lightweight" "reflection" "cross-platform" "library" "type-safety" "type-safe" "argparse" "clap" "visit-struct-library" "magic-enum")<line_sep>no_copy_source=<true><line_sep>@property<def_stmt>_source_subfolder self<block_start><return>"source_subfolder"<block_end>@property<def_stmt>_supported_compiler self<block_start>compiler=str(self.settings.compiler)<line_sep>version=tools.Version(self.settings.compiler.version)<if_stmt>compiler<eq>"Visual Studio"<and>version<ge>"15"<block_start><return><true><block_end><elif_stmt>compiler<eq>"gcc"<and>version<ge>"9"<block_start><return><true><block_end><elif_stmt>compiler<eq>"clang"<and>version<ge>"5"<block_start><return><true><block_end><elif_stmt>compiler<eq>"apple-clang"<and>version<ge>"10"<block_start><return><true><block_end><else_stmt><block_start>self.output.warn("{} recipe lacks information about the {} compiler standard version support".format(self.name compiler))<block_end><return><false><block_end><def_stmt>configure self<block_start><if_stmt>self.settings.compiler.get_safe("cppstd")<block_start>tools.check_min_cppstd(self "17")<block_end><if_stmt><not>self._supported_compiler<block_start><raise>ConanInvalidConfiguration("structopt: Unsupported compiler: {}-{} "<concat>"(https://github.com/p-ranav/structopt#compiler-compatibility).".format(self.settings.compiler self.settings.compiler.version))<block_end><block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version])<line_sep>os.rename("{}-{}".format(self.name self.version) self._source_subfolder)<block_end><def_stmt>package self<block_start>self.copy(pattern="LICENSE" src=self._source_subfolder dst="licenses")<line_sep>self.copy(pattern="*.h" src=os.path.join(self._source_subfolder "include") dst="include")<line_sep>self.copy(pattern="*.hpp" src=os.path.join(self._source_subfolder "include") dst="include")<block_end><def_stmt>package_id self<block_start>self.info.header_only()<block_end><block_end>
# Copyright 2018 The Tulsi Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test for install_genfiles.py."""<import_stmt>os<import_stmt>unittest<import_stmt>install_genfiles<line_sep>DOES_EXIST_DATA={'generated_sources':[('src/TulsiGenerator/Scripts/install_genfiles.py' 'install_genfiles.py') ] }<line_sep>DOES_NOT_EXIST_DATA={'generated_sources':[('src/does/not/exist.txt' 'exist.txt')] }<class_stmt>TestInstallForData(unittest.TestCase)<block_start><def_stmt>testSrcDoeNotExist self<block_start>tmpdir=os.environ['TEST_TMPDIR']<line_sep>installer=install_genfiles.Installer('.' output_root=tmpdir)<line_sep>installer.InstallForData(DOES_NOT_EXIST_DATA)<line_sep>self.assertFalse(os.path.lexists(os.path.join(tmpdir 'bazel-tulsi-includes/x/x/exist.txt')))<block_end><def_stmt>testSrcDoesExist self<block_start>tmpdir=os.environ['TEST_TMPDIR']<line_sep>installer=install_genfiles.Installer('.' output_root=tmpdir)<line_sep>installer.InstallForData(DOES_EXIST_DATA)<line_sep># Must use lexists because we create a link but use the wrong exec root, # so the symlink is not valid. self.assertTrue(os.path.lexists(os.path.join(tmpdir 'bazel-tulsi-includes/x/x/install_genfiles.py')))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# type: ignore[attr-defined] <import_from_stmt>unittest.mock patch<import_stmt>networkx<as>nx<import_from_stmt>Tests.Marketplace.packs_dependencies calculate_single_pack_dependencies<def_stmt>find_pack_display_name_mock pack_folder_name<block_start><return>pack_folder_name<block_end><class_stmt>TestCalculateSinglePackDependencies<block_start>@classmethod<def_stmt>setup_class cls<block_start>patch('demisto_sdk.commands.find_dependencies.find_dependencies.find_pack_display_name' side_effect=find_pack_display_name_mock)<line_sep>patch('Tests.scripts.utils.log_util.install_logging')<line_sep>graph=nx.DiGraph()<line_sep>graph.add_node('pack1' mandatory_for_packs=[])<line_sep>graph.add_node('pack2' mandatory_for_packs=[])<line_sep>graph.add_node('pack3' mandatory_for_packs=[])<line_sep>graph.add_node('pack4' mandatory_for_packs=[])<line_sep>graph.add_node('pack5' mandatory_for_packs=[])<line_sep>graph.add_edge('pack1' 'pack2')<line_sep>graph.add_edge('pack2' 'pack3')<line_sep>graph.add_edge('pack1' 'pack4')<line_sep>graph.nodes()['pack4']['mandatory_for_packs'].append('pack1')<line_sep>dependencies=calculate_single_pack_dependencies('pack1' graph)<line_sep>cls.first_level_dependencies,cls.all_level_dependencies,_=dependencies<block_end><def_stmt>test_calculate_single_pack_dependencies_first_level_dependencies self<block_start>""" Given - A full dependency graph where: - pack1 -> pack2 -> pack3 - pack1 -> pack4 - pack4 is mandatory for pack1 - pack5 and pack1 are not a dependency for any pack When - Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies Then - Ensure first level dependencies for pack1 are only pack2 and pack4 """<line_sep>all_nodes={'pack1' 'pack2' 'pack3' 'pack4' 'pack5'}<line_sep>expected_first_level_dependencies={'pack2' 'pack4'}<for_stmt>node expected_first_level_dependencies<block_start><assert_stmt>node<in>self.first_level_dependencies<block_end><for_stmt>node all_nodes-expected_first_level_dependencies<block_start><assert_stmt>node<not><in>self.first_level_dependencies<block_end><block_end><def_stmt>test_calculate_single_pack_dependencies_all_levels_dependencies self<block_start>""" Given - A full dependency graph where: - pack1 -> pack2 -> pack3 - pack1 -> pack4 - pack4 is mandatory for pack1 - pack5 and pack1 are not a dependency for any pack When - Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies Then - Ensure all levels dependencies for pack1 are pack2, pack3 and pack4 only """<line_sep>all_nodes={'pack1' 'pack2' 'pack3' 'pack4' 'pack5'}<line_sep>expected_all_level_dependencies={'pack2' 'pack3' 'pack4'}<for_stmt>node expected_all_level_dependencies<block_start><assert_stmt>node<in>self.all_level_dependencies<block_end><for_stmt>node all_nodes-expected_all_level_dependencies<block_start><assert_stmt>node<not><in>self.all_level_dependencies<block_end><block_end><def_stmt>test_calculate_single_pack_dependencies_mandatory_dependencies self<block_start>""" Given - A full dependency graph where: - pack1 -> pack2 -> pack3 - pack1 -> pack4 - pack4 is mandatory for pack1 - pack5 and pack1 are not a dependency for any pack When - Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies Then - pack4 is mandatory for pack1 and that there are no other mandatory dependencies """<line_sep>expected_mandatory_dependency='pack4'<assert_stmt>self.first_level_dependencies[expected_mandatory_dependency]['mandatory']<for_stmt>node 
self.first_level_dependencies<block_start><if_stmt>node<ne>expected_mandatory_dependency<block_start><assert_stmt><not>self.first_level_dependencies[node]['mandatory']<block_end><block_end><block_end><block_end>
<import_stmt>datetime<import_stmt>subprocess<def_stmt>git_short_rev <block_start><try_stmt><block_start><return>subprocess.check_output(['git' 'rev-parse' '--short' 'HEAD' ]).decode('utf-8').strip()<block_end><except_stmt>Exception<block_start><raise>RuntimeError("Could not read git revision. Make sure you have git installed and you're working with a git clone of the repository.")<block_end><block_end><def_stmt>current_date <block_start><return>datetime.date.today().strftime('%Y-%m-%d')<block_end><def_stmt>git_date short=<true><block_start><try_stmt><block_start>iso=subprocess.check_output(['git' 'log' '-1' '--format=%ci' 'HEAD' ]).decode('utf-8').strip()<if_stmt>short<block_start><return>iso.split(' ')[0]<block_end><else_stmt><block_start><return>iso<block_end><block_end><except_stmt>Exception<block_start><raise>RuntimeError("Could not read git commit date. Make sure you have git installed and you're working with a git clone of the repository.")<block_end><block_end><def_stmt>git_release_version search_prefix<block_start><try_stmt><block_start>tags=subprocess.check_output(['git' 'tag' '--points-at' 'HEAD' ]).decode('utf-8').splitlines()<for_stmt>tag tags<block_start><if_stmt>tag.startswith(search_prefix)<block_start><return>tag[len(search_prefix):]<block_end><block_end><return><none><block_end><except_stmt>Exception<block_start><raise>RuntimeError("Could not read git release tags. Make sure you have git installed and you're working with a git clone of the repository.")<block_end><block_end>
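# Illustrative usage sketch (not part of the original module): 'release-' is an assumed tag prefix, so a tag like 'release-1.2.3' on HEAD would yield '1.2.3' (or None when HEAD is untagged).
<if_stmt>__name__<eq>'__main__'<block_start>print(git_short_rev() git_date() git_release_version('release-'))<block_end>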
<import_stmt>sys<line_sep>sys.path.insert(0 "../..")<import_stmt>pprint<import_from_stmt>ttp ttp<def_stmt>test_simple_anonymous_template <block_start>template_1="""interface {{ interface }} description {{ description | ORPHRASE }}"""<line_sep>data_1=""" interface Port-Chanel11 description Storage Management interface Loopback0 description RID """<line_sep>parser=ttp(template=template_1 data=data_1)<line_sep># check that data added: datums_added={"{}:{}".format(template.name input_name):input_obj.data<for>template parser._templates<for>input_name,input_obj template.inputs.items()}<line_sep># pprint.pprint(datums_added) parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res) # assert res == [[[{'description': 'Storage Management', 'interface': 'Port-Chanel11'}, {'description': 'RID', 'interface': 'Loopback0'}]]] <block_end># test_simple_anonymous_template() <def_stmt>test_anonymous_group_with_vars <block_start>template=""" <input load="text"> interface Port-Chanel11 description Storage Management interface Loopback0 description RID </input> <vars name="my.var.s"> a = 1 b = 2 </vars> <group> interface {{ interface }} description {{ description | ORPHRASE }} </group> """<line_sep>parser=ttp(template=template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res) <assert_stmt>res<eq>[[[{"description":"Storage Management" "interface":"Port-Chanel11"} {"description":"RID" "interface":"Loopback0"} {"my":{"var":{"s":{"a":1 "b":2}}}} ]]]<block_end># test_anonymous_group_with_vars() <def_stmt>test_anonymous_group_with_child_group_empty_absolute_path <block_start>template=""" <template results="per_template"> <input name="Cisco_ios" load="text"> r2#show interfaces | inc line protocol interface GigabitEthernet1 vrf forwarding MGMT ip address 10.123.89.55 255.255.255.0 </input> <input name="Cisco_ios" load="text"> r1#show interfaces | inc line protocol: interface GigabitEthernet1 description some info vrf forwarding MGMT ip address 10.123.89.56 255.255.255.0 interface GigabitEthernet2 ip address 10.123.89.55 255.255.255.0 </input> <group void=""> interface {{ interface }} description {{ description | ORPHRASE }} <group name="/"> ip address {{ ip }} {{ mask }} </group> </group> </template> """<line_sep>parser=ttp(template=template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res) <assert_stmt>res<eq>[[{"ip":"10.123.89.55" "mask":"255.255.255.0"} {"ip":"10.123.89.56" "mask":"255.255.255.0"} {"ip":"10.123.89.55" "mask":"255.255.255.0"} ]]<block_end># test_anonymous_group_with_child_group_empty_absolute_path() <def_stmt>test_anonymous_group_with_per_template_mode <block_start>template=""" <template results="per_template"> <group void=""> hostname {{ hostname | record(hostname_abc) }} </group> <group> interface {{ interface }} description {{ description | ORPHRASE }} ip address {{ ip }} {{ mask }} {{ hostname | set(hostname_abc) }} </group> </template> """<line_sep>datum_1=""" hostname r2 ! interface GigabitEthernet1 vrf forwarding MGMT ip address 10.123.89.55 255.255.255.0 """<line_sep>datum_2=""" hostname r1 ! 
interface GigabitEthernet1 description some info vrf forwarding MGMT ip address 10.123.89.56 255.255.255.0 interface GigabitEthernet2 ip address 10.123.89.55 255.255.255.0 """<line_sep>parser_a=ttp(template=template)<line_sep>parser_a.add_input(datum_1)<line_sep>parser_a.add_input(datum_2)<line_sep>parser_a.parse()<line_sep>res=parser_a.result()<line_sep># pprint.pprint(res) <assert_stmt>res<eq>[[{"hostname":"r2" "interface":"GigabitEthernet1" "ip":"10.123.89.55" "mask":"255.255.255.0" } {"description":"some info" "hostname":"r1" "interface":"GigabitEthernet1" "ip":"10.123.89.56" "mask":"255.255.255.0" } {"hostname":"r1" "interface":"GigabitEthernet2" "ip":"10.123.89.55" "mask":"255.255.255.0" } ]]<block_end># test_anonymous_group_with_per_template_mode()
<import_from_stmt>.version __version__<import_stmt>decorator<import_stmt>packaging.version<line_sep>@decorator.decorator<def_stmt>temporary func version=<none> *args **kwargs<block_start>"""Decorate a function as a temporary fix. Parameters ---------- version : str Version after which this function should raise a RuntimeError """<if_stmt>version<is><none><block_start><raise>TypeError("temporary() missing 1 required keyword argument: 'version'")<block_end><if_stmt>packaging.version.parse(__version__)<ge>packaging.version.parse(version)<block_start><raise>RuntimeError("Temporary function {}.{} is temporary and should not be used "<concat>"after version {} (current version: {})".format(func.__module__ func.__name__ version __version__))<block_end><return>func(*args **kwargs)<block_end><def_stmt>get_members module<block_start>"""Get all public members from a module."""<line_sep>namespace=[attr<for>attr dir(module)<if><not>attr.startswith("_")]<line_sep><return>[getattr(module attr)<for>attr namespace]<block_end><def_stmt>get_callable_members module<block_start>"""Get all callable public members from a module."""<line_sep><return>[member<for>member get_members(module)<if>callable(member)]<block_end>
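# Illustrative usage sketch (not part of the original module): the version string is an arbitrary assumption; once __version__ reaches it, calling the decorated function raises RuntimeError instead of silently running stale workaround code.
@temporary(version='999.0')<def_stmt>_legacy_workaround <block_start><return>42<block_end>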
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>mobile_seg.modules.net MobileNetV2_unet<class_stmt>Wrapper(nn.Module)<block_start><def_stmt>__init__ self unet:MobileNetV2_unet scale:float=255.<block_start>super().__init__()<line_sep>self.unet=unet<line_sep>self.scale=scale<block_end><def_stmt>forward self x<block_start>x=x/self.scale<line_sep>x=self.unet(x)<line_sep>x=x<times>self.scale<line_sep>x=torch.cat((x x x) dim=1)<line_sep><return>x<block_end><block_end># %% <if_stmt>__name__<eq>'__main__'# %% <block_start>model=MobileNetV2_unet()<line_sep>wrapper=Wrapper(model)<line_sep>inputs=torch.randn((1 3 224 224))<line_sep>out=wrapper(inputs)<line_sep>print(out.shape)<block_end>
<import_stmt>sys<import_from_stmt>pathlib Path<import_from_stmt>argparse ArgumentParser<import_stmt>h5py<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>tqdm tqdm<import_from_stmt>export export_read_file<def_stmt>get_args <block_start>parser=ArgumentParser(description="Parse sequencing_summary.txt files and .paf files to find split reads "<concat>"in an Oxford Nanopore Dataset" add_help=<false>)<line_sep>general=parser.add_argument_group(title='General options')<line_sep>general.add_argument("-h" "--help" action="help" help="Show this help and exit")<line_sep>in_args=parser.add_argument_group(title='Input sources')<line_sep>in_args.add_argument("-s" "--summary" required=<true> nargs='+' help='Sequencing summary file(s) generated by albacore or guppy. Can be compressed '<concat>'using gzip, bzip2, xz, or zip')<line_sep>in_args.add_argument("--start-events" help="start_events.csv file generated by event_finder.py" default="" required=<true> )<line_sep>in_args.add_argument("--end-events" help="end_events.csv file generated by event_finder.py" default="" required=<true> )<line_sep>in_args.add_argument("--targets" help="A text file of target read ids with one per line." default="" required=<true> )<line_sep>in_args.add_argument("--bulk-files" help="ONT bulk FAST5 files." nargs='+' default="" )<line_sep>in_args.add_argument("-o" "--output-name" help="Name of the output folder, this will be generated if it does not exist" required=<true> default="")<line_sep>in_args.add_argument("--extra-classifications" help="Any extra MinKNOW classifications to include." nargs='*' default="" )<line_sep><return>parser.parse_args()<block_end><def_stmt>main <block_start>args=get_args()<line_sep># debug(args) # # sys.exit() # Make folders <for_stmt>j ['starts' 'ends']<block_start>Path('{i}/{j}/{k}'.format(i=args.output_name j=j k='fast5')).mkdir(parents=<true> exist_ok=<true>)<block_end># Open files start_events=pd.read_csv(args.start_events sep=',')<line_sep>end_events=pd.read_csv(args.end_events sep=',')<line_sep>seq_sum_df=concat_files_to_df(file_list=args.summary sep='\t')<line_sep># Create end_time Series in seq_sum_df seq_sum_df['end_time']=seq_sum_df['start_time']+seq_sum_df['duration']<line_sep># Sort and Groupby to segregate runs and channels seq_sum_df=seq_sum_df.sort_values(by=['run_id' 'channel' 'start_time'] ascending=<true>)<line_sep>seq_sum_df_1=seq_sum_df.copy()<line_sep>gb=seq_sum_df.groupby(['run_id' 'channel'])<line_sep>gb1=seq_sum_df_1.groupby(['run_id' 'channel'])<line_sep># Get previous and next start times within groupby seq_sum_df['next_start']=gb['start_time'].shift(-1)<line_sep>seq_sum_df_1['prev_start']=gb1['start_time'].shift(1)<line_sep>target_read_ids=[]<with_stmt>open(args.targets 'r')<as>file<block_start><for_stmt>line file<block_start>target_read_ids.append(line.strip())<block_end><block_end>classifications=['pore' 'inrange' 'good_single' 'unblocking']<if_stmt>args.extra_classifications<block_start>classifications.extend(args.extra_classifications)<block_end># Get end_events for target_read_ids end_events=end_events[end_events['read_id'].isin(target_read_ids)]<line_sep>normal_ending_ids=end_events[end_events['time'].ge(0)&end_events['label'].isin(classifications)]['read_id'].unique()<line_sep>abnormally_ending_ids=end_events[~end_events['read_id'].isin(normal_ending_ids)]['read_id'].unique()<line_sep>end_target_ss=seq_sum_df[seq_sum_df['read_id'].isin(abnormally_ending_ids)]<line_sep># Get start_events for target_read_ids 
start_events=start_events[start_events['read_id'].isin(target_read_ids)]<line_sep>normal_starting_ids=start_events[start_events['time'].le(0)&start_events['label'].isin(classifications)]['read_id'].unique()<line_sep>abnormally_starting_ids=start_events[~start_events['read_id'].isin(normal_starting_ids)]['read_id'].unique()<line_sep>start_target_ss=seq_sum_df_1[seq_sum_df_1['read_id'].isin(abnormally_starting_ids)]<line_sep>print('Collecting abnormally ending reads:')<line_sep>end_read_info=write_files(end_target_ss args.bulk_files 'start_time' 'next_start' '{i}/ends/fast5/'.format(i=args.output_name))<line_sep>end_read_info.to_csv('{}/ends_read_info.txt'.format(args.output_name) sep='\t' index=<false> header=<true>)<line_sep>end_read_info.to_csv('{}/ends_filenames.txt'.format(args.output_name) sep='\t' index=<false> header=<false> columns=['filename'])<line_sep>print('Collecting abnormally starting reads:')<line_sep>start_read_info=write_files(start_target_ss args.bulk_files 'prev_start' 'end_time' '{i}/starts/fast5/'.format(i=args.output_name))<line_sep>start_read_info.to_csv('{}/starts_read_info.txt'.format(args.output_name) sep='\t' index=<false> header=<true>)<line_sep>start_read_info.to_csv('{}/starts_filenames.txt'.format(args.output_name) sep='\t' index=<false> header=<false> columns=['filename'])<line_sep><return><block_end><def_stmt>write_files target_ss bulkfiles read_start_col read_end_col export_path remove_pore=<true><block_start>"""Abstraction for export_read_file for collecting read info Parameters ---------- target_ss : pd.DataFrame DataFrame of reads to generate reads for bulkfiles: list list of bulk FAST5 files read_start_col : str Column in the target_ss that start index is derived from read_end_col : str Column in the target_ss that end index is derived from export_path : str The folder where read files will be written remove_pore : bool Remove pore-like signal from trace (>1500) Returns ------- pd.DataFrame DataFrame of read info about reads that have been written """<line_sep>d={'read_id':[] 'channel':[] 'start_index':[] 'end_index':[] 'bv_read_id':[] 'filename':[] 'bv_filename':[]}<line_sep>files_written=0<for_stmt>bf tqdm(bulkfiles)<block_start>f=h5py.File(bf 'r')<line_sep>run_id=f['UniqueGlobalKey']["tracking_id"].attrs["run_id"].decode('utf8')<line_sep>sf=int(f["UniqueGlobalKey"]["context_tags"].attrs["sample_frequency"].decode('utf8'))<line_sep>t=target_ss[target_ss['run_id']<eq>run_id]<line_sep>t=t.dropna()<line_sep>f.close()<line_sep>file=h5py.File(bf 'r')<for_stmt>idx,row tqdm(t.iterrows() total=t.shape[0] desc=run_id)<block_start>si=int(np.floor(row[read_start_col]<times>sf))<line_sep>ei=int(np.floor(row[read_end_col]<times>sf))<line_sep>d['read_id'].append(row['read_id'])<line_sep>d['channel'].append(row['channel'])<line_sep>d['start_index'].append(si)<line_sep>d['end_index'].append(ei)<line_sep>d['bv_read_id'].append("{ch}-{start}-{end}".format(ch=row['channel'] start=si end=ei))<line_sep>d['filename'].append(row['filename'])<line_sep>d['bv_filename'].append(export_read_file(row['channel'] si ei file export_path remove_pore=remove_pore))<line_sep>files_written<augadd>1<block_end><block_end>print('{} reads written'.format(files_written))<line_sep><return>pd.DataFrame(d)<block_end><def_stmt>concat_files_to_df file_list **kwargs<block_start>"""Return a pandas.DataFrame from a list of files """<line_sep>df_list=[]<for_stmt>f file_list<block_start><try_stmt><block_start>df_list.append(pd.read_csv(filepath_or_buffer=f 
**kwargs))<block_end><except_stmt>pd.errors.ParserError<as>e<block_start>print('{}\nThis is usually caused by an input file not being the expected format'.format(repr(e)))<line_sep>sys.exit(1)<block_end><except_stmt>Exception<as>e<block_start>sys.exit(1)<block_end><block_end><return>pd.concat(df_list ignore_index=<true>)<block_end><def_stmt>debug args<block_start>dirs=dir(args)<for_stmt>attr dirs<block_start><if_stmt>attr[0]<ne>'_'<block_start>print('{a:<25} {b}'.format(a=attr b=getattr(args attr)))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
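# Illustrative invocation sketch (the script filename and input filenames below are assumed, not taken from the original repository):
# python split_read_finder.py -s sequencing_summary.txt --start-events start_events.csv --end-events end_events.csv --targets target_read_ids.txt --bulk-files run1_bulk.fast5 -o split_reads_out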
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Symbol of SqueezeNet Reference: Iandola, <NAME>., et al. "Squeezenet: Alexnet-level accuracy with 50x fewer parameters and< 0.5 mb model size." (2016). """<import_stmt>mxnet<as>mx<line_sep># Helpers <def_stmt>_make_fire net squeeze_channels expand1x1_channels expand3x3_channels<block_start>net=_make_fire_conv(net squeeze_channels 1 0)<line_sep>left=_make_fire_conv(net expand1x1_channels 1 0)<line_sep>right=_make_fire_conv(net expand3x3_channels 3 1)<line_sep># NOTE : Assume NCHW layout here net=mx.sym.concat(left right dim=1)<line_sep><return>net<block_end><def_stmt>_make_fire_conv net channels kernel_size padding=0<block_start>net=mx.sym.Convolution(net num_filter=channels kernel=(kernel_size kernel_size) pad=(padding padding))<line_sep>net=mx.sym.Activation(net act_type="relu")<line_sep><return>net<block_end># Net <def_stmt>get_symbol num_classes=1000 version="1.0" **kwargs<block_start>"""Get symbol of SqueezeNet Parameters ---------- num_classes: int The number of classification results version : str, optional "1.0" or "1.1" of SqueezeNet """<assert_stmt>version<in>["1.0" "1.1" ] "Unsupported SqueezeNet version {version}:"<concat>"1.0 or 1.1 expected".format(version=version)<line_sep>net=mx.sym.Variable("data")<if_stmt>version<eq>"1.0"<block_start>net=mx.sym.Convolution(net num_filter=96 kernel=(7 7) stride=(2 2) pad=(3 3))<line_sep>net=mx.sym.Activation(net act_type="relu")<line_sep>net=mx.sym.Pooling(data=net kernel=(3 3) pool_type="max" stride=(2 2))<line_sep>net=_make_fire(net 16 64 64)<line_sep>net=_make_fire(net 16 64 64)<line_sep>net=_make_fire(net 32 128 128)<line_sep>net=mx.sym.Pooling(data=net kernel=(3 3) pool_type="max" stride=(2 2))<line_sep>net=_make_fire(net 32 128 128)<line_sep>net=_make_fire(net 48 192 192)<line_sep>net=_make_fire(net 48 192 192)<line_sep>net=_make_fire(net 64 256 256)<line_sep>net=mx.sym.Pooling(data=net kernel=(3 3) pool_type="max" stride=(2 2))<line_sep>net=_make_fire(net 64 256 256)<block_end><else_stmt><block_start>net=mx.sym.Convolution(net num_filter=64 kernel=(3 3) stride=(2 2) pad=(1 1))<line_sep>net=mx.sym.Activation(net act_type="relu")<line_sep>net=mx.sym.Pooling(data=net kernel=(3 3) pool_type="max" stride=(2 2))<line_sep>net=_make_fire(net 16 64 64)<line_sep>net=_make_fire(net 16 64 64)<line_sep>net=mx.sym.Pooling(data=net kernel=(3 3) pool_type="max" stride=(2 2))<line_sep>net=_make_fire(net 32 128 128)<line_sep>net=_make_fire(net 32 128 128)<line_sep>net=mx.sym.Pooling(data=net kernel=(3 3) pool_type="max" stride=(2 2))<line_sep>net=_make_fire(net 48 192 192)<line_sep>net=_make_fire(net 48 192 192)<line_sep>net=_make_fire(net 64 256 256)<line_sep>net=_make_fire(net 64 256 256)<block_end>net=mx.sym.Dropout(net p=0.5)<line_sep>net=mx.sym.Convolution(net 
num_filter=num_classes kernel=(1 1))<line_sep>net=mx.sym.Activation(net act_type="relu")<line_sep>net=mx.sym.Pooling(data=net global_pool=<true> kernel=(13 13) pool_type="avg")<line_sep>net=mx.sym.flatten(net)<line_sep><return>mx.sym.softmax(net)<block_end>
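# Illustrative usage sketch (not part of the original file): build the SqueezeNet v1.1 symbol and inspect the first few argument names; only mxnet is required.
<if_stmt>__name__<eq>'__main__'<block_start>sym=get_symbol(num_classes=1000 version="1.1")<line_sep>print(sym.list_arguments()[:5])<block_end>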
"""webOS Smart TV trigger dispatcher."""<import_from_future_stmt> annotations<import_from_stmt>typing cast<import_from_stmt>homeassistant.components.automation AutomationActionType AutomationTriggerInfo <import_from_stmt>homeassistant.const CONF_PLATFORM<import_from_stmt>homeassistant.core CALLBACK_TYPE HomeAssistant<import_from_stmt>homeassistant.helpers.typing ConfigType<import_from_stmt>.triggers TriggersPlatformModule turn_on<line_sep>TRIGGERS={"turn_on":turn_on }<def_stmt>_get_trigger_platform config:ConfigType<arrow>TriggersPlatformModule<block_start>"""Return trigger platform."""<line_sep>platform_split=config[CONF_PLATFORM].split("." maxsplit=1)<if_stmt>len(platform_split)<l>2<or>platform_split[1]<not><in>TRIGGERS<block_start><raise>ValueError(f"Unknown webOS Smart TV trigger platform {config[CONF_PLATFORM]}")<block_end><return>cast(TriggersPlatformModule TRIGGERS[platform_split[1]])<block_end><async_keyword><def_stmt>async_validate_trigger_config hass:HomeAssistant config:ConfigType<arrow>ConfigType<block_start>"""Validate config."""<line_sep>platform=_get_trigger_platform(config)<line_sep><return>cast(ConfigType platform.TRIGGER_SCHEMA(config))<block_end><async_keyword><def_stmt>async_attach_trigger hass:HomeAssistant config:ConfigType action:AutomationActionType automation_info:AutomationTriggerInfo <arrow>CALLBACK_TYPE<block_start>"""Attach trigger of specified platform."""<line_sep>platform=_get_trigger_platform(config)<assert_stmt>hasattr(platform "async_attach_trigger")<line_sep><return>cast(CALLBACK_TYPE <await>getattr(platform "async_attach_trigger")(hass config action automation_info) )<block_end>
<import_from_stmt>tflearn input_data conv_2d max_pool_2d fully_connected dropout Momentum regression DNN<line_sep>#model of vgg-19 <def_stmt>vgg_net_19 width height<block_start>network=input_data(shape=[<none> height width 3] name='input')<line_sep>network=conv_2d(network 64 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 64 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=conv_2d(network 128 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 128 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=conv_2d(network 256 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 256 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 256 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 256 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=fully_connected(network 4096 activation='relu' weight_decay=5e-4)<line_sep>network=dropout(network keep_prob=0.5)<line_sep>network=fully_connected(network 4096 activation='relu' weight_decay=5e-4)<line_sep>network=dropout(network keep_prob=0.5)<line_sep>network=fully_connected(network 1000 activation='softmax' weight_decay=5e-4)<line_sep>opt=Momentum(learning_rate=0 momentum=0.9)<line_sep>network=regression(network optimizer=opt loss='categorical_crossentropy' name='targets')<line_sep>model=DNN(network checkpoint_path='' max_checkpoints=1 tensorboard_verbose=2 tensorboard_dir='')<line_sep><return>model<block_end>#model of vgg-19 for testing of the activations #rename the output you want to test, connect it to the next layer and change the output layer at the bottom (model = DNN(...)) #make sure to use the correct test function (depending if your output is a tensor or a vector) <def_stmt>vgg_net_19_activations width height<block_start>network=input_data(shape=[<none> height width 3] name='input')<line_sep>network1=conv_2d(network 64 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network2=conv_2d(network1 64 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network2 2 strides=2)<line_sep>network=conv_2d(network 128 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 128 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=conv_2d(network 256 3 activation='relu' regularizer='L2' 
weight_decay=5e-4)<line_sep>network=conv_2d(network 256 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 256 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 256 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=conv_2d(network 512 3 activation='relu' regularizer='L2' weight_decay=5e-4)<line_sep>network=max_pool_2d(network 2 strides=2)<line_sep>network=fully_connected(network 4096 activation='relu' weight_decay=5e-4)<line_sep>network=dropout(network keep_prob=0.5)<line_sep>network=fully_connected(network 4096 activation='relu' weight_decay=5e-4)<line_sep>network=dropout(network keep_prob=0.5)<line_sep>network=fully_connected(network 1000 activation='softmax' weight_decay=5e-4)<line_sep>opt=Momentum(learning_rate=0 momentum=0.9)<line_sep>network=regression(network optimizer=opt loss='categorical_crossentropy' name='targets')<line_sep>model=DNN(network1 checkpoint_path='' max_checkpoints=1 tensorboard_verbose=2 tensorboard_dir='')<line_sep><return>model<block_end>
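# Hedged usage sketch (not part of the original file): how the activation
# model above might be exercised. The checkpoint path and the 224x224 input
# size are assumptions; treat this as an illustration, not the author's
# actual test procedure.
import numpy as np

model = vgg_net_19_activations(width=224, height=224)
model.load('vgg19_weights.tflearn')  # hypothetical checkpoint path
dummy_batch = np.zeros((1, 224, 224, 3), dtype=np.float32)
# Because the DNN was built on `network1` (the first conv layer), predict()
# returns that layer's activations rather than class probabilities.
first_conv_activations = np.asarray(model.predict(dummy_batch))
print(first_conv_activations.shape)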
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_stmt>numpy<as>np<line_sep>#! [auto_compilation] <import_stmt>openvino.runtime<as>ov<line_sep>compiled_model=ov.compile_model("model.xml")<line_sep>#! [auto_compilation] #! [properties_example] core=ov.Core()<line_sep>input_a=ov.opset8.parameter([8])<line_sep>res=ov.opset8.absolute(input_a)<line_sep>model=ov.Model(res [input_a])<line_sep>compiled=core.compile_model(model "CPU")<line_sep>print(model.inputs)<line_sep>print(model.outputs)<line_sep>print(compiled.inputs)<line_sep>print(compiled.outputs)<line_sep>#! [properties_example] #! [tensor_basics] data_float64=np.ones(shape=(2 8))<line_sep>tensor=ov.Tensor(data_float64)<assert_stmt>tensor.element_type<eq>ov.Type.f64<line_sep>data_int32=np.ones(shape=(2 8) dtype=np.int32)<line_sep>tensor=ov.Tensor(data_int32)<assert_stmt>tensor.element_type<eq>ov.Type.i32<line_sep>#! [tensor_basics] #! [tensor_shared_mode] data_to_share=np.ones(shape=(2 8))<line_sep>shared_tensor=ov.Tensor(data_to_share shared_memory=<true>)<line_sep># Editing of the numpy array affects Tensor's data data_to_share[0][2]=6.0<assert_stmt>shared_tensor.data[0][2]<eq>6.0<line_sep># Editing of Tensor's data affects the numpy array shared_tensor.data[0][2]=0.6<assert_stmt>data_to_share[0][2]<eq>0.6<line_sep>#! [tensor_shared_mode] infer_request=compiled.create_infer_request()<line_sep>data=np.random.randint(-5 3+1 size=(8))<line_sep>#! [passing_numpy_array] # Passing inputs data in form of a dictionary infer_request.infer(inputs={0:data})<line_sep># Passing inputs data in form of a list infer_request.infer(inputs=[data])<line_sep>#! [passing_numpy_array] #! [getting_results] # Get output tensor results=infer_request.get_output_tensor().data<line_sep># Get tensor with CompiledModel's output node results=infer_request.get_tensor(compiled.outputs[0]).data<line_sep># Get all results with special helper property results=list(infer_request.results.values())<line_sep>#! [getting_results] #! [sync_infer] # Simple call to InferRequest results=infer_request.infer(inputs={0:data})<line_sep># Extra feature: calling CompiledModel directly results=compiled_model(inputs={0:data})<line_sep>#! [sync_infer] #! [asyncinferqueue] core=ov.Core()<line_sep># Simple model that adds two inputs together input_a=ov.opset8.parameter([8])<line_sep>input_b=ov.opset8.parameter([8])<line_sep>res=ov.opset8.add(input_a input_b)<line_sep>model=ov.Model(res [input_a input_b])<line_sep>compiled=core.compile_model(model "CPU")<line_sep># Number of InferRequests that AsyncInferQueue holds jobs=4<line_sep>infer_queue=ov.AsyncInferQueue(compiled jobs)<line_sep># Create data data=[np.array([i]<times>8 dtype=np.float32)<for>i range(jobs)]<line_sep># Run all jobs <for_stmt>i range(len(data))<block_start>infer_queue.start_async({0:data[i] 1:data[i]})<block_end>infer_queue.wait_all()<line_sep>#! [asyncinferqueue] #! [asyncinferqueue_access] results=infer_queue[3].get_output_tensor().data<line_sep>#! [asyncinferqueue_access] #! [asyncinferqueue_set_callback] data_done=[<false><for>_ range(jobs)]<def_stmt>f request userdata<block_start>print(f"Done! Result: {request.get_output_tensor().data}")<line_sep>data_done[userdata]=<true><block_end>infer_queue.set_callback(f)<for_stmt>i range(len(data))<block_start>infer_queue.start_async({0:data[i] 1:data[i]} userdata=i)<block_end>infer_queue.wait_all()<assert_stmt>all(data_done)<line_sep>#! [asyncinferqueue_set_callback] unt8_data=np.ones([100])<line_sep>#! 
[packing_data] <import_from_stmt>openvino.helpers pack_data<line_sep>packed_buffer=pack_data(unt8_data ov.Type.u4)<line_sep># Create tensor with shape in element types t=ov.Tensor(packed_buffer [1 128] ov.Type.u4)<line_sep>#! [packing_data] #! [unpacking] <import_from_stmt>openvino.helpers unpack_data<line_sep>unpacked_data=unpack_data(t.data t.element_type t.shape)<assert_stmt>np.array_equal(unpacked_data unt8_data)<line_sep>#! [unpacking] #! [releasing_gil] <import_stmt>openvino.runtime<as>ov<import_stmt>cv2<as>cv<import_from_stmt>threading Thread<line_sep>input_data=[]<line_sep># Processing input data will be done in a separate thread # while compilation of the model and creation of the infer request # is going to be executed in the main thread. <def_stmt>prepare_data input image_path<block_start>image=cv.imread(image_path)<line_sep>h,w=list(input.shape)[-2:]<line_sep>image=cv.resize(image (h w))<line_sep>image=image.transpose((2 0 1))<line_sep>image=np.expand_dims(image 0)<line_sep>input_data.append(image)<block_end>core=ov.Core()<line_sep>model=core.read_model("model.xml")<line_sep># Create thread with prepare_data function as target and start it thread=Thread(target=prepare_data args=[model.input() "path/to/image"])<line_sep>thread.start()<line_sep># The GIL will be released in compile_model. # It allows a thread above to start the job, # while main thread is running in the background. compiled=core.compile_model(model "GPU")<line_sep># After returning from compile_model, the main thread acquires the GIL # and starts create_infer_request which releases it once again. request=compiled.create_infer_request()<line_sep># Join the thread to make sure the input_data is ready thread.join()<line_sep># running the inference request.infer(input_data)<line_sep>#! [releasing_gil]
# -*- coding: utf-8 -*- <import_from_future_stmt> print_function division absolute_import unicode_literals<import_stmt>pytest<import_from_stmt>RPLCD.gpio CharLCD<import_from_stmt>RPLCD.common LCD_SETDDRAMADDR<def_stmt>test_write_simple mocker charlcd_kwargs<block_start>""" Write "HelloWorld" to the display. """<line_sep>lcd=CharLCD(**charlcd_kwargs)<line_sep>send_data=mocker.patch.object(lcd '_send_data')<line_sep>text='HelloWorld'<line_sep>lcd.write_string(text)<assert_stmt>send_data.call_count<eq>len(text)<line_sep>calls=[c[0]<for>c send_data.call_args_list]<assert_stmt>calls[0]<eq>(72 )<assert_stmt>calls[1]<eq>(101 )<assert_stmt>calls[2]<eq>(108 )<assert_stmt>calls[3]<eq>(108 )<assert_stmt>calls[4]<eq>(111 )<assert_stmt>calls[5]<eq>(87 )<assert_stmt>calls[6]<eq>(111 )<assert_stmt>calls[7]<eq>(114 )<assert_stmt>calls[8]<eq>(108 )<assert_stmt>calls[9]<eq>(100 )<block_end><def_stmt>test_caching mocker charlcd_kwargs<block_start>""" Characters should only be written if they have changed """<line_sep>lcd=CharLCD(**charlcd_kwargs)<line_sep>send_data=mocker.patch.object(lcd '_send_data')<line_sep>send_instruction=mocker.patch.object(lcd '_send_instruction')<line_sep>lcd.write_string('hello')<assert_stmt>send_data.call_count<eq>5<line_sep>data_calls=[c[0]<for>c send_data.call_args_list]<assert_stmt>data_calls[0]<eq>(104 )<assert_stmt>data_calls[1]<eq>(101 )<assert_stmt>data_calls[2]<eq>(108 )<assert_stmt>data_calls[3]<eq>(108 )<assert_stmt>data_calls[4]<eq>(111 )<line_sep>lcd.home()<line_sep>send_data.reset_mock()<line_sep>send_instruction.reset_mock()<line_sep>lcd.write_string('he77o')<assert_stmt>send_data.call_count<eq>2<assert_stmt>send_instruction.call_count<eq>3<line_sep>data_calls=[c[0]<for>c send_data.call_args_list]<line_sep>instruction_calls=[c[0]<for>c send_instruction.call_args_list]<assert_stmt>instruction_calls[0]<eq>(LCD_SETDDRAMADDR|1 )<assert_stmt>instruction_calls[1]<eq>(LCD_SETDDRAMADDR|2 )<assert_stmt>data_calls[0]<eq>(55 )<assert_stmt>data_calls[1]<eq>(55 )<assert_stmt>instruction_calls[2]<eq>(LCD_SETDDRAMADDR|5 )<block_end>@pytest.mark.parametrize(['charmap' 'ue'] [('A00' 0b11110101) ('A02' 0b11111100) ])<def_stmt>test_charmap mocker charmap ue charlcd_kwargs<block_start>""" The charmap should be used. The "ü" Umlaut should be encoded correctly. """<line_sep>lcd=CharLCD(charmap=charmap **charlcd_kwargs)<line_sep>send=mocker.patch.object(lcd '_send_data')<line_sep>text='Züri'<line_sep>lcd.write_string(text)<assert_stmt>send.call_count<eq>4 'call count was %d'%send.call_count<line_sep>calls=[c[0]<for>c send.call_args_list]<assert_stmt>calls[0]<eq>(90 )<assert_stmt>calls[1]<eq>(ue )<assert_stmt>calls[2]<eq>(114 )<assert_stmt>calls[3]<eq>(105 )<block_end>@pytest.mark.parametrize(['rows' 'cols'] [(2 16) (4 20) ])<def_stmt>test_write_newline mocker rows cols charlcd_kwargs<block_start>""" Write text containing CR/LF chars to the display. 
"""<line_sep>lcd=CharLCD(rows=rows cols=cols **charlcd_kwargs)<line_sep>send_data=mocker.patch.object(lcd '_send_data')<line_sep>send_instruction=mocker.patch.object(lcd '_send_instruction')<line_sep>text='\nab\n\rcd'<line_sep>lcd.write_string(text)<assert_stmt>send_data.call_count+send_instruction.call_count<eq>len(text)<line_sep>data_calls=[c[0]<for>c send_data.call_args_list]<line_sep>instruction_calls=[c[0]<for>c send_instruction.call_args_list]<assert_stmt>instruction_calls[0]<eq>(0x80+0x40 ) instruction_calls<assert_stmt>data_calls[0]<eq>(97 ) data_calls<assert_stmt>data_calls[1]<eq>(98 ) data_calls<if_stmt>rows<eq>2<block_start><assert_stmt>instruction_calls[1]<eq>(0x80+2 ) instruction_calls<assert_stmt>instruction_calls[2]<eq>(0x80+0 ) instruction_calls<block_end><else_stmt><block_start><assert_stmt>instruction_calls[1]<eq>(0x80+cols+2 ) instruction_calls<assert_stmt>instruction_calls[2]<eq>(0x80+cols+0 ) instruction_calls<block_end><assert_stmt>data_calls[2]<eq>(99 ) data_calls<assert_stmt>data_calls[3]<eq>(100 ) data_calls<block_end>
# OLD USAGE # python align_faces.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg # import the necessary packages <import_from_stmt>imutils.face_utils FaceAligner<import_from_stmt>PIL Image<import_stmt>numpy<as>np<line_sep># import argparse <import_stmt>imutils<import_stmt>dlib<import_stmt>cv2<line_sep># construct the argument parser and parse the arguments # ap = argparse.ArgumentParser() # ap.add_argument("--shape-predictor", help="path to facial landmark predictor", default='shape_predictor_68_face_landmarks.dat') # ap.add_argument("--input", help="path to input images", default='input_raw') # ap.add_argument("--output", help="path to input images", default='input_aligned') # args = vars(ap.parse_args()) # initialize dlib's face detector (HOG-based) and then create # the facial landmark predictor and the face aligner detector=dlib.get_frontal_face_detector()<line_sep>predictor=dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')<line_sep>fa=FaceAligner(predictor desiredFaceWidth=256 desiredLeftEye=(0.371 0.480))<line_sep># Input: numpy array for image with RGB channels # Output: (numpy array, face_found) <def_stmt>align_face img<block_start>img=img[: : ::-1]# Convert from RGB to BGR format img=imutils.resize(img width=800)<line_sep># detect faces in the grayscale image gray=cv2.cvtColor(img cv2.COLOR_BGR2GRAY)<line_sep>rects=detector(gray 2)<if_stmt>len(rects)<g>0# align the face using facial landmarks <block_start>align_img=fa.align(img gray rects[0])[: : ::-1]<line_sep>align_img=np.array(Image.fromarray(align_img).convert('RGB'))<line_sep><return>align_img <true><block_end><else_stmt># No face found <block_start><return><none> <false><block_end><block_end># Input: img_path # Output: aligned_img if face_found, else None <def_stmt>align img_path<block_start>img=Image.open(img_path)<line_sep>img=img.convert('RGB')# if image is RGBA or Grayscale etc img=np.array(img)<line_sep>x,face_found=align_face(img)<line_sep><return>x<block_end>
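# Hedged usage sketch (not from the source): aligning the example image that
# the old argparse defaults refer to and saving the result. The output
# filename is an assumption.
from PIL import Image

aligned = align('images/example_01.jpg')
if aligned is not None:
    Image.fromarray(aligned).save('aligned_example_01.jpg')
else:
    print('No face found in the input image')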
# -*- coding: utf-8 -*- # Copyright 2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_future_stmt> absolute_import<import_from_future_stmt> unicode_literals<import_from_stmt>contextlib contextmanager<import_from_stmt>kafka KafkaClient<import_from_stmt>kafka SimpleConsumer<import_from_stmt>data_pipeline.config get_config<import_from_stmt>data_pipeline.message create_from_offset_and_message<line_sep>_ONE_MEGABYTE=1024<times>1024<line_sep>logger=get_config().logger<line_sep>@contextmanager<def_stmt>capture_new_data_pipeline_messages topic<block_start>"""contextmanager that moves to the tail of the given topic, and waits to receive new messages, returning a function that can be called zero or more times which will retrieve decoded data pipeline messages from the topic. Returns: Callable[[int], List[Message]]: Function that takes a single optional argument, count, and returns up to count decoded data pipeline messages. This function does not block, and will return however many messages are available immediately. Default count is 100. """<with_stmt>capture_new_messages(topic)<as>get_kafka_messages<block_start><def_stmt>get_data_pipeline_messages count=100<block_start>kafka_messages=get_kafka_messages(count)<line_sep><return>[create_from_offset_and_message(kafka_message)<for>kafka_message kafka_messages]<block_end><yield>get_data_pipeline_messages<block_end><block_end>@contextmanager<def_stmt>capture_new_messages topic<block_start>"""Seeks to the tail of the topic then returns a function that can consume messages from that point. """<with_stmt>setup_capture_new_messages_consumer(topic)<as>consumer<block_start><def_stmt>get_messages count=100<block_start><return>consumer.get_messages(count=count)<block_end><yield>get_messages<block_end><block_end>@contextmanager<def_stmt>setup_capture_new_messages_consumer topic<block_start>"""Seeks to the tail of the topic then returns a function that can consume messages from that point. """<line_sep>kafka=KafkaClient(get_config().cluster_config.broker_list)<line_sep>group=str('data_pipeline_clientlib_test')<line_sep>consumer=SimpleConsumer(kafka group topic max_buffer_size=_ONE_MEGABYTE)<line_sep>consumer.seek(0 2)# seek to tail, 0 is the offset, and 2 is the tail <yield>consumer<line_sep>kafka.close()<block_end>
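# Hedged usage sketch (not part of the original module): how the capture
# helpers above are typically driven from a test. The topic name and the
# publishing step are assumptions; get_messages() does not block and returns
# only messages produced after the context manager seeks to the topic tail.
def example_capture_test(publish_example_message):
    with capture_new_data_pipeline_messages('example.topic') as get_messages:
        publish_example_message()          # hypothetical code under test
        messages = get_messages(count=10)  # up to 10 decoded Message objects
        return messages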
<import_stmt>threading<import_stmt>errors<import_stmt>vs<import_stmt>logging<import_stmt>gc<import_from_stmt>blinker signal<import_from_stmt>utils performance<import_from_stmt>output.output Output<line_sep># We need to be able to load (not run) vs_server on windows to generate the documentation. # So we're skipping non-windows imports <try_stmt><block_start><import_stmt>psutil<block_end><except_stmt>ImportError<block_start><pass><block_end>PROFILING_STITCH_FORMAT=vs.NV12<class_stmt>ProfilingOutput(Output)<block_start>"""Profiling output """<def_stmt>__init__ self stitcher name="profiling" critical=<false> preserved=<false><block_start>super(ProfilingOutput self).__init__(stitcher name critical preserved)<line_sep>self.writer=<none><line_sep>self.pid=psutil.Process()<block_end><def_stmt>reset self<block_start>self._transition_check()<line_sep>self.pid.cpu_percent(interval=<none>)<line_sep>vs.Output_reset(self.writer.object())<block_end><def_stmt>_start self profiling_time=0 preserve=<false># Todo I don't like that it's created differently from other outputs here, but for now I left it like this <block_start>panorama=self.stitcher.project_manager.panorama<line_sep>self.writer=vs.Output_profiling(self.name panorama.width panorama.height self.stitcher.project_manager.controller.getFrameRateFromInputController() PROFILING_STITCH_FORMAT)<if_stmt>self.writer<is><none><block_start><raise>errors.InternalError()<block_end>self.shared_writer=vs.writerSharedPtr(self.writer.object())<line_sep>self.shared_video=vs.videoWriterSharedPtr(self.shared_writer)<line_sep>self.has_audio=<false><if_stmt>self.shared_video<is><not><none><and><not>self.stitcher.stitch_output.addWriter(self.shared_video)<block_start><raise>errors.InternalError("Cannot add profiling writer to stitcher")<block_end><if_stmt>profiling_time<g>0<block_start>threading.Timer(profiling_time self.t_stop).start()<block_end>self.pid.cpu_percent(interval=<none>)<line_sep>#jump automatically from starting state to started state self.t_writer_ok()<block_end><def_stmt>_stop self<block_start>self.fps=vs.Output_getFps(self.writer.release())<line_sep>self.writer=<none><line_sep>logging.info("fps is %f:"%self.fps)<line_sep>logging.info("cpu_util is %d"%self.pid.cpu_percent(interval=<none>))<line_sep>cuda=performance.getCudaInfo()<line_sep>logging.info("gpu_util is %d"%int(cuda['utilization.gpu']))<line_sep>logging.info("enc_util is %s"%cuda['utilization.enc'])<line_sep>success=self.stitcher.stitch_output.removeWriterNoGIL(self.name)<line_sep>signal("profiling_stopping").send()<if_stmt><not>success<block_start><raise>errors.InternalError("Cannot remove writer")<block_end>self.shared_video=<none><line_sep>self.shared_writer=<none><line_sep>gc.collect()<line_sep>#jump automatically from stopping state to stopped state self.t_writer_completed()<block_end><def_stmt>get_statistics self<block_start>cuda=performance.getCudaInfo()<line_sep>self._transition_check()<if_stmt>self.writer<is><not><none><block_start>self.fps=vs.Output_getFps(self.writer.object())<block_end><return>{"fps":self.fps "cpu":self.pid.cpu_percent(interval=<none>) "gpu":float(cuda['utilization.gpu']) "enc":float(cuda['utilization.enc'])}<block_end><block_end>
"""This module implements "Local" classes that mimic their associated `cloudpathlib` non-local counterparts but use the local filesystem in place of cloud storage. They can be used as drop-in replacements, with the intent that you can use them as mock or monkepatch substitutes in your tests. See ["Testing code that uses cloudpathlib"](../../testing_mocked_cloudpathlib/) for usage examples. """<import_from_stmt>.implementations local_azure_blob_implementation LocalAzureBlobClient LocalAzureBlobPath local_gs_implementation LocalGSClient LocalGSPath local_s3_implementation LocalS3Client LocalS3Path <import_from_stmt>.localclient LocalClient<import_from_stmt>.localpath LocalPath<line_sep>__all__=["local_azure_blob_implementation" "LocalAzureBlobClient" "LocalAzureBlobPath" "LocalClient" "local_gs_implementation" "LocalGSClient" "LocalGSPath" "LocalPath" "local_s3_implementation" "LocalS3Client" "LocalS3Path" ]<line_sep>
<import_stmt>jieba# word segmentation library <import_stmt>jieba.analyse<import_stmt>pymongo<import_stmt>redis<import_stmt>os<import_stmt>re<import_stmt>json<line_sep>client=pymongo.MongoClient(host="127.0.0.1" port=27017)<line_sep>db=client['job']<line_sep>collection=db['position']<line_sep>data=collection.find({})<line_sep>text=""<for_stmt>item data<block_start>text<augadd>item['body']<block_end>pwd=os.path.split(os.path.realpath(__file__))[0]<line_sep>stopWord=pwd+'/stop.txt'<line_sep>jieba.analyse.set_stop_words(stopWord)<line_sep>cut_text=list(jieba.cut(text))<line_sep>it_text=dict({})<for_stmt>x cut_text<block_start>G=re.match('[a-zA-Z]+' x)<if_stmt>G<block_start>key=G.group()<line_sep>keys=map(<lambda>x:x.lower() it_text.keys())<if_stmt>key.lower()<in>keys<block_start>it_text[key.lower()]<augadd>1<block_end><else_stmt><block_start>it_text[key.lower()]=1<block_end><block_end><block_end><with_stmt>open("word.json" "w+" encoding="utf-8")<as>file<block_start>data=file.write(json.dumps(it_text))<block_end>result="/".join(cut_text)# the tokens must be joined with a separator into one string, otherwise the word cloud cannot be drawn; cut_text was materialized as a list above so it can be iterated a second time here data=jieba.analyse.extract_tags(result.replace('/' '') withWeight=<false> allowPOS=())<line_sep>#print(",".join(data))
"""Migration for a given Submitty course database."""<def_stmt>up config database semester course<block_start><if_stmt><not>database.table_has_column('teams' 'last_viewed_time')<block_start>database.execute('ALTER TABLE teams ADD COLUMN last_viewed_time timestamp with time zone')<block_end><block_end><def_stmt>down config database semester course<block_start><pass><block_end>