# -*- coding: utf-8 -*-
"""Simple demo of streaming transaction data."""
from oandapyV20 import API
from oandapyV20.exceptions import V20Error, StreamTerminated
from oandapyV20.endpoints.transactions import TransactionsStream
from exampleauth import exampleAuth

accountID, access_token = exampleAuth()
api = API(access_token=access_token, environment="practice")
s = TransactionsStream(accountID=accountID)
MAXTRANS = 10
print("read from stream until {} transactions received".format(MAXTRANS))
try:
    n = 0
    for R in api.request(s):
        print(R)
        n += 1
        if n > MAXTRANS:
            s.terminate("max transactions received")
except StreamTerminated as e:
    print("{}".format(e))
except V20Error as e:
    print("Error: {}".format(e))
from office365.runtime.client_value import ClientValue


class Thumbnail(ClientValue):
    """
    The thumbnail resource type represents a thumbnail for an image, video,
    document, or any item that has a bitmap representation.
    """
    pass
'''
Motion Event Provider
=====================

Abstract class for the implementation of a
:class:`~kivy.input.motionevent.MotionEvent` provider. The implementation
must support the :meth:`~MotionEventProvider.start`,
:meth:`~MotionEventProvider.stop` and :meth:`~MotionEventProvider.update`
methods.
'''

__all__ = ('MotionEventProvider', )


class MotionEventProvider(object):
    '''Base class for a provider.
    '''

    def __init__(self, device, args):
        self.device = device
        if self.__class__ == MotionEventProvider:
            raise NotImplementedError('class MotionEventProvider is abstract')

    def start(self):
        '''Start the provider. This method is automatically called when the
        application is started and if the configuration uses the current
        provider.
        '''
        pass

    def stop(self):
        '''Stop the provider.
        '''
        pass

    def update(self, dispatch_fn):
        '''Update the provider and dispatch all the new touch events through
        the `dispatch_fn` argument.
        '''
        pass
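Below is a minimal sketch, not part of the Kivy source above, of what a concrete subclass of MotionEventProvider could look like; the DummyProvider name and its behaviour are illustrative assumptions only.

class DummyProvider(MotionEventProvider):
    '''Hypothetical provider that never generates touch events.'''

    def start(self):
        # A real provider would open its input device here.
        self.running = True

    def stop(self):
        self.running = False

    def update(self, dispatch_fn):
        # A real provider would pass each newly collected MotionEvent to
        # dispatch_fn here; this sketch has nothing to dispatch.
        pass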
# -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.utils.translation import gettext_lazy as _

from .models import Address
from .widgets import AddressWithMapWidget


class HasExceptionFilter(admin.SimpleListFilter):
    title = _("exception")
    parameter_name = "has_exception"

    def lookups(self, request, model_admin):
        return (
            (1, _("Yes")),
            (0, _("No")),
        )

    def queryset(self, request, queryset):
        if self.value() is not None:
            ids = Address.objects.values_list("pk", flat=True)
            if self.value() == "1":
                return queryset.filter(pk__in=ids)
            elif self.value() == "0":
                return queryset.exclude(pk__in=ids)
        return queryset


class AddressAdmin(admin.ModelAdmin):
    list_display = ["address", "computed_address", "latitude", "longitude", "has_exception"]
    list_filter = [HasExceptionFilter]
    search_fields = ["address"]

    class form(forms.ModelForm):
        class Meta:
            widgets = {"address": AddressWithMapWidget({"class": "vTextField"})}
# -*- coding: utf-8 -*-

from benedict import benedict

import unittest


class benedict_casting_test_case(unittest.TestCase):

    def test__getitem__(self):
        d = {'a': 1, 'b': {'c': {'d': 2}}}
        b = benedict(d)
        c = b['b.c']
        self.assertTrue(isinstance(c, benedict))
        self.assertEqual(type(c), benedict)
        self.assertTrue(c == d['b']['c'])
        self.assertFalse(c is d['b']['c'])

    def test_cast_dict_to_benedict(self):
        d = {'a': 1, 'b': {'c': {'d': 2}}}
        b = benedict(d)
        bb = benedict(b)
        bbd = bb.dict()
        self.assertTrue(isinstance(bbd, dict))
        self.assertFalse(isinstance(bbd, benedict))
        self.assertEqual(d, bbd)
        self.assertTrue(d is bbd)

    def test_cast_benedict_to_dict(self):
        b = benedict({'a': 1, 'b': {'c': {'d': 2}}})
        # d1 = dict(**b)
        # print(d1)
        d = dict(b)
        self.assertTrue(isinstance(d, dict))
        self.assertEqual(type(d), dict)
        self.assertEqual(b, d)
        self.assertFalse(b is d)
        d = dict(b)
        self.assertTrue(isinstance(d, dict))
        self.assertEqual(type(d), dict)
        self.assertEqual(b, d)
        self.assertFalse(b is d)

    def test_cast_benedict_kwargs_to_dict(self):
        b = benedict({'a': 1, 'b': {'c': {'d': 2}}})
        d = dict(**b)
        self.assertTrue(isinstance(d, dict))
        self.assertEqual(type(d), dict)
        self.assertEqual(b, d)
        self.assertFalse(b is d)

    def test_dict(self):
        d = {'a': 1, 'b': {'c': {'d': 2}}}
        b = benedict(d)
        bd = b.dict()
        self.assertTrue(isinstance(bd, dict))
        self.assertFalse(isinstance(bd, benedict))
        self.assertTrue(d == bd)
        self.assertTrue(d is bd)

    def test_get(self):
        d = {'a': 1, 'b': {'c': {'d': 2}}}
        b = benedict(d)
        c = b.get('b.c')
        self.assertTrue(isinstance(c, benedict))
        self.assertEqual(type(c), benedict)
        self.assertTrue(c == d['b']['c'])
        self.assertFalse(c is d['b']['c'])

    def test_get_dict(self):
        d = {'a': 1, 'b': {'c': {'d': 2}}}
        b = benedict(d)
        c = b.get_dict('b.c')
        self.assertTrue(isinstance(c, benedict))
        self.assertEqual(type(c), benedict)
        self.assertTrue(c == d['b']['c'])
        self.assertFalse(c is d['b']['c'])

    def test_get_list_item(self):
        d = {'a': 1, 'b': {'c': [{'d': 2}, {'e': 3}, {'f': 4}]}}
        b = benedict(d)
        c = b.get_list_item('b.c', 1)
        self.assertTrue(isinstance(c, benedict))
        self.assertEqual(type(c), benedict)
        self.assertTrue(c == d['b']['c'][1])
        self.assertFalse(c is d['b']['c'][1])

    def test_pop(self):
        d = {'a': 1, 'b': {'c': {'d': 2}}}
        b = benedict(d)
        c = b.pop('b.c')
        self.assertTrue(isinstance(c, benedict))
        self.assertEqual(type(c), benedict)
        with self.assertRaises(KeyError):
            d['b']['c']
from moshmosh.extensions.pattern_matching.runtime import ListView

v = ListView([1, 2, 3, 5], range(1, 4))
v.sort(reverse=True)
assert v == [5, 3, 2]

v.sort(reverse=False)
assert v == [2, 3, 5]

v.sort(reverse=False, key=lambda x: -x)
assert v == [5, 3, 2]

assert isinstance(v, list)
<import_stmt>argparse<import_from_stmt>copy deepcopy<import_from_stmt>pprint pprint<import_stmt>torch.backends<import_from_stmt>PIL Image<import_from_stmt>torch optim<import_from_stmt>torchvision.transforms transforms<import_from_stmt>tqdm tqdm<import_from_stmt>baal get_heuristic ActiveLearningLoop<import_from_stmt>baal.bayesian.dropout MCDropoutModule<import_from_stmt>baal ModelWrapper<import_from_stmt>baal ClassificationReport<import_from_stmt>baal PILToLongTensor<import_from_stmt>utils pascal_voc_ids active_pascal add_dropout FocalLoss<try_stmt><block_start><import_stmt>segmentation_models_pytorch<as>smp<block_end><except_stmt>ImportError<block_start><raise>Exception('This example requires `smp`.\n pip install segmentation_models_pytorch')<block_end><import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_stmt>numpy<as>np<def_stmt>mean_regions n grid_size=16# Compute the mean uncertainty per regions. # [batch_size, W, H] <block_start>n=torch.from_numpy(n[: <none> <ellipsis>])<line_sep># [Batch_size, 1, grid, grid] out=F.adaptive_avg_pool2d(n grid_size)<line_sep><return>np.mean(out.view([-1 grid_size<power>2]).numpy() -1)<block_end><def_stmt>parse_args <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--al_step" default=200 type=int)<line_sep>parser.add_argument("--batch_size" default=8 type=int)<line_sep>parser.add_argument("--initial_pool" default=40 type=int)<line_sep>parser.add_argument("--n_data_to_label" default=20 type=int)<line_sep>parser.add_argument("--lr" default=0.001)<line_sep>parser.add_argument("--heuristic" default="random" type=str)<line_sep>parser.add_argument("--reduce" default="sum" type=str)<line_sep>parser.add_argument("--data_path" default="/data" type=str)<line_sep>parser.add_argument("--iterations" default=20 type=int)<line_sep>parser.add_argument("--learning_epoch" default=50 type=int)<line_sep><return>parser.parse_args()<block_end><def_stmt>get_datasets initial_pool path<block_start>IM_SIZE=224<line_sep># TODO add better data augmentation scheme. transform=transforms.Compose([transforms.Resize(512) transforms.CenterCrop(IM_SIZE) transforms.ToTensor() transforms.Normalize([0.485 0.456 0.406] [0.229 0.224 0.225]) ])<line_sep>test_transform=transforms.Compose([transforms.Resize(512) transforms.CenterCrop(IM_SIZE) transforms.ToTensor() transforms.Normalize([0.485 0.456 0.406] [0.229 0.224 0.225]) ])<line_sep>target_transform=transforms.Compose([transforms.Resize(512 interpolation=Image.NEAREST) transforms.CenterCrop(IM_SIZE) PILToLongTensor(pascal_voc_ids)])<line_sep>active_set,test_set=active_pascal(path=path transform=transform test_transform=test_transform target_transform=target_transform)<line_sep>active_set.label_randomly(initial_pool)<line_sep><return>active_set test_set<block_end><def_stmt>main <block_start>args=parse_args()<line_sep>batch_size=args.batch_size<line_sep>use_cuda=torch.cuda.is_available()<line_sep>hyperparams=vars(args)<line_sep>pprint(hyperparams)<line_sep>active_set,test_set=get_datasets(hyperparams['initial_pool'] hyperparams['data_path'])<line_sep># We will use the FocalLoss criterion=FocalLoss(gamma=2 alpha=0.25)<line_sep># Our model is a simple Unet model=smp.Unet(encoder_name='resnext50_32x4d' encoder_depth=5 encoder_weights='imagenet' decoder_use_batchnorm=<false> classes=len(pascal_voc_ids))<line_sep># Add a Dropout layerto use MC-Dropout add_dropout(model classes=len(pascal_voc_ids) activation=<none>)<line_sep># This will enable Dropout at test time. 
model=MCDropoutModule(model)<line_sep># Put everything on GPU. <if_stmt>use_cuda<block_start>model.cuda()<block_end># Make an optimizer optimizer=optim.SGD(model.parameters() lr=hyperparams["lr"] momentum=0.9 weight_decay=5e-4)<line_sep># Keep a copy of the original weights initial_weights=deepcopy(model.state_dict())<line_sep># Add metrics model=ModelWrapper(model criterion)<line_sep>model.add_metric('cls_report' <lambda>:ClassificationReport(len(pascal_voc_ids)))<line_sep># Which heuristic you want to use? # We will use our custom reduction function. heuristic=get_heuristic(hyperparams['heuristic'] reduction=mean_regions)<line_sep># The ALLoop is in charge of predicting the uncertainty and loop=ActiveLearningLoop(active_set model.predict_on_dataset_generator heuristic=heuristic ndata_to_label=hyperparams['n_data_to_label'] # Instead of predicting on the entire pool, only a subset is used max_sample=1000 batch_size=batch_size iterations=hyperparams["iterations"] use_cuda=use_cuda)<line_sep>acc=[]<for_stmt>epoch tqdm(range(args.al_step))# Following Gal et al. 2016, we reset the weights. <block_start>model.load_state_dict(initial_weights)<line_sep># Train 50 epochs before sampling. model.train_on_dataset(active_set optimizer batch_size hyperparams['learning_epoch'] use_cuda)<line_sep># Validation! model.test_on_dataset(test_set batch_size use_cuda)<line_sep>should_continue=loop.step()<line_sep>metrics=model.metrics<line_sep>val_loss=metrics['test_loss'].value<line_sep>logs={"val":val_loss "epoch":epoch "train":metrics['train_loss'].value "labeled_data":active_set.labelled "Next Training set size":len(active_set) 'cls_report':metrics['test_cls_report'].value }<line_sep>pprint(logs)<line_sep>acc.append(logs)<if_stmt><not>should_continue<block_start><break><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json

from testfixtures import mock

from kale.common import kfputils


@mock.patch('kale.common.kfputils.workflowutils')
@mock.patch('kale.common.kfputils.podutils')
def test_update_uimetadata_not_exists(podutils, workflowutils, tmpdir):
    """Test the uimetadata file is created when it does not exist."""
    podutils.get_pod_name.return_value = 'test_pod'
    podutils.get_namespace.return_value = 'test_ns'
    workflowutils.get_workflow_name.return_value = 'test_wk'

    filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')

    # update tmp file
    kfputils.update_uimetadata('test', uimetadata_path=filepath)

    # check file has been updated correctly
    updated = json.loads(open(filepath).read())
    target = {"outputs": [{
        'type': 'web-app',
        'storage': 'minio',
        'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
    }]}
    assert updated == target


@mock.patch('kale.common.kfputils.workflowutils')
@mock.patch('kale.common.kfputils.podutils')
def test_update_uimetadata_from_empty(podutils, workflowutils, tmpdir):
    """Test that the uimetadata file is updated inplace correctly."""
    podutils.get_pod_name.return_value = 'test_pod'
    podutils.get_namespace.return_value = 'test_ns'
    workflowutils.get_workflow_name.return_value = 'test_wk'

    # create base tmp file
    base = {"outputs": []}
    filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')
    json.dump(base, open(filepath, 'w'))

    # update tmp file
    kfputils.update_uimetadata('test', uimetadata_path=filepath)

    # check file has been updated correctly
    updated = json.loads(open(filepath).read())
    target = {"outputs": [{
        'type': 'web-app',
        'storage': 'minio',
        'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
    }]}
    assert updated == target


@mock.patch('kale.common.kfputils.workflowutils')
@mock.patch('kale.common.kfputils.podutils')
def test_update_uimetadata_from_not_empty(podutils, workflowutils, tmpdir):
    """Test that the uimetadata file is updated inplace correctly."""
    podutils.get_pod_name.return_value = 'test_pod'
    podutils.get_namespace.return_value = 'test_ns'
    workflowutils.get_workflow_name.return_value = 'test_wk'

    # create base tmp file
    markdown = {
        'type': 'markdown',
        'storage': 'inline',
        'source': '#Some markdown'
    }
    base = {"outputs": [markdown]}
    filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')
    json.dump(base, open(filepath, 'w'))

    # update tmp file
    kfputils.update_uimetadata('test', uimetadata_path=filepath)

    # check file has been updated correctly
    updated = json.loads(open(filepath).read())
    target = {"outputs": [markdown, {
        'type': 'web-app',
        'storage': 'minio',
        'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
    }]}
    assert updated == target
# -*- coding: utf-8 -*-
"""
Simple logging class
Released under the MIT license
Copyright (c) 2012, <NAME>

@category   misc
@version    $Id: 1.7.0, 2016-08-22 14:53:29 ACST $;
@author     <NAME>
@license    http://opensource.org/licenses/MIT
"""

import logging
import os
import sys


class Logger(object):

    def __init__(self, name, debug, silent):
        self.silent = silent
        frmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                                 "%Y-%m-%d %H:%M:%S")
        if debug:
            loglevel = logging.DEBUG
        else:
            loglevel = logging.INFO
        self.createhandlers(frmt, name, loglevel)

    def __del__(self):
        if not self.silent:
            self.log.removeHandler(self.sh)
        self.log.removeHandler(self.fh)
        self.log = None

    def createhandlers(self, frmt, name, loglevel):
        self.log = logging.getLogger(name)
        self.log.setLevel(loglevel)

        if not self.silent:
            self.sh = logging.StreamHandler(sys.stdout)
            self.sh.setLevel(loglevel)
            self.sh.setFormatter(frmt)
            self.log.addHandler(self.sh)

        DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.fh = logging.FileHandler('%s/autorippr.log' % DIR)
        self.fh.setLevel(loglevel)
        self.fh.setFormatter(frmt)
        self.log.addHandler(self.fh)

    def debug(self, msg):
        self.log.debug(msg)

    def info(self, msg):
        self.log.info(msg)

    def warn(self, msg):
        self.log.warn(msg)

    def error(self, msg):
        self.log.error(msg)

    def critical(self, msg):
        self.log.critical(msg)
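A brief usage sketch for the Logger class above (not part of the original file); the logger name and the messages are made up for illustration.

log = Logger(name="autorippr", debug=True, silent=False)
log.info("Starting rip")        # printed to stdout and appended to autorippr.log
log.error("Disc read failed")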
class SerializerNotFound(Exception):
    """
    Serializer not found
    """
import pandas as pd
from .general_utils import chunkify
from dpu_utils.utils import RichPath
from multiprocessing import Pool, cpu_count


def df_to_jsonl(df: pd.DataFrame, RichPath_obj: RichPath, i: int, basefilename='codedata') -> str:
    dest_filename = f'{basefilename}_{str(i).zfill(5)}.jsonl.gz'
    RichPath_obj.join(dest_filename).save_as_compressed_file(df.to_dict(orient='records'))
    return str(RichPath_obj.join(dest_filename))


def chunked_save_df_to_jsonl(df: pd.DataFrame,
                             output_folder: RichPath,
                             num_chunks: int = None,
                             parallel: bool = True) -> None:
    "Chunk DataFrame (n chunks = num cores) and save as jsonl files."
    df.reset_index(drop=True, inplace=True)

    # parallel saving to jsonl files on azure
    n = cpu_count() if num_chunks is None else num_chunks
    dfs = chunkify(df, n)
    args = zip(dfs, [output_folder] * len(dfs), range(len(dfs)))

    if not parallel:
        for arg in args:
            dest_filename = df_to_jsonl(*arg)
            print(f'Wrote chunk to {dest_filename}')
    else:
        with Pool(cpu_count()) as pool:
            pool.starmap(df_to_jsonl, args)
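A small usage sketch for chunked_save_df_to_jsonl (not in the original module); the DataFrame contents and the local output folder are assumptions, and the folder is assumed to already exist.

import pandas as pd
from dpu_utils.utils import RichPath

df = pd.DataFrame({'code': ["print(1)", "print(2)"], 'language': ['python', 'python']})
out_dir = RichPath.create('/tmp/jsonl_chunks')  # any RichPath (local or Azure) should work here
chunked_save_df_to_jsonl(df, out_dir, num_chunks=2, parallel=False)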
# terrascript/data/davidji99/herokux.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:18:42 UTC)
import terrascript


class herokux_addons(terrascript.Data):
    pass


class herokux_kafka_mtls_iprules(terrascript.Data):
    pass


class herokux_postgres_mtls_certificate(terrascript.Data):
    pass


class herokux_registry_image(terrascript.Data):
    pass


__all__ = [
    "herokux_addons",
    "herokux_kafka_mtls_iprules",
    "herokux_postgres_mtls_certificate",
    "herokux_registry_image",
]
""" Various simple (basic) functions in the "utilities". The MIT License (MIT) Originally created at 8/31/20, for Python 3.x Copyright (c) 2021 <NAME> (<EMAIL>) & Stanford Geometric Computing Lab """<import_stmt>torch<import_stmt>multiprocessing<as>mp<import_stmt>dask.dataframe<as>dd<import_from_stmt>torch nn<import_from_stmt>sklearn.model_selection train_test_split<def_stmt>iterate_in_chunks l n<block_start>"""Yield successive 'n'-sized chunks from iterable 'l'. Note: last chunk will be smaller than l if n doesn't divide l perfectly. """<for_stmt>i range(0 len(l) n)<block_start><yield>l[i:i+n]<block_end><block_end><def_stmt>df_parallel_column_apply df func column_name<block_start>n_partitions=mp.cpu_count()<times>4<line_sep>d_data=dd.from_pandas(df npartitions=n_partitions)<line_sep>res=d_data.map_partitions(<lambda>df:df.apply((<lambda>row:func(row[column_name])) axis=1)).compute(scheduler='processes')<line_sep><return>res<block_end><def_stmt>cross_entropy pred soft_targets<block_start>""" pred: unscaled logits soft_targets: target-distributions (i.e., sum to 1) """<line_sep>logsoftmax=nn.LogSoftmax(dim=1)<line_sep><return>torch.mean(torch.sum(-soft_targets<times>logsoftmax(pred) 1))<block_end><def_stmt>make_train_test_val_splits datataset_df loads random_seed unique_id_column=<none><block_start>""" Split the data into train/val/test. :param datataset_df: pandas Dataframe containing the dataset (e.g., ArtEmis) :param loads: list with the three floats summing to one for train/val/test :param random_seed: int :return: changes the datataset_df in-place to include a column ("split") indicating the split of each row """<if_stmt>sum(loads)<ne>1<block_start><raise>ValueError()<block_end>train_size,val_size,test_size=loads<line_sep>print("Using a {},{},{} for train/val/test purposes".format(train_size val_size test_size))<line_sep>df=datataset_df<line_sep>## unique id <if_stmt>unique_id_column<is><none><block_start>unique_id=df.art_style+df.painting# default for ArtEmis <block_end><else_stmt><block_start>unique_id=df[unique_id_column]<block_end>unique_ids=unique_id.unique()<line_sep>unique_ids.sort()<line_sep>train,rest=train_test_split(unique_ids test_size=val_size+test_size random_state=random_seed)<line_sep>train=set(train)<if_stmt>val_size<ne>0<block_start>val,test=train_test_split(rest test_size=round(test_size<times>len(unique_ids)) random_state=random_seed)<block_end><else_stmt><block_start>test=rest<block_end>test=set(test)<assert_stmt>len(test.intersection(train))<eq>0<def_stmt>mark_example x<block_start><if_stmt>x<in>train<block_start><return>'train'<block_end><elif_stmt>x<in>test<block_start><return>'test'<block_end><else_stmt><block_start><return>'val'<block_end><block_end>df=df.assign(split=unique_id.apply(mark_example))<line_sep><return>df<block_end>
""" Generic LibGAP-based Group This is useful if you need to use a GAP group implementation in Sage that does not have a dedicated Sage interface. If you want to implement your own group class, you should not derive from this but directly from :class:`~sage.groups.libgap_wrapper.ParentLibGAP`. EXAMPLES:: sage: F.<a,b> = FreeGroup() sage: G_gap = libgap.Group([ (a*b^2).gap() ]) sage: from sage.groups.libgap_group import GroupLibGAP sage: G = GroupLibGAP(G_gap); G Group([ a*b^2 ]) sage: type(G) <class 'sage.groups.libgap_group.GroupLibGAP_with_category'> sage: G.gens() (a*b^2,) """<line_sep>############################################################################## # Copyright (C) 2013 <NAME> <<EMAIL>> # # Distributed under the terms of the GNU General Public License (GPL) # # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ ############################################################################## <import_from_stmt>sage.groups.group Group<import_from_stmt>sage.groups.libgap_wrapper ParentLibGAP ElementLibGAP<import_from_stmt>sage.groups.libgap_mixin GroupMixinLibGAP<class_stmt>GroupLibGAP(GroupMixinLibGAP Group ParentLibGAP)<block_start>Element=ElementLibGAP<def_stmt>__init__ self *args **kwds<block_start>""" Group interface for LibGAP-based groups. INPUT: Same as :class:`~sage.groups.libgap_wrapper.ParentLibGAP`. TESTS:: sage: F.<a,b> = FreeGroup() sage: G_gap = libgap.Group([ (a*b^2).gap() ]) sage: from sage.groups.libgap_group import GroupLibGAP sage: G = GroupLibGAP(G_gap); G Group([ a*b^2 ]) sage: g = G.gen(0); g a*b^2 sage: TestSuite(G).run(skip=['_test_pickling', '_test_elements']) sage: TestSuite(g).run(skip=['_test_pickling']) """<line_sep>ParentLibGAP.__init__(self *args **kwds)<line_sep>Group.__init__(self)<block_end><block_end>
import torch
import torch.distributed as dist

from common import distributed_test

import pytest


@distributed_test(world_size=3)
def test_init():
    assert dist.is_initialized()
    assert dist.get_world_size() == 3
    assert dist.get_rank() < 3


# Demonstration of pytest's parameterization
@pytest.mark.parametrize('number,color', [(1138, 'purple')])
def test_dist_args(number, color):
    """Outer test function with inputs from pytest.mark.parametrize(). Uses a
    distributed helper function.
    """

    @distributed_test(world_size=2)
    def _test_dist_args_helper(x, color='red'):
        assert dist.get_world_size() == 2
        assert x == 1138
        assert color == 'purple'

    """Ensure that we can parse args to distributed_test decorated functions. """
    _test_dist_args_helper(number, color=color)


@distributed_test(world_size=[1, 2, 4])
def test_dist_allreduce():
    x = torch.ones(1, 3).cuda() * (dist.get_rank() + 1)
    sum_of_ranks = (dist.get_world_size() * (dist.get_world_size() + 1)) // 2
    result = torch.ones(1, 3).cuda() * sum_of_ranks

    dist.all_reduce(x)
    assert torch.all(x == result)
<import_stmt>errno<import_stmt>pytest<import_from_stmt>tempfile TemporaryDirectory<import_from_stmt>unittest.mock patch<import_stmt>docker<import_stmt>escapism<import_from_stmt>repo2docker.app Repo2Docker<import_from_stmt>repo2docker.__main__ make_r2d<import_from_stmt>repo2docker.utils chdir<def_stmt>test_find_image <block_start>images=[{"RepoTags":["some-org/some-repo:latest"]}]<with_stmt>patch("repo2docker.docker.docker.APIClient")<as>FakeDockerClient<block_start>instance=FakeDockerClient.return_value<line_sep>instance.images.return_value=images<line_sep>r2d=Repo2Docker()<line_sep>r2d.output_image_spec="some-org/some-repo"<assert_stmt>r2d.find_image()<line_sep>instance.images.assert_called_with()<block_end><block_end><def_stmt>test_dont_find_image <block_start>images=[{"RepoTags":["some-org/some-image-name:latest"]}]<with_stmt>patch("repo2docker.docker.docker.APIClient")<as>FakeDockerClient<block_start>instance=FakeDockerClient.return_value<line_sep>instance.images.return_value=images<line_sep>r2d=Repo2Docker()<line_sep>r2d.output_image_spec="some-org/some-other-image-name"<assert_stmt><not>r2d.find_image()<line_sep>instance.images.assert_called_with()<block_end><block_end><def_stmt>test_image_name_remains_unchanged # if we specify an image name, it should remain unmodified <block_start><with_stmt>TemporaryDirectory()<as>src<block_start>app=Repo2Docker()<line_sep>argv=["--image-name" "a-special-name" "--no-build" src]<line_sep>app=make_r2d(argv)<line_sep>app.start()<assert_stmt>app.output_image_spec<eq>"a-special-name"<block_end><block_end><def_stmt>test_image_name_contains_sha1 repo_with_content<block_start>upstream,sha1=repo_with_content<line_sep>app=Repo2Docker()<line_sep># force selection of the git content provider by prefixing path with # file://. 
This is important as the Local content provider does not # store the SHA1 in the repo spec argv=["--no-build" "file://"+upstream]<line_sep>app=make_r2d(argv)<line_sep>app.start()<assert_stmt>app.output_image_spec.endswith(sha1[:7])<block_end><def_stmt>test_local_dir_image_name repo_with_content<block_start>upstream,sha1=repo_with_content<line_sep>app=Repo2Docker()<line_sep>argv=["--no-build" upstream]<line_sep>app=make_r2d(argv)<line_sep>app.start()<assert_stmt>app.output_image_spec.startswith("r2d"+escapism.escape(upstream escape_char="-").lower())<block_end><def_stmt>test_build_kwargs repo_with_content<block_start>upstream,sha1=repo_with_content<line_sep>argv=[upstream]<line_sep>app=make_r2d(argv)<line_sep>app.extra_build_kwargs={"somekey":"somevalue"}<with_stmt>patch.object(docker.APIClient "build")<as>builds<block_start>builds.return_value=[]<line_sep>app.build()<block_end>builds.assert_called_once()<line_sep>args,kwargs=builds.call_args<assert_stmt>"somekey"<in>kwargs<assert_stmt>kwargs["somekey"]<eq>"somevalue"<block_end><def_stmt>test_run_kwargs repo_with_content<block_start>upstream,sha1=repo_with_content<line_sep>argv=[upstream]<line_sep>app=make_r2d(argv)<line_sep>app.extra_run_kwargs={"somekey":"somevalue"}<with_stmt>patch.object(docker.DockerClient "containers")<as>containers<block_start>app.start_container()<block_end>containers.run.assert_called_once()<line_sep>args,kwargs=containers.run.call_args<assert_stmt>"somekey"<in>kwargs<assert_stmt>kwargs["somekey"]<eq>"somevalue"<block_end><def_stmt>test_root_not_allowed <block_start><with_stmt>TemporaryDirectory()<as>src patch("os.geteuid")<as>geteuid<block_start>geteuid.return_value=0<line_sep>argv=[src]<with_stmt>pytest.raises(SystemExit)<as>exc<block_start>app=make_r2d(argv)<assert_stmt>exc.code<eq>1<block_end><with_stmt>pytest.raises(ValueError)<block_start>app=Repo2Docker(repo=src run=<false>)<line_sep>app.build()<block_end>app=Repo2Docker(repo=src user_id=1000 user_name="jovyan" run=<false>)<line_sep>app.initialize()<with_stmt>patch.object(docker.APIClient "build")<as>builds<block_start>builds.return_value=[]<line_sep>app.build()<block_end>builds.assert_called_once()<block_end><block_end><def_stmt>test_dryrun_works_without_docker tmpdir capsys<block_start><with_stmt>chdir(tmpdir)<block_start><with_stmt>patch.object(docker "APIClient")<as>client<block_start>client.side_effect=docker.errors.DockerException("Error: no Docker")<line_sep>app=Repo2Docker(dry_run=<true>)<line_sep>app.build()<line_sep>captured=capsys.readouterr()<assert_stmt>"Error: no Docker"<not><in>captured.err<block_end><block_end><block_end><def_stmt>test_error_log_without_docker tmpdir capsys<block_start><with_stmt>chdir(tmpdir)<block_start><with_stmt>patch.object(docker "APIClient")<as>client<block_start>client.side_effect=docker.errors.DockerException("Error: no Docker")<line_sep>app=Repo2Docker()<with_stmt>pytest.raises(SystemExit)<block_start>app.build()<line_sep>captured=capsys.readouterr()<assert_stmt>"Error: no Docker"<in>captured.err<block_end><block_end><block_end><block_end>
from django.conf.urls.defaults import *

urlpatterns = patterns('',
    url('^$', 'market_community.views.overview', name='market_community'),
    url(r'^overview/$', 'market_community.views.overview', name='community_overview'),
    url(r'^forums/$', 'market_community.views.forums', name='community_forums'),
    url(r'^blogs/$', 'market_community.views.blogs', name='community_blogs'),
    url(r'^faq/$', 'market_community.views.faq', name='community_faq'),
    url(r'^profiles/$', 'market_community.views.profiles', name='community_profiles'),
    url(r'^profiles/(?P<letter>[\w]+)/$', 'market_community.views.profiles_list', name='community_profiles_list'),
)
<import_stmt>asyncio<import_stmt>discord<import_stmt>sys<import_from_stmt>discord.ext commands<import_from_stmt>utils checks<import_from_stmt>mods.cog Cog<class_stmt>Commands(Cog)<block_start><def_stmt>__init__ self bot<block_start>super().__init__(bot)<line_sep>self.cursor=bot.mysql.cursor<line_sep>self.escape=bot.escape<block_end>@commands.group(pass_context=<true> aliases=['setprefix' 'changeprefix'] invoke_without_command=<true> no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>prefix self ctx * txt:str=<none><block_start>"""Change the Bots Prefix for the Server"""<if_stmt>txt<is><none><block_start>sql="SELECT prefix FROM `prefix` WHERE server={0}"<line_sep>sql=sql.format(ctx.message.server.id)<line_sep>sql_channel="SELECT prefix FROM `prefix_channel` WHERE server={0} AND channel={1}"<line_sep>sql_channel=sql_channel.format(ctx.message.server.id ctx.message.channel.id)<line_sep>result=self.cursor.execute(sql).fetchall()<line_sep>result2=self.cursor.execute(sql_channel).fetchall()<if_stmt>len(result)<eq>0<block_start>server_prefix='.'<block_end><else_stmt><block_start>server_prefix=result[0]['prefix']<block_end><if_stmt>len(result2)<eq>0<block_start>channel_prefix=<none><block_end><else_stmt><block_start>channel_prefix=result2[0]['prefix']<block_end>msg="Server Prefix: `{0}`\n".format(server_prefix)<if_stmt>channel_prefix<ne><none><block_start>msg<augadd>"**Current** Channel Prefix: `{0}`".format(channel_prefix)<block_end><await>self.bot.say(msg)<line_sep><return><block_end>sql="INSERT INTO `prefix` (`server`, `prefix`, `id`) VALUES (%s, %s, %s)"<line_sep>update_sql="UPDATE `prefix` SET prefix={0} WHERE server={1}"<line_sep>update_sql=update_sql.format(self.escape(txt) ctx.message.server.id)<line_sep>check="SELECT server FROM `prefix` WHERE server={0}"<line_sep>check=check.format(ctx.message.server.id)<line_sep>result=self.cursor.execute(check).fetchall()<if_stmt>len(result)<eq>0<block_start>self.cursor.execute(sql (ctx.message.server.id txt ctx.message.author.id))<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(":white_check_mark: Set bot prefix to \"{0}\" for the server\n".format(txt))<block_end><else_stmt><block_start>self.cursor.execute(update_sql)<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(":white_check_mark: Updated bot prefix to \"{0}\" for the server".format(txt))<block_end><block_end>@prefix.command(pass_context=<true> name='channel' no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>_prefix_channel self ctx * txt:str<block_start>"""Change the Bots Prefix for the current Channel"""<line_sep>channel=ctx.message.channel<for_stmt>c ctx.message.channel_mentions<block_start>channel=c<line_sep>txt=txt.replace(channel.mention '').replace('#'+channel.name '')<block_end>sql="INSERT INTO `prefix_channel` (`server`, `prefix`, `channel`, `id`) VALUES (%s, %s, %s, %s)"<line_sep>update_sql="UPDATE `prefix_channel` SET prefix={0} WHERE server={1} AND channel={2}"<line_sep>update_sql=update_sql.format(self.escape(txt) ctx.message.server.id channel.id)<line_sep>check="SELECT * FROM `prefix_channel` WHERE server={0} AND channel={1}"<line_sep>check=check.format(ctx.message.server.id channel.id)<line_sep>result=self.cursor.execute(check).fetchall()<if_stmt>len(result)<eq>0<block_start>self.cursor.execute(sql (ctx.message.server.id txt channel.id ctx.message.author.id))<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(":white_check_mark: Set bot prefix to \"{0}\" for {1}".format(txt 
channel.mention))<block_end><else_stmt><block_start>self.cursor.execute(update_sql)<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(":white_check_mark: Updated bot prefix to \"{0}\" for {1}".format(txt channel.mention))<block_end><block_end>@prefix.command(pass_context=<true> name='reset' no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>_prefix_reset self ctx what:str=<none> channel:discord.Channel=<none><block_start>"""Reset All Custom Set Prefixes For the Bot"""<if_stmt>what<is><none><or>what<eq>"server"<block_start>sql="DELETE FROM `prefix` WHERE server={0}"<line_sep>sql=sql.format(ctx.message.server.id)<line_sep>check="SELECT * FROM `prefix` WHERE server={0}"<line_sep>check=check.format(ctx.message.server.id)<line_sep>result=self.cursor.execute(check).fetchall()<if_stmt>len(result)<eq>0<block_start><await>self.bot.say(":no_entry: Current server does **not** have a custom prefix set!")<line_sep><return><block_end><else_stmt><block_start>self.cursor.execute(sql)<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(":exclamation: **Reset server prefix**\nThis does not reset channel prefixes, run \"all\" after reset to reset all prefixes *or* \"channels\" to reset all custom channel prefixes.")<block_end><block_end><elif_stmt>what<eq>"channel"<block_start><if_stmt>channel<is><none><block_start>channel=ctx.message.channel<block_end>sql="DELETE FROM `prefix_channel` WHERE server={0} AND channel={1}"<line_sep>sql=sql.format(ctx.message.server.id channel.id)<line_sep>check="SELECT * FROM `prefix_channel` WHERE server={0} AND channel={1}"<line_sep>check=check.format(ctx.message.server.id channel.id)<line_sep>result=self.cursor.execute(check).fetchall()<if_stmt>len(result)<eq>0<block_start><await>self.bot.say(":no_entry: {0} does **not** have a custom prefix Set!\nMention the channel after \"reset channel\" for a specific channel.".format(channel.mention))<line_sep><return><block_end><else_stmt><block_start>self.cursor.execute(sql)<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(":exclamation: Reset {0}'s prefix!\nThis does **not** reset all custom channel prefixes, \"reset channels\" to do so.".format(channel.mention))<line_sep><return><block_end><block_end><elif_stmt>what<eq>"channels"<block_start>sql="DELETE FROM `prefix_channel` WHERE server={0}"<line_sep>sql=sql.format(ctx.message.server.id)<line_sep>check="SELECT * FROM `prefix_channel` WHERE server={0}"<line_sep>check=check.format(ctx.message.server.id)<line_sep>result=self.cursor.execute(check).fetchall()<if_stmt>len(result)<eq>0<block_start><await>self.bot.say(":no_entry: Server does **not** reset a custom prefix set for any channel!\nMention the channel after \"reset channel\" for a specific channel.")<line_sep><return><block_end><else_stmt><block_start>self.cursor.execute(sql)<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(":exclamation: Reset all channels custom prefixes!")<line_sep><return><block_end><block_end><elif_stmt>what<eq>"all"<or>what<eq>"everything"<block_start>sql="DELETE FROM `prefix_channel` WHERE server={0}"<line_sep>sql=sql.format(ctx.message.server.id)<line_sep>sql2="DELETE FROM `prefix` WHERE server={0}"<line_sep>sql2=sql2.format(ctx.message.server.id)<line_sep>self.cursor.execute(sql)<line_sep>self.cursor.execute(sql2)<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(":warning: Reset all custom server prefix settings!")<line_sep><return><block_end><else_stmt><block_start><await>self.bot.say(":no_entry: Invalid Option\nOptions: 
`server, channel, channels, all/everything`")<block_end><block_end>good_commands=['command' 'blacklist' 'help' 'invite']<async_keyword><def_stmt>command_toggle self t:str ctx cmd:str user=<none> msg=<true><block_start><try_stmt><block_start><if_stmt>cmd<in>self.good_commands<block_start><await>self.bot.send_message(ctx.message.channel ':no_entry: You cannot disable command: `{0}`!'.format(self.good_commands[self.good_commands.index(cmd)]))<line_sep><return><block_end><if_stmt>t<eq>'server'<block_start>sql="SELECT * FROM `command_blacklist` WHERE type='server' AND server={0} AND command={1}"<line_sep>sql=sql.format(ctx.message.server.id self.escape(cmd))<line_sep>result=self.cursor.execute(sql).fetchall()<if_stmt>len(result)<eq>0<block_start>sql='INSERT INTO `command_blacklist` (`command`, `type`, `server`) VALUES (%s, %s, %s)'<line_sep>self.cursor.execute(sql (cmd "server" ctx.message.server.id))<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':negative_squared_cross_mark: Disabled command `{0}`.'.format(cmd))<block_end><block_end><else_stmt><block_start>sql="DELETE FROM `command_blacklist` WHERE type='server' AND server={0} AND command={1}"<line_sep>sql=sql.format(ctx.message.server.id self.escape(cmd))<line_sep>self.cursor.execute(sql)<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':white_check_mark: Enabled command `{0}`.'.format(cmd))<block_end><block_end><block_end><elif_stmt>t<eq>'channel'<block_start>channel=user<line_sep>sql="SELECT * FROM `command_blacklist` WHERE type='channel' AND server={0} AND channel={1} AND command={2}"<line_sep>sql=sql.format(ctx.message.server.id channel.id self.escape(cmd))<line_sep>result=self.cursor.execute(sql).fetchall()<if_stmt>len(result)<eq>0<block_start>sql='INSERT INTO `command_blacklist` (`command`, `type`, `server`, `channel`) VALUES (%s, %s, %s, %s)'<line_sep>self.cursor.execute(sql (cmd "channel" ctx.message.server.id channel.id))<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':negative_squared_cross_mark: Disabled command `{0}` for channel {1}.'.format(cmd channel.mention))<block_end><block_end><else_stmt><block_start>sql="DELETE FROM `command_blacklist` WHERE type='channel' AND server={0} AND channel={1} AND command={2}"<line_sep>sql=sql.format(ctx.message.server.id channel.id self.escape(cmd))<line_sep>self.cursor.execute(sql)<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':white_check_mark: Enabled command `{0}` for channel {1}.'.format(cmd channel.mention))<block_end><block_end><block_end><elif_stmt>t<eq>'user'<block_start>sql="SELECT * FROM `command_blacklist` WHERE type='user' AND server={0} AND user={1} AND command={2}"<line_sep>sql=sql.format(ctx.message.server.id user.id self.escape(cmd))<line_sep>result=self.cursor.execute(sql).fetchall()<if_stmt>len(result)<eq>0<block_start>sql='INSERT INTO `command_blacklist` (`command`, `type`, `server`, `user`) VALUES (%s, %s, %s, %s)'<line_sep>self.cursor.execute(sql (cmd "user" ctx.message.server.id user.id))<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':negative_squared_cross_mark: Disabled command `{0}` for user `{1}`.'.format(cmd user))<block_end><block_end><else_stmt><block_start>sql="DELETE FROM `command_blacklist` WHERE type='user' AND server={0} AND user={1} AND 
command={2}"<line_sep>sql=sql.format(ctx.message.server.id user.id self.escape(cmd))<line_sep>self.cursor.execute(sql)<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':white_check_mark: Enabled command `{0}` for user `{1}`.'.format(cmd user))<block_end><block_end><block_end><elif_stmt>t<eq>'role'<block_start>role=user<line_sep>sql="SELECT * FROM `command_blacklist` WHERE type='role' AND server={0} AND role={1} AND command={2}"<line_sep>sql=sql.format(ctx.message.server.id role.id self.escape(cmd))<line_sep>result=self.cursor.execute(sql).fetchall()<if_stmt>len(result)<eq>0<block_start>sql='INSERT INTO `command_blacklist` (`command`, `type`, `server`, `role`) VALUES (%s, %s, %s, %s)'<line_sep>self.cursor.execute(sql (cmd "role" ctx.message.server.id role.id))<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':negative_squared_cross_mark: Disabled command `{0}` for role {1}.'.format(cmd role.mention))<block_end><block_end><else_stmt><block_start>sql="DELETE FROM `command_blacklist` WHERE type='role' AND server={0} AND role={1} AND command={2}"<line_sep>sql=sql.format(ctx.message.server.id role.id self.escape(cmd))<line_sep>self.cursor.execute(sql)<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':white_check_mark: Enabled command `{0}` for role {1}.'.format(cmd role.mention))<block_end><block_end><block_end><elif_stmt>t<eq>'global'<block_start>sql="SELECT * FROM `command_blacklist` WHERE type='global' AND command={0}"<line_sep>sql=sql.format(self.escape(cmd))<line_sep>result=self.cursor.execute(sql).fetchall()<if_stmt>len(result)<eq>0<block_start>sql='INSERT INTO `command_blacklist` (`command`, `type`) VALUES (%s, %s)'<line_sep>self.cursor.execute(sql (cmd "global"))<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':globe_with_meridians: Disabled command `{0}` globally.'.format(cmd))<block_end><block_end><else_stmt><block_start>sql="DELETE FROM `command_blacklist` WHERE type='global' AND command={0}"<line_sep>sql=sql.format(self.escape(cmd))<line_sep>self.cursor.execute(sql)<line_sep>self.cursor.commit()<if_stmt>msg<block_start><await>self.bot.send_message(ctx.message.channel ':white_check_mark: Enabled command `{0}` globally.'.format(cmd))<block_end><block_end><block_end><else_stmt><block_start><return><block_end><block_end><except_stmt>Exception<as>e<block_start><await>self.bot.send_message(ctx.message.channel str(e))<block_end><block_end><async_keyword><def_stmt>module_command_toggle self module t:str ctx<block_start><try_stmt><block_start>count=0<line_sep>disabled=[]<for_stmt>command self.bot.commands<block_start><if_stmt>self.bot.commands[command].module<eq>module<and>command<not><in>disabled<block_start>count<augadd>1<line_sep>cmd=str(self.bot.commands[command].name)<line_sep><await>self.command_toggle(t ctx cmd msg=<false>)<line_sep><await>asyncio.sleep(0.21)<line_sep>disabled.append(command)<block_end><block_end><return>count<block_end><except_stmt>Exception<as>e<block_start><await>self.bot.send_message(ctx.message.channel str(e))<block_end><block_end><async_keyword><def_stmt>get_modules self<block_start>modules=[]<for_stmt>module sys.modules<block_start><if_stmt>module.startswith('mods.')<block_start><if_stmt>module<eq>'mods.Repl'<or>module<eq>'mods.Stats'<or>module<eq>'mods.Commands'<block_start><continue><block_end>mod=module.replace('mods.' 
'')<line_sep>modules.append(mod)<block_end><block_end><return>modules<block_end>@commands.group(pass_context=<true> invoke_without_command=<true> aliases=['commands' 'cmd'] no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>command self ctx cmd:str<block_start>"""Toggle a command for the server"""<if_stmt>cmd<in>self.bot.commands<block_start>cmd=str(self.bot.commands[cmd])<line_sep><await>self.command_toggle('server' ctx cmd)<block_end><else_stmt><block_start><await>self.bot.say(':no_entry: `Command does not exist.`')<block_end><block_end>@command.command(name='toggle' aliases=['enable' 'disable'] pass_context=<true> invoke_without_command=<true> no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>cmd_toggle self ctx cmd:str<block_start>"""Server wide Command Toggle"""<if_stmt>cmd<in>self.bot.commands<block_start>cmd=str(self.bot.commands[cmd])<line_sep><await>self.command_toggle('server' ctx cmd)<block_end><else_stmt><block_start><await>self.bot.say(':no_entry: `Command does not exist.`')<block_end><block_end>@command.command(name='user' pass_context=<true> aliases=['member'] invoke_without_command=<true> no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>command_toggle_user self ctx cmd:str user:discord.User=<none><block_start>"""Toggle Command for a user"""<if_stmt>user<is><none><block_start>user=ctx.message.author<block_end><if_stmt>cmd<in>self.bot.commands<block_start>cmd=str(self.bot.commands[cmd])<line_sep><await>self.command_toggle('user' ctx cmd user)<block_end><else_stmt><block_start><await>self.bot.say(':no_entry: `Command does not exist.`')<block_end><block_end>@command.command(name='role' pass_context=<true> aliases=['rank'] invoke_without_command=<true> no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>command_toggle_role self ctx cmd:str role:discord.Role<block_start>"""Toggle Command for a role"""<if_stmt>cmd<in>self.bot.commands<block_start>cmd=str(self.bot.commands[cmd])<line_sep><await>self.command_toggle('role' ctx cmd role)<block_end><else_stmt><block_start><await>self.bot.say(':no_entry: `Command does not exist.`')<block_end><block_end>@command.command(name='channel' pass_context=<true> invoke_without_command=<true> no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>command_toggle_channel self ctx cmd:str chan:discord.Channel=<none><block_start>"""Toggle Command for a channel"""<if_stmt>chan<is><none><block_start>chan=ctx.message.channel<block_end><if_stmt>cmd<in>self.bot.commands<block_start>cmd=str(self.bot.commands[cmd])<line_sep><await>self.command_toggle('channel' ctx cmd chan)<block_end><else_stmt><block_start><await>self.bot.say(':no_entry: `Command does not exist.`')<block_end><block_end>@command.command(name='global' pass_context=<true> invoke_without_command=<true>)@checks.is_owner()<async_keyword><def_stmt>command_toggle_global self ctx cmd:str<block_start>"""Toggle command globally"""<if_stmt>cmd<in>self.bot.commands<block_start>cmd=str(self.bot.commands[cmd])<line_sep><await>self.command_toggle('global' ctx cmd)<block_end><else_stmt><block_start><await>self.bot.say(':no_entry: `Command does not exist.`')<block_end><block_end>@command.group(name='module' pass_context=<true> invoke_without_command=<true> no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>command_toggle_module self ctx module:str chan:discord.Channel=<none><block_start>"""Toggle a bot command 
module"""<try_stmt><block_start>mod=sys.modules['mods.{0}'.format(module)]<block_end><except_stmt>KeyError<block_start>modules=<await>self.get_modules()<line_sep><await>self.bot.say(':no_entry: Invalid Module\n**Modules**\n`{0}`'.format(', '.join(modules)))<line_sep><return><block_end><if_stmt>chan<block_start>count=<await>self.module_command_toggle(mod 'channel' ctx)<block_end><else_stmt><block_start>count=<await>self.module_command_toggle(mod 'server' ctx)<block_end><await>self.bot.say(':white_check_mark: Disabled **{0}** commands in module `{1}`.'.format(count module))<block_end>@command_toggle_module.command(name='list' pass_context=<true> invoke_without_command=<true>)<async_keyword><def_stmt>command_toggle_module_list self ctx<block_start>modules=<await>self.get_modules()<line_sep><await>self.bot.say(':information_source: **Modules**\n`{0}`'.format(', '.join(modules)))<block_end>@command.command(name='all' pass_context=<true> invoke_without_command=<true> no_pm=<true>)@checks.admin_or_perm(manage_server=<true>)<async_keyword><def_stmt>command_toggle_all self ctx<block_start>sql='SELECT COUNT(*) FROM `command_blacklist` WHERE server={0}'<line_sep>sql=sql.format(ctx.message.server.id)<line_sep>count=str(self.cursor.execute(sql).fetchall()[0]['COUNT(*)'])<line_sep>sql='DELETE FROM `command_blacklist` WHERE server={0}'<line_sep>sql=sql.format(ctx.message.server.id)<line_sep>self.cursor.execute(sql)<line_sep>self.cursor.commit()<line_sep><await>self.bot.say(':white_check_mark: Enabled **{0}** server command(s).'.format(count))<block_end>@command.command(name='list' pass_context=<true> invoke_without_command=<true> no_pm=<true>)<async_keyword><def_stmt>command_list self ctx<block_start>sql='SELECT * FROM `command_blacklist` WHERE server={0} OR type="global"'<line_sep>sql=sql.format(ctx.message.server.id)<line_sep>result=self.cursor.execute(sql).fetchall()<if_stmt>len(result)<eq>0<block_start><await>self.bot.say(':no_entry: Server does **not** have any commands blacklisted.')<line_sep><return><block_end>msg=''<for_stmt>s result<block_start><if_stmt>s['type']<eq>'global'<block_start>msg<augadd>':globe_with_meridians: Globaly Command Disabled: `{0}`\n'.format(s['command'])<block_end><elif_stmt>s['type']<eq>'server'<block_start>msg<augadd>':desktop: Command Disabled on Server: `{0}`\n'.format(s['command'])<block_end><elif_stmt>s['type']<eq>'channel'<block_start>msg<augadd>':arrow_right: Command Disabled in <#{0}>: `{1}`\n'.format(s['channel'] s['command'])<block_end><elif_stmt>s['type']<eq>'role'<block_start>msg<augadd>':eight_spoked_asterisk: Command Disabled for <@&{0}>: `{1}`\n'.format(s['role'] s['command'])<block_end><elif_stmt>s['type']<eq>'user'<block_start>user=discord.utils.get(self.bot.get_all_members() id=str(s['user']))<if_stmt>user<is><none><block_start>user='<@{0}> (Not Found)'.format(s['user'])<block_end>msg<augadd>':bust_in_silhouette: Command Disabled for **{0}**: `{1}`\n'.format(user s['command'])<block_end><block_end><await>self.bot.say(':white_check_mark: **Commands Disabled**\n'+msg)<block_end><block_end><def_stmt>setup bot<block_start>bot.add_cog(Commands(bot))<block_end>
# Generated by Django 3.2 on 2021-04-20 21:05 <import_stmt>capdb.storages<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('capdb' '0113_auto_20210414_1532') ]<line_sep>operations=[migrations.AlterField(model_name='caseanalysis' name='value' field=models.JSONField() ) migrations.AlterField(model_name='casebodycache' name='json' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='caseinitialmetadata' name='metadata' field=models.JSONField() ) migrations.AlterField(model_name='casemetadata' name='attorneys' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='casemetadata' name='docket_numbers' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='casemetadata' name='judges' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='casemetadata' name='no_index_elided' field=models.JSONField(blank=<true> help_text='Elided text will be shown on click. Example: {"Text to elide (must be exact match)": "Extra text that\'s currently not used. Can be left as empty string."}' null=<true>) ) migrations.AlterField(model_name='casemetadata' name='no_index_redacted' field=models.JSONField(blank=<true> help_text='Redacted text will be hidden from view and replaced with key\'s value specified above. Example: {"Text to redact (must be exact match)": "Text to replace redacted text."}' null=<true>) ) migrations.AlterField(model_name='casemetadata' name='opinions' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='casemetadata' name='parties' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='casestructure' name='opinions' field=models.JSONField() ) migrations.AlterField(model_name='datamigration' name='alto_xml_changed' field=models.JSONField() ) migrations.AlterField(model_name='datamigration' name='alto_xml_rollback' field=models.JSONField() ) migrations.AlterField(model_name='datamigration' name='case_xml_changed' field=models.JSONField() ) migrations.AlterField(model_name='datamigration' name='case_xml_rollback' field=models.JSONField() ) migrations.AlterField(model_name='datamigration' name='volume_xml_changed' field=models.JSONField() ) migrations.AlterField(model_name='datamigration' name='volume_xml_rollback' field=models.JSONField() ) migrations.AlterField(model_name='historicalcasemetadata' name='attorneys' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalcasemetadata' name='docket_numbers' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalcasemetadata' name='judges' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalcasemetadata' name='no_index_elided' field=models.JSONField(blank=<true> help_text='Elided text will be shown on click. Example: {"Text to elide (must be exact match)": "Extra text that\'s currently not used. Can be left as empty string."}' null=<true>) ) migrations.AlterField(model_name='historicalcasemetadata' name='no_index_redacted' field=models.JSONField(blank=<true> help_text='Redacted text will be hidden from view and replaced with key\'s value specified above. 
Example: {"Text to redact (must be exact match)": "Text to replace redacted text."}' null=<true>) ) migrations.AlterField(model_name='historicalcasemetadata' name='opinions' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalcasemetadata' name='parties' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalpagestructure' name='blocks' field=models.JSONField() ) migrations.AlterField(model_name='historicalpagestructure' name='duplicates' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalpagestructure' name='extra_redacted_ids' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalpagestructure' name='font_names' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalpagestructure' name='spaces' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalvolumemetadata' name='bibliographic_review' field=models.CharField(blank=<true> choices=[('No' 'No') ('Complete' 'Complete') ('Yes' 'Yes')] max_length=8 null=<true>) ) migrations.AlterField(model_name='historicalvolumemetadata' name='ingest_errors' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='historicalvolumemetadata' name='task_statuses' field=models.JSONField(default=dict help_text='Date and results of tasks run for this volume') ) migrations.AlterField(model_name='historicalvolumemetadata' name='xml_metadata' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='pagestructure' name='blocks' field=models.JSONField() ) migrations.AlterField(model_name='pagestructure' name='duplicates' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='pagestructure' name='extra_redacted_ids' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='pagestructure' name='font_names' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='pagestructure' name='spaces' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='volumemetadata' name='bibliographic_review' field=models.CharField(blank=<true> choices=[('No' 'No') ('Complete' 'Complete') ('Yes' 'Yes')] max_length=8 null=<true>) ) migrations.AlterField(model_name='volumemetadata' name='ingest_errors' field=models.JSONField(blank=<true> null=<true>) ) migrations.AlterField(model_name='volumemetadata' name='pdf_file' field=models.FileField(blank=<true> help_text='Exported volume PDF' max_length=1000 storage=capdb.storages.DownloadOverlayStorage(base_url='http://case.test:8000/download/' location='/Users/jcushman/Documents/capstone/capstone/test_data/downloads') upload_to='') ) migrations.AlterField(model_name='volumemetadata' name='task_statuses' field=models.JSONField(default=dict help_text='Date and results of tasks run for this volume') ) migrations.AlterField(model_name='volumemetadata' name='xml_metadata' field=models.JSONField(blank=<true> null=<true>) ) ]<block_end>
<import_from_stmt>tensorwatch.stream Stream<line_sep>s1=Stream(stream_name='s1' console_debug=<true>)<line_sep>s2=Stream(stream_name='s2' console_debug=<true>)<line_sep>s3=Stream(stream_name='s3' console_debug=<true>)<line_sep>s1.subscribe(s2)<line_sep>s2.subscribe(s3)<line_sep>s3.write('S3 wrote this')<line_sep>s2.write('S2 wrote this')<line_sep>s1.write('S1 wrote this')<line_sep>
<import_from_stmt>django.contrib admin<import_from_stmt>.models Bar<line_sep>@admin.register(Bar)<class_stmt>BarAdmin(admin.ModelAdmin)<block_start><pass><block_end>
""" Semantic segmentation training for OmniDet. # author: <NAME> <<EMAIL>> This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; Authors provide no warranty with the software and are not liable for anything. """<import_stmt>time<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>data_loader.woodscape_loader WoodScapeRawDataset<import_from_stmt>losses.semantic_loss CrossEntropyLoss2d FocalLoss<import_from_stmt>models.resnet ResnetEncoder<import_from_stmt>models.semantic_decoder SemanticDecoder<import_from_stmt>utils TrainUtils semantic_color_encoding IoU<class_stmt>SemanticInit(TrainUtils)<block_start><def_stmt>__init__ self args<block_start>super().__init__(args)<line_sep>semantic_class_weights=dict(woodscape_enet=([3.25 2.33 20.42 30.59 38.4 45.73 10.76 34.16 44.3 49.19]) woodscape_mfb=(0.04 0.03 0.43 0.99 2.02 4.97 0.17 1.01 3.32 20.35))<line_sep>print(f"=> Setting Class weights based on: {args.semantic_class_weighting} \n"<concat>f"=> {semantic_class_weights[args.semantic_class_weighting]}")<line_sep>semantic_class_weights=torch.tensor(semantic_class_weights[args.semantic_class_weighting]).to(args.device)<line_sep># Setup Metrics self.metric=IoU(args.semantic_num_classes args.dataset ignore_index=<none>)<if_stmt>args.semantic_loss<eq>"cross_entropy"<block_start>self.semantic_criterion=CrossEntropyLoss2d(weight=semantic_class_weights)<block_end><elif_stmt>args.semantic_loss<eq>"focal_loss"<block_start>self.semantic_criterion=FocalLoss(weight=semantic_class_weights gamma=2 size_average=<true>)<block_end>self.best_semantic_iou=0.0<line_sep>self.alpha=0.5# to blend semantic predictions with color image self.color_encoding=semantic_color_encoding(args)<block_end><block_end><class_stmt>SemanticModel(SemanticInit)<block_start><def_stmt>__init__ self args<block_start>super().__init__(args)<line_sep># --- Init model --- self.models["encoder"]=ResnetEncoder(num_layers=self.args.network_layers pretrained=<true>).to(self.device)<line_sep>self.models["semantic"]=SemanticDecoder(self.models["encoder"].num_ch_enc n_classes=args.semantic_num_classes).to(self.device)<line_sep>self.parameters_to_train<augadd>list(self.models["encoder"].parameters())<line_sep>self.parameters_to_train<augadd>list(self.models["semantic"].parameters())<if_stmt>args.use_multiple_gpu<block_start>self.models["encoder"]=torch.nn.DataParallel(self.models["encoder"])<line_sep>self.models["semantic"]=torch.nn.DataParallel(self.models["semantic"])<block_end>print(f"=> Training on the {self.args.dataset.upper()} dataset \n"<concat>f"=> Training model named: {self.args.model_name} \n"<concat>f"=> Models and tensorboard events files are saved to: {self.args.output_directory} \n"<concat>f"=> Training is using the cuda device id: {self.args.cuda_visible_devices} \n"<concat>f"=> Loading {self.args.dataset} training and validation dataset")<line_sep># --- Load Data --- train_dataset=WoodScapeRawDataset(data_path=args.dataset_dir path_file=args.train_file is_train=<true> config=args)<line_sep>self.train_loader=DataLoader(train_dataset batch_size=args.batch_size shuffle=<true> num_workers=args.num_workers pin_memory=<true> drop_last=<false>)<line_sep>val_dataset=WoodScapeRawDataset(data_path=args.dataset_dir path_file=args.val_file is_train=<false> config=args)<line_sep>self.val_loader=DataLoader(val_dataset batch_size=args.batch_size shuffle=<true> num_workers=args.num_workers pin_memory=<true> drop_last=<true>)<line_sep>print(f"=> Total number of 
training examples: {len(train_dataset)} \n"<concat>f"=> Total number of validation examples: {len(val_dataset)}")<line_sep>self.num_total_steps=len(train_dataset)<floordiv>args.batch_size<times>args.epochs<line_sep>self.configure_optimizers()<if_stmt>args.pretrained_weights<block_start>self.load_model()<block_end>self.save_args()<if_stmt>'cuda'<in>self.device<block_start>torch.cuda.synchronize()<block_end><block_end><def_stmt>semantic_train self<block_start><for_stmt>self.epoch range(self.args.epochs)# switch to train mode <block_start>self.set_train()<line_sep>data_loading_time=0<line_sep>gpu_time=0<line_sep>before_op_time=time.time()<for_stmt>batch_idx,inputs enumerate(self.train_loader)<block_start>current_time=time.time()<line_sep>data_loading_time<augadd>(current_time-before_op_time)<line_sep>before_op_time=current_time<line_sep># -- PUSH INPUTS DICT TO DEVICE -- self.inputs_to_device(inputs)<line_sep>features=self.models["encoder"](inputs["color_aug" 0 0])<line_sep>outputs=self.models["semantic"](features)<line_sep>losses=dict()<line_sep>losses["semantic_loss"]=self.semantic_criterion(outputs["semantic" 0] inputs["semantic_labels" 0 0])<line_sep># -- COMPUTE GRADIENT AND DO OPTIMIZER STEP -- self.optimizer.zero_grad()<line_sep>losses["semantic_loss"].backward()<line_sep>self.optimizer.step()<line_sep>duration=time.time()-before_op_time<line_sep>gpu_time<augadd>duration<if_stmt>batch_idx%self.args.log_frequency<eq>0<block_start>self.log_time(batch_idx duration losses["semantic_loss"].cpu().data data_loading_time gpu_time)<line_sep>self.semantic_statistics("train" inputs outputs losses)<line_sep>data_loading_time=0<line_sep>gpu_time=0<block_end>self.step<augadd>1<line_sep>before_op_time=time.time()<block_end># Validate on each step, save model on improvements val_metrics=self.semantic_val()<line_sep>print(self.epoch "IoU:" val_metrics["mean_iou"])<if_stmt>val_metrics["mean_iou"]<ge>self.best_semantic_iou<block_start>print(f"=> Saving model weights with mean_iou of {val_metrics['mean_iou']:.3f} "<concat>f"at step {self.step} on {self.epoch} epoch.")<line_sep>self.best_semantic_iou=val_metrics["mean_iou"]<line_sep>self.save_model()<block_end>self.lr_scheduler.step(val_metrics["mean_iou"])<block_end>print("Training complete!")<block_end>@torch.no_grad()<def_stmt>semantic_val self<block_start>"""Validate the semantic model"""<line_sep>self.set_eval()<line_sep>losses=dict()<for_stmt>inputs self.val_loader<block_start>self.inputs_to_device(inputs)<line_sep>features=self.models["encoder"](inputs["color" 0 0])<line_sep>outputs=self.models["semantic"](features)<line_sep>losses["semantic_loss"]=self.semantic_criterion(outputs["semantic" 0] inputs["semantic_labels" 0 0])<line_sep>_,predictions=torch.max(outputs["semantic" 0].data 1)<line_sep>self.metric.add(predictions inputs["semantic_labels" 0 0])<block_end>outputs["class_iou"],outputs["mean_iou"]=self.metric.value()<line_sep># Compute stats for the tensorboard self.semantic_statistics("val" inputs outputs losses)<line_sep>self.metric.reset()<del_stmt>inputs losses<line_sep>self.set_train()<line_sep><return>outputs<block_end><def_stmt>semantic_statistics self mode inputs outputs losses<arrow><none><block_start>writer=self.writers[mode]<for_stmt>loss,value losses.items()<block_start>writer.add_scalar(f"{loss}" value.mean() self.step)<block_end><if_stmt>mode<eq>"val"<block_start>writer.add_scalar(f"mean_iou" outputs["mean_iou"] self.step)<for_stmt>k,v outputs["class_iou"].items()<block_start>writer.add_scalar(f"class_iou/{k}" v 
self.step)<block_end><block_end>writer.add_scalar("learning_rate" self.optimizer.param_groups[0]['lr'] self.step)<for_stmt>j range(min(4 self.args.batch_size))# write maximum of four images <block_start><if_stmt>self.args.train<eq>"semantic"<block_start>writer.add_image(f"color/{j}" inputs[("color" 0 0)][j] self.step)<block_end># Predictions is one-hot encoded with "num_classes" channels. # Convert it to a single int using the indices where the maximum (1) occurs _,predictions=torch.max(outputs["semantic" 0][j].data 0)<line_sep>predictions_gray=predictions.byte().squeeze().cpu().detach().numpy()<line_sep>color_semantic=np.array(self.trans_pil(inputs[("color" 0 0)].cpu()[j].data))<line_sep>not_background=predictions_gray<ne>0<line_sep>color_semantic[not_background <ellipsis>]=(color_semantic[not_background <ellipsis>]<times>(1-self.alpha)+self.color_encoding[predictions_gray[not_background]]<times>self.alpha)<line_sep>writer.add_image(f"semantic_pred_0/{j}" color_semantic.transpose(2 0 1) self.step)<line_sep>labels=inputs["semantic_labels" 0 0][j].data<line_sep>labels_gray=labels.byte().squeeze().cpu().detach().numpy()<line_sep>labels_rgb=np.array(self.trans_pil(inputs[("color" 0 0)].cpu()[j].data))<line_sep>not_background=labels_gray<ne>0<line_sep>labels_rgb[not_background <ellipsis>]=(labels_rgb[not_background <ellipsis>]<times>(1-self.alpha)+self.color_encoding[labels_gray[not_background]]<times>self.alpha)<line_sep>writer.add_image(f"semantic_labels_0/{j}" labels_rgb.transpose(2 0 1) self.step)<block_end><block_end><block_end>
<import_stmt>re<import_stmt>string<import_stmt>tensorflow_hub<as>hub<import_from_stmt>scipy.spatial.distance cdist<line_sep>module_url="https://tfhub.dev/google/universal-sentence-encoder/4"<class_stmt>SimilarityModel()<block_start><def_stmt>__init__ self<block_start>print("Loading model from tf hub...")<line_sep>self.model=hub.load(module_url)<line_sep>print("module %s loaded"%module_url)<block_end><def_stmt>process_text self text<block_start>'''Clean text by removing unnecessary characters and altering the format of words.'''<line_sep>re_print=re.compile('[^%s]'%re.escape(string.printable))<line_sep>text=text.lower()<line_sep>text=re.sub(r"i'm" "i am" text)<line_sep>text=re.sub(r"he's" "he is" text)<line_sep>text=re.sub(r"she's" "she is" text)<line_sep>text=re.sub(r"it's" "it is" text)<line_sep>text=re.sub(r"that's" "that is" text)<line_sep>text=re.sub(r"what's" "what is" text)<line_sep>text=re.sub(r"where's" "where is" text)<line_sep>text=re.sub(r"how's" "how is" text)<line_sep>text=re.sub(r"\'ll" " will" text)<line_sep>text=re.sub(r"\'ve" " have" text)<line_sep>text=re.sub(r"\'re" " are" text)<line_sep>text=re.sub(r"\'d" " would" text)<line_sep>text=re.sub(r"\'re" " are" text)<line_sep>text=re.sub(r"won't" "will not" text)<line_sep>text=re.sub(r"can't" "cannot" text)<line_sep>text=re.sub(r"n't" " not" text)<line_sep>text=re.sub(r"n'" "ng" text)<line_sep>text=re.sub(r"'bout" "about" text)<line_sep>text=re.sub(r"'til" "until" text)<line_sep>text=re.sub(r"[$-()\"#/@;:<>{}`+=~|.!?,'*-^]" "" text)<line_sep>text=text.split()<line_sep>text=[re_print.sub('' w)<for>w text]<line_sep><return>' '.join(text)<block_end><def_stmt>similarity self sentence1 sentence2<block_start>processed_sent1=self.process_text(sentence1)<line_sep>processed_sent2=self.process_text(sentence2)<line_sep>sent_vector1=self.model([processed_sent1])<line_sep>sent_vector2=self.model([processed_sent2])<line_sep>similarities=cdist(sent_vector1 sent_vector2 metric='cosine')<line_sep><return>similarities<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>sim_model=SimilarityModel()<line_sep>sentence1="<NAME>"<line_sep>sentence2="I want money"<line_sep>distance=sim_model.similarity(sentence1 sentence2)<line_sep>print("Similarity score is: " 1-distance[0][0])<block_end>
<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>modules.envelope Envelope<import_from_stmt>modules.initializers GlorotOrthogonal<class_stmt>EmbeddingBlock(nn.Module)<block_start><def_stmt>__init__ self emb_size num_radial bessel_funcs cutoff envelope_exponent num_atom_types=95 activation=<none><block_start>super(EmbeddingBlock self).__init__()<line_sep>self.bessel_funcs=bessel_funcs<line_sep>self.cutoff=cutoff<line_sep>self.activation=activation<line_sep>self.envelope=Envelope(envelope_exponent)<line_sep>self.embedding=nn.Embedding(num_atom_types emb_size)<line_sep>self.dense_rbf=nn.Linear(num_radial emb_size)<line_sep>self.dense=nn.Linear(emb_size<times>3 emb_size)<line_sep>self.reset_params()<block_end><def_stmt>reset_params self<block_start>nn.init.uniform_(self.embedding.weight a=-np.sqrt(3) b=np.sqrt(3))<line_sep>GlorotOrthogonal(self.dense_rbf.weight)<line_sep>GlorotOrthogonal(self.dense.weight)<block_end><def_stmt>edge_init self edges<block_start>""" msg emb init """<line_sep># m init rbf=self.dense_rbf(edges.data['rbf'])<if_stmt>self.activation<is><not><none><block_start>rbf=self.activation(rbf)<block_end>m=torch.cat([edges.src['h'] edges.dst['h'] rbf] dim=-1)<line_sep>m=self.dense(m)<if_stmt>self.activation<is><not><none><block_start>m=self.activation(m)<block_end># rbf_env init d_scaled=edges.data['d']/self.cutoff<line_sep>rbf_env=[f(d_scaled)<for>f self.bessel_funcs]<line_sep>rbf_env=torch.stack(rbf_env dim=1)<line_sep>d_cutoff=self.envelope(d_scaled)<line_sep>rbf_env=d_cutoff[: <none>]<times>rbf_env<line_sep><return>{'m':m 'rbf_env':rbf_env}<block_end><def_stmt>forward self g<block_start>g.ndata['h']=self.embedding(g.ndata['Z'])<line_sep>g.apply_edges(self.edge_init)<line_sep><return>g<block_end><block_end>
# # Copyright (c) 2016-present, Cisco Systems, Inc. All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # <import_from_stmt>django.core.management.base BaseCommand CommandError<import_from_stmt>dashboard.models Alert<import_from_stmt>django.db.models Q<class_stmt>Command(BaseCommand)<block_start>help='Removes ALL alerts.'<def_stmt>handle self *args **options<block_start>Alert.objects.all().delete()<block_end><block_end>
# -*- coding: utf-8 -*- """ colorful ~~~~~~~~ Terminal string styling done right, in Python. :copyright: (c) 2017 by <NAME> <<EMAIL>> :license: MIT, see LICENSE for more details. """<import_stmt>sys<import_stmt>colorful<def_stmt>show <block_start>""" Show the modifiers and colors """<line_sep># modifiers sys.stdout.write(colorful.bold('bold')+' ')<line_sep>sys.stdout.write(colorful.dimmed('dimmed')+' ')<line_sep>sys.stdout.write(colorful.italic('italic')+' ')<line_sep>sys.stdout.write(colorful.underlined('underlined')+' ')<line_sep>sys.stdout.write(colorful.inversed('inversed')+' ')<line_sep>sys.stdout.write(colorful.concealed('concealed')+' ')<line_sep>sys.stdout.write(colorful.struckthrough('struckthrough')+'\n')<line_sep># foreground colors sys.stdout.write(colorful.red('red')+' ')<line_sep>sys.stdout.write(colorful.green('green')+' ')<line_sep>sys.stdout.write(colorful.yellow('yellow')+' ')<line_sep>sys.stdout.write(colorful.blue('blue')+' ')<line_sep>sys.stdout.write(colorful.magenta('magenta')+' ')<line_sep>sys.stdout.write(colorful.cyan('cyan')+' ')<line_sep>sys.stdout.write(colorful.white('white')+'\n')<line_sep># background colors sys.stdout.write(colorful.on_red('red')+' ')<line_sep>sys.stdout.write(colorful.on_green('green')+' ')<line_sep>sys.stdout.write(colorful.on_yellow('yellow')+' ')<line_sep>sys.stdout.write(colorful.on_blue('blue')+' ')<line_sep>sys.stdout.write(colorful.on_magenta('magenta')+' ')<line_sep>sys.stdout.write(colorful.on_cyan('cyan')+' ')<line_sep>sys.stdout.write(colorful.on_white('white')+'\n')<block_end><if_stmt>__name__<eq>'__main__'<block_start>show()<block_end>
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization utils for RoFormer."""<import_from_stmt>typing List<import_from_stmt>tokenizers NormalizedString PreTokenizedString normalizers<class_stmt>JiebaPreTokenizer<block_start><def_stmt>__init__ self vocab<arrow><none><block_start>self.vocab=vocab<line_sep>self.normalizers=normalizers.BertNormalizer(clean_text=<false> handle_chinese_chars=<true> strip_accents=<false> lowercase=<false> )<try_stmt><block_start><import_stmt>rjieba<block_end><except_stmt>ImportError<block_start><raise>ImportError("You need to install rjieba to use RoFormerTokenizer. "<concat>"See https://pypi.org/project/rjieba/ for installation.")<block_end>self.jieba=rjieba<block_end><def_stmt>jieba_split self i:int normalized_string:NormalizedString<arrow>List[NormalizedString]<block_start>splits=[]<line_sep># this code slice normalized_string is too slow (6s) but test_alignement_methods can pass <for_stmt>token,start,end self.jieba.tokenize(str(normalized_string) hmm=<false>)<block_start><if_stmt>token<in>self.vocab<block_start>splits.append(normalized_string[start:end])<block_end><else_stmt><block_start>token_list=self.normalizers.normalize_str(token).split()<for_stmt>token token_list<block_start><if_stmt>token<block_start>end=start+len(token)<line_sep>splits.append(normalized_string[start:end])<line_sep>start=end<block_end><block_end><block_end><block_end># this code test_alignement_methods can't pass but fast (300ms) # for token in self.jieba.cut(str(normalized_string), False): # if token in self.vocab: # splits.append(NormalizedString(token)) # else: # token_list = self.normalizers.normalize_str(token).split() # for token in token_list: # if token: # splits.append(NormalizedString(token)) <return>splits<block_end><def_stmt>pre_tokenize self pretok:PreTokenizedString<block_start>pretok.split(self.jieba_split)<block_end><block_end>
<import_stmt>logging<import_from_stmt>.. ondisk<import_from_stmt>.. datasets<import_from_stmt>zfs.posix.attributes POSIXAttrs_for<line_sep>logger=logging.getLogger(__name__)<class_stmt>PosixObject(object)<block_start><def_stmt>__init__ self dnode:ondisk.DNode dataset:datasets.Dataset<arrow><none><block_start>self.attrs=POSIXAttrs_for(dataset)(dnode.bonus)<line_sep>self.dataset=dataset<line_sep>self.dnode=dnode<block_end><block_end><import_from_stmt>.posix_file File<import_from_stmt>.directory Directory<line_sep>
# Copyright 2021 The ML Collections Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for ml_collections.FieldReference."""<import_stmt>operator<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>ml_collections<import_from_stmt>ml_collections.config_dict config_dict<class_stmt>FieldReferenceTest(parameterized.TestCase)<block_start><def_stmt>_test_binary_operator self initial_value other_value op true_value new_initial_value new_true_value assert_fn=<none><block_start>"""Helper for testing binary operators. Generally speaking this checks that: 1. `op(initial_value, other_value) COMP true_value` 2. `op(new_initial_value, other_value) COMP new_true_value where `COMP` is the comparison function defined by `assert_fn`. Args: initial_value: Initial value for the `FieldReference`, this is the first argument for the binary operator. other_value: The second argument for the binary operator. op: The binary operator. true_value: The expected output of the binary operator. new_initial_value: The value that the `FieldReference` is changed to. new_true_value: The expected output of the binary operator after the `FieldReference` has changed. assert_fn: Function used to check the output values. """<if_stmt>assert_fn<is><none><block_start>assert_fn=self.assertEqual<block_end>ref=ml_collections.FieldReference(initial_value)<line_sep>new_ref=op(ref other_value)<line_sep>assert_fn(new_ref.get() true_value)<line_sep>config=ml_collections.ConfigDict()<line_sep>config.a=initial_value<line_sep>config.b=other_value<line_sep>config.result=op(config.get_ref('a') config.b)<line_sep>assert_fn(config.result true_value)<line_sep>config.a=new_initial_value<line_sep>assert_fn(config.result new_true_value)<block_end><def_stmt>_test_unary_operator self initial_value op true_value new_initial_value new_true_value assert_fn=<none><block_start>"""Helper for testing unary operators. Generally speaking this checks that: 1. `op(initial_value) COMP true_value` 2. `op(new_initial_value) COMP new_true_value where `COMP` is the comparison function defined by `assert_fn`. Args: initial_value: Initial value for the `FieldReference`, this is the first argument for the unary operator. op: The unary operator. true_value: The expected output of the unary operator. new_initial_value: The value that the `FieldReference` is changed to. new_true_value: The expected output of the unary operator after the `FieldReference` has changed. assert_fn: Function used to check the output values. 
"""<if_stmt>assert_fn<is><none><block_start>assert_fn=self.assertEqual<block_end>ref=ml_collections.FieldReference(initial_value)<line_sep>new_ref=op(ref)<line_sep>assert_fn(new_ref.get() true_value)<line_sep>config=ml_collections.ConfigDict()<line_sep>config.a=initial_value<line_sep>config.result=op(config.get_ref('a'))<line_sep>assert_fn(config.result true_value)<line_sep>config.a=new_initial_value<line_sep>assert_fn(config.result new_true_value)<block_end><def_stmt>testBasic self<block_start>ref=ml_collections.FieldReference(1)<line_sep>self.assertEqual(ref.get() 1)<block_end><def_stmt>testGetRef self<block_start>config=ml_collections.ConfigDict()<line_sep>config.a=1.<line_sep>config.b=config.get_ref('a')+10<line_sep>config.c=config.get_ref('b')+10<line_sep>self.assertEqual(config.c 21.0)<block_end><def_stmt>testFunction self<block_start><def_stmt>fn x<block_start><return>x+5<block_end>config=ml_collections.ConfigDict()<line_sep>config.a=1<line_sep>config.b=fn(config.get_ref('a'))<line_sep>config.c=fn(config.get_ref('b'))<line_sep>self.assertEqual(config.b 6)<line_sep>self.assertEqual(config.c 11)<line_sep>config.a=2<line_sep>self.assertEqual(config.b 7)<line_sep>self.assertEqual(config.c 12)<block_end><def_stmt>testCycles self<block_start>config=ml_collections.ConfigDict()<line_sep>config.a=1.<line_sep>config.b=config.get_ref('a')+10<line_sep>config.c=config.get_ref('b')+10<line_sep>self.assertEqual(config.b 11.0)<line_sep>self.assertEqual(config.c 21.0)<line_sep># Introduce a cycle <with_stmt>self.assertRaisesRegex(config_dict.MutabilityError 'cycle')<block_start>config.a=config.get_ref('c')-1.0<block_end># Introduce a cycle on second operand <with_stmt>self.assertRaisesRegex(config_dict.MutabilityError 'cycle')<block_start>config.a=ml_collections.FieldReference(5.0)+config.get_ref('c')<block_end># We can create multiple FieldReferences that all point to the same object l=[0]<line_sep>config=ml_collections.ConfigDict()<line_sep>config.a=l<line_sep>config.b=l<line_sep>config.c=config.get_ref('a')+['c']<line_sep>config.d=config.get_ref('b')+['d']<line_sep>self.assertEqual(config.c [0 'c'])<line_sep>self.assertEqual(config.d [0 'd'])<line_sep># Make sure nothing was mutated self.assertEqual(l [0])<line_sep>self.assertEqual(config.c [0 'c'])<line_sep>config.a=[1]<line_sep>config.b=[2]<line_sep>self.assertEqual(l [0])<line_sep>self.assertEqual(config.c [1 'c'])<line_sep>self.assertEqual(config.d [2 'd'])<block_end>@parameterized.parameters({'initial_value':1 'other_value':2 'true_value':3 'new_initial_value':10 'new_true_value':12} {'initial_value':2.0 'other_value':2.5 'true_value':4.5 'new_initial_value':3.7 'new_true_value':6.2} {'initial_value':'hello, ' 'other_value':'world!' 'true_value':'hello, world!' 
'new_initial_value':'foo, ' 'new_true_value':'foo, world!'} {'initial_value':['hello'] 'other_value':['world'] 'true_value':['hello' 'world'] 'new_initial_value':['foo'] 'new_true_value':['foo' 'world']} {'initial_value':ml_collections.FieldReference(10) 'other_value':ml_collections.FieldReference(5.0) 'true_value':15.0 'new_initial_value':12 'new_true_value':17.0} {'initial_value':config_dict.placeholder(float) 'other_value':7.0 'true_value':<none> 'new_initial_value':12 'new_true_value':19.0} {'initial_value':5.0 'other_value':config_dict.placeholder(float) 'true_value':<none> 'new_initial_value':8.0 'new_true_value':<none>} {'initial_value':config_dict.placeholder(str) 'other_value':'tail' 'true_value':<none> 'new_initial_value':'head' 'new_true_value':'headtail'})<def_stmt>testAdd self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.add true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':5 'other_value':3 'true_value':2 'new_initial_value':-1 'new_true_value':-4} {'initial_value':2.0 'other_value':2.5 'true_value':-0.5 'new_initial_value':12.3 'new_true_value':9.8} {'initial_value':set(['hello' 123 4.5]) 'other_value':set([123]) 'true_value':set(['hello' 4.5]) 'new_initial_value':set([123]) 'new_true_value':set([])} {'initial_value':ml_collections.FieldReference(10) 'other_value':ml_collections.FieldReference(5.0) 'true_value':5.0 'new_initial_value':12 'new_true_value':7.0} {'initial_value':config_dict.placeholder(float) 'other_value':7.0 'true_value':<none> 'new_initial_value':12 'new_true_value':5.0})<def_stmt>testSub self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.sub true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':1 'other_value':2 'true_value':2 'new_initial_value':3 'new_true_value':6} {'initial_value':2.0 'other_value':2.5 'true_value':5.0 'new_initial_value':3.5 'new_true_value':8.75} {'initial_value':['hello'] 'other_value':3 'true_value':['hello' 'hello' 'hello'] 'new_initial_value':['foo'] 'new_true_value':['foo' 'foo' 'foo']} {'initial_value':ml_collections.FieldReference(10) 'other_value':ml_collections.FieldReference(5.0) 'true_value':50.0 'new_initial_value':1 'new_true_value':5.0} {'initial_value':config_dict.placeholder(float) 'other_value':7.0 'true_value':<none> 'new_initial_value':12 'new_true_value':84.0})<def_stmt>testMul self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.mul true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':3 'other_value':2 'true_value':1.5 'new_initial_value':10 'new_true_value':5.0} {'initial_value':2.0 'other_value':2.5 'true_value':0.8 'new_initial_value':6.3 'new_true_value':2.52} {'initial_value':ml_collections.FieldReference(10) 'other_value':ml_collections.FieldReference(5.0) 'true_value':2.0 'new_initial_value':13 'new_true_value':2.6} {'initial_value':config_dict.placeholder(float) 'other_value':7.0 'true_value':<none> 'new_initial_value':17.5 'new_true_value':2.5})<def_stmt>testTrueDiv self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.truediv true_value new_initial_value 
new_true_value)<block_end>@parameterized.parameters({'initial_value':3 'other_value':2 'true_value':1 'new_initial_value':7 'new_true_value':3} {'initial_value':ml_collections.FieldReference(10) 'other_value':ml_collections.FieldReference(5) 'true_value':2 'new_initial_value':28 'new_true_value':5} {'initial_value':config_dict.placeholder(int) 'other_value':7 'true_value':<none> 'new_initial_value':25 'new_true_value':3})<def_stmt>testFloorDiv self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.floordiv true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':3 'other_value':2 'true_value':9 'new_initial_value':10 'new_true_value':100} {'initial_value':2.7 'other_value':3.2 'true_value':24.0084457245 'new_initial_value':6.5 'new_true_value':399.321543621} {'initial_value':ml_collections.FieldReference(10) 'other_value':ml_collections.FieldReference(5) 'true_value':1e5 'new_initial_value':2 'new_true_value':32} {'initial_value':config_dict.placeholder(float) 'other_value':3.0 'true_value':<none> 'new_initial_value':7.0 'new_true_value':343.0})<def_stmt>testPow self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.pow true_value new_initial_value new_true_value assert_fn=self.assertAlmostEqual)<block_end>@parameterized.parameters({'initial_value':3 'other_value':2 'true_value':1 'new_initial_value':10 'new_true_value':0} {'initial_value':5.3 'other_value':3.2 'true_value':2.0999999999999996 'new_initial_value':77 'new_true_value':0.2} {'initial_value':ml_collections.FieldReference(10) 'other_value':ml_collections.FieldReference(5) 'true_value':0 'new_initial_value':32 'new_true_value':2} {'initial_value':config_dict.placeholder(int) 'other_value':7 'true_value':<none> 'new_initial_value':25 'new_true_value':4})<def_stmt>testMod self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.mod true_value new_initial_value new_true_value assert_fn=self.assertAlmostEqual)<block_end>@parameterized.parameters({'initial_value':<true> 'other_value':<true> 'true_value':<true> 'new_initial_value':<false> 'new_true_value':<false>} {'initial_value':ml_collections.FieldReference(<false>) 'other_value':ml_collections.FieldReference(<false>) 'true_value':<false> 'new_initial_value':<true> 'new_true_value':<false>} {'initial_value':config_dict.placeholder(bool) 'other_value':<true> 'true_value':<none> 'new_initial_value':<false> 'new_true_value':<false>})<def_stmt>testAnd self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.and_ true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':<false> 'other_value':<false> 'true_value':<false> 'new_initial_value':<true> 'new_true_value':<true>} {'initial_value':ml_collections.FieldReference(<true>) 'other_value':ml_collections.FieldReference(<true>) 'true_value':<true> 'new_initial_value':<false> 'new_true_value':<true>} {'initial_value':config_dict.placeholder(bool) 'other_value':<false> 'true_value':<none> 'new_initial_value':<true> 'new_true_value':<true>})<def_stmt>testOr self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value 
operator.or_ true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':<false> 'other_value':<true> 'true_value':<true> 'new_initial_value':<true> 'new_true_value':<false>} {'initial_value':ml_collections.FieldReference(<true>) 'other_value':ml_collections.FieldReference(<true>) 'true_value':<false> 'new_initial_value':<false> 'new_true_value':<true>} {'initial_value':config_dict.placeholder(bool) 'other_value':<true> 'true_value':<none> 'new_initial_value':<true> 'new_true_value':<false>})<def_stmt>testXor self initial_value other_value true_value new_initial_value new_true_value<block_start>self._test_binary_operator(initial_value other_value operator.xor true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':3 'true_value':-3 'new_initial_value':-22 'new_true_value':22} {'initial_value':15.3 'true_value':-15.3 'new_initial_value':-0.2 'new_true_value':0.2} {'initial_value':ml_collections.FieldReference(7) 'true_value':ml_collections.FieldReference(-7) 'new_initial_value':123 'new_true_value':-123} {'initial_value':config_dict.placeholder(int) 'true_value':<none> 'new_initial_value':-6 'new_true_value':6})<def_stmt>testNeg self initial_value true_value new_initial_value new_true_value<block_start>self._test_unary_operator(initial_value operator.neg true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':config_dict.create(attribute=2) 'true_value':2 'new_initial_value':config_dict.create(attribute=3) 'new_true_value':3 } {'initial_value':config_dict.create(attribute={'a':1}) 'true_value':config_dict.create(a=1) 'new_initial_value':config_dict.create(attribute={'b':1}) 'new_true_value':config_dict.create(b=1) } {'initial_value':ml_collections.FieldReference(config_dict.create(attribute=2)) 'true_value':ml_collections.FieldReference(2) 'new_initial_value':config_dict.create(attribute=3) 'new_true_value':3 } {'initial_value':config_dict.placeholder(config_dict.ConfigDict) 'true_value':<none> 'new_initial_value':config_dict.create(attribute=3) 'new_true_value':3 } )<def_stmt>testAttr self initial_value true_value new_initial_value new_true_value<block_start>self._test_unary_operator(initial_value <lambda>x:x.attr('attribute') true_value new_initial_value new_true_value)<block_end>@parameterized.parameters({'initial_value':3 'true_value':3 'new_initial_value':-101 'new_true_value':101} {'initial_value':-15.3 'true_value':15.3 'new_initial_value':7.3 'new_true_value':7.3} {'initial_value':ml_collections.FieldReference(-7) 'true_value':ml_collections.FieldReference(7) 'new_initial_value':3 'new_true_value':3} {'initial_value':config_dict.placeholder(float) 'true_value':<none> 'new_initial_value':-6.25 'new_true_value':6.25})<def_stmt>testAbs self initial_value true_value new_initial_value new_true_value<block_start>self._test_unary_operator(initial_value operator.abs true_value new_initial_value new_true_value)<block_end><def_stmt>testToInt self<block_start>self._test_unary_operator(25.3 <lambda>ref:ref.to_int() 25 27.9 27)<line_sep>ref=ml_collections.FieldReference(64.7)<line_sep>ref=ref.to_int()<line_sep>self.assertEqual(ref.get() 64)<line_sep>self.assertEqual(ref._field_type int)<block_end><def_stmt>testToFloat self<block_start>self._test_unary_operator(12 <lambda>ref:ref.to_float() 12.0 0 0.0)<line_sep>ref=ml_collections.FieldReference(647)<line_sep>ref=ref.to_float()<line_sep>self.assertEqual(ref.get() 647.0)<line_sep>self.assertEqual(ref._field_type 
float)<block_end><def_stmt>testToString self<block_start>self._test_unary_operator(12 <lambda>ref:ref.to_str() '12' 0 '0')<line_sep>ref=ml_collections.FieldReference(647)<line_sep>ref=ref.to_str()<line_sep>self.assertEqual(ref.get() '647')<line_sep>self.assertEqual(ref._field_type str)<block_end><def_stmt>testSetValue self<block_start>ref=ml_collections.FieldReference(1.0)<line_sep>other=ml_collections.FieldReference(3)<line_sep>ref_plus_other=ref+other<line_sep>self.assertEqual(ref_plus_other.get() 4.0)<line_sep>ref.set(2.5)<line_sep>self.assertEqual(ref_plus_other.get() 5.5)<line_sep>other.set(110)<line_sep>self.assertEqual(ref_plus_other.get() 112.5)<line_sep># Type checking <with_stmt>self.assertRaises(TypeError)<block_start>other.set('this is a string')<block_end><with_stmt>self.assertRaises(TypeError)<block_start>other.set(ml_collections.FieldReference('this is a string'))<block_end><with_stmt>self.assertRaises(TypeError)<block_start>other.set(ml_collections.FieldReference(<none> field_type=str))<block_end><block_end><def_stmt>testSetResult self<block_start>ref=ml_collections.FieldReference(1.0)<line_sep>result=ref+1.0<line_sep>second_result=result+1.0<line_sep>self.assertEqual(ref.get() 1.0)<line_sep>self.assertEqual(result.get() 2.0)<line_sep>self.assertEqual(second_result.get() 3.0)<line_sep>ref.set(2.0)<line_sep>self.assertEqual(ref.get() 2.0)<line_sep>self.assertEqual(result.get() 3.0)<line_sep>self.assertEqual(second_result.get() 4.0)<line_sep>result.set(4.0)<line_sep>self.assertEqual(ref.get() 2.0)<line_sep>self.assertEqual(result.get() 4.0)<line_sep>self.assertEqual(second_result.get() 5.0)<line_sep># All references are broken at this point. ref.set(1.0)<line_sep>self.assertEqual(ref.get() 1.0)<line_sep>self.assertEqual(result.get() 4.0)<line_sep>self.assertEqual(second_result.get() 5.0)<block_end><def_stmt>testTypeChecking self<block_start>ref=ml_collections.FieldReference(1)<line_sep>string_ref=ml_collections.FieldReference('a')<line_sep>x=ref+string_ref<with_stmt>self.assertRaises(TypeError)<block_start>x.get()<block_end><block_end><def_stmt>testNoType self<block_start>self.assertRaisesRegex(TypeError 'field_type should be a type.*' ml_collections.FieldReference <none> 0)<block_end><def_stmt>testEqual self# Simple case <block_start>ref1=ml_collections.FieldReference(1)<line_sep>ref2=ml_collections.FieldReference(1)<line_sep>ref3=ml_collections.FieldReference(2)<line_sep>self.assertEqual(ref1 1)<line_sep>self.assertEqual(ref1 ref1)<line_sep>self.assertEqual(ref1 ref2)<line_sep>self.assertNotEqual(ref1 2)<line_sep>self.assertNotEqual(ref1 ref3)<line_sep># ConfigDict inside FieldReference ref1=ml_collections.FieldReference(ml_collections.ConfigDict({'a':1}))<line_sep>ref2=ml_collections.FieldReference(ml_collections.ConfigDict({'a':1}))<line_sep>ref3=ml_collections.FieldReference(ml_collections.ConfigDict({'a':2}))<line_sep>self.assertEqual(ref1 ml_collections.ConfigDict({'a':1}))<line_sep>self.assertEqual(ref1 ref1)<line_sep>self.assertEqual(ref1 ref2)<line_sep>self.assertNotEqual(ref1 ml_collections.ConfigDict({'a':2}))<line_sep>self.assertNotEqual(ref1 ref3)<block_end><def_stmt>testLessEqual self# Simple case <block_start>ref1=ml_collections.FieldReference(1)<line_sep>ref2=ml_collections.FieldReference(1)<line_sep>ref3=ml_collections.FieldReference(2)<line_sep>self.assertLessEqual(ref1 1)<line_sep>self.assertLessEqual(ref1 2)<line_sep>self.assertLessEqual(0 ref1)<line_sep>self.assertLessEqual(1 ref1)<line_sep>self.assertGreater(ref1 0)<line_sep>self.assertLessEqual(ref1 
ref1)<line_sep>self.assertLessEqual(ref1 ref2)<line_sep>self.assertLessEqual(ref1 ref3)<line_sep>self.assertGreater(ref3 ref1)<block_end><def_stmt>testControlFlowError self<block_start>ref1=ml_collections.FieldReference(<true>)<line_sep>ref2=ml_collections.FieldReference(<false>)<with_stmt>self.assertRaises(NotImplementedError)<block_start><if_stmt>ref1<block_start><pass><block_end><block_end><with_stmt>self.assertRaises(NotImplementedError)<block_start>_=ref1<and>ref2<block_end><with_stmt>self.assertRaises(NotImplementedError)<block_start>_=ref1<or>ref2<block_end><with_stmt>self.assertRaises(NotImplementedError)<block_start>_=<not>ref1<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
"""Simplistic wrapper decorator for Python-coded wrappers"""<import_from_stmt>OpenGL.latebind Curry<import_from_stmt>OpenGL MODULE_ANNOTATIONS<class_stmt>_LazyWrapper(Curry)<block_start>"""Marker to tell us that an object is a lazy wrapper"""<block_end><def_stmt>lazy baseFunction<block_start>"""Produce a lazy-binding decorator that uses baseFunction Allows simple implementation of wrappers where the whole of the wrapper can be summed up as do 1 thing then call base function with the cleaned up result. Passes baseFunction in as the first argument of the wrapped function, all other parameters are passed unchanged. The wrapper class created has __nonzero__ and similar common wrapper entry points defined. """<def_stmt>wrap wrapper<block_start>"""Wrap wrapper with baseFunction"""<def_stmt>__bool__ self<block_start><return>bool(baseFunction)<block_end><def_stmt>__repr__ self<block_start><return>'%s( %r )'%('OpenGL.lazywrapper.lazy' baseFunction.__name__ )<block_end>_with_wrapper=type(wrapper.__name__ (_LazyWrapper ) {'__repr__':__repr__ '__doc__':wrapper.__doc__ '__nonzero__':__bool__ '__bool__':__bool__ 'wrappedOperation':baseFunction 'restype':getattr(wrapper 'restype' getattr(baseFunction 'restype' <none>)) })<line_sep>with_wrapper=_with_wrapper(wrapper baseFunction)<line_sep>with_wrapper.__name__=wrapper.__name__<if_stmt>hasattr(baseFunction '__module__')<block_start>with_wrapper.__module__=baseFunction.__module__<block_end><return>with_wrapper<block_end><return>wrap<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_from_stmt>OpenGL.raw GLU<line_sep>func=GLU.gluNurbsCallbackData<line_sep>output=[]<def_stmt>testwrap base<block_start>"Testing"<line_sep>output.append(base)<block_end>testlazy=lazy(func)(testwrap)<line_sep>testlazy()<assert_stmt>testlazy.__doc__<eq>"Testing"<assert_stmt>testlazy.__class__.__name__<eq>'testwrap'<assert_stmt>testlazy.__name__<eq>'testwrap'<assert_stmt>testlazy.baseFunction<is>func<assert_stmt>testlazy.wrapperFunction<is>testwrap<assert_stmt>output<block_end>
# Copyright (c) Microsoft. All rights reserved. # Licensed under the MIT license. See LICENSE.md file in the project root # for full license information. # ============================================================================== <import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>cntk.ops.tests.ops_test_utils cntk_device<import_from_stmt>cntk.cntk_py DeviceKind_GPU<import_from_stmt>cntk.device try_set_default_device<import_stmt>pytest<line_sep>abs_path=os.path.dirname(os.path.abspath(__file__))<line_sep>sys.path.append(abs_path)<line_sep>sys.path.append(os.path.join(abs_path ".." ".." ".." ".." "Examples" "Image" "Classification" "ConvNet" "Python"))<import_from_stmt>prepare_test_data prepare_CIFAR10_data<import_from_stmt>ConvNet_CIFAR10_DataAug *<line_sep>TOLERANCE_ABSOLUTE=1e-1<def_stmt>test_cifar_convnet_error device_id<block_start><if_stmt>cntk_device(device_id).type()<ne>DeviceKind_GPU<block_start>pytest.skip('test only runs on GPU')<block_end>try_set_default_device(cntk_device(device_id))<line_sep>base_path=prepare_CIFAR10_data()<line_sep># change dir to locate data.zip correctly os.chdir(base_path)<import_from_stmt>_cntk_py set_fixed_random_seed force_deterministic_algorithms<line_sep>set_fixed_random_seed(1)<line_sep>force_deterministic_algorithms()<line_sep>reader_train=create_reader(os.path.join(base_path 'train_map.txt') os.path.join(base_path 'CIFAR-10_mean.xml') <false>)<line_sep>model=create_convnet_cifar10_model(num_classes=10)<line_sep>model.update_signature((num_channels image_height image_width))<line_sep>criterion=create_criterion_function(model normalize=<lambda>x:x/256)<line_sep>train_loss,metric=train_model(reader_train model criterion epoch_size=128 max_epochs=5)<line_sep>expected_loss_metric=(2.2963 0.9062)<assert_stmt>np.allclose((train_loss metric) expected_loss_metric atol=TOLERANCE_ABSOLUTE)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_cifar_convnet_error(0)<block_end>
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """All parameters."""<import_from_stmt>makani.config mconfig<line_sep>@mconfig.Config(deps={'control':'common.control.control_params' 'monitor':'common.monitor.monitor_params' 'sim':'common.sim.sim_params' 'system':mconfig.WING_MODEL+'.system_params'})<def_stmt>MakeParams params<block_start><return>{'control':params['control'] 'monitor':params['monitor'] 'sim':params['sim'] 'system':params['system']}<block_end>
""" New trainer faster than ever """<import_from_stmt>metrics.metrics Metrics<import_from_stmt>utils.reporter Reporter<import_from_stmt>utils.misc timeit<import_from_stmt>tqdm tqdm<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>matplotlib<import_stmt>time<line_sep>matplotlib.use('Agg')<import_stmt>matplotlib.pyplot<as>plt<class_stmt>NewTrain(object)<block_start><def_stmt>__init__ self args sess model<block_start>print("\nTraining is initializing itself\n")<line_sep>self.args=args<line_sep>self.sess=sess<line_sep>self.model=model<line_sep># shortcut for model params self.params=self.model.params<line_sep># To initialize all variables self.init=<none><line_sep>self.init_model()<line_sep># Create a saver object self.saver=tf.train.Saver(max_to_keep=self.args.max_to_keep keep_checkpoint_every_n_hours=10 save_relative_paths=<true>)<line_sep>self.saver_best=tf.train.Saver(max_to_keep=1 save_relative_paths=<true>)<line_sep># Load from latest checkpoint if found self.load_model()<line_sep>################################################################################## # Init summaries # Summary variables self.scalar_summary_tags=['mean_iou_on_val' 'train-loss-per-epoch' 'val-loss-per-epoch' 'train-acc-per-epoch' 'val-acc-per-epoch']<line_sep>self.images_summary_tags=[('train_prediction_sample' [<none> self.params.img_height self.params.img_width<times>2 3]) ('val_prediction_sample' [<none> self.params.img_height self.params.img_width<times>2 3])]<line_sep>self.summary_tags=[]<line_sep>self.summary_placeholders={}<line_sep>self.summary_ops={}<line_sep># init summaries and it's operators self.init_summaries()<line_sep># Create summary writer self.summary_writer=tf.summary.FileWriter(self.args.summary_dir self.sess.graph)<line_sep>################################################################################## <if_stmt>self.args.mode<eq>'train'<block_start>self.num_iterations_training_per_epoch=self.args.tfrecord_train_len<floordiv>self.args.batch_size<line_sep>self.num_iterations_validation_per_epoch=self.args.tfrecord_val_len<floordiv>self.args.batch_size<block_end><else_stmt><block_start>self.test_data=<none><line_sep>self.test_data_len=<none><line_sep>self.num_iterations_testing_per_epoch=<none><line_sep>self.load_test_data()<block_end>################################################################################## # Init metrics class self.metrics=Metrics(self.args.num_classes)<line_sep># Init reporter class <if_stmt>self.args.mode<eq>'train'<or>'overfit'<block_start>self.reporter=Reporter(self.args.out_dir+'report_train.json' self.args)<block_end><elif_stmt>self.args.mode<eq>'test'<block_start>self.reporter=Reporter(self.args.out_dir+'report_test.json' self.args)<line_sep>################################################################################## <block_end><block_end>@timeit<def_stmt>load_test_data self<block_start>print("Loading Testing data..")<line_sep>self.test_data={'X':np.load(self.args.data_dir+"X_val.npy") 'Y':np.load(self.args.data_dir+"Y_val.npy")}<line_sep>self.test_data_len=self.test_data['X'].shape[0]-self.test_data['X'].shape[0]%self.args.batch_size<line_sep>print("Test-shape-x -- "+str(self.test_data['X'].shape))<line_sep>print("Test-shape-y -- "+str(self.test_data['Y'].shape))<line_sep>self.num_iterations_testing_per_epoch=(self.test_data_len+self.args.batch_size-1)<floordiv>self.args.batch_size<line_sep>print("Test data is loaded")<block_end>@timeit<def_stmt>init_model self<block_start>print("Initializing the variables of the 
model")<line_sep>self.init=tf.group(tf.global_variables_initializer() tf.local_variables_initializer())<line_sep>self.sess.run(self.init)<line_sep>print("Initialization finished")<block_end><def_stmt>save_model self<block_start>""" Save Model Checkpoint :return: """<line_sep>print("saving a checkpoint")<line_sep>self.saver.save(self.sess self.args.checkpoint_dir self.model.global_step_tensor)<line_sep>print("Saved a checkpoint")<block_end><def_stmt>save_best_model self<block_start>""" Save BEST Model Checkpoint :return: """<line_sep>print("saving a checkpoint for the best model")<line_sep>self.saver_best.save(self.sess self.args.checkpoint_best_dir self.model.global_step_tensor)<line_sep>print("Saved a checkpoint for the best model")<block_end><def_stmt>load_best_model self<block_start>""" Load the best model checkpoint :return: """<line_sep>print("loading a checkpoint for BEST ONE")<line_sep>latest_checkpoint=tf.train.latest_checkpoint(self.args.checkpoint_best_dir)<if_stmt>latest_checkpoint<block_start>print("Loading model checkpoint {} ...\n".format(latest_checkpoint))<line_sep>self.saver_best.restore(self.sess latest_checkpoint)<block_end><else_stmt><block_start>print("ERROR NO best checkpoint found")<line_sep>exit(-1)<block_end>print("BEST MODEL LOADED..")<block_end><def_stmt>init_summaries self<block_start>""" Create the summary part of the graph :return: """<with_stmt>tf.variable_scope('train-summary-per-epoch')<block_start><for_stmt>tag self.scalar_summary_tags<block_start>self.summary_tags<augadd>tag<line_sep>self.summary_placeholders[tag]=tf.placeholder('float32' <none> name=tag)<line_sep>self.summary_ops[tag]=tf.summary.scalar(tag self.summary_placeholders[tag])<block_end><for_stmt>tag,shape self.images_summary_tags<block_start>self.summary_tags<augadd>tag<line_sep>self.summary_placeholders[tag]=tf.placeholder('float32' shape name=tag)<line_sep>self.summary_ops[tag]=tf.summary.image(tag self.summary_placeholders[tag] max_outputs=10)<block_end><block_end><block_end><def_stmt>add_summary self step summaries_dict=<none> summaries_merged=<none><block_start>""" Add the summaries to tensorboard :param step: :param summaries_dict: :param summaries_merged: :return: """<if_stmt>summaries_dict<is><not><none><block_start>summary_list=self.sess.run([self.summary_ops[tag]<for>tag summaries_dict.keys()] {self.summary_placeholders[tag]:value<for>tag,value summaries_dict.items()})<for_stmt>summary summary_list<block_start>self.summary_writer.add_summary(summary step)<block_end><block_end><if_stmt>summaries_merged<is><not><none><block_start>self.summary_writer.add_summary(summaries_merged step)<block_end><block_end>@timeit<def_stmt>load_model self<block_start>""" Load the latest checkpoint :return: """<try_stmt># This is for loading the pretrained weights if they can't be loaded during initialization. <block_start>self.model.encoder.load_pretrained_weights(self.sess)<block_end><except_stmt>AttributeError<block_start><pass><block_end>print("Searching for a checkpoint")<line_sep>latest_checkpoint=tf.train.latest_checkpoint(self.args.checkpoint_dir)<if_stmt>latest_checkpoint<block_start>print("Loading model checkpoint {} ...\n".format(latest_checkpoint))<line_sep>self.saver.restore(self.sess latest_checkpoint)<line_sep>print("Model loaded from the latest checkpoint\n")<block_end><else_stmt><block_start>print("\n.. 
No ckpt, SO First time to train :D ..\n")<block_end><block_end><def_stmt>train self<block_start>print("Training mode will begin NOW ..")<line_sep>tf.train.start_queue_runners(sess=self.sess)<line_sep>curr_lr=self.model.args.learning_rate<for_stmt>cur_epoch range(self.model.global_epoch_tensor.eval(self.sess)+1 self.args.num_epochs+1 1)# init tqdm and get the epoch value <block_start>tt=tqdm(range(self.num_iterations_training_per_epoch) total=self.num_iterations_training_per_epoch desc="epoch-"+str(cur_epoch)+"-")<line_sep># init acc and loss lists loss_list=[]<line_sep>acc_list=[]<line_sep># loop by the number of iterations <for_stmt>cur_iteration tt# get the cur_it for the summary <block_start>cur_it=self.model.global_step_tensor.eval(self.sess)<line_sep># Feed this variables to the network feed_dict={self.model.handle:self.model.training_handle self.model.is_training:<true> self.model.curr_learning_rate:curr_lr}<line_sep># Run the feed forward but the last iteration finalize what you want to do <if_stmt>cur_iteration<l>self.num_iterations_training_per_epoch-1# run the feed_forward <block_start>_,loss,acc,summaries_merged=self.sess.run([self.model.train_op self.model.loss self.model.accuracy self.model.merged_summaries] feed_dict=feed_dict)<line_sep># log loss and acc loss_list<augadd>[loss]<line_sep>acc_list<augadd>[acc]<line_sep># summarize self.add_summary(cur_it summaries_merged=summaries_merged)<block_end><else_stmt># run the feed_forward <block_start>_,loss,acc,summaries_merged,segmented_imgs=self.sess.run([self.model.train_op self.model.loss self.model.accuracy self.model.merged_summaries self.model.segmented_summary] feed_dict=feed_dict)<line_sep># log loss and acc loss_list<augadd>[loss]<line_sep>acc_list<augadd>[acc]<line_sep>total_loss=np.mean(loss_list)<line_sep>total_acc=np.mean(acc_list)<line_sep># summarize summaries_dict=dict()<line_sep>summaries_dict['train-loss-per-epoch']=total_loss<line_sep>summaries_dict['train-acc-per-epoch']=total_acc<line_sep>summaries_dict['train_prediction_sample']=segmented_imgs<line_sep>self.add_summary(cur_it summaries_dict=summaries_dict summaries_merged=summaries_merged)<line_sep># report self.reporter.report_experiment_statistics('train-acc' 'epoch-'+str(cur_epoch) str(total_acc))<line_sep>self.reporter.report_experiment_statistics('train-loss' 'epoch-'+str(cur_epoch) str(total_loss))<line_sep>self.reporter.finalize()<line_sep># Update the Global step self.model.global_step_assign_op.eval(session=self.sess feed_dict={self.model.global_step_input:cur_it+1})<line_sep># Update the Cur Epoch tensor # it is the last thing because if it is interrupted it repeat this self.model.global_epoch_assign_op.eval(session=self.sess feed_dict={self.model.global_epoch_input:cur_epoch+1})<line_sep># print in console tt.close()<line_sep>print("epoch-"+str(cur_epoch)+"-"+"loss:"+str(total_loss)+"-"+" acc:"+str(total_acc)[:6])<line_sep># Break the loop to finalize this epoch <break><block_end># Update the Global step self.model.global_step_assign_op.eval(session=self.sess feed_dict={self.model.global_step_input:cur_it+1})<block_end># Save the current checkpoint <if_stmt>cur_epoch%self.args.save_every<eq>0<block_start>self.save_model()<block_end># Test the model on validation <if_stmt>cur_epoch%self.args.test_every<eq>0<block_start>self.test_per_epoch(step=self.model.global_step_tensor.eval(self.sess) 
epoch=self.model.global_epoch_tensor.eval(self.sess))<block_end><if_stmt>cur_epoch%self.args.learning_decay_every<eq>0<block_start>curr_lr=curr_lr<times>self.args.learning_decay<line_sep>print('Current learning rate is ' curr_lr)<block_end><block_end>print("Training Finished")<block_end><def_stmt>test_per_epoch self step epoch<block_start>print("Validation at step:"+str(step)+" at epoch:"+str(epoch)+" ..")<line_sep># init tqdm and get the epoch value tt=tqdm(range(self.num_iterations_validation_per_epoch) total=self.num_iterations_validation_per_epoch desc="Val-epoch-"+str(epoch)+"-")<line_sep># init acc and loss lists loss_list=[]<line_sep>acc_list=[]<line_sep>inf_list=[]<line_sep># reset metrics self.metrics.reset()<line_sep># get the maximum iou to compare with and save the best model max_iou=self.model.best_iou_tensor.eval(self.sess)<line_sep># init dataset to validation self.sess.run(self.model.validation_iterator.initializer)<line_sep># loop by the number of iterations <for_stmt>cur_iteration tt# Feed this variables to the network <block_start>feed_dict={self.model.handle:self.model.validation_handle self.model.is_training:<false>}<line_sep># Run the feed forward but the last iteration finalize what you want to do <if_stmt>cur_iteration<l>self.num_iterations_validation_per_epoch-1<block_start>start=time.time()<line_sep># run the feed_forward next_img,out_argmax,loss,acc=self.sess.run([self.model.next_img self.model.out_argmax self.model.loss self.model.accuracy] feed_dict=feed_dict)<line_sep>end=time.time()<line_sep># log loss and acc loss_list<augadd>[loss]<line_sep>acc_list<augadd>[acc]<line_sep>inf_list<augadd>[end-start]<line_sep># log metrics self.metrics.update_metrics_batch(out_argmax next_img[1])<block_end><else_stmt><block_start>start=time.time()<line_sep># run the feed_forward next_img,out_argmax,loss,acc,segmented_imgs=self.sess.run([self.model.next_img self.model.out_argmax self.model.loss self.model.accuracy self.model.segmented_summary] feed_dict=feed_dict)<line_sep>end=time.time()<line_sep># log loss and acc loss_list<augadd>[loss]<line_sep>acc_list<augadd>[acc]<line_sep>inf_list<augadd>[end-start]<line_sep># log metrics self.metrics.update_metrics_batch(out_argmax next_img[1])<line_sep># mean over batches total_loss=np.mean(loss_list)<line_sep>total_acc=np.mean(acc_list)<line_sep>mean_iou=self.metrics.compute_final_metrics(self.num_iterations_validation_per_epoch)<line_sep>mean_iou_arr=self.metrics.iou<line_sep>mean_inference=str(np.mean(inf_list))+'-seconds'<line_sep># summarize summaries_dict=dict()<line_sep>summaries_dict['val-loss-per-epoch']=total_loss<line_sep>summaries_dict['val-acc-per-epoch']=total_acc<line_sep>summaries_dict['mean_iou_on_val']=mean_iou<line_sep>summaries_dict['val_prediction_sample']=segmented_imgs<line_sep>self.add_summary(step summaries_dict=summaries_dict)<line_sep>self.summary_writer.flush()<line_sep># report self.reporter.report_experiment_statistics('validation-acc' 'epoch-'+str(epoch) str(total_acc))<line_sep>self.reporter.report_experiment_statistics('validation-loss' 'epoch-'+str(epoch) str(total_loss))<line_sep>self.reporter.report_experiment_statistics('avg_inference_time_on_validation' 'epoch-'+str(epoch) str(mean_inference))<line_sep>self.reporter.report_experiment_validation_iou('epoch-'+str(epoch) str(mean_iou) mean_iou_arr)<line_sep>self.reporter.finalize()<line_sep># print in console 
tt.close()<line_sep>print("Val-epoch-"+str(epoch)+"-"+"loss:"+str(total_loss)+"-"+"acc:"+str(total_acc)[:6]+"-mean_iou:"+str(mean_iou))<line_sep>print("Last_max_iou: "+str(max_iou))<if_stmt>mean_iou<g>max_iou<block_start>print("This validation got a new best iou. so we will save this one")<line_sep># save the best model self.save_best_model()<line_sep># Set the new maximum self.model.best_iou_assign_op.eval(session=self.sess feed_dict={self.model.best_iou_input:mean_iou})<block_end><else_stmt><block_start>print("hmm not the best validation epoch :/..")<block_end># Break the loop to finalize this epoch <break><block_end><block_end><block_end><def_stmt>test self<block_start>print("Testing mode will begin NOW..")<line_sep># load the best model checkpoint to test on it self.load_best_model()<line_sep># init tqdm and get the epoch value tt=tqdm(range(self.test_data_len))<line_sep>naming=np.load(self.args.data_dir+'names_train.npy')<line_sep># init acc and loss lists loss_list=[]<line_sep>acc_list=[]<line_sep>img_list=[]<line_sep># idx of image idx=0<line_sep># reset metrics self.metrics.reset()<line_sep># loop by the number of iterations <for_stmt>cur_iteration tt# load mini_batches <block_start>x_batch=self.test_data['X'][idx:idx+1]<line_sep>y_batch=self.test_data['Y'][idx:idx+1]<line_sep># update idx of mini_batch idx<augadd>1<line_sep># Feed this variables to the network feed_dict={self.model.x_pl:x_batch self.model.y_pl:y_batch self.model.is_training:<false>}<line_sep># run the feed_forward out_argmax,loss,acc,summaries_merged,segmented_imgs=self.sess.run([self.model.out_argmax self.model.loss self.model.accuracy self.model.merged_summaries self.model.segmented_summary] feed_dict=feed_dict)<line_sep>np.save(self.args.out_dir+'npy/'+str(cur_iteration)+'.npy' out_argmax[0])<line_sep>plt.imsave(self.args.out_dir+'imgs/'+'test_'+str(cur_iteration)+'.png' segmented_imgs[0])<line_sep># log loss and acc loss_list<augadd>[loss]<line_sep>acc_list<augadd>[acc]<line_sep># log metrics self.metrics.update_metrics(out_argmax[0] y_batch[0] 0 0)<block_end># mean over batches total_loss=np.mean(loss_list)<line_sep>total_acc=np.mean(acc_list)<line_sep>mean_iou=self.metrics.compute_final_metrics(self.test_data_len)<line_sep># print in console tt.close()<line_sep>print("Here the statistics")<line_sep>print("Total_loss: "+str(total_loss))<line_sep>print("Total_acc: "+str(total_acc)[:6])<line_sep>print("mean_iou: "+str(mean_iou))<line_sep>print("Plotting imgs")<block_end><def_stmt>finalize self<block_start>self.reporter.finalize()<line_sep>self.summary_writer.close()<line_sep>self.save_model()<block_end><block_end>
""" Modular arithmetic """<import_from_stmt>collections defaultdict<import_stmt>numpy<as>np<class_stmt>ModInt<block_start>""" Integers of Z/pZ """<def_stmt>__init__ self a n<block_start>self.v=a%n<line_sep>self.n=n<block_end><def_stmt>__eq__ a b<block_start><if_stmt>isinstance(b ModInt)<block_start><return><not>bool(a-b)<block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><def_stmt>__hash__ self<block_start><return>hash((self.v self.n))<block_end><def_stmt>__bool__ self<block_start><return>bool(self.v)<block_end><def_stmt>__add__ a b<block_start><assert_stmt>isinstance(b ModInt)<assert_stmt>a.n<eq>b.n<line_sep><return>ModInt(a.v+b.v a.n)<block_end><def_stmt>__radd__ a b<block_start><assert_stmt>isinstance(b int)<line_sep><return>ModInt(a.v+b a.n)<block_end><def_stmt>__neg__ a<block_start><return>ModInt(-a.v a.n)<block_end><def_stmt>__sub__ a b<block_start><return>ModInt(a.v-b.v a.n)<block_end><def_stmt>__mul__ a b<block_start><if_stmt>isinstance(b int)<block_start><return>ModInt(b<times>a.v a.n)<block_end><elif_stmt>isinstance(b ModInt)<block_start><assert_stmt>a.n<eq>b.n<line_sep><return>ModInt(a.v<times>b.v a.n)<block_end><return>NotImplemented<block_end><def_stmt>__rmul__ a b<block_start><return>a<times>b<block_end><def_stmt>__pow__ P k<block_start><assert_stmt>isinstance(k int)<line_sep>V=1<line_sep>A=P<while_stmt>k<block_start><if_stmt>k&1<block_start>V<augmul>A<block_end>k<augrshift>1<if_stmt><not>k<block_start><break><block_end>A<augmul>A<block_end><return>V<block_end><def_stmt>inv self<block_start><if_stmt>self.v<eq>0<block_start><raise>ZeroDivisionError<block_end><return>ModInt(ModInt._inv(self.v self.n) self.n)<block_end>@staticmethod<def_stmt>_inv k n<block_start>k<augmod>n<if_stmt>k<eq>1<block_start><return>k<block_end><return>(n-n<floordiv>k)<times>ModInt._inv(n%k n)%n<block_end><def_stmt>__truediv__ a b<block_start><assert_stmt>isinstance(b ModInt)<assert_stmt>a.n<eq>b.n<line_sep><return>a<times>b.inv()<block_end><def_stmt>__rtruediv__ a k<block_start><assert_stmt>isinstance(k int)<line_sep><return>ModInt(k a.n)/a<block_end>@staticmethod<def_stmt>extended_euclid a b<block_start>"""Extended Euclid algorithm Return ------ x : int y : int a * x + b * y = gcd(a, b) """<line_sep>A,B=a b<line_sep>sa,sb=(1<if>a<ge>0<else>-1) (1<if>b<ge>0<else>-1)<line_sep>xp,yp=1 0<line_sep>x,y=0 1<while_stmt>b<block_start><assert_stmt>A<times>xp+B<times>yp<eq>a<assert_stmt>A<times>x+B<times>y<eq>b<line_sep>r=a<floordiv>b<line_sep>a,b=b a%b<line_sep>x,xp=xp-r<times>x x<line_sep>y,yp=yp-r<times>y y<block_end><return>sa<times>xp sb<times>yp<block_end><def_stmt>__repr__ self<block_start><return>'%s(%s, %s)'%(self.__class__.__name__ self.v self.n)<block_end><def_stmt>__str__ self<block_start><return>'%s'%self.v<block_end><block_end><class_stmt>Polynomial<block_start>""" Generic class for polynomials Works with int, float and ModInt """<def_stmt>__len__ self<block_start><return>len(self.C)<block_end><def_stmt>trim C<block_start>i=len(C)-1<while_stmt>i<ge>0<and><not>C[i]<block_start>i<augsub>1<block_end><return>C[:i+1]<block_end><def_stmt>__init__ self C=<none><block_start><if_stmt>C<is><none><block_start>C=[]<block_end>self.C=Polynomial.trim(C)<block_end>@property<def_stmt>deg self<block_start><return>len(self.C)-1<block_end><def_stmt>prime self<block_start><return>Polynomial([i<times>self[i]<for>i range(1 len(self))])<block_end><def_stmt>eval self x<block_start><if_stmt><not>self<block_start><return>0<block_end>v=self[-1]<for_stmt>c 
self[-2::-1]<block_start>v=v<times>x+c<block_end><return>v<block_end><def_stmt>shift self d<block_start><return>Polynomial([0<times>self[0]]<times>d+self.C<if>self<else>[])<block_end><def_stmt>__eq__ P Q<block_start><return>P.deg<eq>Q.deg<and>all(cP<eq>cQ<for>cP,cQ zip(P Q))<block_end><def_stmt>__hash__ self<block_start><return>hash(tuple(self.C))<block_end><def_stmt>__call__ self x<block_start><return>Polynomial.eval(self x)<block_end><def_stmt>__getitem__ self x<block_start><return>self.C[x]<block_end><def_stmt>__neg__ P<block_start><return>Polynomial([-c<for>c P.C])<block_end><def_stmt>__add__ P Q<block_start><if_stmt>len(P.C)<l>len(Q.C)<block_start>P,Q=Q P<block_end><return>Polynomial([P[d]+Q[d]<for>d range(len(Q))]+P[len(Q):])<block_end><def_stmt>__sub__ P Q<block_start><return>P+(-Q)<block_end><def_stmt>_mulpoly P Q<block_start><assert_stmt>isinstance(Q Polynomial)<line_sep><return>Polynomial([sum(P[k]<times>Q[d-k]<for>k range(max(0 d+1-len(Q)) min(d+1 len(P))))<for>d range(len(P)+len(Q)-1)])<block_end><def_stmt>_mulscal P k<block_start><return>Polynomial([k<times>c<for>c P])<block_end><def_stmt>__mul__ P Q<block_start><if_stmt>isinstance(Q Polynomial)<block_start><return>P._mulpoly(Q)<block_end><return>P._mulscal(Q)<block_end><def_stmt>__rmul__ P Q<block_start><return>P<times>Q<block_end><def_stmt>__pow__ P k<block_start><assert_stmt>isinstance(k int)<line_sep>V=1<line_sep>A=P<while_stmt>k<block_start><if_stmt>k&1<block_start>V<augmul>A<block_end>k<augrshift>1<if_stmt><not>k<block_start><break><block_end>A<augmul>A<block_end><return>V<block_end><def_stmt>__iter__ self<block_start><yield><from>self.C<block_end><def_stmt>euclidean_division A B<block_start>Q=[0<times>B[0]]<times>max(0 len(A)-len(B)+1)<while_stmt>len(A.C)<ge>len(B.C)<block_start>Q[len(A.C)-len(B.C)]=A[-1]/B[-1]<line_sep>A<augsub>B.shift(len(A)-len(B))<times>(A[-1]/B[-1])<block_end><return>Polynomial(Q) A<block_end><def_stmt>__floordiv__ A B<block_start><assert_stmt>isinstance(B Polynomial)<line_sep><return>A.euclidean_division(B)[0]<block_end><def_stmt>__mod__ A B<block_start>""" Polynomial euclidian division or modular reduction """<if_stmt>isinstance(B Polynomial)<block_start><return>A.euclidean_division(B)[1]<block_end><else_stmt><block_start><assert_stmt>isinstance(B int)<assert_stmt>all(isinstance(c int)<for>c A)<line_sep><return>A.reduceP(B)<block_end><block_end><def_stmt>__lt__ A B<block_start><return>A.deg<l>B.deg<block_end><def_stmt>__bool__ self<block_start><return>bool(self.C)<block_end><def_stmt>gcd A B<block_start><while_stmt>B<block_start>A,B=B A%B<block_end><return>A<times>(1/A[-1])<block_end>@staticmethod<def_stmt>gaussianElimKer M zero one<block_start>""" Outputs an element of the kernel of M zero and one are elements of the same field """<line_sep># V satisfies the invariant # M = V M_0 V=[Polynomial([zero]<times>i+[one])<for>i range(len(M))]<line_sep>pivots=[<none>]<times>(len(M)+1)<for_stmt>l range(len(M))<block_start><while_stmt>M[l].deg<ge>0<block_start>idp=M[l].deg<if_stmt>pivots[idp]<is><none><block_start>pivots[idp]=l<line_sep><break><block_end><else_stmt><block_start>c=M[l][idp]/M[pivots[idp]][idp]<line_sep>M[l]<augsub>c<times>M[pivots[idp]]<line_sep>V[l]<augsub>c<times>V[pivots[idp]]<block_end><block_end><else_stmt># If a line is null, we found an element of the kernel <block_start><return>V[l]<block_end><block_end><return><none><block_end><def_stmt>computeQ P# only for Z/pZ[X] square-free polynoms, for p prime <block_start>p=P[0].n<line_sep># We ignore the image of 1 because (F-Id)(1) = 0 
M=[Polynomial(([ModInt(0 p)]<times>(i<times>p))+[ModInt(1 p)])%P<for>i range(1 P.deg)]<line_sep># M -= Id <for_stmt>i range(1 P.deg)<block_start>M[i-1]<augsub>Polynomial([ModInt(0 p)]<times>i+[ModInt(1 p)])<block_end># We find an element of the kernel by Gaussian elimination pQ=Polynomial.gaussianElimKer(M ModInt(0 p) ModInt(1 p))<line_sep># We put back the 1 tha was removed <return>pQ.shift(1)<if>pQ<is><not><none><else><none><block_end><def_stmt>factor_unit P<block_start>""" Berlekamp's algorithm only in Z/pZ """<assert_stmt>all(isinstance(c ModInt)<for>c P)<assert_stmt>len(set(c.n<for>c P))<eq>1<if_stmt>P.deg<eq>1<block_start><return>defaultdict(int {P:1})<block_end>p=P[0].n<line_sep>S=Polynomial.gcd(P P.prime())<if_stmt>S.deg<eq>P.deg# P' = 0 so P = R^p <block_start>R=Polynomial(P.C[::p])<line_sep><return>defaultdict(int {D:p<times>v<for>D,v Polynomial.factor_unit(R).items()})<block_end><else_stmt><block_start>factors=defaultdict(int)<if_stmt>S.deg<block_start><for_stmt>D,v S.factor_unit().items()<block_start>factors[D]<augadd>v<block_end>P<augfloordiv>S<block_end># P is now square-free # We look for Q in Ker(F-Id) \ {1} Q=Polynomial.computeQ(P)<if_stmt>Q<is><none># P is irreducible <block_start>factors[P]<augadd>1<block_end><else_stmt># P is the product of the gcd(P, Q-i) # that are factored recursively <block_start><for_stmt>i range(p)<block_start>D=Polynomial.gcd(P Q-Polynomial([ModInt(i p)]))<if_stmt>D.deg<block_start><for_stmt>DD,v D.factor_unit().items()<block_start>factors[DD]<augadd>v<block_end><block_end><block_end><block_end><return>factors<block_end><block_end><def_stmt>factor P<block_start>""" Factorization of P only in Z/pZ """<line_sep>cd=P[-1]<if_stmt>P.deg<eq>0<block_start><return>(cd defaultdict(int))<block_end>P=P<times>(1/cd)<line_sep><return>(cd P.factor_unit())<block_end>@staticmethod<def_stmt>ppfactors fz<block_start>c,Ds=fz<line_sep>a=str(c)<if><not>Ds<or>c<times>c<ne>c<else>''<line_sep>l=[a]+[(str(D)<if>D.deg<eq>1<and><not>D[0]<else>('(%s)'%D))+(v<g>1)<times>('^%s'%v)<for>D,v sorted(Ds.items() key=<lambda>e:(e[0].deg e[1]))]<line_sep><return>'⋅'.join(i<for>i l<if>i)<block_end><def_stmt>reduceP P p<block_start><return>Polynomial([ModInt(c p)<for>c P])<block_end>@staticmethod<def_stmt>sign_changes l<block_start><return>sum(a<times>b<l>0<for>a,b zip(l l[1:]))<block_end><def_stmt>isreal P<block_start><return><not>any(isinstance(c ModInt)<for>c P)<block_end><def_stmt>isinteger P<block_start><return>all(isinstance(c int)<for>c P)<block_end><def_stmt>sturm P<block_start>""" Number of distinct real roots by Sturm's theorem. 
Only works on int or float coefficients """<line_sep>inf=float('inf')<assert_stmt>P.isreal()<line_sep>A=P<line_sep>B=A.prime()<line_sep>l1=[A(-inf)]<line_sep>l2=[A(inf)]<while_stmt>B<block_start>l1.append(B(-inf))<line_sep>l2.append(B(inf))<line_sep>B,A=-A%B B<block_end><return>Polynomial.sign_changes(l1)-Polynomial.sign_changes(l2)<block_end>@property<def_stmt>r1 P<block_start>""" Number of real roots with multiplicity """<assert_stmt>P.isreal()<line_sep>ans=0<line_sep>s=P.sturm()<while_stmt>s<block_start>ans<augadd>s<line_sep>P=P.gcd(P.prime())<line_sep>s=P.sturm()<block_end><return>ans<block_end>@property<def_stmt>r2 P<block_start>ans=P.deg-P.r1<assert_stmt>ans%2<eq>0<line_sep><return>ans<floordiv>2<block_end><def_stmt>sylvester P Q<block_start>""" Sylvester's matrix """<assert_stmt>P.isreal()<assert_stmt>Q.isreal()<line_sep>p=P.deg<line_sep>q=Q.deg<line_sep>P=np.array(P)<line_sep>Q=np.array(Q)<line_sep>m=np.zeros((p+q p+q))<for_stmt>i range(q)<block_start>m[i][i:i+p+1]=P<block_end><for_stmt>i range(p)<block_start>m[q+i][i:i+q+1]=Q<block_end><return>m<block_end><def_stmt>resultant P Q<block_start>""" Resultant of two real polynomials """<line_sep><return>np.linalg.det(P.sylvester(Q))<block_end>@property<def_stmt>disc P<block_start>""" Discriminant of a real polynomial """<line_sep>ans=P.resultant(P.prime())/P[-1]<if_stmt>P.isinteger()<block_start>ans=int(ans.round())<block_end><if_stmt>P.deg%4<in>[0 1]<block_start><return>ans<block_end><else_stmt><block_start><return>-ans<block_end><block_end><def_stmt>__repr__ self<block_start><return>'%s(%s)'%(self.__class__.__name__ self.C)<block_end>@staticmethod<def_stmt>_formatmonomial c d<block_start><assert_stmt>c<line_sep>a=b=''<if_stmt>c<times>c<ne>c<or><not>d<block_start>a=str(c)+(d<ne>0)<times>'⋅'<block_end><if_stmt>d<g>1<block_start>b='X^'+str(d)<block_end><elif_stmt>d<eq>1<block_start>b='X'<block_end><return>a+b<block_end><def_stmt>__str__ self<block_start><if_stmt><not>self.C<block_start><return>"0"<block_end>ans='+'.join(self._formatmonomial(c d)<for>(d c) reversed(list(enumerate(self)))<if>c)<line_sep><return>ans.replace("+-" "-").replace('-1⋅' '-')<block_end><block_end>
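# A minimal usage sketch appended for illustration (not part of the original module); the moduli and coefficients below are assumptions chosen so every result can be checked by hand. <if_stmt>__name__<eq>'__main__'<block_start># 3 * 5 = 15 = 2 * 7 + 1, so the inverse of 3 in Z/7Z is 5 x=ModInt(3 7)<line_sep><assert_stmt>x.inv()<eq>ModInt(5 7)<line_sep># P(X) = X^2 + 1 evaluated with Horner's rule at X = 2 gives 5 P=Polynomial([1 0 1])<line_sep><assert_stmt>P(2)<eq>5<line_sep># over Z/5Z the same polynomial splits as (X+3)(X+2), since 2 and 3 both square to -1 (mod 5) print(Polynomial.ppfactors(P.reduceP(5).factor()))<block_end>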
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=line-too-long # type: ignore """Configuration definitions for MobilenetEdgeTPU losses, learning rates, optimizers, and training."""<import_stmt>dataclasses<import_stmt>os<import_from_stmt>typing Any Mapping Optional<line_sep># Import libraries <import_from_stmt>official.core config_definitions<as>cfg<import_from_stmt>official.core exp_factory<import_from_stmt>official.modeling optimization<import_from_stmt>official.vision.beta.configs common<import_from_stmt>official.vision.beta.configs image_classification<as>base_config<line_sep>@dataclasses.dataclass<class_stmt>MobilenetEdgeTPUModelConfig(base_config.ImageClassificationModel)<block_start>"""Configuration for the MobilenetEdgeTPU model. Attributes: name: The name of the model. Defaults to 'MobilenetEdgeTPU'. model_params: A dictionary that represents the parameters of the EfficientNet model. These will be passed in to the "from_name" function. """<line_sep>model_params:Mapping[str Any]=dataclasses.field(default_factory=<lambda>:{# pylint: disable=g-long-lambda 'model_name':'mobilenet_edgetpu_v2_xs' 'model_weights_path':'' 'checkpoint_format':'tf_checkpoint' 'overrides':{'batch_norm':'tpu' 'num_classes':1001 'rescale_input':<false> 'dtype':'bfloat16'}})<block_end>@dataclasses.dataclass<class_stmt>MobilenetEdgeTPUTaskConfig(base_config.ImageClassificationTask)<block_start>"""Task definition for MobileNetEdgeTPU. Attributes: model: A `ModelConfig` instance. saved_model_path: Instead of initializing a model from the model config, the model can be loaded from a file path. """<line_sep>model:MobilenetEdgeTPUModelConfig=MobilenetEdgeTPUModelConfig()<line_sep>saved_model_path:Optional[str]=<none><block_end>IMAGENET_TRAIN_EXAMPLES=1281167<line_sep>IMAGENET_VAL_EXAMPLES=50000<line_sep>IMAGENET_INPUT_PATH_BASE='imagenet-2012-tfrecord'<def_stmt>mobilenet_edgetpu_base_experiment_config model_name:str<arrow>cfg.ExperimentConfig<block_start>"""Image classification on imagenet with mobilenet_edgetpu. Experiment config common across all mobilenet_edgetpu variants. 
Args: model_name: Name of the mobilenet_edgetpu model variant Returns: ExperimentConfig """<line_sep>train_batch_size=4096<line_sep>eval_batch_size=4096<line_sep>steps_per_epoch=IMAGENET_TRAIN_EXAMPLES<floordiv>train_batch_size<line_sep>mobilenet_edgetpu_config=MobilenetEdgeTPUModelConfig(num_classes=1001 input_size=[224 224 3])<line_sep>mobilenet_edgetpu_config.model_params.model_name=model_name<line_sep>config=cfg.ExperimentConfig(task=MobilenetEdgeTPUTaskConfig(model=mobilenet_edgetpu_config losses=base_config.Losses(label_smoothing=0.1) train_data=base_config.DataConfig(input_path=os.path.join(IMAGENET_INPUT_PATH_BASE 'train*') is_training=<true> global_batch_size=train_batch_size dtype='bfloat16' aug_type=common.Augmentation(type='autoaug')) validation_data=base_config.DataConfig(input_path=os.path.join(IMAGENET_INPUT_PATH_BASE 'valid*') is_training=<false> dtype='bfloat16' drop_remainder=<false> global_batch_size=eval_batch_size)) trainer=cfg.TrainerConfig(steps_per_loop=steps_per_epoch summary_interval=steps_per_epoch checkpoint_interval=steps_per_epoch<times>5 max_to_keep=10 train_steps=550<times>steps_per_epoch validation_steps=IMAGENET_VAL_EXAMPLES<floordiv>eval_batch_size validation_interval=steps_per_epoch optimizer_config=optimization.OptimizationConfig({'optimizer':{'type':'rmsprop' 'rmsprop':{'rho':0.9 'momentum':0.9 'epsilon':0.001 }} 'ema':{'average_decay':0.99 'trainable_weights_only':<false> } 'learning_rate':{'type':'exponential' 'exponential':{'initial_learning_rate':0.008<times>(train_batch_size<floordiv>128) 'decay_steps':int(2.4<times>steps_per_epoch) 'decay_rate':0.97 'staircase':<true>}} 'warmup':{'type':'linear' 'linear':{'warmup_steps':5<times>steps_per_epoch 'warmup_learning_rate':0}} })) restrictions=['task.train_data.is_training != None' 'task.validation_data.is_training != None'])<line_sep><return>config<block_end># Registration for MobileNet-EdgeTPU-Search models. # When this config is used, users need to specify the saved model path via # --params_override=task.saved_model_path='your/saved_model/path/'. @exp_factory.register_config_factory('mobilenet_edgetpu_search')<def_stmt>mobilenet_edgetpu_search <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_search')<block_end># Registration for MobileNet-EdgeTPU-V2 models. @exp_factory.register_config_factory('mobilenet_edgetpu_v2_tiny')<def_stmt>mobilenet_edgetpu_v2_tiny <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_tiny')<block_end># Registration for MobileNet-EdgeTPU-V2 models. @exp_factory.register_config_factory('mobilenet_edgetpu_v2_xs')<def_stmt>mobilenet_edgetpu_v2_xs <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_xs')<block_end>@exp_factory.register_config_factory('mobilenet_edgetpu_v2_s')<def_stmt>mobilenet_edgetpu_v2_s <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_s')<block_end>@exp_factory.register_config_factory('mobilenet_edgetpu_v2_m')<def_stmt>mobilenet_edgetpu_v2_m <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_m')<block_end>@exp_factory.register_config_factory('mobilenet_edgetpu_v2_l')<def_stmt>mobilenet_edgetpu_v2_l <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_l')<block_end># Registration for MobileNet-EdgeTPU-V1 models. 
@exp_factory.register_config_factory('mobilenet_edgetpu')<def_stmt>mobilenet_edgetpu <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu')<block_end># Registration for MobileNet-EdgeTPU-V1 models. # We use 'depth_multiplier' to scale the models. # E.g. dm1p25 implies depth multiplier of 1.25x @exp_factory.register_config_factory('mobilenet_edgetpu_dm1p25')<def_stmt>mobilenet_edgetpu_dm1p25 <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p25')<block_end>@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p5')<def_stmt>mobilenet_edgetpu_dm1p5 <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p5')<block_end>@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p75')<def_stmt>mobilenet_edgetpu_dm1p75 <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p75')<block_end># Registration for AutoSeg-EdgeTPU backbones @exp_factory.register_config_factory('autoseg_edgetpu_backbone_xs')<def_stmt>autoseg_edgetpu_backbone_xs <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_xs')<block_end>@exp_factory.register_config_factory('autoseg_edgetpu_backbone_s')<def_stmt>autoseg_edgetpu_backbone_s <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_s')<block_end>@exp_factory.register_config_factory('autoseg_edgetpu_backbone_m')<def_stmt>autoseg_edgetpu_backbone_m <arrow>cfg.ExperimentConfig<block_start><return>mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_m')<block_end>
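# Hypothetical lookup added for illustration (not part of the original file); it assumes exp_factory.get_exp_config is available, as elsewhere in the TF Model Garden, and simply reads back one of the configs registered above. <if_stmt>__name__<eq>'__main__'<block_start>config=exp_factory.get_exp_config('mobilenet_edgetpu_v2_xs')<line_sep>print(config.task.model.num_classes)<block_end>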
<import_from_stmt>jug TaskGenerator<import_from_stmt>jug.utils CustomHash<line_sep>hash_called=0<def_stmt>bad_hash x<block_start><global>hash_called<line_sep>hash_called<augadd>1<line_sep><return>('%s'%x).encode('utf-8')<block_end>@TaskGenerator<def_stmt>double x<block_start><return>2<times>x<block_end>one=CustomHash(1 bad_hash)<line_sep>two=double(one)<line_sep>
""" The :mod:`init` module gathers initialization procedures for model parameters """<import_stmt>FromScratchGauss FromScratchMult<import_stmt>FromScratchBernRel<import_stmt>FromSaved FromTruth<line_sep>__all__=['FromScratchGauss' 'FromSaved' 'FromTruth' 'FromScratchMult' 'FromScratchBernRel']<line_sep>
"""Unit tests for socket timeout feature."""<import_stmt>unittest<import_from_stmt>test support<line_sep># This requires the 'network' resource as given on the regrtest command line. skip_expected=<not>support.is_resource_enabled('network')<import_stmt>time<import_stmt>errno<import_stmt>socket<class_stmt>CreationTestCase(unittest.TestCase)<block_start>"""Test case for socket.gettimeout() and socket.settimeout()"""<def_stmt>setUp self<block_start>self.sock=socket.socket(socket.AF_INET socket.SOCK_STREAM)<block_end><def_stmt>tearDown self<block_start>self.sock.close()<block_end><def_stmt>testObjectCreation self# Test Socket creation <block_start>self.assertEqual(self.sock.gettimeout() <none> "timeout not disabled by default")<block_end><def_stmt>testFloatReturnValue self# Test return value of gettimeout() <block_start>self.sock.settimeout(7.345)<line_sep>self.assertEqual(self.sock.gettimeout() 7.345)<line_sep>self.sock.settimeout(3)<line_sep>self.assertEqual(self.sock.gettimeout() 3)<line_sep>self.sock.settimeout(<none>)<line_sep>self.assertEqual(self.sock.gettimeout() <none>)<block_end><def_stmt>testReturnType self# Test return type of gettimeout() <block_start>self.sock.settimeout(1)<line_sep>self.assertEqual(type(self.sock.gettimeout()) type(1.0))<line_sep>self.sock.settimeout(3.9)<line_sep>self.assertEqual(type(self.sock.gettimeout()) type(1.0))<block_end><def_stmt>testTypeCheck self# Test type checking by settimeout() <block_start>self.sock.settimeout(0)<line_sep>self.sock.settimeout(0)<line_sep>self.sock.settimeout(0.0)<line_sep>self.sock.settimeout(<none>)<line_sep>self.assertRaises(TypeError self.sock.settimeout "")<line_sep>self.assertRaises(TypeError self.sock.settimeout "")<line_sep>self.assertRaises(TypeError self.sock.settimeout ())<line_sep>self.assertRaises(TypeError self.sock.settimeout [])<line_sep>self.assertRaises(TypeError self.sock.settimeout {})<line_sep>self.assertRaises(TypeError self.sock.settimeout 0j)<block_end><def_stmt>testRangeCheck self# Test range checking by settimeout() <block_start>self.assertRaises(ValueError self.sock.settimeout -1)<line_sep>self.assertRaises(ValueError self.sock.settimeout -1)<line_sep>self.assertRaises(ValueError self.sock.settimeout -1.0)<block_end><def_stmt>testTimeoutThenBlocking self# Test settimeout() followed by setblocking() <block_start>self.sock.settimeout(10)<line_sep>self.sock.setblocking(1)<line_sep>self.assertEqual(self.sock.gettimeout() <none>)<line_sep>self.sock.setblocking(0)<line_sep>self.assertEqual(self.sock.gettimeout() 0.0)<line_sep>self.sock.settimeout(10)<line_sep>self.sock.setblocking(0)<line_sep>self.assertEqual(self.sock.gettimeout() 0.0)<line_sep>self.sock.setblocking(1)<line_sep>self.assertEqual(self.sock.gettimeout() <none>)<block_end><def_stmt>testBlockingThenTimeout self# Test setblocking() followed by settimeout() <block_start>self.sock.setblocking(0)<line_sep>self.sock.settimeout(1)<line_sep>self.assertEqual(self.sock.gettimeout() 1)<line_sep>self.sock.setblocking(1)<line_sep>self.sock.settimeout(1)<line_sep>self.assertEqual(self.sock.gettimeout() 1)<block_end><block_end><class_stmt>TimeoutTestCase(unittest.TestCase)# There are a number of tests here trying to make sure that an operation # doesn't take too much longer than expected. But competing machine # activity makes it inevitable that such tests will fail at times. # When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K # and Win98SE. Boosting it to 2.0 helped a lot, but isn't a real # solution. 
<block_start>fuzz=2.0<line_sep>localhost='127.0.0.1'<def_stmt>setUp self<block_start><raise>NotImplementedError()<block_end>tearDown=setUp<def_stmt>_sock_operation self count timeout method *args<block_start>""" Test the specified socket method. The method is run at most `count` times and must raise a socket.timeout within `timeout` + self.fuzz seconds. """<line_sep>self.sock.settimeout(timeout)<line_sep>method=getattr(self.sock method)<for_stmt>i range(count)<block_start>t1=time.time()<try_stmt><block_start>method(*args)<block_end><except_stmt>socket.timeout<as>e<block_start>delta=time.time()-t1<line_sep><break><block_end><block_end><else_stmt><block_start>self.fail('socket.timeout was not raised')<block_end># These checks should account for timing unprecision self.assertLess(delta timeout+self.fuzz)<line_sep>self.assertGreater(delta timeout-1.0)<block_end><block_end><class_stmt>TCPTimeoutTestCase(TimeoutTestCase)<block_start>"""TCP test case for socket.socket() timeout functions"""<def_stmt>setUp self<block_start>self.sock=socket.socket(socket.AF_INET socket.SOCK_STREAM)<line_sep>self.addr_remote=('www.python.org.' 80)<block_end><def_stmt>tearDown self<block_start>self.sock.close()<block_end><def_stmt>testConnectTimeout self# Choose a private address that is unlikely to exist to prevent # failures due to the connect succeeding before the timeout. # Use a dotted IP address to avoid including the DNS lookup time # with the connect time. This avoids failing the assertion that # the timeout occurred fast enough. <block_start>addr=('10.0.0.0' 12345)<with_stmt>support.transient_internet(addr[0])<block_start>self._sock_operation(1 0.001 'connect' addr)<block_end><block_end><def_stmt>testRecvTimeout self# Test recv() timeout <block_start><with_stmt>support.transient_internet(self.addr_remote[0])<block_start>self.sock.connect(self.addr_remote)<line_sep>self._sock_operation(1 1.5 'recv' 1024)<block_end><block_end><def_stmt>testAcceptTimeout self# Test accept() timeout <block_start>support.bind_port(self.sock self.localhost)<line_sep>self.sock.listen(5)<line_sep>self._sock_operation(1 1.5 'accept')<block_end><def_stmt>testSend self# Test send() timeout <block_start><with_stmt>socket.socket(socket.AF_INET socket.SOCK_STREAM)<as>serv<block_start>support.bind_port(serv self.localhost)<line_sep>serv.listen(5)<line_sep>self.sock.connect(serv.getsockname())<line_sep># Send a lot of data in order to bypass buffering in the TCP stack. self._sock_operation(100 1.5 'send' b"X"<times>200000)<block_end><block_end><def_stmt>testSendto self# Test sendto() timeout <block_start><with_stmt>socket.socket(socket.AF_INET socket.SOCK_STREAM)<as>serv<block_start>support.bind_port(serv self.localhost)<line_sep>serv.listen(5)<line_sep>self.sock.connect(serv.getsockname())<line_sep># The address argument is ignored since we already connected. self._sock_operation(100 1.5 'sendto' b"X"<times>200000 serv.getsockname())<block_end><block_end><def_stmt>testSendall self# Test sendall() timeout <block_start><with_stmt>socket.socket(socket.AF_INET socket.SOCK_STREAM)<as>serv<block_start>support.bind_port(serv self.localhost)<line_sep>serv.listen(5)<line_sep>self.sock.connect(serv.getsockname())<line_sep># Send a lot of data in order to bypass buffering in the TCP stack. 
self._sock_operation(100 1.5 'sendall' b"X"<times>200000)<block_end><block_end><block_end><class_stmt>UDPTimeoutTestCase(TimeoutTestCase)<block_start>"""UDP test case for socket.socket() timeout functions"""<def_stmt>setUp self<block_start>self.sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<block_end><def_stmt>tearDown self<block_start>self.sock.close()<block_end><def_stmt>testRecvfromTimeout self# Test recvfrom() timeout # Prevent "Address already in use" socket exceptions <block_start>support.bind_port(self.sock self.localhost)<line_sep>self._sock_operation(1 1.5 'recvfrom' 1024)<block_end><block_end><def_stmt>test_main <block_start>support.requires('network')<line_sep>support.run_unittest(CreationTestCase TCPTimeoutTestCase UDPTimeoutTestCase )<block_end><if_stmt>__name__<eq>"__main__"<block_start>test_main()<block_end>
""" This is the official list of CEA colors to use in plots """<import_stmt>os<import_stmt>pandas<as>pd<import_stmt>yaml<import_stmt>warnings<import_stmt>functools<import_from_stmt>typing List Callable<line_sep>__author__="<NAME>"<line_sep>__copyright__="Copyright 2020, Architecture and Building Systems - ETH Zurich"<line_sep>__credits__=["<NAME>"]<line_sep>__license__="MIT"<line_sep>__version__="0.1"<line_sep>__maintainer__="<NAME>"<line_sep>__email__="<EMAIL>"<line_sep>__status__="Production"<line_sep>COLORS_TO_RGB={"red":"rgb(240,75,91)" "red_light":"rgb(246,148,143)" "red_lighter":"rgb(252,217,210)" "blue":"rgb(63,192,194)" "blue_light":"rgb(171,221,222)" "blue_lighter":"rgb(225,242,242)" "yellow":"rgb(255,209,29)" "yellow_light":"rgb(255,225,133)" "yellow_lighter":"rgb(255,243,211)" "brown":"rgb(174,148,72)" "brown_light":"rgb(201,183,135)" "brown_lighter":"rgb(233,225,207)" "purple":"rgb(171,95,127)" "purple_light":"rgb(198,149,167)" "purple_lighter":"rgb(231,214,219)" "green":"rgb(126,199,143)" "green_light":"rgb(178,219,183)" "green_lighter":"rgb(227,241,228)" "grey":"rgb(68,76,83)" "grey_light":"rgb(126,127,132)" "black":"rgb(35,31,32)" "white":"rgb(255,255,255)" "orange":"rgb(245,131,69)" "orange_light":"rgb(248,159,109)" "orange_lighter":"rgb(254,220,198)"}<def_stmt>color_to_rgb color<block_start><try_stmt><block_start><return>COLORS_TO_RGB[color]<block_end><except_stmt>KeyError<block_start><import_stmt>re<if_stmt>re.match("rgb\(\s*\d+\s*,\s*\d+\s*,\s*\d+\s*\)" color)# already an rgb formatted color <block_start><return>color<block_end><return>COLORS_TO_RGB["black"]<block_end><block_end>
<import_stmt>math<import_stmt>statistics<def_stmt>fuzzyAnd m<block_start>""" fuzzy anding m = list of membership values to be anded returns smallest value in the list """<line_sep><return>min(m)<block_end>FuzzyAnd=fuzzyAnd<def_stmt>fuzzyOr m<block_start>""" fuzzy oring m = list of membership values to be ored returns largest value in the list """<line_sep><return>max(m)<block_end>FuzzyOr=fuzzyOr<def_stmt>fuzzyNot x<block_start>""" fuzzy not x = single membership value to be noted returns the inverse membership value """<line_sep><return>1-x<block_end><def_stmt>compensatoryAnd m g=0.5<block_start>""" anding function m = list of membership values for x derived from n membership functions g = gamma value 0=product 1=algebraic sum returns compensatory AND value of x """<line_sep>g=float(g)<line_sep>product1=1<line_sep>product2=1<for_stmt>mem m<block_start>product1<augmul>mem<line_sep>product2<augmul>(1-mem)<block_end><return>math.pow(product1 1-g)<times>math.pow((1-product2) g)<block_end>CompensatoryAnd=compensatoryAnd<def_stmt>gowa w wm l=1.0<block_start>""" Generalized Ordered Weighted Averaging Operator More info can be found here: https://pdfs.semanticscholar.org/2810/c971af0d01d085c799fb2295dc5668d055c8.pdf l = -1 = Ordered Weighted Harmonic Averaging Operator l = -.000000000001 = Ordered Weighted Geometric Averaging Operator l = 1 = Ordered Weighted Arithmetic Averaging Operator l = 2 = Ordered Weighted Quadratic Averaging Operator w = list of weights wm = list of importance weighted membership values l = lambda real number specifying type of owa to use returns ordered weighted average """<if_stmt>len(w)<ne>len(wm)<block_start><raise>ValueError("Weights and membership value lists must be of equal length.")<block_end><if_stmt>l<eq>0<block_start><raise>ZeroDivisionError("Param l cannot be 0. 
Use -.000000000001 for owg.")<block_end>wm.sort(reverse=<true>)<line_sep>s=0<for_stmt>i range(len(w))<block_start>s<augadd>w[i]<times>math.pow(wm[i] l)<block_end><return>math.pow(s 1/l)<block_end>Gowa=gowa<def_stmt>owa w wm<block_start>""" Ordered Weighted Arithmetic Averaging Operator w = [1,0,0,0] = AND w = [0,0,0,1] = OR w = [1/n,1/n,1/n,1/n] = Arithmetic Average where n=len(w) w = list of weights wm = list of importance weighted membership values returns ordered arithmetic weighted average """<if_stmt>len(w)<ne>len(wm)<block_start><raise>ValueError("Weights and membership value lists must be of equal length.")<block_end>wm.sort(reverse=<true>)<line_sep>s=0<for_stmt>i range(len(w))<block_start>s<augadd>w[i]<times>wm[i]<block_end><return>s<block_end>Owa=owa<def_stmt>owg w wm<block_start>""" Ordered Weighted Geometric Averaging Operator More info can be found here: ftp://decsai.ugr.es/pub/arai/tech_rep/decision/libroOWG.pdf w = [1,0,0,0] = AND w = [0,0,0,1] = OR w = [1/n,1/n,1/n,1/n] = Geometric Average where n=len(w) w = list of weights wm = list of importance weighted membership values returns ordered geometric weighted average """<if_stmt>len(w)<ne>len(wm)<block_start><raise>ValueError("Weights and membership value lists must be of equal length.")<block_end>wm.sort(reverse=<true>)<line_sep>s=1<for_stmt>i range(len(w))<block_start>s<augmul>math.pow(wm[i] w[i])<block_end><return>s<block_end>Owg=owg<def_stmt>owh w wm<block_start>""" Ordered Weighted Harmonic Averaging Operator w = [1,0,0,0] = AND w = [0,0,0,1] = OR w = [1/n,1/n,1/n,1/n] = Harmonic Average where n=len(w) w = list of weights wm = list of importance weighted membership values returns ordered harmonic weighted average """<line_sep><return>gowa(w wm -1)<block_end>Owh=owh<def_stmt>owq w wm<block_start>""" Ordered Weighted Quadratic Averaging Operator w = [1,0,0,0] = AND w = [0,0,0,1] = OR w = [1/n,1/n,1/n,1/n] = Quadratic Average where n=len(w) w = list of weights wm = list of importance weighted membership values returns ordered quadratic weighted average """<line_sep><return>gowa(w wm 2)<block_end>Owq=owq<def_stmt>median wm<block_start>""" Median Operator wm = list of importance weighted membership values returns the middle value in the set """<line_sep><return>statistics.median(wm)<block_end>Median=median<line_sep>
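# Worked example added for illustration (not part of the original module); the weights and membership values are assumptions picked so the averages are easy to verify by hand. <if_stmt>__name__<eq>'__main__'<block_start>m=[0.2 0.8]<line_sep># fuzzy AND takes the minimum and fuzzy OR the maximum <assert_stmt>fuzzyAnd(m)<eq>0.2<and>fuzzyOr(m)<eq>0.8<line_sep># with equal weights owa is the plain arithmetic mean: 0.5*0.8 + 0.5*0.2 = 0.5 <assert_stmt>abs(owa([0.5 0.5] [0.2 0.8])-0.5)<l>1e-9<line_sep># with equal weights owg is the geometric mean: sqrt(0.8 * 0.2) = 0.4 <assert_stmt>abs(owg([0.5 0.5] [0.2 0.8])-0.4)<l>1e-9<block_end>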
# code-checked # server-checked <import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>random<import_stmt>torch<import_from_stmt>torch.utils data<import_stmt>pickle<def_stmt>generate_scale_label image label<block_start>f_scale=0.5+random.randint(0 16)/10.0<line_sep>image=cv2.resize(image <none> fx=f_scale fy=f_scale interpolation=cv2.INTER_LINEAR)<line_sep>label=cv2.resize(label <none> fx=f_scale fy=f_scale interpolation=cv2.INTER_NEAREST)<line_sep><return>image label<block_end><def_stmt>id2trainId label id_to_trainid<block_start>label_copy=label.copy()<for_stmt>k,v id_to_trainid.items()<block_start>label_copy[label<eq>k]=v<block_end><return>label_copy<block_end>################################################################################ # Cityscapes ################################################################################ <class_stmt>DatasetCityscapesAugmentation(data.Dataset)<block_start><def_stmt>__init__ self root list_path max_iters=<none> crop_size=(512 512) ignore_label=255<block_start>self.root=root<line_sep>self.list_path=list_path<line_sep>self.crop_h,self.crop_w=crop_size<line_sep>self.ignore_label=ignore_label<line_sep>self.img_ids=[i_id.strip().split()<for>i_id open(list_path)]<line_sep>print("DatasetCityscapesAugmentation - num unique examples: %d"%len(self.img_ids))<if_stmt><not>max_iters<eq><none><block_start>self.img_ids=self.img_ids<times>int(np.ceil(float(max_iters)/len(self.img_ids)))<block_end>print("DatasetCityscapesAugmentation - num examples: %d"%len(self.img_ids))<line_sep>self.files=[]<for_stmt>item self.img_ids<block_start>image_path,label_path=item<line_sep>name=osp.splitext(osp.basename(label_path))[0]<line_sep>img_file=osp.join(self.root image_path)<line_sep>label_file=osp.join(self.root label_path)<line_sep>self.files.append({"img":img_file "label":label_file "name":name "weight":1})<block_end>self.id_to_trainid={-1:ignore_label 0:ignore_label 1:ignore_label 2:ignore_label 3:ignore_label 4:ignore_label 5:ignore_label 6:ignore_label 7:0 8:1 9:ignore_label 10:ignore_label 11:2 12:3 13:4 14:ignore_label 15:ignore_label 16:ignore_label 17:5 18:ignore_label 19:6 20:7 21:8 22:9 23:10 24:11 25:12 26:13 27:14 28:15 29:ignore_label 30:ignore_label 31:16 32:17 33:18}<block_end><def_stmt>__len__ self<block_start><return>len(self.files)<block_end><def_stmt>__getitem__ self index<block_start>datafiles=self.files[index]<line_sep>image=cv2.imread(datafiles["img"] cv2.IMREAD_COLOR)<line_sep>label=cv2.imread(datafiles["label"] cv2.IMREAD_GRAYSCALE)<line_sep>label=id2trainId(label self.id_to_trainid)<line_sep>size=image.shape<line_sep>name=datafiles["name"]<line_sep>image,label=generate_scale_label(image label)<line_sep>image=np.asarray(image np.float32)<line_sep>mean=(102.9801 115.9465 122.7717)<line_sep>image=image[: : ::-1]<line_sep>image<augsub>mean<line_sep>img_h,img_w=label.shape<line_sep>pad_h=max(self.crop_h-img_h 0)<line_sep>pad_w=max(self.crop_w-img_w 0)<if_stmt>pad_h<g>0<or>pad_w<g>0<block_start>img_pad=cv2.copyMakeBorder(image 0 pad_h 0 pad_w cv2.BORDER_CONSTANT value=(0.0 0.0 0.0))<line_sep>label_pad=cv2.copyMakeBorder(label 0 pad_h 0 pad_w cv2.BORDER_CONSTANT value=(self.ignore_label ))<block_end><else_stmt><block_start>img_pad,label_pad=image label<block_end>img_h,img_w=label_pad.shape<line_sep>h_off=random.randint(0 img_h-self.crop_h)<line_sep>w_off=random.randint(0 img_w-self.crop_w)<line_sep>image=np.asarray(img_pad[h_off:h_off+self.crop_h w_off:w_off+self.crop_w] 
np.float32)<line_sep>label=np.asarray(label_pad[h_off:h_off+self.crop_h w_off:w_off+self.crop_w] np.float32)<line_sep>image=image.transpose((2 0 1))<line_sep>flip=np.random.choice(2)<times>2-1<line_sep>image=image[: : ::flip]<line_sep>label=label[: ::flip]<line_sep><return>image.copy() label.copy() np.array(size) name<block_end><block_end><class_stmt>DatasetCityscapesEval(data.Dataset)<block_start><def_stmt>__init__ self root list_path ignore_label=255<block_start>self.root=root<line_sep>self.list_path=list_path<line_sep>self.ignore_label=ignore_label<line_sep>self.img_ids=[i_id.strip().split()<for>i_id open(list_path)]<line_sep>print("DatasetCityscapesEval - num examples: %d"%len(self.img_ids))<line_sep>self.files=[]<for_stmt>item self.img_ids<block_start>image_path,label_path=item<line_sep>name=osp.splitext(osp.basename(label_path))[0]<line_sep>img_file=osp.join(self.root image_path)<line_sep>label_file=osp.join(self.root label_path)<line_sep>self.files.append({"img":img_file "label":label_file "name":name "weight":1})<block_end>self.id_to_trainid={-1:ignore_label 0:ignore_label 1:ignore_label 2:ignore_label 3:ignore_label 4:ignore_label 5:ignore_label 6:ignore_label 7:0 8:1 9:ignore_label 10:ignore_label 11:2 12:3 13:4 14:ignore_label 15:ignore_label 16:ignore_label 17:5 18:ignore_label 19:6 20:7 21:8 22:9 23:10 24:11 25:12 26:13 27:14 28:15 29:ignore_label 30:ignore_label 31:16 32:17 33:18}<block_end><def_stmt>__len__ self<block_start><return>len(self.files)<block_end><def_stmt>__getitem__ self index<block_start>datafiles=self.files[index]<line_sep>image=cv2.imread(datafiles["img"] cv2.IMREAD_COLOR)<line_sep>label=cv2.imread(datafiles["label"] cv2.IMREAD_GRAYSCALE)<if_stmt><not>os.path.exists(datafiles["img"])# (26 out of 25000 images are missing) <block_start><return>self.__getitem__(0)<block_end>label=id2trainId(label self.id_to_trainid)<line_sep>size=image.shape<line_sep>name=datafiles["name"]<line_sep>image=np.asarray(image np.float32)<line_sep>mean=(102.9801 115.9465 122.7717)<line_sep>image=image[: : ::-1]<line_sep>image<augsub>mean<line_sep>image=image.transpose((2 0 1))<line_sep><return>image.copy() label.copy() np.array(size) name<block_end><block_end><class_stmt>DatasetCityscapesEvalSeq(data.Dataset)<block_start><def_stmt>__init__ self data_path sequence="00"<block_start>self.data_path=data_path<line_sep>self.img_dir=self.data_path+"/leftImg8bit/demoVideo/stuttgart_"+sequence+"/"<line_sep>self.examples=[]<line_sep>file_names=os.listdir(self.img_dir)<for_stmt>file_name file_names<block_start>img_id=file_name.split("_leftImg8bit.png")[0]<line_sep>img_path=self.img_dir+file_name<line_sep>example={}<line_sep>example["img_path"]=img_path<line_sep>example["img_id"]=img_id<line_sep>self.examples.append(example)<block_end>self.num_examples=len(self.examples)<line_sep>print("DatasetCityscapesEvalSeq - num examples: %d"%self.num_examples)<block_end><def_stmt>__len__ self<block_start><return>len(self.examples)<block_end><def_stmt>__getitem__ self index<block_start>datafiles=self.examples[index]<line_sep>image=cv2.imread(datafiles["img_path"] cv2.IMREAD_COLOR)<line_sep>size=image.shape<line_sep>name=datafiles["img_id"]<line_sep>image=np.asarray(image np.float32)<line_sep>mean=(102.9801 115.9465 122.7717)<line_sep>image=image[: : ::-1]<line_sep>image<augsub>mean<line_sep>image=image.transpose((2 0 1))<line_sep><return>image.copy() np.array(size) name<block_end><block_end>################################################################################ # Synscapes 
################################################################################ <class_stmt>DatasetSynscapesAugmentation(data.Dataset)<block_start><def_stmt>__init__ self root root_meta type="train" max_iters=<none> crop_size=(512 512) ignore_label=255<block_start>self.root=root<line_sep>self.root_meta=root_meta<line_sep>self.crop_h,self.crop_w=crop_size<line_sep>self.ignore_label=ignore_label<if_stmt>type<eq>"train"<block_start><with_stmt>open(root_meta+"/train_img_ids.pkl" "rb")<as>file# (needed for python3) <block_start>self.img_ids=pickle.load(file)<block_end><block_end><elif_stmt>type<eq>"val"<block_start><with_stmt>open(root_meta+"/val_img_ids.pkl" "rb")<as>file# (needed for python3) <block_start>self.img_ids=pickle.load(file)<block_end><block_end><else_stmt><block_start><raise>Exception("type must be either 'train' or 'val'!")<block_end>print("DatasetSynscapesAugmentation - num unique examples: %d"%len(self.img_ids))<if_stmt><not>max_iters<eq><none><block_start>self.img_ids=self.img_ids<times>int(np.ceil(float(max_iters)/len(self.img_ids)))<block_end>print("DatasetSynscapesAugmentation - num examples: %d"%len(self.img_ids))<line_sep>self.files=[]<for_stmt>img_id self.img_ids<block_start>self.files.append({"img":self.root+"/img/rgb-2k/"+img_id+".png" "label":self.root_meta+"/gtFine/"+img_id+".png" "name":img_id "weight":1})<block_end>self.id_to_trainid={-1:ignore_label 0:ignore_label 1:ignore_label 2:ignore_label 3:ignore_label 4:ignore_label 5:ignore_label 6:ignore_label 7:0 8:1 9:ignore_label 10:ignore_label 11:2 12:3 13:4 14:ignore_label 15:ignore_label 16:ignore_label 17:5 18:ignore_label 19:6 20:7 21:8 22:9 23:10 24:11 25:12 26:13 27:14 28:15 29:ignore_label 30:ignore_label 31:16 32:17 33:18}<block_end><def_stmt>__len__ self<block_start><return>len(self.files)<block_end><def_stmt>__getitem__ self index<block_start>datafiles=self.files[index]<line_sep>image=cv2.imread(datafiles["img"] cv2.IMREAD_COLOR)<line_sep>label=cv2.imread(datafiles["label"] cv2.IMREAD_GRAYSCALE)<if_stmt><not>os.path.exists(datafiles["img"])# (26 out of 25000 images are missing) <block_start><return>self.__getitem__(0)<block_end>label=id2trainId(label self.id_to_trainid)<line_sep>size=image.shape<line_sep>name=datafiles["name"]<line_sep>image,label=generate_scale_label(image label)<line_sep>image=np.asarray(image np.float32)<line_sep>mean=(102.9801 115.9465 122.7717)<line_sep>image=image[: : ::-1]<line_sep>image<augsub>mean<line_sep>img_h,img_w=label.shape<line_sep>pad_h=max(self.crop_h-img_h 0)<line_sep>pad_w=max(self.crop_w-img_w 0)<if_stmt>pad_h<g>0<or>pad_w<g>0<block_start>img_pad=cv2.copyMakeBorder(image 0 pad_h 0 pad_w cv2.BORDER_CONSTANT value=(0.0 0.0 0.0))<line_sep>label_pad=cv2.copyMakeBorder(label 0 pad_h 0 pad_w cv2.BORDER_CONSTANT value=(self.ignore_label ))<block_end><else_stmt><block_start>img_pad,label_pad=image label<block_end>img_h,img_w=label_pad.shape<line_sep>h_off=random.randint(0 img_h-self.crop_h)<line_sep>w_off=random.randint(0 img_w-self.crop_w)<line_sep>image=np.asarray(img_pad[h_off:h_off+self.crop_h w_off:w_off+self.crop_w] np.float32)<line_sep>label=np.asarray(label_pad[h_off:h_off+self.crop_h w_off:w_off+self.crop_w] np.float32)<line_sep>image=image.transpose((2 0 1))<line_sep>flip=np.random.choice(2)<times>2-1<line_sep>image=image[: : ::flip]<line_sep>label=label[: ::flip]<line_sep><return>image.copy() label.copy() np.array(size) name<block_end><block_end><class_stmt>DatasetSynscapesEval(data.Dataset)<block_start><def_stmt>__init__ self root root_meta type="val" 
ignore_label=255<block_start>self.root=root<line_sep>self.root_meta=root_meta<line_sep>self.ignore_label=ignore_label<if_stmt>type<eq>"train"<block_start><with_stmt>open(root_meta+"/train_img_ids.pkl" "rb")<as>file# (needed for python3) <block_start>self.img_ids=pickle.load(file)<block_end><block_end><elif_stmt>type<eq>"val"<block_start><with_stmt>open(root_meta+"/val_img_ids.pkl" "rb")<as>file# (needed for python3) <block_start>self.img_ids=pickle.load(file)<block_end><block_end><else_stmt><block_start><raise>Exception("type must be either 'train' or 'val'!")<block_end>print("DatasetSynscapesEval - num examples: %d"%len(self.img_ids))<line_sep>self.files=[]<for_stmt>img_id self.img_ids<block_start>self.files.append({"img":self.root+"/img/rgb-2k/"+img_id+".png" "label":self.root_meta+"/gtFine/"+img_id+".png" "name":img_id "weight":1})<block_end>self.id_to_trainid={-1:ignore_label 0:ignore_label 1:ignore_label 2:ignore_label 3:ignore_label 4:ignore_label 5:ignore_label 6:ignore_label 7:0 8:1 9:ignore_label 10:ignore_label 11:2 12:3 13:4 14:ignore_label 15:ignore_label 16:ignore_label 17:5 18:ignore_label 19:6 20:7 21:8 22:9 23:10 24:11 25:12 26:13 27:14 28:15 29:ignore_label 30:ignore_label 31:16 32:17 33:18}<block_end><def_stmt>__len__ self<block_start><return>len(self.files)<block_end><def_stmt>__getitem__ self index<block_start>datafiles=self.files[index]<line_sep>image=cv2.imread(datafiles["img"] cv2.IMREAD_COLOR)<line_sep>label=cv2.imread(datafiles["label"] cv2.IMREAD_GRAYSCALE)<if_stmt><not>os.path.exists(datafiles["img"])# (26 out of 25000 images are missing) <block_start><return>self.__getitem__(0)<block_end>label=id2trainId(label self.id_to_trainid)<line_sep>size=image.shape<line_sep>name=datafiles["name"]<line_sep>image=np.asarray(image np.float32)<line_sep>mean=(102.9801 115.9465 122.7717)<line_sep>image=image[: : ::-1]<line_sep>image<augsub>mean<line_sep>image=image.transpose((2 0 1))<line_sep><return>image.copy() label.copy() np.array(size) name<block_end><block_end>
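# Hypothetical wiring sketch added for illustration (not part of the original module); '/path/to/cityscapes' and 'train.lst' are placeholder paths and the batch size is an arbitrary assumption. <if_stmt>__name__<eq>'__main__'<block_start>train_set=DatasetCityscapesAugmentation(root='/path/to/cityscapes' list_path='/path/to/cityscapes/train.lst' crop_size=(512 512))<line_sep># each item is (image, label, size, name), so a standard PyTorch DataLoader can batch it directly train_loader=data.DataLoader(train_set batch_size=4 shuffle=<true> num_workers=2)<for_stmt>imgs,labels,sizes,names train_loader<block_start>print(imgs.shape labels.shape)<line_sep><break><block_end><block_end>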
#coding:utf-8 <import_stmt>logging traceback<import_from_stmt>functools wraps<line_sep>log=logging.getLogger(__name__)<line_sep>acceptStatus=(503 'other accepted status codes')<class_stmt>RetryExhaustedError(Exception)<block_start><pass><line_sep>#def __init__(self, funcname,args,kwargs): # print('Exception from {}: {} {}'.format(funcname,args,kwargs)) <block_end><import_stmt>aiohttp asyncio<line_sep>loop=asyncio.get_event_loop()<def_stmt>retry *exceptions retries=3 cooldown=1 verbose=<true><block_start>"""Decorate an async function to execute it a few times before giving up. Hopes that the problem is resolved by the other side shortly. Args: exceptions (Tuple[Exception]) : The exceptions expected during function execution retries (int): Number of retries of function execution. cooldown (int): Seconds to wait before retry. verbose (bool): Specifies if we should log unsuccessful attempts. """<def_stmt>wrap func<block_start>@wraps(func)<async_keyword><def_stmt>inner *args **kwargs<block_start>retries_count=0<while_stmt><true><block_start><try_stmt><block_start>result=<await>func(*args **kwargs)<block_end><except_stmt>exceptions<as>err# exceptions is the tuple passed in from retry <block_start>retries_count<augadd>1<line_sep>message="Exception:{} during\n{} execution. "<concat>"{} of {} retries attempted".format(err func retries_count retries)<if_stmt>retries_count<ge>retries#verbose and log.exception(message) <block_start>verbose<and>print(message)<line_sep>#raise RetryExhaustedError( # func.__qualname__, args, kwargs) from err #raise RetryExhaustedError <return>err<block_end><else_stmt>#verbose and log.warning(message) <block_start>verbose<and>print(message)<line_sep><await>asyncio.sleep(cooldown)<block_end><block_end><else_stmt><block_start><return>result<block_end><block_end><block_end><return>inner<block_end><return>wrap<block_end># Example is taken from http://aiohttp.readthedocs.io/en/stable/#getting-started <async_keyword><def_stmt>fetch session url<block_start><async_keyword><with_stmt>session.get(url)<as>response#return await response.text() <block_start>text=<await>response.text()<if_stmt>(response.status<l>400<or>response.status<in>acceptStatus)<block_start><return>text<block_end><else_stmt><block_start><return>response.raise_for_status()<block_end><block_end><block_end># Client code, provided for reference @retry(aiohttp.ClientError asyncio.TimeoutError)#@retry(aiohttp.WSServerHandshakeError,aiohttp.ContentTypeError) <async_keyword><def_stmt>main <block_start><async_keyword><with_stmt>aiohttp.ClientSession()<as>session<block_start>html=<await>fetch(session 'http://localhost:55556')<line_sep>print(html)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>loop.run_until_complete(main())<block_end>
<import_stmt>base64<import_stmt>string<import_from_stmt>random randint choice<import_from_stmt>Crypto.Cipher AES<import_from_stmt>Crypto.Hash SHA256<import_from_stmt>Crypto Random<as>CryptoRandom<class_stmt>Encryption()<block_start><def_stmt>__init__ self key<block_start>self.key=key# Key in bytes self.salted_key=<none><block_end># Placeholder for optional salted key <def_stmt>digest_key self<block_start>""" Use SHA-256 over our key to get a proper-sized AES key """<line_sep># Add optional salt to key key=self.key<if_stmt>self.salted_key<block_start>key=self.salted_key<block_end><return>SHA256.new(key).digest()<block_end><def_stmt>get_aes self IV<block_start>""" AES instance """<line_sep><return>AES.new(self.digest_key() AES.MODE_CBC IV)<block_end><def_stmt>gen_salt self set_=<true><block_start>""" Generate a random salt """<line_sep>min_char=8<line_sep>max_char=12<line_sep>allchar=string.ascii_letters+string.punctuation+string.digits<line_sep>salt="".join(choice(allchar)<for>x range(randint(min_char max_char))).encode()<line_sep># Set the salt in the same instance if required <if_stmt>set_<block_start>self.set_salt(salt)<block_end><return>salt<block_end><def_stmt>set_salt self salt=<none><block_start>""" Add a salt to the secret key for this specific encryption or decryption """<if_stmt>salt<block_start>self.salted_key=salt+self.key<block_end><else_stmt><block_start>self.salted_key=<none><block_end><block_end><def_stmt>encrypt self secret<block_start>""" Encrypt a secret """<line_sep># generate IV IV=CryptoRandom.new().read(AES.block_size)<line_sep># Retrieve AES instance aes=self.get_aes(IV)<line_sep># calculate needed padding padding=AES.block_size-len(secret)%AES.block_size<line_sep># Python 2.x: secret += chr(padding) * padding secret<augadd>bytes([padding])<times>padding<line_sep># store the IV at the beginning and encrypt data=IV+aes.encrypt(secret)<line_sep># Reset salted key self.set_salt()<line_sep># Return base 64 encoded bytes <return>base64.b64encode(data)<block_end><def_stmt>decrypt self enc_secret<block_start>""" Decrypt a secret """<line_sep># Decode base 64 enc_secret=base64.b64decode(enc_secret)<line_sep># extract the IV from the beginning IV=enc_secret[:AES.block_size]<line_sep># Retrieve AES instance aes=self.get_aes(IV)<line_sep># Decrypt data=aes.decrypt(enc_secret[AES.block_size:])<line_sep># pick the padding value from the end; Python 2.x: ord(data[-1]) padding=data[-1]<line_sep># Python 2.x: chr(padding) * padding <if_stmt>data[-padding:]<ne>bytes([padding])<times>padding<block_start><raise>ValueError("Invalid padding...")<block_end># Reset salted key self.set_salt()<line_sep># Remove the padding and return the bytes <return>data[:-padding]<block_end><block_end>
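# Round-trip usage sketch added for illustration (not part of the original class); the key and message bytes are arbitrary assumptions. <if_stmt>__name__<eq>'__main__'<block_start>enc=Encryption(b'correct horse battery staple')<line_sep>token=enc.encrypt(b'attack at dawn')<line_sep># without a salt, decrypting with the same key recovers the plaintext <assert_stmt>enc.decrypt(token)<eq>b'attack at dawn'<line_sep># with a salt, the very same salt has to be set again before decrypting salt=enc.gen_salt()<line_sep>token=enc.encrypt(b'attack at dawn')<line_sep>enc.set_salt(salt)<line_sep><assert_stmt>enc.decrypt(token)<eq>b'attack at dawn'<block_end>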
<import_from_future_stmt> absolute_import division print_function unicode_literals<line_sep># NB: see head of `datasets.py' <import_from_stmt>training_utils *<import_from_stmt>utils_io os tempdir<import_from_stmt>datasets image_kinds<line_sep>print("Using TensorFlow version:" tf.__version__)<def_stmt>train_n_save_classifier model class_names input_kind train_data test_data=<none> optimizer='adam' kind='sparse_categorical' outdir=tempdir early_stopping=<true> validate_on_test_data=<false> cm_plot_args={} **kwds<block_start>x_train,y_train=train_data<line_sep>path=os.path.join(outdir model.name)<line_sep>log_dir=path+'_logs'<line_sep>fw_train,fw_confision_matrix=tf.summary.create_file_writer(os.path.join(log_dir 'train')) tf.summary.create_file_writer(os.path.join(log_dir 'confusion_matrix'))<line_sep># Very basic & dumb test for detecting images... <if_stmt>input_kind<in>image_kinds<block_start>log_25_img_dataset_grid(fw_train class_names 'Training data (some)' train_data)<block_end>model.summary()<line_sep>loss,metric=(tf.losses.SparseCategoricalCrossentropy(from_logits=<true>) tf.metrics.SparseCategoricalAccuracy())<line_sep># if kind = 'sparse_categorical' else ? model.compile(optimizer=optimizer loss=loss metrics=[metric])<line_sep>callbacks=[tf.keras.callbacks.ModelCheckpoint(# Path where to save the model # The two parameters below mean that we will overwrite # the current checkpoint if and only if # the `val_loss` score has improved. # The saved model name will include the current epoch. filepath=path+"_{epoch}" save_best_only=<true> # Only save a model if `val_loss` has improved. monitor="val_loss" verbose=1 ) tf.keras.callbacks.TensorBoard(log_dir=log_dir histogram_freq=1 # How often to log histogram visualizations embeddings_freq=1 # How often to log embedding visualizations update_freq="epoch" # How often to write logs (default: once per epoch) ) ]+([# https://www.tensorflow.org/guide/keras/train_and_evaluate#checkpointing_models tf.keras.callbacks.EarlyStopping(# Stop training when `val_loss` is no longer improving monitor="val_loss" # "no longer improving" being defined as "no better than 1e-2 less" min_delta=1e-2 # "no longer improving" being further defined as "for at least 2 epochs" patience=3 verbose=1 ) ]<if>early_stopping<else>[])+([log_confusion_matrix_callback(fw_confision_matrix model class_names test_data **cm_plot_args) ]<if>test_data<is><not><none><else>[])<line_sep>valargs=dict(validation_data=test_data)<if>validate_on_test_data<and>test_data<is><not><none><else>{}<line_sep>model.fit(x_train y_train callbacks=callbacks **{'epochs':20 # some defaults: 'shuffle':<true> 'batch_size':64 'validation_split':0.2 **valargs **kwds})<if_stmt>test_data<is><not><none><block_start>x_test,y_test=test_data<line_sep>print('Performing final validation on given test data:')<line_sep># Just check and show accuracy on "official" test data: _,test_accuracy=model.evaluate(x_test y_test verbose=1)<line_sep>print('Validation accuracy on given test data:' test_accuracy)<block_end>print('Saving model in' path+'.h5')<line_sep>model.save(path+'.h5')<block_end># --- <def_stmt>classifier load_data make_model model_name=<none> load_data_args={} make_model_args={} **kwds<block_start>train_data,test_data,input_shape,input_kind,class_names=load_data(**load_data_args)<line_sep>train_n_save_classifier(make_model(input_shape name=model_name **make_model_args) class_names input_kind train_data test_data **kwds)<block_end># --- <import_from_stmt>tensorflow.keras.models 
Sequential<import_from_stmt>tensorflow.keras.layers Reshape Dense<def_stmt>make_dense input_shape n_neurons=(100 ) n_classes=5 input_reshape=<false> **kwds<block_start>"""Builds a very basic DNN. n_neurons: gives the number of neurons for each layer, as a list or tuple n_classes: number of output neurons (= |classes|) input_reshape: whether to include a dummy reshape input layer (useful to access input features as activations, for DeepConcolic's internal statistical analysis and layerwise abstractions). """<assert_stmt>len(n_neurons)<g>0<line_sep>layer_args=[dict(activation='relu')<for>_ n_neurons]<line_sep>layer_args[0]['input_shape']=input_shape<line_sep>layer_args[-1]['activation']='softmax'<line_sep>layers=(Reshape(input_shape=input_shape target_shape=input_shape) )<if>input_reshape<else>()<line_sep>layers<augadd>tuple(Dense(n **args)<for>n,args zip(n_neurons layer_args))<line_sep><return>Sequential(layers **kwds)<block_end># --- <def_stmt>make_dense_classifier load_data prefix n_features n_classes n_neurons **kwds<block_start>"""A wrapper for training DNNs built using {make_dense}."""<line_sep>model_name=(f'{prefix}{n_features}_{n_classes}_dense'<concat>f'_{"_".join(str(c)<for>c n_neurons)}')<line_sep>model_args=dict(n_classes=n_classes n_neurons=n_neurons)<line_sep>classifier(load_data make_dense epochs=50 model_name=model_name make_model_args=model_args **kwds)<block_end># ---
<import_from_stmt>pyspark SparkContext<import_from_stmt>pyspark.sql.column Column _to_java_column _to_seq<import_from_stmt>pyspark.sql.functions col<def_stmt>compute_image_size col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=(sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeImageSize().apply)<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>compute_md5 col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeMD5().apply<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>compute_sha1 col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeSHA1().apply<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>detect_language col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.detectLanguage().apply<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>detect_mime_type_tika col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=(sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.detectMimeTypeTika().apply)<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>extract_boilerplate col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=(sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractBoilerpipeText().apply)<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>extract_date col dates<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractDate().apply<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>extract_domain col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractDomain().apply<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>extract_image_links col image_links<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=(sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractImageLinks().apply)<line_sep><return>Column(udf(_to_seq(sc [col image_links] _to_java_column)))<block_end><def_stmt>extract_links col links<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractLinks().apply<line_sep><return>Column(udf(_to_seq(sc [col links] _to_java_column)))<block_end><def_stmt>get_extension_mime col mime<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=(sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.getExtensionMime().apply)<line_sep><return>Column(udf(_to_seq(sc [col mime] _to_java_column)))<block_end><def_stmt>remove_http_header col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=(sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removeHTTPHeader().apply)<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>remove_html col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removeHTML().apply<line_sep><return>Column(udf(_to_seq(sc [col] _to_java_column)))<block_end><def_stmt>remove_prefix_www col<block_start>sc=SparkContext.getOrCreate()<line_sep>udf=(sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removePrefixWWW().apply)<line_sep><return>Column(udf(_to_seq(sc [col] 
_to_java_column)))<block_end>
''' Copyright 2018 Esri Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.​ '''<import_stmt>importlib<import_stmt>json<import_stmt>os<import_stmt>sys<line_sep>sys.path.append(os.path.dirname(__file__))<import_from_stmt>fields fields<import_from_stmt>features features<import_stmt>numpy<as>np<import_stmt>prf_utils<class_stmt>GeometryType<block_start>Point=1<line_sep>Multipoint=2<line_sep>Polyline=3<line_sep>Polygon=4<block_end><class_stmt>ObjectDetector<block_start><def_stmt>__init__ self<block_start>self.name='Object Detector'<line_sep>self.description='This python raster function applies deep learning model to detect objects in imagery'<block_end><def_stmt>initialize self **kwargs<block_start><if_stmt>'model'<not><in>kwargs<block_start><return><block_end>model=kwargs['model']<line_sep>model_as_file=<true><try_stmt><block_start><with_stmt>open(model 'r')<as>f<block_start>self.json_info=json.load(f)<block_end><block_end><except_stmt>FileNotFoundError<block_start><try_stmt><block_start>self.json_info=json.loads(model)<line_sep>model_as_file=<false><block_end><except_stmt>json.decoder.JSONDecodeError<block_start><raise>Exception("Invalid model argument")<block_end><block_end><if_stmt>'device'<in>kwargs<block_start>device=kwargs['device']<if_stmt>device<l>-1<block_start>os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"<line_sep>device=prf_utils.get_available_device()<block_end>os.environ['CUDA_VISIBLE_DEVICES']=str(device)<block_end><else_stmt><block_start>os.environ['CUDA_VISIBLE_DEVICES']='-1'<block_end>sys.path.append(os.path.dirname(__file__))<line_sep>framework=self.json_info['Framework']<if_stmt>'ModelConfiguration'<in>self.json_info<block_start><if_stmt>isinstance(self.json_info['ModelConfiguration'] str)<block_start>ChildModelDetector=getattr(importlib.import_module('{}.{}'.format(framework self.json_info['ModelConfiguration'])) 'ChildObjectDetector')<block_end><else_stmt><block_start>ChildModelDetector=getattr(importlib.import_module('{}.{}'.format(framework self.json_info['ModelConfiguration']['Name'])) 'ChildObjectDetector')<block_end><block_end><else_stmt><block_start><raise>Exception("Invalid model configuration")<block_end>self.child_object_detector=ChildModelDetector()<line_sep>self.child_object_detector.initialize(model model_as_file)<block_end><def_stmt>getParameterInfo self<block_start>required_parameters=[{'name':'raster' 'dataType':'raster' 'required':<true> 'displayName':'Raster' 'description':'Input Raster'} {'name':'model' 'dataType':'string' 'required':<true> 'displayName':'Input Model Definition (EMD) File' 'description':'Input model definition (EMD) JSON file'} {'name':'device' 'dataType':'numeric' 'required':<false> 'displayName':'Device ID' 'description':'Device ID'} {'name':'padding' 'dataType':'numeric' 'value':0 'required':<false> 'displayName':'Padding' 'description':'Padding'} {'name':'score_threshold' 'dataType':'numeric' 'value':0.6 'required':<false> 'displayName':'Confidence Score Threshold [0.0, 1.0]' 'description':'Confidence score threshold value [0.0, 1.0]'} 
]<if_stmt>'BatchSize'<not><in>self.json_info<block_start>required_parameters.append({'name':'batch_size' 'dataType':'numeric' 'required':<false> 'value':1 'displayName':'Batch Size' 'description':'Batch Size'} )<block_end><return>self.child_object_detector.getParameterInfo(required_parameters)<block_end><def_stmt>getConfiguration self **scalars<block_start>configuration=self.child_object_detector.getConfiguration(**scalars)<if_stmt>'DataRange'<in>self.json_info<block_start>configuration['dataRange']=tuple(self.json_info['DataRange'])<block_end>configuration['inheritProperties']=2|4|8<line_sep>configuration['inputMask']=<true><line_sep><return>configuration<block_end><def_stmt>getFields self<block_start><return>json.dumps(fields)<block_end><def_stmt>getGeometryType self<block_start><return>GeometryType.Polygon<block_end><def_stmt>vectorize self **pixelBlocks# set pixel values in invalid areas to 0 <block_start>raster_mask=pixelBlocks['raster_mask']<line_sep>raster_pixels=pixelBlocks['raster_pixels']<line_sep>raster_pixels[np.where(raster_mask<eq>0)]=0<line_sep>pixelBlocks['raster_pixels']=raster_pixels<line_sep>polygon_list,scores,classes=self.child_object_detector.vectorize(**pixelBlocks)<line_sep># bounding_boxes = bounding_boxes.tolist() scores=scores.tolist()<line_sep>classes=classes.tolist()<line_sep>features['features']=[]<for_stmt>i range(len(polygon_list))<block_start>rings=[[]]<for_stmt>j range(polygon_list[i].shape[0])<block_start>rings[0].append([polygon_list[i][j][1] polygon_list[i][j][0]])<block_end>features['features'].append({'attributes':{'OID':i+1 'Class':self.json_info['Classes'][classes[i]-1]['Name'] 'Confidence':scores[i]} 'geometry':{'rings':rings}})<block_end><return>{'output_vectors':json.dumps(features)}<block_end><block_end>
<import_stmt>unittest<import_stmt>os<class_stmt>PraatioTestCase(unittest.TestCase)<block_start><def_stmt>__init__ self *args **kargs<block_start>super(PraatioTestCase self).__init__(*args **kargs)<line_sep>root=os.path.dirname(os.path.realpath(__file__))<line_sep>self.dataRoot=os.path.join(root "files")<line_sep>self.outputRoot=os.path.join(self.dataRoot "test_output")<block_end><def_stmt>setUp self<block_start><if_stmt><not>os.path.exists(self.outputRoot)<block_start>os.mkdir(self.outputRoot)<block_end><block_end><def_stmt>assertAllAlmostEqual self listA listB# fail fast on length mismatch; zip alone would silently ignore extra items <block_start>self.assertEqual(len(listA) len(listB))<line_sep><for_stmt>valA,valB zip(listA listB)<block_start>self.assertAlmostEqual(valA valB)<block_end><block_end><block_end>
<import_stmt>django<try_stmt><block_start><import_from_stmt>django.conf.urls patterns url# django 1.8, 1.9 <block_end><except_stmt>ImportError<block_start><import_from_stmt>django.conf.urls url<block_end><import_from_stmt>waliki.settings WALIKI_SLUG_PATTERN<import_from_stmt>waliki.git.views whatchanged WhatchangedFeed webhook_pull history version diff<line_sep>_pattern_list=[url(r'^_whatchanged/(?P<pag>\d+)$' whatchanged name='waliki_whatchanged') # noqa url(r'^_whatchanged$' whatchanged {'pag':'1'} name='waliki_whatchanged') # noqa url(r'^_whatchanged/rss$' WhatchangedFeed() name='waliki_whatchanged_rss') url(r'^_hooks/pull/(?P<remote>[a-zA-Z0-9]+)$' webhook_pull name='waliki_webhook_pull') url(r'^(?P<slug>'+WALIKI_SLUG_PATTERN+')/history/(?P<pag>\d+)$' history name='waliki_history') url(r'^(?P<slug>'+WALIKI_SLUG_PATTERN+')/history/$' history {'pag':'1'} name='waliki_history') url(r'^(?P<slug>'+WALIKI_SLUG_PATTERN+')/version/(?P<version>[0-9a-f\^]{4,40})/raw$' version {'raw':<true>} name='waliki_version_raw') url(r'^(?P<slug>'+WALIKI_SLUG_PATTERN+')/version/(?P<version>[0-9a-f\^]{4,40})$' version name='waliki_version') url(r'^(?P<slug>'+WALIKI_SLUG_PATTERN+')/diff/(?P<old>[0-9a-f\^]{4,40})\.\.(?P<new>[0-9a-f\^]{4,40})/raw$' diff {'raw':<true>} name='waliki_diff_raw') url(r'^(?P<slug>'+WALIKI_SLUG_PATTERN+')/diff/(?P<old>[0-9a-f\^]{4,40})\.\.(?P<new>[0-9a-f\^]{4,40})$' diff name='waliki_diff') ]<if_stmt>django.VERSION[:2]<ge>(1 10)<block_start>urlpatterns=_pattern_list<block_end><else_stmt><block_start>urlpatterns=patterns('waliki.git.views' *_pattern_list)<block_end>
# -*- coding: utf-8 -*- <import_stmt>click<import_stmt>json<import_from_stmt>.deployment deployment<import_from_stmt>... Client<line_sep>@deployment.group()<def_stmt>predict <block_start>"""Making prediction to a deployment-related entity. For example, to make a prediction to an endpoint, run `verta deployment predict endpoint "<endpoint path>" --data "<input data>"` """<line_sep><pass><block_end>@predict.command(name="endpoint")@click.argument("path" nargs=1 required=<true>)@click.option("--data" "-d" required=<true> help="Input for prediction. Must be a valid JSON string.")@click.option("--workspace" "-w" help="Workspace to use.")<def_stmt>predict_endpoint path data workspace<block_start>"""Making prediction via a deployed endpoint. """<line_sep>client=Client()<try_stmt><block_start>endpoint=client.get_endpoint(path=path workspace=workspace)<block_end><except_stmt>ValueError<block_start><raise>click.BadParameter("endpoint with path {} not found".format(path))<block_end>deployed_model=endpoint.get_deployed_model()<line_sep>result=deployed_model.predict(json.loads(data))<line_sep>click.echo(json.dumps(result))<block_end>
<import_stmt>logging<import_from_stmt>plumbum CommandNotFound local<import_from_stmt>changes shell<line_sep>log=logging.getLogger(__name__)<def_stmt>get_test_runner <block_start>test_runners=['tox' 'nosetests' 'py.test']<line_sep>test_runner=<none><for_stmt>runner test_runners<block_start><try_stmt><block_start>test_runner=local[runner]<block_end><except_stmt>CommandNotFound<block_start><continue><block_end><block_end><return>test_runner<block_end><def_stmt>run_tests <block_start>"""Executes your tests."""<line_sep>test_runner=get_test_runner()<if_stmt>test_runner<block_start>result=test_runner()<line_sep>log.info('Test execution returned:\n%s'%result)<line_sep><return>result<block_end><else_stmt><block_start>log.info('No test runner found')<block_end><return><none><block_end><def_stmt>run_test_command context<block_start><if_stmt>context.test_command<block_start>result=shell.dry_run(context.test_command context.dry_run)<line_sep>log.info('Test command "%s", returned %s' context.test_command result)<block_end><return><true><block_end>
# python3 # pylint: disable=g-bad-file-header # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Prior losses are losses that regulate towards the prior. These might take the form of weight regularization, or sampling "fake data". These prior_losses are used in e.g. supervised/prior_experiment.py. """<import_from_stmt>absl logging<import_stmt>dataclasses<import_from_stmt>enn base<import_from_stmt>enn utils<import_stmt>haiku<as>hk<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>typing_extensions<class_stmt>FakeInputGenerator(typing_extensions.Protocol)<block_start><def_stmt>__call__ self batch:base.Batch key:base.RngKey<arrow>base.Array<block_start>"""Generates a fake batch of input=x for use in prior regularization."""<block_end><block_end>@dataclasses.dataclass<class_stmt>MatchingGaussianData(FakeInputGenerator)<block_start>"""Generates a fake batch of input=x for use in prior regularization."""<line_sep>scale:float=1.<def_stmt>__call__ self batch:base.Batch key:base.RngKey<arrow>base.Array<block_start>"""Generates a fake batch of input=x for use in prior regularization."""<line_sep><return>jax.random.normal(key batch.x.shape)<times>self.scale<block_end><block_end><def_stmt>make_gaussian_dataset batch_size:int input_dim:int seed:int=0<arrow>base.BatchIterator<block_start>"""Returns a batch iterator over random Gaussian data."""<line_sep>sample_fn=jax.jit(<lambda>x:jax.random.normal(x [batch_size input_dim]))<def_stmt>batch_iterator <block_start>rng=hk.PRNGSequence(seed)<while_stmt><true><block_start>x=sample_fn(next(rng))<line_sep><yield>base.Batch(x y=jnp.ones([x.shape[0] 1]))<block_end><block_end><return>batch_iterator()<block_end><def_stmt>variance_kl var:base.Array pred_log_var:base.Array<arrow>base.Array<block_start>"""Compute the KL divergence between Gaussian variance with matched means."""<line_sep>log_var=jnp.log(var)<line_sep>pred_var=jnp.exp(pred_log_var)<line_sep><return>0.5<times>(pred_log_var-log_var+var/pred_var-1)<block_end><def_stmt>generate_batched_forward_at_data num_index_sample:int x:base.Array enn:base.EpistemicNetwork params:hk.Params key:base.RngKey<arrow>base.Output<block_start>"""Generate enn output for batch of data with indices based on random key."""<line_sep>batched_indexer=utils.make_batch_indexer(enn.indexer num_index_sample)<line_sep>batched_forward=jax.vmap(enn.apply in_axes=[<none> <none> 0])<line_sep>batched_out=batched_forward(params x batched_indexer(key))<line_sep><return>batched_out<block_end><def_stmt>l2_training_penalty batched_out:base.Output<block_start>"""Penalize the L2 magnitude of the training network."""<if_stmt>isinstance(batched_out base.OutputWithPrior)<block_start><return>0.5<times>jnp.mean(jnp.square(batched_out.train))<block_end><else_stmt><block_start>logging.warning('L2 weight penalty only works for OutputWithPrior.')<line_sep><return>0.<block_end><block_end><def_stmt>distill_mean_regression 
batched_out:base.Output distill_out:base.Output<arrow>base.Array<block_start>"""Train the mean of the regression to the distill network."""<line_sep>observed_mean=jnp.mean(utils.parse_net_output(batched_out) axis=0)<line_sep>distill_mean=jnp.squeeze(utils.parse_net_output(distill_out))<line_sep><return>jnp.mean(jnp.square(distill_mean-observed_mean))<block_end><def_stmt>distill_mean_classification batched_out:base.Output distill_out:base.Output<arrow>base.Array<block_start>"""Train the mean of the classification to the distill network."""<line_sep>batched_logits=utils.parse_net_output(batched_out)<line_sep>batched_probs=jax.nn.softmax(batched_logits axis=-1)<line_sep>mean_probs=jnp.mean(batched_probs axis=0)<line_sep>distill_probs=jax.nn.softmax(utils.parse_net_output(distill_out) axis=-1)<line_sep><return>jnp.mean(jnp.sum(mean_probs<times>jnp.log(mean_probs/distill_probs) axis=1))<block_end><def_stmt>distill_var_regression batched_out:base.Output distill_out:base.Output<arrow>base.Array<block_start>"""Train the variance of the regression to the distill network."""<assert_stmt>isinstance(distill_out base.OutputWithPrior)<line_sep>observed_var=jnp.var(utils.parse_net_output(batched_out) axis=0)<line_sep><return>jnp.mean(variance_kl(observed_var distill_out.extra['log_var']))<block_end><def_stmt>distill_var_classification batched_out:base.Output distill_out:base.Output<arrow>base.Array<block_start>"""Train the variance of the classification to the distill network."""<assert_stmt>isinstance(distill_out base.OutputWithPrior)<line_sep>batched_logits=utils.parse_net_output(batched_out)<line_sep>observed_var=jnp.var(jax.nn.softmax(batched_logits axis=-1))<line_sep><return>jnp.mean(variance_kl(observed_var distill_out.extra['log_var']))<block_end>@dataclasses.dataclass<class_stmt>RegressionPriorLoss(base.LossFn)<block_start>"""Regress fake data back to prior, and distill mean/var to mean_index."""<line_sep>num_index_sample:int<line_sep>input_generator:FakeInputGenerator=MatchingGaussianData()<line_sep>scale:float=1.<line_sep>distill_index:bool=<false><def_stmt>__call__ self enn:base.EpistemicNetwork params:hk.Params batch:base.Batch key:base.RngKey<arrow>base.Array<block_start>index_key,data_key=jax.random.split(key)<line_sep>fake_x=self.input_generator(batch data_key)<line_sep># TODO(author2): Complete prior loss refactor --> MultilossExperiment batched_out=generate_batched_forward_at_data(self.num_index_sample fake_x enn params index_key)<line_sep># Regularize towards prior output loss=self.scale<times>l2_training_penalty(batched_out)<line_sep># Distill aggregate stats to the "mean_index" <if_stmt>hasattr(enn.indexer 'mean_index')<and>self.distill_index<block_start>distill_out=enn.apply(params fake_x enn.indexer.mean_index)<line_sep>loss<augadd>distill_mean_regression(batched_out distill_out)<line_sep>loss<augadd>distill_var_regression(batched_out distill_out)<block_end><return>loss {}<block_end><block_end>@dataclasses.dataclass<class_stmt>ClassificationPriorLoss(base.LossFn)<block_start>"""Penalize fake data back to prior, and distill mean/var to mean_index."""<line_sep>num_index_sample:int<line_sep>input_generator:FakeInputGenerator=MatchingGaussianData()<line_sep>scale:float=1.<line_sep>distill_index:bool=<false><def_stmt>__call__ self enn:base.EpistemicNetwork params:hk.Params batch:base.Batch key:base.RngKey<arrow>base.Array<block_start>index_key,data_key=jax.random.split(key)<line_sep>fake_x=self.input_generator(batch data_key)<line_sep># TODO(author2): Complete prior loss refactor --> 
MultilossExperiment batched_out=generate_batched_forward_at_data(self.num_index_sample fake_x enn params index_key)<line_sep># Regularize towards prior output loss=self.scale<times>l2_training_penalty(batched_out)<line_sep># Distill aggregate stats to the "mean_index" <if_stmt>hasattr(enn.indexer 'mean_index')<and>self.distill_index<block_start>distill_out=enn.apply(params fake_x enn.indexer.mean_index)<line_sep>loss<augadd>distill_mean_classification(batched_out distill_out)<line_sep>loss<augadd>distill_var_classification(batched_out distill_out)<block_end><return>loss {}<block_end><block_end>
""" Nameko built-in dependencies. """<import_from_stmt>nameko.extensions DependencyProvider<class_stmt>Config(DependencyProvider)<block_start>""" Dependency provider for accessing configuration values. """<def_stmt>get_dependency self worker_ctx<block_start><return>self.container.config.copy()<block_end><block_end>
<import_from_stmt>django.core.exceptions ImproperlyConfigured<class_stmt>MissingStorage(ImproperlyConfigured)<block_start><pass><block_end><class_stmt>NoFileStorageConfigured(ImproperlyConfigured)<block_start><pass><block_end>
<import_from_stmt>setuptools setup find_packages<line_sep>setup(name="disney" version="1.0" description="A history of Shanghai Disney waiting time" long_description="A history of Shanghai Disney waiting time" license="Apache License" url="http://s.gaott.info" author="gtt116" author_email="<EMAIL>" packages=find_packages() include_package_data=<true> platforms="any" install_requires=[] scripts=[] entry_points={'console_scripts':['disney-fetch = disney.fetch:main' 'disney-publish = disney.publish:main' ]})<line_sep>
<import_stmt>random<line_sep>committee_size=150<line_sep>shard_size=1024<line_sep>pool_size=150<times>1024<line_sep># Percentage of attackers in pool attacker_p=0.15<line_sep>attacker_n=int(attacker_p<times>pool_size)<line_sep># Attack threshold (a committee with t percent of attackers) attacker_tn=int(committee_size/3)<line_sep># Monte-carlo trials trials=100000<line_sep># Pool members 1 - attacker; 2 - honest validator pool=[1<for>i range(attacker_n)]<line_sep>pool.extend([0<for>i range(pool_size-attacker_n)])<line_sep>attacked_trials=0<for_stmt>trial range(trials)<block_start><if_stmt>trial<ne>0<and>trial%10<eq>0<block_start>print("Trial %d, attack prob: %f"%(trial attacked_trials/trial))<block_end>random.shuffle(pool)<for_stmt>j range(shard_size)<block_start><if_stmt>sum(pool[j<times>committee_size:(j+1)<times>committee_size])<ge>attacker_tn<block_start>attacked_trials<augadd>1<line_sep><break><block_end><block_end><block_end>print("Attack prob: %f"%(attacked_trials/trials))<line_sep>
<import_from_stmt>sspipe p px unpipe<def_stmt>test_unpipe_active <block_start>a_pipe=px+1|px<times>5<line_sep>func=unpipe(a_pipe)<assert_stmt>func(0)<eq>5<block_end><def_stmt>test_unpipe_passive <block_start>func=<lambda>x:(x+1)<times>5<line_sep>func=unpipe(func)<assert_stmt>func(0)<eq>5<block_end>
<import_from_stmt>unittest_utils RDLSourceTestCase<class_stmt>TestBridge(RDLSourceTestCase)<block_start><def_stmt>test_bridge self<block_start>top=self.compile(["rdl_src/bridge.rdl"] "some_bridge")<line_sep>self.assertEqual(top.find_by_path("some_bridge.ahb.ahb_credits").absolute_address 0x0)<line_sep>self.assertEqual(top.find_by_path("some_bridge.ahb.ahb_stat").absolute_address 0x20)<line_sep>self.assertEqual(top.find_by_path("some_bridge.axi.axi_credits").absolute_address 0x0)<line_sep>self.assertEqual(top.find_by_path("some_bridge.axi.axi_stat").absolute_address 0x40)<block_end><def_stmt>test_bridge_errors self<block_start>self.assertRDLCompileError(["rdl_err_src/err_bridge.rdl"] "illegal_wrapper" r"The 'bridge' property can only be applied to the root address map")<line_sep>self.assertRDLCompileError(["rdl_err_src/err_bridge.rdl"] "not_enough_addrmaps" r"Addrmap 'not_enough_addrmaps' is a bridge and shall contain 2 or more sub-addrmaps")<line_sep>self.assertRDLCompileError(["rdl_err_src/err_bridge.rdl"] "illegal_children" r"Addrmap 'illegal_children' is a bridge which can only contain other addrmaps. Contains a child instance 'y' which is a reg")<block_end><block_end>
<def_stmt>is_prime n<block_start><if_stmt>n<g>1<block_start><for_stmt>i range(2 n<floordiv>2+1)<block_start><if_stmt>(n%i)<eq>0<block_start><return><false><block_end><block_end><else_stmt><block_start><return><true><block_end><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>fibonacci n<block_start>n1,n2=1 1<line_sep>count=0<if_stmt>n<eq>1<block_start>print(n1)<block_end><else_stmt><block_start><while_stmt>count<l>n<block_start><if_stmt><not>is_prime(n1)<and>n1%5<ne>0<block_start>print(n1 end=' ')<block_end><else_stmt><block_start>print(0 end=' ')<block_end>n3=n1+n2<line_sep>n1=n2<line_sep>n2=n3<line_sep>count<augadd>1<block_end><block_end><block_end>n=int(input("Enter the number:"))<line_sep>fibonacci(n)<line_sep>
""" Copyright (c) Facebook, Inc. and its affiliates. """<import_from_stmt>.voc VOCDetection<import_from_stmt>typing Iterable<import_stmt>to_coco_api<line_sep>VOC_PATH="/datasets01/VOC/060817/"<class_stmt>VOCDetection2012(VOCDetection)<block_start><def_stmt>__init__ self image_set:str="train" transforms:Iterable=<none><block_start>super(VOCDetection self).__init__(VOC_PATH image_set=image_set year="2012" download=<false>)<line_sep>self.prepare=to_coco_api.PrepareInstance()<line_sep>self._transforms=transforms<block_end><block_end><import_from_stmt>.voc make_voc_transforms<def_stmt>build image_set args# if we only use voc2012, then we need to adapt trainval and test to # VOC2012 constraints <block_start><if_stmt>image_set<eq>"test"<block_start>image_set="val"<block_end><if_stmt>image_set<eq>"trainval"<block_start>image_set="train"<block_end><return>VOCDetection2012(image_set=image_set transforms=make_voc_transforms(image_set args.remove_difficult))<block_end>
<import_stmt>pytest<def_stmt>test_organism_upgrade upgrader organism_1_0<block_start>value=upgrader.upgrade('organism' organism_1_0 target_version='2')<assert_stmt>value['schema_version']<eq>'2'<assert_stmt>value['status']<eq>'current'<block_end><def_stmt>test_organism_upgrade_4_5 upgrader organism_4_0<block_start>value=upgrader.upgrade('organism' organism_4_0 current_version='4' target_version='5')<assert_stmt>value['schema_version']<eq>'5'<assert_stmt>value['status']<eq>'released'<line_sep>organism_4_0['status']='disabled'<line_sep>organism_4_0['schema_version']='4'<line_sep>value=upgrader.upgrade('organism' organism_4_0 current_version='4' target_version='5')<assert_stmt>value['schema_version']<eq>'5'<assert_stmt>value['status']<eq>'deleted'<block_end>
""" Please read README.md for usage instructions. Extracts Caffe parameters from a given caffemodel/prototxt to a dictionary of numpy arrays, ready for conversion to TensorFlow variables. Writes the dictionary to a .npy file. """<import_stmt>argparse<import_stmt>caffe<import_stmt>numpy<as>np<import_stmt>os<import_stmt>tempfile<line_sep>FLAGS=<none><line_sep>ARCHS={'C':{'CAFFEMODEL':'../models/FlowNet2-C/FlowNet2-C_weights.caffemodel' 'DEPLOY_PROTOTXT':'../models/FlowNet2-C/FlowNet2-C_deploy.prototxt.template' # Mappings between Caffe parameter names and TensorFlow variable names 'PARAMS':{'conv1':'FlowNetC/conv1' 'conv2':'FlowNetC/conv2' 'conv3':'FlowNetC/conv3' 'conv_redir':'FlowNetC/conv_redir' 'conv3_1':'FlowNetC/conv3_1' 'conv4':'FlowNetC/conv4' 'conv4_1':'FlowNetC/conv4_1' 'conv5':'FlowNetC/conv5' 'conv5_1':'FlowNetC/conv5_1' 'conv6':'FlowNetC/conv6' 'conv6_1':'FlowNetC/conv6_1' 'Convolution1':'FlowNetC/predict_flow6' 'deconv5':'FlowNetC/deconv5' 'upsample_flow6to5':'FlowNetC/upsample_flow6to5' 'Convolution2':'FlowNetC/predict_flow5' 'deconv4':'FlowNetC/deconv4' 'upsample_flow5to4':'FlowNetC/upsample_flow5to4' 'Convolution3':'FlowNetC/predict_flow4' 'deconv3':'FlowNetC/deconv3' 'upsample_flow4to3':'FlowNetC/upsample_flow4to3' 'Convolution4':'FlowNetC/predict_flow3' 'deconv2':'FlowNetC/deconv2' 'upsample_flow3to2':'FlowNetC/upsample_flow3to2' 'Convolution5':'FlowNetC/predict_flow2' }} 'S':{'CAFFEMODEL':'../models/FlowNet2-S/FlowNet2-S_weights.caffemodel.h5' 'DEPLOY_PROTOTXT':'../models/FlowNet2-S/FlowNet2-S_deploy.prototxt.template' # Mappings between Caffe parameter names and TensorFlow variable names 'PARAMS':{'conv1':'FlowNetS/conv1' 'conv2':'FlowNetS/conv2' 'conv3':'FlowNetS/conv3' 'conv3_1':'FlowNetS/conv3_1' 'conv4':'FlowNetS/conv4' 'conv4_1':'FlowNetS/conv4_1' 'conv5':'FlowNetS/conv5' 'conv5_1':'FlowNetS/conv5_1' 'conv6':'FlowNetS/conv6' 'conv6_1':'FlowNetS/conv6_1' 'Convolution1':'FlowNetS/predict_flow6' 'deconv5':'FlowNetS/deconv5' 'upsample_flow6to5':'FlowNetS/upsample_flow6to5' 'Convolution2':'FlowNetS/predict_flow5' 'deconv4':'FlowNetS/deconv4' 'upsample_flow5to4':'FlowNetS/upsample_flow5to4' 'Convolution3':'FlowNetS/predict_flow4' 'deconv3':'FlowNetS/deconv3' 'upsample_flow4to3':'FlowNetS/upsample_flow4to3' 'Convolution4':'FlowNetS/predict_flow3' 'deconv2':'FlowNetS/deconv2' 'upsample_flow3to2':'FlowNetS/upsample_flow3to2' 'Convolution5':'FlowNetS/predict_flow2' }} 'CS':{'CAFFEMODEL':'../models/FlowNet2-CS/FlowNet2-CS_weights.caffemodel' 'DEPLOY_PROTOTXT':'../models/FlowNet2-CS/FlowNet2-CS_deploy.prototxt.template' # Mappings between Caffe parameter names and TensorFlow variable names 'PARAMS':{# Net C 'conv1':'FlowNetCS/FlowNetC/conv1' 'conv2':'FlowNetCS/FlowNetC/conv2' 'conv3':'FlowNetCS/FlowNetC/conv3' 'conv_redir':'FlowNetCS/FlowNetC/conv_redir' 'conv3_1':'FlowNetCS/FlowNetC/conv3_1' 'conv4':'FlowNetCS/FlowNetC/conv4' 'conv4_1':'FlowNetCS/FlowNetC/conv4_1' 'conv5':'FlowNetCS/FlowNetC/conv5' 'conv5_1':'FlowNetCS/FlowNetC/conv5_1' 'conv6':'FlowNetCS/FlowNetC/conv6' 'conv6_1':'FlowNetCS/FlowNetC/conv6_1' 'Convolution1':'FlowNetCS/FlowNetC/predict_flow6' 'deconv5':'FlowNetCS/FlowNetC/deconv5' 'upsample_flow6to5':'FlowNetCS/FlowNetC/upsample_flow6to5' 'Convolution2':'FlowNetCS/FlowNetC/predict_flow5' 'deconv4':'FlowNetCS/FlowNetC/deconv4' 'upsample_flow5to4':'FlowNetCS/FlowNetC/upsample_flow5to4' 'Convolution3':'FlowNetCS/FlowNetC/predict_flow4' 'deconv3':'FlowNetCS/FlowNetC/deconv3' 'upsample_flow4to3':'FlowNetCS/FlowNetC/upsample_flow4to3' 
'Convolution4':'FlowNetCS/FlowNetC/predict_flow3' 'deconv2':'FlowNetCS/FlowNetC/deconv2' 'upsample_flow3to2':'FlowNetCS/FlowNetC/upsample_flow3to2' 'Convolution5':'FlowNetCS/FlowNetC/predict_flow2' # Net S 'net2_conv1':'FlowNetCS/FlowNetS/conv1' 'net2_conv2':'FlowNetCS/FlowNetS/conv2' 'net2_conv3':'FlowNetCS/FlowNetS/conv3' 'net2_conv3_1':'FlowNetCS/FlowNetS/conv3_1' 'net2_conv4':'FlowNetCS/FlowNetS/conv4' 'net2_conv4_1':'FlowNetCS/FlowNetS/conv4_1' 'net2_conv5':'FlowNetCS/FlowNetS/conv5' 'net2_conv5_1':'FlowNetCS/FlowNetS/conv5_1' 'net2_conv6':'FlowNetCS/FlowNetS/conv6' 'net2_conv6_1':'FlowNetCS/FlowNetS/conv6_1' 'net2_predict_conv6':'FlowNetCS/FlowNetS/predict_flow6' 'net2_deconv5':'FlowNetCS/FlowNetS/deconv5' 'net2_net2_upsample_flow6to5':'FlowNetCS/FlowNetS/upsample_flow6to5' 'net2_predict_conv5':'FlowNetCS/FlowNetS/predict_flow5' 'net2_deconv4':'FlowNetCS/FlowNetS/deconv4' 'net2_net2_upsample_flow5to4':'FlowNetCS/FlowNetS/upsample_flow5to4' 'net2_predict_conv4':'FlowNetCS/FlowNetS/predict_flow4' 'net2_deconv3':'FlowNetCS/FlowNetS/deconv3' 'net2_net2_upsample_flow4to3':'FlowNetCS/FlowNetS/upsample_flow4to3' 'net2_predict_conv3':'FlowNetCS/FlowNetS/predict_flow3' 'net2_deconv2':'FlowNetCS/FlowNetS/deconv2' 'net2_net2_upsample_flow3to2':'FlowNetCS/FlowNetS/upsample_flow3to2' 'net2_predict_conv2':'FlowNetCS/FlowNetS/predict_flow2' }} 'CSS':{'CAFFEMODEL':'../models/FlowNet2-CSS/FlowNet2-CSS_weights.caffemodel.h5' 'DEPLOY_PROTOTXT':'../models/FlowNet2-CSS/FlowNet2-CSS_deploy.prototxt.template' # Mappings between Caffe parameter names and TensorFlow variable names 'PARAMS':{# Net C 'conv1':'FlowNetCSS/FlowNetCS/FlowNetC/conv1' 'conv2':'FlowNetCSS/FlowNetCS/FlowNetC/conv2' 'conv3':'FlowNetCSS/FlowNetCS/FlowNetC/conv3' 'conv_redir':'FlowNetCSS/FlowNetCS/FlowNetC/conv_redir' 'conv3_1':'FlowNetCSS/FlowNetCS/FlowNetC/conv3_1' 'conv4':'FlowNetCSS/FlowNetCS/FlowNetC/conv4' 'conv4_1':'FlowNetCSS/FlowNetCS/FlowNetC/conv4_1' 'conv5':'FlowNetCSS/FlowNetCS/FlowNetC/conv5' 'conv5_1':'FlowNetCSS/FlowNetCS/FlowNetC/conv5_1' 'conv6':'FlowNetCSS/FlowNetCS/FlowNetC/conv6' 'conv6_1':'FlowNetCSS/FlowNetCS/FlowNetC/conv6_1' 'Convolution1':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6' 'deconv5':'FlowNetCSS/FlowNetCS/FlowNetC/deconv5' 'upsample_flow6to5':'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5' 'Convolution2':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5' 'deconv4':'FlowNetCSS/FlowNetCS/FlowNetC/deconv4' 'upsample_flow5to4':'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4' 'Convolution3':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4' 'deconv3':'FlowNetCSS/FlowNetCS/FlowNetC/deconv3' 'upsample_flow4to3':'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3' 'Convolution4':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3' 'deconv2':'FlowNetCSS/FlowNetCS/FlowNetC/deconv2' 'upsample_flow3to2':'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2' 'Convolution5':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2' # Net S 1 'net2_conv1':'FlowNetCSS/FlowNetCS/FlowNetS/conv1' 'net2_conv2':'FlowNetCSS/FlowNetCS/FlowNetS/conv2' 'net2_conv3':'FlowNetCSS/FlowNetCS/FlowNetS/conv3' 'net2_conv3_1':'FlowNetCSS/FlowNetCS/FlowNetS/conv3_1' 'net2_conv4':'FlowNetCSS/FlowNetCS/FlowNetS/conv4' 'net2_conv4_1':'FlowNetCSS/FlowNetCS/FlowNetS/conv4_1' 'net2_conv5':'FlowNetCSS/FlowNetCS/FlowNetS/conv5' 'net2_conv5_1':'FlowNetCSS/FlowNetCS/FlowNetS/conv5_1' 'net2_conv6':'FlowNetCSS/FlowNetCS/FlowNetS/conv6' 'net2_conv6_1':'FlowNetCSS/FlowNetCS/FlowNetS/conv6_1' 'net2_predict_conv6':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6' 
'net2_deconv5':'FlowNetCSS/FlowNetCS/FlowNetS/deconv5' 'net2_net2_upsample_flow6to5':'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5' 'net2_predict_conv5':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5' 'net2_deconv4':'FlowNetCSS/FlowNetCS/FlowNetS/deconv4' 'net2_net2_upsample_flow5to4':'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4' 'net2_predict_conv4':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4' 'net2_deconv3':'FlowNetCSS/FlowNetCS/FlowNetS/deconv3' 'net2_net2_upsample_flow4to3':'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3' 'net2_predict_conv3':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3' 'net2_deconv2':'FlowNetCSS/FlowNetCS/FlowNetS/deconv2' 'net2_net2_upsample_flow3to2':'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2' 'net2_predict_conv2':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2' # Net S 2 'net3_conv1':'FlowNetCSS/FlowNetS/conv1' 'net3_conv2':'FlowNetCSS/FlowNetS/conv2' 'net3_conv3':'FlowNetCSS/FlowNetS/conv3' 'net3_conv3_1':'FlowNetCSS/FlowNetS/conv3_1' 'net3_conv4':'FlowNetCSS/FlowNetS/conv4' 'net3_conv4_1':'FlowNetCSS/FlowNetS/conv4_1' 'net3_conv5':'FlowNetCSS/FlowNetS/conv5' 'net3_conv5_1':'FlowNetCSS/FlowNetS/conv5_1' 'net3_conv6':'FlowNetCSS/FlowNetS/conv6' 'net3_conv6_1':'FlowNetCSS/FlowNetS/conv6_1' 'net3_predict_conv6':'FlowNetCSS/FlowNetS/predict_flow6' 'net3_deconv5':'FlowNetCSS/FlowNetS/deconv5' 'net3_net3_upsample_flow6to5':'FlowNetCSS/FlowNetS/upsample_flow6to5' 'net3_predict_conv5':'FlowNetCSS/FlowNetS/predict_flow5' 'net3_deconv4':'FlowNetCSS/FlowNetS/deconv4' 'net3_net3_upsample_flow5to4':'FlowNetCSS/FlowNetS/upsample_flow5to4' 'net3_predict_conv4':'FlowNetCSS/FlowNetS/predict_flow4' 'net3_deconv3':'FlowNetCSS/FlowNetS/deconv3' 'net3_net3_upsample_flow4to3':'FlowNetCSS/FlowNetS/upsample_flow4to3' 'net3_predict_conv3':'FlowNetCSS/FlowNetS/predict_flow3' 'net3_deconv2':'FlowNetCSS/FlowNetS/deconv2' 'net3_net3_upsample_flow3to2':'FlowNetCSS/FlowNetS/upsample_flow3to2' 'net3_predict_conv2':'FlowNetCSS/FlowNetS/predict_flow2' } } 'CSS-ft-sd':{'CAFFEMODEL':'../models/FlowNet2-CSS-ft-sd/FlowNet2-CSS-ft-sd_weights.caffemodel.h5' 'DEPLOY_PROTOTXT':'../models/FlowNet2-CSS-ft-sd/FlowNet2-CSS-ft-sd_deploy.prototxt.template' # Mappings between Caffe parameter names and TensorFlow variable names 'PARAMS':{# Net C 'conv1':'FlowNetCSS/FlowNetCS/FlowNetC/conv1' 'conv2':'FlowNetCSS/FlowNetCS/FlowNetC/conv2' 'conv3':'FlowNetCSS/FlowNetCS/FlowNetC/conv3' 'conv_redir':'FlowNetCSS/FlowNetCS/FlowNetC/conv_redir' 'conv3_1':'FlowNetCSS/FlowNetCS/FlowNetC/conv3_1' 'conv4':'FlowNetCSS/FlowNetCS/FlowNetC/conv4' 'conv4_1':'FlowNetCSS/FlowNetCS/FlowNetC/conv4_1' 'conv5':'FlowNetCSS/FlowNetCS/FlowNetC/conv5' 'conv5_1':'FlowNetCSS/FlowNetCS/FlowNetC/conv5_1' 'conv6':'FlowNetCSS/FlowNetCS/FlowNetC/conv6' 'conv6_1':'FlowNetCSS/FlowNetCS/FlowNetC/conv6_1' 'Convolution1':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6' 'deconv5':'FlowNetCSS/FlowNetCS/FlowNetC/deconv5' 'upsample_flow6to5':'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5' 'Convolution2':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5' 'deconv4':'FlowNetCSS/FlowNetCS/FlowNetC/deconv4' 'upsample_flow5to4':'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4' 'Convolution3':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4' 'deconv3':'FlowNetCSS/FlowNetCS/FlowNetC/deconv3' 'upsample_flow4to3':'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3' 'Convolution4':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3' 'deconv2':'FlowNetCSS/FlowNetCS/FlowNetC/deconv2' 'upsample_flow3to2':'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2' 
'Convolution5':'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2' # Net S 1 'net2_conv1':'FlowNetCSS/FlowNetCS/FlowNetS/conv1' 'net2_conv2':'FlowNetCSS/FlowNetCS/FlowNetS/conv2' 'net2_conv3':'FlowNetCSS/FlowNetCS/FlowNetS/conv3' 'net2_conv3_1':'FlowNetCSS/FlowNetCS/FlowNetS/conv3_1' 'net2_conv4':'FlowNetCSS/FlowNetCS/FlowNetS/conv4' 'net2_conv4_1':'FlowNetCSS/FlowNetCS/FlowNetS/conv4_1' 'net2_conv5':'FlowNetCSS/FlowNetCS/FlowNetS/conv5' 'net2_conv5_1':'FlowNetCSS/FlowNetCS/FlowNetS/conv5_1' 'net2_conv6':'FlowNetCSS/FlowNetCS/FlowNetS/conv6' 'net2_conv6_1':'FlowNetCSS/FlowNetCS/FlowNetS/conv6_1' 'net2_predict_conv6':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6' 'net2_deconv5':'FlowNetCSS/FlowNetCS/FlowNetS/deconv5' 'net2_net2_upsample_flow6to5':'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5' 'net2_predict_conv5':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5' 'net2_deconv4':'FlowNetCSS/FlowNetCS/FlowNetS/deconv4' 'net2_net2_upsample_flow5to4':'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4' 'net2_predict_conv4':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4' 'net2_deconv3':'FlowNetCSS/FlowNetCS/FlowNetS/deconv3' 'net2_net2_upsample_flow4to3':'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3' 'net2_predict_conv3':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3' 'net2_deconv2':'FlowNetCSS/FlowNetCS/FlowNetS/deconv2' 'net2_net2_upsample_flow3to2':'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2' 'net2_predict_conv2':'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2' # Net S 2 'net3_conv1':'FlowNetCSS/FlowNetS/conv1' 'net3_conv2':'FlowNetCSS/FlowNetS/conv2' 'net3_conv3':'FlowNetCSS/FlowNetS/conv3' 'net3_conv3_1':'FlowNetCSS/FlowNetS/conv3_1' 'net3_conv4':'FlowNetCSS/FlowNetS/conv4' 'net3_conv4_1':'FlowNetCSS/FlowNetS/conv4_1' 'net3_conv5':'FlowNetCSS/FlowNetS/conv5' 'net3_conv5_1':'FlowNetCSS/FlowNetS/conv5_1' 'net3_conv6':'FlowNetCSS/FlowNetS/conv6' 'net3_conv6_1':'FlowNetCSS/FlowNetS/conv6_1' 'net3_predict_conv6':'FlowNetCSS/FlowNetS/predict_flow6' 'net3_deconv5':'FlowNetCSS/FlowNetS/deconv5' 'net3_net3_upsample_flow6to5':'FlowNetCSS/FlowNetS/upsample_flow6to5' 'net3_predict_conv5':'FlowNetCSS/FlowNetS/predict_flow5' 'net3_deconv4':'FlowNetCSS/FlowNetS/deconv4' 'net3_net3_upsample_flow5to4':'FlowNetCSS/FlowNetS/upsample_flow5to4' 'net3_predict_conv4':'FlowNetCSS/FlowNetS/predict_flow4' 'net3_deconv3':'FlowNetCSS/FlowNetS/deconv3' 'net3_net3_upsample_flow4to3':'FlowNetCSS/FlowNetS/upsample_flow4to3' 'net3_predict_conv3':'FlowNetCSS/FlowNetS/predict_flow3' 'net3_deconv2':'FlowNetCSS/FlowNetS/deconv2' 'net3_net3_upsample_flow3to2':'FlowNetCSS/FlowNetS/upsample_flow3to2' 'net3_predict_conv2':'FlowNetCSS/FlowNetS/predict_flow2' } } 'SD':{'CAFFEMODEL':'../models/FlowNet2-SD/FlowNet2-SD_weights.caffemodel.h5' 'DEPLOY_PROTOTXT':'../models/FlowNet2-SD/FlowNet2-SD_deploy.prototxt.template' # Mappings between Caffe parameter names and TensorFlow variable names 'PARAMS':{'conv0':'FlowNetSD/conv0' 'conv1':'FlowNetSD/conv1' 'conv1_1':'FlowNetSD/conv1_1' 'conv2':'FlowNetSD/conv2' 'conv2_1':'FlowNetSD/conv2_1' 'conv3':'FlowNetSD/conv3' 'conv3_1':'FlowNetSD/conv3_1' 'conv4':'FlowNetSD/conv4' 'conv4_1':'FlowNetSD/conv4_1' 'conv5':'FlowNetSD/conv5' 'conv5_1':'FlowNetSD/conv5_1' 'conv6':'FlowNetSD/conv6' 'conv6_1':'FlowNetSD/conv6_1' 'Convolution1':'FlowNetSD/predict_flow6' 'deconv5':'FlowNetSD/deconv5' 'upsample_flow6to5':'FlowNetSD/upsample_flow6to5' 'interconv5':'FlowNetSD/interconv5' 'Convolution2':'FlowNetSD/predict_flow5' 'deconv4':'FlowNetSD/deconv4' 'upsample_flow5to4':'FlowNetSD/upsample_flow5to4' 
'interconv4':'FlowNetSD/interconv4' 'Convolution3':'FlowNetSD/predict_flow4' 'deconv3':'FlowNetSD/deconv3' 'upsample_flow4to3':'FlowNetSD/upsample_flow4to3' 'interconv3':'FlowNetSD/interconv3' 'Convolution4':'FlowNetSD/predict_flow3' 'deconv2':'FlowNetSD/deconv2' 'upsample_flow3to2':'FlowNetSD/upsample_flow3to2' 'interconv2':'FlowNetSD/interconv2' 'Convolution5':'FlowNetSD/predict_flow2' } } '2':{'CAFFEMODEL':'../models/FlowNet2/FlowNet2_weights.caffemodel.h5' 'DEPLOY_PROTOTXT':'../models/FlowNet2/FlowNet2_deploy.prototxt.template' # Mappings between Caffe parameter names and TensorFlow variable names 'PARAMS':{# Net C 'conv1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv1' 'conv2':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv2' 'conv3':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv3' 'conv_redir':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv_redir' 'conv3_1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv3_1' 'conv4':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv4' 'conv4_1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv4_1' 'conv5':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv5' 'conv5_1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv5_1' 'conv6':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv6' 'conv6_1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv6_1' 'Convolution1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6' 'deconv5':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv5' 'upsample_flow6to5':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5' 'Convolution2':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5' 'deconv4':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv4' 'upsample_flow5to4':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4' 'Convolution3':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4' 'deconv3':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv3' 'upsample_flow4to3':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3' 'Convolution4':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3' 'deconv2':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv2' 'upsample_flow3to2':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2' 'Convolution5':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2' # Net S 1 'net2_conv1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv1' 'net2_conv2':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv2' 'net2_conv3':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv3' 'net2_conv3_1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv3_1' 'net2_conv4':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv4' 'net2_conv4_1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv4_1' 'net2_conv5':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv5' 'net2_conv5_1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv5_1' 'net2_conv6':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv6' 'net2_conv6_1':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv6_1' 'net2_predict_conv6':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6' 'net2_deconv5':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv5' 'net2_net2_upsample_flow6to5':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5' 'net2_predict_conv5':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5' 'net2_deconv4':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv4' 'net2_net2_upsample_flow5to4':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4' 'net2_predict_conv4':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4' 'net2_deconv3':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv3' 'net2_net2_upsample_flow4to3':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3' 'net2_predict_conv3':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3' 
'net2_deconv2':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv2' 'net2_net2_upsample_flow3to2':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2' 'net2_predict_conv2':'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2' # Net S 2 'net3_conv1':'FlowNet2/FlowNetCSS/FlowNetS/conv1' 'net3_conv2':'FlowNet2/FlowNetCSS/FlowNetS/conv2' 'net3_conv3':'FlowNet2/FlowNetCSS/FlowNetS/conv3' 'net3_conv3_1':'FlowNet2/FlowNetCSS/FlowNetS/conv3_1' 'net3_conv4':'FlowNet2/FlowNetCSS/FlowNetS/conv4' 'net3_conv4_1':'FlowNet2/FlowNetCSS/FlowNetS/conv4_1' 'net3_conv5':'FlowNet2/FlowNetCSS/FlowNetS/conv5' 'net3_conv5_1':'FlowNet2/FlowNetCSS/FlowNetS/conv5_1' 'net3_conv6':'FlowNet2/FlowNetCSS/FlowNetS/conv6' 'net3_conv6_1':'FlowNet2/FlowNetCSS/FlowNetS/conv6_1' 'net3_predict_conv6':'FlowNet2/FlowNetCSS/FlowNetS/predict_flow6' 'net3_deconv5':'FlowNet2/FlowNetCSS/FlowNetS/deconv5' 'net3_net3_upsample_flow6to5':'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow6to5' 'net3_predict_conv5':'FlowNet2/FlowNetCSS/FlowNetS/predict_flow5' 'net3_deconv4':'FlowNet2/FlowNetCSS/FlowNetS/deconv4' 'net3_net3_upsample_flow5to4':'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow5to4' 'net3_predict_conv4':'FlowNet2/FlowNetCSS/FlowNetS/predict_flow4' 'net3_deconv3':'FlowNet2/FlowNetCSS/FlowNetS/deconv3' 'net3_net3_upsample_flow4to3':'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow4to3' 'net3_predict_conv3':'FlowNet2/FlowNetCSS/FlowNetS/predict_flow3' 'net3_deconv2':'FlowNet2/FlowNetCSS/FlowNetS/deconv2' 'net3_net3_upsample_flow3to2':'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow3to2' 'net3_predict_conv2':'FlowNet2/FlowNetCSS/FlowNetS/predict_flow2' # Net SD 'netsd_conv0':'FlowNet2/FlowNetSD/conv0' 'netsd_conv1':'FlowNet2/FlowNetSD/conv1' 'netsd_conv1_1':'FlowNet2/FlowNetSD/conv1_1' 'netsd_conv2':'FlowNet2/FlowNetSD/conv2' 'netsd_conv2_1':'FlowNet2/FlowNetSD/conv2_1' 'netsd_conv3':'FlowNet2/FlowNetSD/conv3' 'netsd_conv3_1':'FlowNet2/FlowNetSD/conv3_1' 'netsd_conv4':'FlowNet2/FlowNetSD/conv4' 'netsd_conv4_1':'FlowNet2/FlowNetSD/conv4_1' 'netsd_conv5':'FlowNet2/FlowNetSD/conv5' 'netsd_conv5_1':'FlowNet2/FlowNetSD/conv5_1' 'netsd_conv6':'FlowNet2/FlowNetSD/conv6' 'netsd_conv6_1':'FlowNet2/FlowNetSD/conv6_1' 'netsd_Convolution1':'FlowNet2/FlowNetSD/predict_flow6' 'netsd_deconv5':'FlowNet2/FlowNetSD/deconv5' 'netsd_upsample_flow6to5':'FlowNet2/FlowNetSD/upsample_flow6to5' 'netsd_interconv5':'FlowNet2/FlowNetSD/interconv5' 'netsd_Convolution2':'FlowNet2/FlowNetSD/predict_flow5' 'netsd_deconv4':'FlowNet2/FlowNetSD/deconv4' 'netsd_upsample_flow5to4':'FlowNet2/FlowNetSD/upsample_flow5to4' 'netsd_interconv4':'FlowNet2/FlowNetSD/interconv4' 'netsd_Convolution3':'FlowNet2/FlowNetSD/predict_flow4' 'netsd_deconv3':'FlowNet2/FlowNetSD/deconv3' 'netsd_upsample_flow4to3':'FlowNet2/FlowNetSD/upsample_flow4to3' 'netsd_interconv3':'FlowNet2/FlowNetSD/interconv3' 'netsd_Convolution4':'FlowNet2/FlowNetSD/predict_flow3' 'netsd_deconv2':'FlowNet2/FlowNetSD/deconv2' 'netsd_upsample_flow3to2':'FlowNet2/FlowNetSD/upsample_flow3to2' 'netsd_interconv2':'FlowNet2/FlowNetSD/interconv2' 'netsd_Convolution5':'FlowNet2/FlowNetSD/predict_flow2' # Fusion Net 'fuse_conv0':'FlowNet2/fuse_conv0' 'fuse_conv1':'FlowNet2/fuse_conv1' 'fuse_conv1_1':'FlowNet2/fuse_conv1_1' 'fuse_conv2':'FlowNet2/fuse_conv2' 'fuse_conv2_1':'FlowNet2/fuse_conv2_1' 'fuse__Convolution5':'FlowNet2/predict_flow2' 'fuse_deconv1':'FlowNet2/fuse_deconv1' 'fuse_upsample_flow2to1':'FlowNet2/fuse_upsample_flow2to1' 'fuse_interconv1':'FlowNet2/fuse_interconv1' 'fuse__Convolution6':'FlowNet2/predict_flow1' 
'fuse_deconv0':'FlowNet2/fuse_deconv0' 'fuse_upsample_flow1to0':'FlowNet2/fuse_upsample_flow1to0' 'fuse_interconv0':'FlowNet2/fuse_interconv0' 'fuse__Convolution7':'FlowNet2/predict_flow0' }} }<line_sep>arch=<none><line_sep># Setup variables to be injected into prototxt.template # For now, use the dimensions of the Flying Chair Dataset vars={}<line_sep>vars['TARGET_WIDTH']=vars['ADAPTED_WIDTH']=512<line_sep>vars['TARGET_HEIGHT']=vars['ADAPTED_HEIGHT']=384<line_sep>vars['SCALE_WIDTH']=vars['SCALE_HEIGHT']=1.0<def_stmt>main # Create tempfile to hold prototxt <block_start>tmp=tempfile.NamedTemporaryFile(mode='w' delete=<true>)<line_sep># Parse prototxt and inject `vars` proto=open(arch['DEPLOY_PROTOTXT']).readlines()<for_stmt>line proto<block_start><for_stmt>key,value vars.items()<block_start>tag="$%s$"%key<line_sep>line=line.replace(tag str(value))<block_end>tmp.write(line)<block_end>tmp.flush()<line_sep># Instantiate Caffe Model net=caffe.Net(tmp.name arch['CAFFEMODEL'] caffe.TEST)<line_sep>out={}<for_stmt>(caffe_param tf_param) arch['PARAMS'].items()# Caffe stores weights as (channels_out, channels_in, h, w) # but TF expects (h, w, channels_in, channels_out) <block_start>out[tf_param+'/weights']=net.params[caffe_param][0].data.transpose((2 3 1 0))<line_sep>out[tf_param+'/biases']=net.params[caffe_param][1].data<block_end>np.save(FLAGS.out out)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--out' type=str required=<true> help='Output file path, eg /foo/bar.npy')<line_sep>parser.add_argument('--arch' type=str choices=['C' 'S' 'CS' 'CSS' 'CSS-ft-sd' 'SD' '2'] required=<true> help='Name of the FlowNet arch: C, S, CS, CSS, CSS-ft-sd, SD or 2')<line_sep>FLAGS=parser.parse_args()<line_sep>arch=ARCHS[FLAGS.arch]<line_sep>main()<block_end>
"""This file contains code for use with "Think Bayes", by <NAME>, available from greenteapress.com Copyright 2014 <NAME> License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """<import_from_future_stmt> print_function division<import_stmt>numpy<import_stmt>thinkbayes2<import_stmt>thinkplot<class_stmt>Soccer(thinkbayes2.Suite)<block_start>"""Represents hypotheses about."""<def_stmt>Likelihood self data hypo<block_start>"""Computes the likelihood of the data under the hypothesis. hypo: data: """<line_sep>like=1<line_sep><return>like<block_end><def_stmt>PredRemaining self rem_time score<block_start>"""Plots the predictive distribution for final number of goals. rem_time: remaining time in the game in minutes score: number of goals already scored """<line_sep># TODO: fill this in <block_end><block_end><def_stmt>main <block_start>hypos=numpy.linspace(0 12 201)<line_sep>suite=Soccer(hypos)<line_sep>thinkplot.Pdf(suite label='prior')<line_sep>print('prior mean' suite.Mean())<line_sep>suite.Update(11)<line_sep>thinkplot.Pdf(suite label='posterior 1')<line_sep>print('after one goal' suite.Mean())<line_sep>thinkplot.Show()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
"""Add shared-file-system column to workers Revision ID: 75d4288ae265 Revises: <PASSWORD> Create Date: 2019-10-22 21:05:26.580918 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<line_sep># revision identifiers, used by Alembic. revision='<KEY>'<line_sep>down_revision='d0dd45f443b6'<def_stmt>upgrade <block_start>op.add_column('worker' sa.Column('shared_file_system' sa.Boolean() nullable=<false> server_default='0'))<block_end><def_stmt>downgrade <block_start>op.drop_column('worker' 'shared_file_system')<block_end>
# ------------------------------------------------------------------------------------------------------ # Copyright (c) <NAME>. All rights reserved. # Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information. # ------------------------------------------------------------------------------------------------------ <import_from_stmt>typing Tuple<import_from_stmt>.bee_base BeeBase<class_stmt>OnlookerBee(BeeBase)<block_start><def_stmt>explore self starting_position:Tuple[float float] start_value:float<arrow><none><block_start>""" Explore new food sources starting from the given one Args: starting_position (Tuple[float, float]): position of the food source to start exploring from start_value (float): objective function value at the starting position """<line_sep>self._explore(starting_position start_value)<block_end><block_end>
################################################### # Copyright (c) 2019 # # Authors: @iArunava <<EMAIL>> # # @AvivSham <<EMAIL>> # # # # License: BSD License 3.0 # # # # The Code in this file is distributed for free # # usage and modification with proper linkage back # # to this repository. # ################################################### <import_stmt>torch<import_stmt>torch.nn<as>nn<class_stmt>InitialBlock(nn.Module)<block_start><def_stmt>__init__ self in_channels=3 out_channels=13<block_start>super().__init__()<line_sep>self.maxpool=nn.MaxPool2d(kernel_size=2 stride=2 padding=0)<line_sep>self.conv=nn.Conv2d(in_channels out_channels kernel_size=3 stride=2 padding=1)<line_sep>self.prelu=nn.PReLU(16)<line_sep>self.batchnorm=nn.BatchNorm2d(out_channels)<block_end><def_stmt>forward self x<block_start>main=self.conv(x)<line_sep>main=self.batchnorm(main)<line_sep>side=self.maxpool(x)<line_sep>x=torch.cat((main side) dim=1)<line_sep>x=self.prelu(x)<line_sep><return>x<block_end><block_end>
<import_stmt>logging<import_from_stmt>pylons request response session tmpl_context<as>c<import_from_stmt>pylons.controllers.util abort<line_sep># added for auth <import_from_stmt>authkit.authorize.pylons_adaptors authorize<import_from_stmt>authkit.permissions RemoteUser ValidAuthKitUser UserIn<import_from_stmt>pypesvds.lib.base BaseController render<line_sep>log=logging.getLogger(__name__)<class_stmt>IndexController(BaseController)<block_start>@authorize(ValidAuthKitUser())<def_stmt>index self# Return a rendered template #return render('/index.mako') # or, return a response <block_start><return>render('/pypesvds.mako')<block_end><def_stmt>signout self<block_start><return>render('/signin.html')<block_end><block_end>
<import_stmt>KratosMultiphysics.mpi# importing the MPI-Core, since the MPIExtension directly links to it <import_from_stmt>KratosCoSimulationMPIExtension *<line_sep>
<import_from_stmt>apps.Tests.tests.test_api set_and_get_alg get_alg get_exp<class_stmt>MyAlg<block_start><def_stmt>initExp self butler dummy<block_start>get_exp(butler)<line_sep>set_and_get_alg(butler)<line_sep><return>"return_init_exp"<block_end><def_stmt>getQuery self butler<block_start>get_alg(butler)<line_sep><return>"return_get_query"<block_end><def_stmt>processAnswer self butler<block_start>get_alg(butler)<line_sep><return>"return_process_answer"<block_end><def_stmt>getModel self butler<block_start>get_alg(butler)<line_sep><return>"return_get_model"<block_end><block_end>
<import_from_future_stmt> print_function<import_stmt>sys<line_sep>sys.path.insert(1 "../../../")<import_from_stmt>tests pyunit_utils<import_stmt>h2o<import_from_stmt>h2o.utils.typechecks assert_is_type<import_from_stmt>h2o.frame H2OFrame<def_stmt>h2o_H2OFrame_countmatches <block_start>""" Python API test: h2o.frame.H2OFrame.countmatches(pattern) Copied from pyunit_countmatches.py """<line_sep>python_lists=[["what" "is"] ["going" "on"] ["When" "are"] ["MeetingMeetingon" "gone"]]<line_sep>h2oframe=h2o.H2OFrame(python_obj=python_lists)<line_sep>matches=h2oframe.countmatches(['Wh' 'ing' 'on'])<line_sep>assert_is_type(matches H2OFrame)<assert_stmt>matches.shape<eq>h2oframe.shape "h2o.H2OFrame.countmatches() command is not working."<assert_stmt>matches.any_na_rm() "h2o.H2OFrame.countmatches() command is not working."<line_sep>nomatches=h2oframe.countmatches(['rain' 'pluck'])<assert_stmt><not>(nomatches.any_na_rm()) "h2o.H2OFrame.countmatches() command is not working."<block_end>pyunit_utils.standalone_test(h2o_H2OFrame_countmatches)<line_sep>
<import_stmt>traceback<import_stmt>pytest<import_from_stmt>plenum.test.testing_utils setupTestLogging<line_sep>setupTestLogging()<def_stmt>run test stopOnFail=<true> maxTimes=<none><block_start>count=0<line_sep>passes=0<line_sep>fails=0<while_stmt>maxTimes<is><none><or>count<l>maxTimes<block_start>exitcode=pytest.main(test)<line_sep>count<augadd>1<if_stmt>exitcode<block_start>fails<augadd>1<line_sep>print("Test failed!")<line_sep>traceback.print_exc()<if_stmt>stopOnFail<block_start><break><block_end><block_end><else_stmt><block_start>passes<augadd>1<line_sep>print("Test passed.")<block_end>print("current stats: successes: {} fails: {}".format(passes fails))<block_end><block_end>run("monitoring/test_instance_change_with_Delta.py" stopOnFail=<false> maxTimes=100)<line_sep>
<import_stmt>os<import_from_stmt>marktex.markast.utils ImageTool CleanTool<import_from_stmt>marktex.markast.parser Scanner<import_from_stmt>marktex config<import_from_stmt>marktex.markast.document Document<import_from_stmt>marktex.markast.environment *<import_from_stmt>marktex.markast.line *<import_from_stmt>marktex.markast.token *<import_from_stmt>marktex.markast.xmls *<class_stmt>MarkRaw()<block_start><def_stmt>__init__ self doc:Document input_dir output_dir=<none> texconfig=<none> subdoc=<false><block_start>self.subdoc=subdoc<if_stmt>texconfig<is><none><block_start>texconfig=config<block_end>self.config=texconfig<line_sep>self.input_dir=input_dir<if_stmt>output_dir<is><none><block_start>output_dir="./"<block_end>image_dir=os.path.join(output_dir "images")<line_sep>self.output_dir=output_dir<line_sep>self.image_dir=os.path.abspath(image_dir)<line_sep>self.doc=doc<line_sep>self.has_toc=<false><line_sep>self.contents=[]<block_end><def_stmt>append self item<block_start>self.contents.append(item)<block_end>@staticmethod<def_stmt>convert_file fpath output_dir=<none><block_start>''' :param fpath: path of the markdown file :param image_dir: web and local images referenced in the markdown are renamed with a unified hash during conversion and written to a single directory, which defaults to "./images" under the directory that contains the markdown file :return: '''<line_sep>fpre,_=os.path.split(fpath)<if_stmt>output_dir<is><none><block_start>output_dir=fpre<block_end>os.makedirs(output_dir exist_ok=<true>)<line_sep>doc=Scanner.analyse_file(fpath)<line_sep>input_dir,_=os.path.split(fpath)<line_sep>mark=MarkRaw(doc input_dir=input_dir output_dir=output_dir)<line_sep>mark.convert()<line_sep><return>mark<block_end><def_stmt>convert self<block_start>doc=self.doc<if_stmt>doc.has_toc<and><not>self.subdoc<block_start><pass><block_end><for_stmt>i,envi enumerate(doc.content)<block_start>print(f"\rConverting...{i<times>100/len(doc.content):.3f}%." 
end="\0" flush=<true>)<if_stmt>isinstance(envi Quote)<block_start>envi=self.fromQuote(envi)<block_end><elif_stmt>isinstance(envi Paragraph)<block_start>envi=self.fromParagraph(envi)<block_end><elif_stmt>isinstance(envi Itemize)<block_start>envi=self.fromItemize(envi)<block_end><elif_stmt>isinstance(envi Enumerate)<block_start>envi=self.fromEnumerate(envi)<block_end><elif_stmt>isinstance(envi Formula)<block_start>envi=self.fromFormula(envi)<block_end><elif_stmt>isinstance(envi Code)<block_start>envi=self.fromCode(envi)<block_end><elif_stmt>isinstance(envi Table)<block_start>envi=self.fromTable(envi)<block_end><elif_stmt>isinstance(envi MultiBox)<block_start>envi=self.fromMultiBox(envi)<block_end><else_stmt><block_start><raise>Exception(f"Doc error {envi},{envi.__class__.__name__}")<block_end>self.append(envi)<block_end>print(f"\rConverting...100%.")<block_end><def_stmt>fromToken self s:Token<block_start><return>s.string<block_end><def_stmt>fromBold self s:Bold<block_start><return>s.string<block_end><def_stmt>fromItalic self s:Italic<block_start><return>s.string<block_end><def_stmt>fromDeleteLine self s:DeleteLine<block_start><return>s.string<block_end><def_stmt>fromUnderLine self s:UnderLine<block_start><return>s.string<block_end><def_stmt>fromInCode self s:InCode<block_start><return>s.string<block_end><def_stmt>fromInFormula self s:InFormula<block_start><return>s.string<block_end><def_stmt>fromHyperlink self s:Hyperlink<block_start>desc,link=s.desc s.link<line_sep><return>f" {link},{desc} "<block_end><def_stmt>fromFootnote self s:Footnote<block_start><return>s.label<block_end><def_stmt>fromInImage self s:InImage<block_start><return>""<block_end><def_stmt>fromSection self s:Section<block_start>level,content=s.level s.content<line_sep>content=self.fromTokenLine(s.content)<line_sep><return>f"{level}-{content}"<block_end><def_stmt>fromImage self s:Image# cur_dir = os.getcwd() #markdown的相对路径,一定是针对那个markdown的, # os.chdir(self.input_dir) <block_start>link=s.link<line_sep>link=ImageTool.verify(link self.image_dir self.input_dir)<line_sep># os.chdir(cur_dir) <if_stmt>config.give_rele_path<block_start>link=os.path.relpath(link self.output_dir)<block_end>link=link.replace("\\" "/")<line_sep><return>f" img,{link} "<block_end><def_stmt>fromXML self token:XML<block_start><return>token.content<block_end><def_stmt>fromTokenLine self s:TokenLine<block_start>tokens=s.tokens<line_sep>strs=[]<for_stmt>token tokens<block_start><if_stmt>isinstance(token Bold)<block_start>token=self.fromBold(token)<block_end><elif_stmt>isinstance(token XML)<block_start>token=self.fromXML(token)<block_end><elif_stmt>isinstance(token Italic)<block_start>token=self.fromItalic(token)<block_end><elif_stmt>isinstance(token DeleteLine)<block_start>token=self.fromDeleteLine(token)<block_end><elif_stmt>isinstance(token Footnote)<block_start>token=self.fromFootnote(token)<block_end><elif_stmt>isinstance(token UnderLine)<block_start>token=self.fromUnderLine(token)<block_end><elif_stmt>isinstance(token InCode)<block_start>token=self.fromInCode(token)<block_end><elif_stmt>isinstance(token InFormula)<block_start>token=self.fromInFormula(token)<block_end><elif_stmt>isinstance(token Hyperlink)<block_start>token=self.fromHyperlink(token)<block_end><elif_stmt>isinstance(token InImage)<block_start>token=self.fromInImage(token)<block_end><elif_stmt>isinstance(token Token)<block_start>token=self.fromToken(token)<block_end><else_stmt><block_start><raise>Exception(f"TokenLine error 
{token},{token.__class__.__name__}")<block_end>strs.append(token)<block_end><return>"".join(strs)<block_end><def_stmt>fromRawLine self s:RawLine<block_start><return>s.s<block_end><def_stmt>fromNewLine self s:NewLine<block_start><return>"\n"<block_end><def_stmt>fromParagraph self s:Paragraph<block_start>t=[]<line_sep># Section / NewLine / TokenLine / Image <for_stmt>line s.buffer<block_start><if_stmt>isinstance(line Section)<block_start>line=self.fromSection(line)<block_end><elif_stmt>isinstance(line NewLine)<block_start>line=self.fromNewLine(line)<block_end><elif_stmt>isinstance(line TokenLine)<block_start>line=self.fromTokenLine(line)<block_end><elif_stmt>isinstance(line Image)<block_start>line=self.fromImage(line)<block_end><else_stmt><block_start><raise>Exception(f"Paragraph line error {line} is {line.__class__}")<block_end>t.append(line)<block_end><return>"\n".join(t)<block_end><def_stmt>fromQuote self s:Quote<block_start>content=s.doc.content<line_sep>q=[]<for_stmt>envi content<block_start><if_stmt>isinstance(envi Paragraph)<block_start>envi=self.fromParagraph(envi)<block_end><elif_stmt>isinstance(envi Table)<block_start>envi=self.fromTable(envi)<block_end><elif_stmt>isinstance(envi Itemize)<block_start>envi=self.fromItemize(envi)<block_end><elif_stmt>isinstance(envi Enumerate)<block_start>envi=self.fromEnumerate(envi)<block_end><elif_stmt>isinstance(envi Formula)<block_start>envi=self.fromFormula(envi)<block_end><elif_stmt>isinstance(envi Code)<block_start>envi=self.fromCode(envi)<block_end><else_stmt><block_start><raise>Exception(f"Quote doc error:{envi},{envi.__class__.__name__}")<block_end>q.append(envi)<block_end><return>"\n".join(q)<block_end><def_stmt>fromItemize self s:Itemize<block_start>tokens=[self.fromTokenLine(c)<for>c s.buffer]<line_sep>ui=[]<for_stmt>line tokens<block_start>ui.append(f" - {line}")<block_end><return>"\n".join(ui)<block_end><def_stmt>fromMultiBox self s:MultiBox<block_start>cl=[]<for_stmt>[ct s] s.lines<block_start>cl.append(f"{ct} {s}")<block_end><return>"\n".join(cl)<block_end><def_stmt>fromEnumerate self s:Enumerate<block_start>tokens=[self.fromTokenLine(c)<for>c s.buffer]<line_sep>ui=[]<for_stmt>i,line enumerate(tokens)<block_start>ui.append(f"{i},{line}")<block_end><return>"\n".join(ui)<block_end><def_stmt>fromFormula self s:Formula<block_start>code=[self.fromRawLine(c)<for>c s.formula]<line_sep>data=[]<for_stmt>line code<block_start>data.append(line)<block_end><return>"\n".join(data)<block_end><def_stmt>fromCode self s:Code<block_start>code=[self.fromRawLine(c)<for>c s.code]<line_sep>c=[]<for_stmt>line code<block_start>c.append(line)<block_end><return>"\n".join(c)<block_end><def_stmt>fromTable self s:Table<block_start>t=[]<for_stmt>i,row enumerate(s.tables)<block_start>row=[self.fromTokenLine(c)<for>c row]<line_sep>t.append(" & ".join(row))<block_end><return>"\n".join(t)<block_end><def_stmt>generate_txt self filename=<none><block_start>''' Only the file name is needed; the save path was already determined when the converter was created :param filename: :return: '''<line_sep>filepath=os.path.join(self.output_dir f"{filename}.txt")<with_stmt>open(f"{filepath}" "w" encoding="utf-8")<as>w<block_start>w.writelines(self.contents)<block_end>print(f"File is output in {os.path.abspath(filepath)} and images are in {os.path.abspath(self.image_dir)}.")<block_end><block_end>
# Copyright 2019 ZTE corporation. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 <import_from_stmt>typing Any Mapping NamedTuple Optional Sequence<import_from_stmt>itertools zip_longest<import_from_stmt>. utilities<import_from_stmt>.models.data_format DataFormat<def_stmt>get_tensor_by_fuzzy_name graph name<block_start><if_stmt>':'<in>name<block_start>tensor=graph.get_tensor_by_name(name)<block_end><else_stmt><block_start>tensor=graph.get_operation_by_name(name).outputs[0]<block_end><return>tensor<block_end><class_stmt>Config(NamedTuple)<block_start>input_names:Optional[Sequence[str]]<line_sep>data_formats:Sequence[Optional[DataFormat]]<line_sep>output_names:Optional[Sequence[str]]<line_sep>@staticmethod<def_stmt>from_json value:Mapping[str Any]<arrow>'Config'<block_start><return>Config(input_names=value.get('input_names') data_formats=utilities.get_data_formats(value.get('input_formats')) output_names=value.get('output_names'))<block_end>@staticmethod<def_stmt>from_env env:Mapping[str str]<arrow>'Config'<block_start><return>Config(input_names=utilities.split_by(env.get('INPUT_NAMES') ',') data_formats=utilities.get_data_formats(utilities.split_by(env.get('INPUT_FORMATS') ',')) output_names=utilities.split_by(env.get('OUTPUT_NAMES') ','))<block_end><def_stmt>get_input_tensors_from_graph self graph<block_start><if_stmt>self.input_names<is><none><block_start>input_tensors=[operation.outputs[0]<for>operation graph.get_operations()<if>operation.type<eq>'Placeholder']<block_end><else_stmt><block_start>input_tensors=[get_tensor_by_fuzzy_name(graph name)<for>name self.input_names]<block_end><return>input_tensors<block_end><def_stmt>get_output_tensors_from_graph self graph<block_start><if_stmt>self.output_names<is><none><block_start>output_tensors=[output_tensor<for>operation graph.get_operations()<if>operation.type<not><in>['Assign' 'Const' 'Identity' 'IsVariableInitialized' 'NoOp' 'Placeholder' 'SaveV2' 'VarIsInitializedOp']<for>output_tensor operation.outputs<if><not>output_tensor.consumers()]<block_end><else_stmt><block_start>output_tensors=[get_tensor_by_fuzzy_name(graph name)<for>name self.output_names]<block_end><return>output_tensors<block_end><block_end><def_stmt>get_inputs graph config<block_start><return>zip_longest(config.get_input_tensors_from_graph(graph) config.data_formats)<block_end>
<import_from_stmt>datetime datetime timezone<import_from_stmt>itertools cycle<import_from_stmt>.lame LAME<import_from_stmt>.mt MT<def_stmt>filetime_to_dt timestamp:int<arrow>datetime<block_start><return>datetime.fromtimestamp(timestamp<floordiv>100000000 timezone.utc)<block_end><def_stmt>bytes_to_bitstring data:bytes<arrow>str<block_start><return>"".join(bin(x)[2:].zfill(8)<for>x data)<block_end><class_stmt>BitStream<block_start><def_stmt>__init__ self data:bytes<arrow><none><block_start>self.data=bytes_to_bitstring(data)<block_end><def_stmt>get_bits self num:int<arrow>int<block_start>out=int(self.data[:num] 2)<line_sep>self.data=self.data[num:]<line_sep><return>out<block_end><block_end><def_stmt>xor data:bytes key:bytes<arrow>bytes<block_start><return>bytes(a^b<for>a,b zip(data cycle(key)))<block_end><def_stmt>decrypt_lame data:bytes seed:int<arrow>bytes<block_start>lame=LAME()<line_sep>lame.srand(seed)<line_sep><return>bytes([x^lame.get_next()<for>x data])<block_end><def_stmt>decrypt_mt data:bytes seed:int<arrow>bytes<block_start>key=MT(seed).get_bytes(len(data))<line_sep><return>xor(data key)<block_end><def_stmt>crc_data data:bytes<arrow>int<block_start><if_stmt>len(data)<eq>0<block_start><return>0<block_end>dwKey_ECX=0<line_sep>dwKey_ESI=1<for_stmt>b data<block_start>dwKey_ESI=(b+dwKey_ESI)%0xFFF1<line_sep>dwKey_ECX=(dwKey_ECX+dwKey_ESI)%0xFFF1<block_end><return>(dwKey_ECX<lshift>0x10)+dwKey_ESI<block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_from_stmt>.base BaseTestDataset<line_sep>__all__=['BaseTestDataset']<line_sep>
<import_stmt>argparse<import_stmt>sys<import_from_stmt>pygments highlight<import_from_stmt>pygments.formatters Terminal256Formatter<import_from_stmt>fluent.pygments.lexer FluentLexer<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('path')<line_sep>args=parser.parse_args()<with_stmt>open(args.path)<as>fh<block_start>code=fh.read()<block_end>highlight(code FluentLexer() Terminal256Formatter() sys.stdout)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>unittest<import_from_stmt>pycozmo.image_encoder ImageEncoder str_to_image ImageDecoder image_to_str<import_from_stmt>pycozmo.util hex_dump hex_load<import_from_stmt>pycozmo.tests.image_encoder_fixtures FIXTURES<class_stmt>TestImageEncoder(unittest.TestCase)<block_start>@staticmethod<def_stmt>_encode sim:str<arrow>str<block_start>im=str_to_image(sim)<line_sep>encoder=ImageEncoder(im)<line_sep>buf=encoder.encode()<line_sep>res=hex_dump(buf)<line_sep><return>res<block_end><def_stmt>assertSameImage self sim:str seq:str<arrow><none><block_start>buffer=hex_load(seq)<line_sep>decoder=ImageDecoder(buffer)<line_sep>decoder.decode()<line_sep>actual=image_to_str(decoder.image)<line_sep>self.assertEqual(sim.strip() actual.strip())<block_end><def_stmt>test_blank self<block_start>fixture=FIXTURES["blank"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_fill_screen self<block_start>fixture=FIXTURES["fill_screen"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_fill_screen2 self<block_start>fixture=FIXTURES["fill_screen2"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_top_left self<block_start>fixture=FIXTURES["top_left"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_top_left_5 self<block_start>fixture=FIXTURES["top_left_5"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_top_left_1_8 self<block_start>fixture=FIXTURES["top_left_1_8"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_top_left_line self<block_start>fixture=FIXTURES["top_left_line"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_top_line self<block_start>fixture=FIXTURES["top_line"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_bottom_line self<block_start>fixture=FIXTURES["bottom_line"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_left_line self<block_start>fixture=FIXTURES["left_line"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_right_line 
self<block_start>fixture=FIXTURES["right_line"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_columns self<block_start>fixture=FIXTURES["columns"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_rect self<block_start>fixture=FIXTURES["rect"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_rect2 self<block_start>fixture=FIXTURES["rect2"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_rect3 self<block_start>fixture=FIXTURES["rect3"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_rect4 self<block_start>fixture=FIXTURES["rect4"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_diagonal self<block_start>fixture=FIXTURES["diagonal"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_diagonal2 self<block_start>fixture=FIXTURES["diagonal2"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_blocks self<block_start>fixture=FIXTURES["blocks"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_pycozmo self<block_start>fixture=FIXTURES["pycozmo"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_chessboard_tl self<block_start>fixture=FIXTURES["chessboard_tl"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_chessboard_bl self<block_start>fixture=FIXTURES["chessboard_bl"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_chessboard_tr self<block_start>fixture=FIXTURES["chessboard_tr"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_chessboard_br 
self<block_start>fixture=FIXTURES["chessboard_br"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_chessboard2_tl self<block_start>fixture=FIXTURES["chessboard2_tl"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_chessboard2_bl self<block_start>fixture=FIXTURES["chessboard2_bl"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_chessboard2_tr self<block_start>fixture=FIXTURES["chessboard2_tr"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><def_stmt>test_chessboard2_br self<block_start>fixture=FIXTURES["chessboard2_br"]<line_sep>sim=fixture["image"]<line_sep>expected=fixture["alt_seq"]<line_sep>actual=self._encode(sim)<line_sep>self.assertEqual(expected actual)<line_sep>self.assertSameImage(sim actual)<block_end><block_end>
# Copyright 2021 Toyota Research Institute. All rights reserved. <import_stmt>logging<import_stmt>os<import_from_stmt>tabulate tabulate<import_from_stmt>termcolor colored<import_from_stmt>detectron2.utils.events get_event_storage<line_sep>LOG=logging.getLogger(__name__)<def_stmt>get_inference_output_dir dataset_name is_last=<false> use_tta=<false> root_output_dir=<none><block_start><if_stmt><not>root_output_dir<block_start>root_output_dir=os.getcwd()# hydra <block_end>step=get_event_storage().iter<if_stmt>is_last<block_start>result_dirname="final"<block_end><else_stmt><block_start>result_dirname=f"step{step:07d}"<block_end><if_stmt>use_tta<block_start>result_dirname<augadd>"-tta"<block_end>output_dir=os.path.join(root_output_dir "inference" result_dirname dataset_name)<line_sep><return>output_dir<block_end><def_stmt>print_test_results test_results<block_start>metric_table=tabulate([(k v)<for>k,v test_results.items()] headers=["metric" "value"] tablefmt="pipe" numalign="left" stralign="left" )<line_sep>LOG.info("Test results:\n"+colored(metric_table "cyan"))<block_end>
<import_from_stmt>keanu.vertex.vertex_casting cast_tensor_arg_to_double cast_tensor_arg_to_integer cast_tensor_arg_to_boolean <import_from_stmt>keanu.vertex cast_to_boolean_vertex cast_to_integer_vertex cast_to_double_vertex<import_from_stmt>keanu.vartypes primitive_types numpy_types pandas_types <import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>typing Union Callable<import_from_stmt>keanu.vertex Gaussian<import_from_stmt>keanu.vertex.base Double Boolean Integer<line_sep>@pytest.mark.parametrize("value" [1 1. <true>])@pytest.mark.parametrize("cast_fn, expected_type" [(cast_tensor_arg_to_double float) (cast_tensor_arg_to_integer int) (cast_tensor_arg_to_boolean bool) (cast_to_boolean_vertex Boolean) (cast_to_integer_vertex Integer) (cast_to_double_vertex Double)])<def_stmt>test_scalar_cast value:primitive_types cast_fn:Callable expected_type:type<arrow><none><block_start><assert_stmt>type(cast_fn(value))<eq>expected_type<block_end>@pytest.mark.parametrize("value" [np.array([1]) np.array([1.]) np.array([<true>]) np.array([[[1]]]) np.array([[1 4] [5 38]]) pd.DataFrame(data=[1]) pd.DataFrame(data=[1.]) pd.DataFrame(data=[<true>]) pd.DataFrame(data=[[1 2] [4 5]]) pd.Series(data=[1]) pd.Series(data=[1.]) pd.Series(data=[<true>]) pd.Series(data=[1 3 4]) ])@pytest.mark.parametrize("cast_fn, expected_type" [(cast_tensor_arg_to_double np.floating) (cast_tensor_arg_to_integer np.integer) (cast_tensor_arg_to_boolean np.bool_)])<def_stmt>test_nonscalar_tensor_cast value:Union[numpy_types pandas_types] cast_fn:Callable expected_type:type<arrow><none><block_start><assert_stmt>cast_fn(value).dtype<eq>expected_type<block_end>@pytest.mark.parametrize("value" [np.array([1]) np.array([1.]) np.array([<true>]) np.array([[[1]]]) np.array([[1 4] [5 38]]) pd.DataFrame(data=[1]) pd.DataFrame(data=[1.]) pd.DataFrame(data=[<true>]) pd.DataFrame(data=[[1 2] [4 5]]) pd.Series(data=[1]) pd.Series(data=[1.]) pd.Series(data=[<true>]) pd.Series(data=[1 3 4]) ])@pytest.mark.parametrize("cast_fn, expected_type" [(cast_to_double_vertex Double) (cast_to_integer_vertex Integer) (cast_to_boolean_vertex Boolean)])<def_stmt>test_nonscalar_vertex_cast value:Union[numpy_types pandas_types] cast_fn:Callable expected_type:type<arrow><none><block_start><assert_stmt>type(cast_fn(value))<eq>expected_type<block_end>@pytest.mark.parametrize("cast_fn, cast_to_type" [(cast_tensor_arg_to_double float) (cast_tensor_arg_to_integer int) (cast_tensor_arg_to_boolean bool)])<def_stmt>test_cant_pass_vertex_to_cast_tensor_arg cast_fn:Callable cast_to_type:type<arrow><none><block_start>gaussian=Gaussian(0. 1.)<with_stmt>pytest.raises(TypeError match=r"Cannot cast {} to {}".format(type(gaussian) cast_to_type))<block_start>cast_fn(gaussian)<block_end><block_end>
<import_stmt>os<import_stmt>sys<line_sep>input_path=sys.argv[1].rstrip(os.sep)<line_sep>output_path=sys.argv[2]<line_sep>filenames=os.listdir(input_path)<with_stmt>open(output_path 'w')<as>f<block_start><for_stmt>i,filename enumerate(filenames)<block_start>filepath=os.sep.join([input_path filename])<line_sep>label=filename[:filename.rfind('.')].split('_')[1]<line_sep>line='{}\t{}\t{}\n'.format(i label filepath)<line_sep>f.write(line)<block_end><block_end>
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the OMR score reader."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>absl.testing absltest<import_stmt>librosa<import_from_stmt>protobuf music_pb2<import_from_stmt>moonlight conversions<import_from_stmt>moonlight.protobuf musicscore_pb2<import_from_stmt>moonlight.score reader<line_sep># pylint: disable=invalid-name Glyph=musicscore_pb2.Glyph<line_sep>Note=music_pb2.NoteSequence.Note<line_sep>Point=musicscore_pb2.Point<class_stmt>ReaderTest(absltest.TestCase)<block_start><def_stmt>testTreble_simple self<block_start>staff=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=50) Point(x=100 y=50)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=0) ])<line_sep>notes=conversions.page_to_notesequence(reader.ScoreReader().read_page(musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[staff])])))<line_sep>self.assertEqual(notes music_pb2.NoteSequence(notes=[Note(pitch=librosa.note_to_midi('B4') start_time=0 end_time=1)]))<block_end><def_stmt>testBass_simple self<block_start>staff=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=50) Point(x=100 y=50)] glyph=[Glyph(type=Glyph.CLEF_BASS x=1 y_position=reader.BASS_CLEF_EXPECTED_Y) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=0) ])<line_sep>notes=conversions.page_to_notesequence(reader.ScoreReader().read_page(musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[staff])])))<line_sep>self.assertEqual(notes music_pb2.NoteSequence(notes=[Note(pitch=librosa.note_to_midi('D3') start_time=0 end_time=1)]))<block_end><def_stmt>testTreble_accidentals self<block_start>staff_1=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=50) Point(x=100 y=50)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=-6) Glyph(type=Glyph.FLAT x=16 y_position=-4) Glyph(type=Glyph.NOTEHEAD_FILLED x=20 y_position=-4) Glyph(type=Glyph.NOTEHEAD_FILLED x=30 y_position=-2) Glyph(type=Glyph.NOTEHEAD_FILLED x=40 y_position=-4) ])<line_sep>staff_2=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=150) Point(x=100 y=150)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=-6) Glyph(type=Glyph.NOTEHEAD_FILLED x=20 y_position=-4) Glyph(type=Glyph.NOTEHEAD_FILLED x=30 y_position=-2) Glyph(type=Glyph.SHARP x=35 y_position=-2) Glyph(type=Glyph.NOTEHEAD_FILLED x=40 y_position=-2) Glyph(type=Glyph.NATURAL x=45 y_position=-2) Glyph(type=Glyph.NOTEHEAD_FILLED x=50 y_position=-2) ])<line_sep>notes=conversions.page_to_notesequence(reader.ScoreReader().read_page(musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[staff_1]) musicscore_pb2.StaffSystem(staff=[staff_2])])))<line_sep>self.assertEqual(notes 
music_pb2.NoteSequence(notes=[# First staff. Note(pitch=librosa.note_to_midi('C4') start_time=0 end_time=1) Note(pitch=librosa.note_to_midi('Eb4') start_time=1 end_time=2) Note(pitch=librosa.note_to_midi('G4') start_time=2 end_time=3) Note(pitch=librosa.note_to_midi('Eb4') start_time=3 end_time=4) # Second staff. Note(pitch=librosa.note_to_midi('C4') start_time=4 end_time=5) Note(pitch=librosa.note_to_midi('E4') start_time=5 end_time=6) Note(pitch=librosa.note_to_midi('G4') start_time=6 end_time=7) Note(pitch=librosa.note_to_midi('G#4') start_time=7 end_time=8) Note(pitch=librosa.note_to_midi('G4') start_time=8 end_time=9) ]))<block_end><def_stmt>testChords self<block_start>stem_1=musicscore_pb2.LineSegment(start=Point(x=20 y=10) end=Point(x=20 y=70))<line_sep>stem_2=musicscore_pb2.LineSegment(start=Point(x=50 y=10) end=Point(x=50 y=70))<line_sep>staff=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=50) Point(x=100 y=50)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) # Chord of 2 notes. Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=-4 stem=stem_1) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=-1 stem=stem_1) # Note not attached to a stem. Glyph(type=Glyph.NOTEHEAD_FILLED x=30 y_position=3) # Chord of 3 notes. Glyph(type=Glyph.NOTEHEAD_FILLED x=40 y_position=0 stem=stem_2) Glyph(type=Glyph.NOTEHEAD_FILLED x=60 y_position=2 stem=stem_2) Glyph(type=Glyph.NOTEHEAD_FILLED x=60 y_position=4 stem=stem_2) ])<line_sep>notes=conversions.page_to_notesequence(reader.ScoreReader().read_page(musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[staff])])))<line_sep>self.assertEqual(notes music_pb2.NoteSequence(notes=[# First chord. Note(pitch=librosa.note_to_midi('E4') start_time=0 end_time=1) Note(pitch=librosa.note_to_midi('A4') start_time=0 end_time=1) # Note without a stem. Note(pitch=librosa.note_to_midi('E5') start_time=1 end_time=2) # Second chord. Note(pitch=librosa.note_to_midi('B4') start_time=2 end_time=3) Note(pitch=librosa.note_to_midi('D5') start_time=2 end_time=3) Note(pitch=librosa.note_to_midi('F5') start_time=2 end_time=3) ]))<block_end><def_stmt>testBeams self<block_start>beam_1=musicscore_pb2.LineSegment(start=Point(x=10 y=20) end=Point(x=40 y=20))<line_sep>beam_2=musicscore_pb2.LineSegment(start=Point(x=70 y=40) end=Point(x=90 y=40))<line_sep>beam_3=musicscore_pb2.LineSegment(start=Point(x=70 y=60) end=Point(x=90 y=60))<line_sep>staff=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=50) Point(x=100 y=50)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) # 2 eighth notes. Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=-4 beam=[beam_1]) Glyph(type=Glyph.NOTEHEAD_FILLED x=40 y_position=-1 beam=[beam_1]) # 1 quarter note. Glyph(type=Glyph.NOTEHEAD_FILLED x=50 y_position=0) # 2 sixteenth notes. 
Glyph(type=Glyph.NOTEHEAD_FILLED x=60 y_position=-2 beam=[beam_2 beam_3]) Glyph(type=Glyph.NOTEHEAD_FILLED x=90 y_position=2 beam=[beam_2 beam_3]) ])<line_sep>notes=conversions.page_to_notesequence(reader.ScoreReader().read_page(musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[staff])])))<line_sep>self.assertEqual(notes music_pb2.NoteSequence(notes=[Note(pitch=librosa.note_to_midi('E4') start_time=0 end_time=0.5) Note(pitch=librosa.note_to_midi('A4') start_time=0.5 end_time=1) Note(pitch=librosa.note_to_midi('B4') start_time=1 end_time=2) Note(pitch=librosa.note_to_midi('G4') start_time=2 end_time=2.25) Note(pitch=librosa.note_to_midi('D5') start_time=2.25 end_time=2.5) ]))<block_end><def_stmt>testAllNoteheadTypes self<block_start>staff=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=50) Point(x=100 y=50)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=-6) Glyph(type=Glyph.NOTEHEAD_EMPTY x=10 y_position=-6) Glyph(type=Glyph.NOTEHEAD_WHOLE x=10 y_position=-6) ])<line_sep>notes=conversions.page_to_notesequence(reader.ScoreReader().read_page(musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[staff])])))<line_sep>self.assertEqual(notes music_pb2.NoteSequence(notes=[Note(pitch=librosa.note_to_midi('C4') start_time=0 end_time=1) Note(pitch=librosa.note_to_midi('C4') start_time=1 end_time=3) Note(pitch=librosa.note_to_midi('C4') start_time=3 end_time=7) ]))<block_end><def_stmt>testStaffSystems self# 2 staff systems on separate pages, each with 2 staves, and no bars. <block_start>system_1_staff_1=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=50) Point(x=100 y=50)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=-6) Glyph(type=Glyph.NOTEHEAD_FILLED x=50 y_position=-2) ])<line_sep>system_1_staff_2=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=150) Point(x=100 y=150)] glyph=[Glyph(type=Glyph.CLEF_BASS x=2 y_position=reader.BASS_CLEF_EXPECTED_Y) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=0) Glyph(type=Glyph.NOTEHEAD_FILLED x=40 y_position=2) # Played after the second note in the first staff, although it is to # the left of it. Glyph(type=Glyph.NOTEHEAD_FILLED x=45 y_position=4) ])<line_sep>system_2_staff_1=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=250) Point(x=100 y=250)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) Glyph(type=Glyph.REST_QUARTER x=20 y_position=0) Glyph(type=Glyph.NOTEHEAD_FILLED x=50 y_position=-2) ])<line_sep>system_2_staff_2=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=250) Point(x=100 y=250)] glyph=[Glyph(type=Glyph.CLEF_BASS x=2 y_position=reader.BASS_CLEF_EXPECTED_Y) Glyph(type=Glyph.NOTEHEAD_FILLED x=10 y_position=0) Glyph(type=Glyph.NOTEHEAD_FILLED x=40 y_position=2) ])<line_sep>notes=conversions.score_to_notesequence(reader.ScoreReader()(musicscore_pb2.Score(page=[musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[system_1_staff_1 system_1_staff_2]) ]) musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[system_2_staff_1 system_2_staff_2]) ]) ]) ))<line_sep>self.assertEqual(notes music_pb2.NoteSequence(notes=[# System 1, staff 1. Note(pitch=librosa.note_to_midi('C4') start_time=0 end_time=1) Note(pitch=librosa.note_to_midi('G4') start_time=1 end_time=2) # System 1, staff 2. 
Note(pitch=librosa.note_to_midi('D3') start_time=0 end_time=1) Note(pitch=librosa.note_to_midi('F3') start_time=1 end_time=2) Note(pitch=librosa.note_to_midi('A3') start_time=2 end_time=3) # System 2, staff 1. # Quarter rest. Note(pitch=librosa.note_to_midi('G4') start_time=4 end_time=5) # System 2, staff 2. Note(pitch=librosa.note_to_midi('D3') start_time=3 end_time=4) Note(pitch=librosa.note_to_midi('F3') start_time=4 end_time=5) ]))<block_end><def_stmt>testMeasures self# 2 staves in the same staff system with multiple bars. <block_start>staff_1=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=50) Point(x=300 y=50)] glyph=[Glyph(type=Glyph.CLEF_TREBLE x=1 y_position=reader.TREBLE_CLEF_EXPECTED_Y) # Key signature. Glyph(type=Glyph.SHARP x=10 y_position=+4) Glyph(type=Glyph.NOTEHEAD_FILLED x=20 y_position=-2) # Accidental. Glyph(type=Glyph.FLAT x=40 y_position=-1) Glyph(type=Glyph.NOTEHEAD_FILLED x=50 y_position=-1) # Second bar. Glyph(type=Glyph.NOTEHEAD_FILLED x=120 y_position=0) Glyph(type=Glyph.NOTEHEAD_FILLED x=180 y_position=+4) # Third bar. # Accidental not propagated to this note. Glyph(type=Glyph.NOTEHEAD_FILLED x=220 y_position=-1) ])<line_sep>staff_2=musicscore_pb2.Staff(staffline_distance=10 center_line=[Point(x=0 y=150) Point(x=300 y=150)] glyph=[Glyph(type=Glyph.CLEF_BASS x=1 y_position=reader.BASS_CLEF_EXPECTED_Y) # Key signature. Glyph(type=Glyph.FLAT x=15 y_position=-2) Glyph(type=Glyph.NOTEHEAD_FILLED x=20 y_position=-2) Glyph(type=Glyph.NOTEHEAD_FILLED x=50 y_position=+2) # Second bar. Glyph(type=Glyph.NOTEHEAD_FILLED x=150 y_position=-2) # Third bar. Glyph(type=Glyph.REST_QUARTER x=220 y_position=0) Glyph(type=Glyph.NOTEHEAD_FILLED x=280 y_position=-2) ])<line_sep>staff_system=musicscore_pb2.StaffSystem(staff=[staff_1 staff_2] bar=[_bar(0) _bar(100) _bar(200) _bar(300)])<line_sep>notes=conversions.page_to_notesequence(reader.ScoreReader().read_page(musicscore_pb2.Page(system=[staff_system])))<line_sep>self.assertEqual(notes music_pb2.NoteSequence(notes=[# Staff 1, bar 1. Note(pitch=librosa.note_to_midi('G4') start_time=0 end_time=1) Note(pitch=librosa.note_to_midi('Ab4') start_time=1 end_time=2) # Staff 1, bar 2. Note(pitch=librosa.note_to_midi('B4') start_time=2 end_time=3) Note(pitch=librosa.note_to_midi('F#5') start_time=3 end_time=4) # Staff 1, bar 3. Note(pitch=librosa.note_to_midi('A4') start_time=4 end_time=5) # Staff 2, bar 1. Note(pitch=librosa.note_to_midi('Bb2') start_time=0 end_time=1) Note(pitch=librosa.note_to_midi('F3') start_time=1 end_time=2) # Staff 2, bar 2. Note(pitch=librosa.note_to_midi('Bb2') start_time=2 end_time=3) # Staff 2, bar 3. Note(pitch=librosa.note_to_midi('Bb2') start_time=5 end_time=6) ]))<block_end><def_stmt>testKeySignatures self# One staff per system, two systems. <block_start>staff_1=musicscore_pb2.Staff(glyph=[Glyph(type=Glyph.CLEF_TREBLE x=5 y_position=reader.TREBLE_CLEF_EXPECTED_Y) # D major key signature. Glyph(type=Glyph.SHARP x=15 y_position=+4) Glyph(type=Glyph.SHARP x=25 y_position=+1) # Accidental which cannot be interpreted as part of the key # signature. Glyph(type=Glyph.SHARP x=35 y_position=+2) Glyph(type=Glyph.NOTEHEAD_FILLED x=45 y_position=+2) # D#5 Glyph(type=Glyph.NOTEHEAD_EMPTY x=55 y_position=+1) # C#5 Glyph(type=Glyph.NOTEHEAD_FILLED x=65 y_position=-3) # F#4 # New measure. The key signature should be retained. Glyph(type=Glyph.NOTEHEAD_EMPTY x=105 y_position=-3) # F#4 Glyph(type=Glyph.NOTEHEAD_FILLED x=125 y_position=+1) # C#5 # Accidental is not retained. 
Glyph(type=Glyph.NOTEHEAD_FILLED x=145 y_position=+2) # D5 ])<line_sep>staff_2=musicscore_pb2.Staff(glyph=[Glyph(type=Glyph.CLEF_TREBLE x=5 y_position=reader.TREBLE_CLEF_EXPECTED_Y) # No key signature on this line. No accidentals. Glyph(type=Glyph.NOTEHEAD_EMPTY x=25 y_position=-3) # F4 Glyph(type=Glyph.NOTEHEAD_EMPTY x=45 y_position=+1) # C5 ])<line_sep>notes=conversions.page_to_notesequence(reader.ScoreReader().read_page(musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(staff=[staff_1] bar=[_bar(0) _bar(100) _bar(200)]) musicscore_pb2.StaffSystem(staff=[staff_2]) ])))<line_sep>self.assertEqual(notes music_pb2.NoteSequence(notes=[# First measure. Note(pitch=librosa.note_to_midi('D#5') start_time=0 end_time=1) Note(pitch=librosa.note_to_midi('C#5') start_time=1 end_time=3) Note(pitch=librosa.note_to_midi('F#4') start_time=3 end_time=4) # Second measure. Note(pitch=librosa.note_to_midi('F#4') start_time=4 end_time=6) Note(pitch=librosa.note_to_midi('C#5') start_time=6 end_time=7) Note(pitch=librosa.note_to_midi('D5') start_time=7 end_time=8) # Third measure on a new line, with no key signature. Note(pitch=librosa.note_to_midi('F4') start_time=8 end_time=10) Note(pitch=librosa.note_to_midi('C5') start_time=10 end_time=12) ]))<block_end><block_end><def_stmt>_bar x<block_start><return>musicscore_pb2.StaffSystem.Bar(x=x type=musicscore_pb2.StaffSystem.Bar.STANDARD_BAR)<block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
"""Metadata templates """<line_sep>
# coding: utf-8 """ Union-Find (Disjoint Set) https://en.wikipedia.org/wiki/Disjoint-set_data_structure """<class_stmt>QuickFindUnionFind<block_start><def_stmt>__init__ self union_pairs=()<block_start>self.num_groups=0<line_sep>self.auto_increment_id=1<line_sep>self.element_groups={# element: group_id, }<for_stmt>p,q union_pairs<block_start>self.union(p q)<block_end><block_end><def_stmt>__len__ self<block_start><return>self.num_groups<block_end># O(1) <def_stmt>make_group self element# Initially, every element is in its own group which contains only itself. <block_start>group_id=self.element_groups.get(element)<if_stmt>group_id<is><none># Group id could be arbitrary as long as each group has an unique one. <block_start>group_id=self.auto_increment_id<line_sep>self.element_groups[element]=group_id<line_sep>self.num_groups<augadd>1<line_sep>self.auto_increment_id<augadd>1<block_end><return>group_id<block_end># O(1) <def_stmt>find self p<block_start><try_stmt><block_start><return>self.element_groups[p]<block_end><except_stmt>KeyError# We implicitly create a new group for the new element `p`. <block_start><return>self.make_group(p)<block_end><block_end># O(n) <def_stmt>union self p q<block_start>p_group_id=self.find(p)<line_sep>q_group_id=self.find(q)<if_stmt>p_group_id<ne>q_group_id<block_start><for_stmt>element,group_id self.element_groups.items()# Merge p into q. <block_start><if_stmt>group_id<eq>p_group_id<block_start>self.element_groups[element]=q_group_id<block_end><block_end>self.num_groups<augsub>1<block_end><block_end># O(1) <def_stmt>is_connected self p q<block_start><return>self.find(p)<eq>self.find(q)<block_end><block_end>
# fabfile for update and deploy # it's necessary to specify a host <import_from_stmt>fabric.api *<import_from_stmt>fabric.contrib.project rsync_project<import_from_stmt>fabric.contrib.files upload_template<import_from_stmt>setup_config *<line_sep>PACKAGES=('rsync' 'puppet')<def_stmt>update_sources <block_start>rsync_project("~" "../kite" exclude=[".git/" "*.swp" "*.pyc"])<block_end><def_stmt>provision <block_start>cmd="""FACTER_server_name="%s" && export FACTER_server_name && FACTER_user_home_dir=$HOME && export FACTER_user_home_dir && puppet apply $HOME/kite/manifests/server.pp --modulepath=$HOME/kite/puppet_modules"""%env.hosts[0]<line_sep>sudo(cmd)<block_end><def_stmt>update <block_start>update_sources()<line_sep>provision()<block_end><def_stmt>setup <block_start>sudo("apt-get update")<for_stmt>package PACKAGES<block_start>sudo('apt-get -y install %s'%package)<block_end>update()<block_end><def_stmt>tighten <block_start>local("ssh-copy-id %s@%s"%(env.user env.hosts[0]))<line_sep>sudo("puppet apply $HOME/kite/manifests/sshd.pp --modulepath=$HOME/kite/puppet_modules")<block_end>
<import_from_future_stmt> absolute_import division<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_array_almost_equal assert_allclose<import_from_stmt>pytest raises<import_from_stmt>fatiando.seismic conv<def_stmt>test_impulse_response <block_start>""" conv.convolutional_model returns the source wavelet as the result when the model is a centred spike, considering the dimension of the model equal to that of the source wavelet """<line_sep>w=conv.rickerwave(30. 2.e-3)<line_sep>rc_test=np.zeros((w.shape[0] 20))<line_sep>rc_test[w.shape[0]<floordiv>2 :]=1.<line_sep>spike=conv.convolutional_model(rc_test 30. conv.rickerwave dt=2.e-3)<for_stmt>j range(0 rc_test.shape[1])<block_start>assert_array_almost_equal(spike[: j] w 9)<block_end><block_end><def_stmt>test_rc_shorter_than_wavelet <block_start>""" When the reflectivity series is shorter than the wavelength, the spike response is observed like in the opposite case. The difference is that the ricker wavelet (or other symmetric wavelet) is shorter in the result. """<line_sep>w=conv.rickerwave(30. 2.e-3)<line_sep>rc_test=np.zeros((21 20))<line_sep>rc_test[rc_test.shape[0]<floordiv>2 :]=1<line_sep>spike=conv.convolutional_model(rc_test 30. conv.rickerwave dt=2.e-3)<for_stmt>j range(0 rc_test.shape[1])<block_start>wmin=(w.shape[0]-rc_test.shape[0])<floordiv>2<line_sep>wmax=-(w.shape[0]-rc_test.shape[0])<floordiv>2<line_sep>assert_array_almost_equal(spike[: j] w[wmin:wmax] 9)<block_end><block_end><def_stmt>test_reflectivity_wrong_dimensions <block_start>""" Velocity and density are provided as matrix or vector to reflectivity calculation, so they must have the same dimension. """<line_sep>vel=np.ones((10 10))<line_sep>dens=np.ones((11 11))<line_sep>raises(AssertionError conv.reflectivity vel dens)<line_sep>vel=np.ones((10))<line_sep>dens=np.ones((11))<line_sep>raises(AssertionError conv.reflectivity vel dens)<block_end><def_stmt>test_depth_2_time_wrong_dimensions <block_start>""" Velocity and property are provided as matrix to depth-to-time conversion, so they must have the same dimension. """<line_sep>vel=np.ones((10 10))<line_sep>dens=np.ones((11 11))<line_sep>dt=2.e-3<line_sep>dz=1.<line_sep>raises(AssertionError conv.depth_2_time vel dens dt dz)<block_end><def_stmt>test_ricker <block_start>""" conv.rickerwave inputs must satisfy the condition for sampling and stability, otherwise this results in an error. """<line_sep>f=50.<line_sep>dt=2.e-3<line_sep>raises(AssertionError conv.rickerwave f dt)<block_end>
<import_stmt>pandas<as>pd<import_from_stmt>.compute_drawdowns compute_drawdowns<import_from_stmt>.compute_sharpe_ratio compute_sharpe_ratio<def_stmt>performance_summary history portfolio_id<block_start>"""This function computes common performance metrics for a time-series of portfolio equity states. For instance, the function will compute the Sharpe ratio, the maximum drawdown, the drawdown duration, the annualized returns and the average number of positions held at each moment in the time-series. Parameters ---------- history: A portfolio history object. The portfolio history object containing the equity and positional information for a time-series corresponding to the period of performance of a trading algorithm. portfolio_id: String. A unique identifier assigned to the portfolio. """<line_sep>equity=history.equity<line_sep>n=len(equity)<line_sep>m=pd.DataFrame(index=[portfolio_id])<line_sep>m.loc[portfolio_id "total equity"]=equity.iloc[-1]<line_sep>m.loc[portfolio_id "max equity"]=equity.max()<line_sep>m.loc[portfolio_id "max drawdown"],m.loc[portfolio_id "max duration"]=(compute_drawdowns(equity))<line_sep>m.loc[portfolio_id "sharpe ratio"]=(compute_sharpe_ratio(history.returns))<line_sep>m.loc[portfolio_id "avg positions"]=history.n_positions.mean()<line_sep>m.loc[portfolio_id "annualized returns"]=((1.+history.returns).prod()<power>(252./n))<line_sep><return>m<block_end>
<import_from_stmt>onnx_tf.handlers.backend_handler BackendHandler<import_from_stmt>onnx_tf.handlers.handler onnx_op<import_from_stmt>onnx_tf.handlers.handler partial_support<import_from_stmt>onnx_tf.handlers.handler ps_description<import_from_stmt>.conv_mixin ConvMixin<line_sep>@onnx_op("ConvTranspose")@partial_support(<true>)@ps_description("ConvTranspose with dilations != 1, or "+"transposed convolution for 4D or higher "+"are not supported in Tensorflow.")<class_stmt>ConvTranspose(ConvMixin BackendHandler)<block_start>@classmethod<def_stmt>version_1 cls node **kwargs<block_start><return>cls.conv(node kwargs["tensor_dict"] transpose=<true>)<block_end>@classmethod<def_stmt>version_11 cls node **kwargs<block_start><return>cls.conv(node kwargs["tensor_dict"] transpose=<true>)<block_end><block_end>
''' Created on 22.09.2014 @author: markusfasel '''<import_from_stmt>PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics SinglePanelPlot GraphicsObject Style Frame<import_from_stmt>PWGJE.EMCALJetTasks.Tracks.analysis.correction.TriggeredSpectrumScaler TriggeredSpectrumScaler<import_from_stmt>PWGJE.EMCALJetTasks.Tracks.analysis.correction.SpectrumCombiner SpectrumCombiner<import_from_stmt>ROOT kRed kBlack kBlue<class_stmt>PlotScaledTriggeredToMinBias(SinglePanelPlot)<block_start>''' classdocs '''<def_stmt>__init__ self minbiasspectrum triggeredSpectrum<block_start>''' Constructor '''<line_sep>SinglePanelPlot.__init__(self)<line_sep>self.__minbiasSpectrum=GraphicsObject(minbiasspectrum Style(kRed 25))<line_sep>triggeredSpectrumMaker=TriggeredSpectrumScaler(minbiasspectrum triggeredSpectrum)<line_sep>self.__triggeredSpectrum=GraphicsObject(triggeredSpectrumMaker.GetScaledTriggeredSpectrum() Style(kBlue 24))<line_sep>combinedSpectrumMaker=SpectrumCombiner(minbiasspectrum self.__triggeredSpectrum.GetData())<line_sep>self.__combinedSpectrum=GraphicsObject(combinedSpectrumMaker.MakeCombinedSpectrum(50.) Style(kBlack 20))<line_sep>self.__labeltext=<none><block_end><def_stmt>SetLabel self label<block_start>self.__labeltext=label<block_end><def_stmt>Create self<block_start>self._OpenCanvas("triggerSpectrumScalerPlot" "Compare scaled trigger to minbias")<line_sep>pad=self._GetFramedPad()<line_sep>#pad.GetPad().SetLogx() pad.GetPad().SetLogy()<line_sep>frame=Frame("framecomp" 0.1 100 1e-10 2)<line_sep>frame.SetXtitle("p_{t} (GeV/c)")<line_sep>frame.SetYtitle("1/N_{ev} dN/dp_{t} ((GeV/c)^{-1})")<line_sep>pad.DrawFrame(frame)<line_sep>pad.DrawGraphicsObject(self.__combinedSpectrum <true> "Combined")<line_sep>pad.DrawGraphicsObject(self.__minbiasSpectrum <true> "MinBias")<line_sep>pad.DrawGraphicsObject(self.__triggeredSpectrum <true> "Triggered")<line_sep>pad.CreateLegend(0.55 0.75 0.89 0.89)<if_stmt>self.__labeltext<block_start>pad.CreateLabel(0.15 0.15 0.45 0.2 self.__labeltext)<block_end><block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>pandas<import_from_stmt>sklearn.preprocessing MinMaxScaler StandardScaler Binarizer<line_sep>#handle the missing values data=pandas.DataFrame([[4. 45. 984.] [np.nan np.nan 5.] [94. 23. 55.] ])<line_sep>#print original data print(data)<line_sep>#fill the missing values with the constant 0.1 print(data.fillna(0.1))<line_sep>#fill the missing values with the mean print(data.fillna(data.mean()))<line_sep>#Data normalization data1=pandas.DataFrame([[58. 1. 43.] [10. 200. 65.] [20. 75. 7.]])<line_sep>#scaling with min-max scaler scaled_values=MinMaxScaler(feature_range=(0 1))<line_sep>results=scaled_values.fit(data1).transform(data1)<line_sep>print(results)<line_sep>#scaling with the standard scaler stand_scalar=StandardScaler().fit(data1)<line_sep>results=stand_scalar.transform(data1)<line_sep>print(results)<line_sep>#normalization using binarization results=Binarizer(threshold=50.0).fit(data1).transform(data1)<line_sep>print(results)<line_sep>
# Copyright 2018 - Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>oslo_config cfg<import_from_stmt>oslo_log log<as>logging<import_from_stmt>mistral.rpc base<as>rpc<import_from_stmt>mistral.service base<as>service_base<line_sep>LOG=logging.getLogger(__name__)<line_sep>CONF=cfg.CONF<class_stmt>SchedulerServer(service_base.MistralService)<block_start>"""Scheduler server. Manages scheduler life-cycle and gets registered as an RPC endpoint to process scheduler specific calls. """<def_stmt>__init__ self scheduler setup_profiler=<true><block_start>super(SchedulerServer self).__init__('scheduler_group' setup_profiler)<line_sep>self.scheduler=scheduler<line_sep>self._rpc_server=<none><block_end><def_stmt>start self<block_start>super(SchedulerServer self).start()<line_sep>self._rpc_server=rpc.get_rpc_server_driver()(cfg.CONF.engine)<line_sep>self._rpc_server.register_endpoint(self)<line_sep>self._rpc_server.run()<line_sep>self._notify_started('Scheduler server started.')<block_end><def_stmt>stop self graceful=<false><block_start>super(SchedulerServer self).stop()<if_stmt>self._rpc_server<block_start>self._rpc_server.stop(graceful)<block_end><block_end><def_stmt>schedule self rpc_ctx job<block_start>"""Receives requests over RPC to schedule delayed calls. :param rpc_ctx: RPC request context. :param job: Scheduler job. """<line_sep>LOG.info("Received RPC request 'schedule'[job=%s]" job)<line_sep><return>self.scheduler.schedule(job allow_redistribute=<false>)<block_end><block_end>
""" Fomu board definitions (mapping of I/O pins, clock, etc.) """<import_from_stmt>migen *<import_from_stmt>migen.build.generic_platform *<import_from_stmt>migen.build.lattice LatticePlatform<class_stmt>FomuPvtPlatform(LatticePlatform)<block_start>""" Based on https://github.com/litex-hub/litex-boards/blob/master/litex_boards/partner/platforms/fomu_pvt.py """<line_sep>_io=[('clk48' 0 Pins('F4') IOStandard('LVCMOS33')) ('user_led_n' 0 Pins('A5') IOStandard('LVCMOS33')) ('rgb_led' 0 Subsignal('r' Pins('C5')) Subsignal('g' Pins('B5')) Subsignal('b' Pins('A5')) IOStandard('LVCMOS33')) ('user_touch_n' 0 Pins('E4') IOStandard('LVCMOS33')) ('user_touch_n' 1 Pins('D5') IOStandard('LVCMOS33')) ('user_touch_n' 2 Pins('E5') IOStandard('LVCMOS33')) ('user_touch_n' 3 Pins('F5') IOStandard('LVCMOS33')) ('usb' 0 Subsignal('d_p' Pins('A1')) Subsignal('d_n' Pins('A2')) Subsignal('pullup' Pins('A4')) IOStandard('LVCMOS33'))]<line_sep>_connectors=[('touch_pins' 'E4 D5 E5 F5')]<line_sep>default_clk_name='clk48'<line_sep>default_clk_period=1e9/48e6<def_stmt>__init__ self<block_start>LatticePlatform.__init__(self 'ice40-up5k-uwg30' self._io self._connectors toolchain='icestorm')<block_end><def_stmt>create_programmer self<block_start><return>IceStormProgrammer()<block_end><block_end><class_stmt>FomuHackerPlatform(LatticePlatform)<block_start>""" Based on https://github.com/litex-hub/litex-boards/blob/master/litex_boards/partner/platforms/fomu_hacker.py """<line_sep>_io=[('clk48' 0 Pins('F5') IOStandard('LVCMOS33')) ('user_led_n' 0 Pins('A5') IOStandard('LVCMOS33')) ('rgb_led' 0 Subsignal('r' Pins('C5')) Subsignal('g' Pins('B5')) Subsignal('b' Pins('A5')) IOStandard('LVCMOS33')) ('user_touch_n' 0 Pins('F4') IOStandard('LVCMOS33')) ('user_touch_n' 1 Pins('E5') IOStandard('LVCMOS33')) ('user_touch_n' 2 Pins('E4') IOStandard('LVCMOS33')) ('user_touch_n' 3 Pins('F2') IOStandard('LVCMOS33')) ('usb' 0 Subsignal('d_p' Pins('A4')) Subsignal('d_n' Pins('A2')) Subsignal('pullup' Pins('D5')) IOStandard('LVCMOS33'))]<line_sep>_connectors=[('touch_pins' 'F4 E5 E4 F2')]<line_sep>default_clk_name='clk48'<line_sep>default_clk_period=1e9/48e6<def_stmt>__init__ self<block_start>LatticePlatform.__init__(self 'ice40-up5k-uwg30' self._io self._connectors toolchain='icestorm')<block_end><def_stmt>create_programmer self<block_start><return>IceStormProgrammer()<block_end><block_end><class_stmt>FomuEvt2Platform(LatticePlatform)<block_start>""" Based on https://github.com/litex-hub/litex-boards/blob/master/litex_boards/partner/platforms/fomu_evt.py """<line_sep>_io=[('clk48' 0 Pins('44') IOStandard('LVCMOS33')) ('user_led_n' 0 Pins('41') IOStandard('LVCMOS33')) ('rgb_led' 0 Subsignal('r' Pins('40')) Subsignal('g' Pins('39')) Subsignal('b' Pins('41')) IOStandard('LVCMOS33')) ('user_touch_n' 0 Pins('48') IOStandard('LVCMOS33')) ('user_touch_n' 1 Pins('47') IOStandard('LVCMOS33')) ('user_touch_n' 2 Pins('46') IOStandard('LVCMOS33')) ('user_touch_n' 3 Pins('45') IOStandard('LVCMOS33')) ('usb' 0 Subsignal('d_p' Pins('34')) Subsignal('d_n' Pins('37')) Subsignal('pullup' Pins('35')) Subsignal('pulldown' Pins('36')) IOStandard('LVCMOS33'))]<line_sep>_connectors=[('touch_pins' '48 47 46 45')]<line_sep>default_clk_name='clk48'<line_sep>default_clk_period=1e9/48e6<def_stmt>__init__ self<block_start>LatticePlatform.__init__(self 'ice40-up5k-sg48' self._io self._connectors toolchain='icestorm')<block_end><def_stmt>create_programmer self<block_start><return>IceStormProgrammer()<block_end><block_end>FomuEvt3Platform=FomuEvt2Platform<line_sep>
<import_stmt>pytest<import_stmt>pykka<import_from_stmt>pykka ActorDeadError ActorProxy<class_stmt>NestedObject<block_start><pass><block_end>@pytest.fixture(scope="module")<def_stmt>actor_class runtime<block_start><class_stmt>ActorForProxying(runtime.actor_class)<block_start>a_nested_object=pykka.traversable(NestedObject())<line_sep>a_class_attr="class_attr"<def_stmt>__init__ self<block_start>super(runtime.actor_class self).__init__()<line_sep>self.an_instance_attr="an_instance_attr"<block_end><def_stmt>a_method self<block_start><pass><block_end><block_end><return>ActorForProxying<block_end>@pytest.fixture<def_stmt>proxy actor_class<block_start>proxy=ActorProxy(actor_class.start())<line_sep><yield>proxy<line_sep>proxy.stop()<block_end><def_stmt>test_eq_to_self proxy<block_start><assert_stmt>proxy<eq>proxy<block_end><def_stmt>test_is_hashable proxy<block_start><assert_stmt>hash(proxy)<eq>hash(proxy)<block_end><def_stmt>test_eq_to_another_proxy_for_same_actor_and_attr_path proxy<block_start>proxy2=proxy.actor_ref.proxy()<assert_stmt>proxy<eq>proxy2<block_end><def_stmt>test_not_eq_to_proxy_with_different_attr_path proxy<block_start><assert_stmt>proxy<ne>proxy.a_nested_object<block_end><def_stmt>test_repr_is_wrapped_in_lt_and_gt proxy<block_start>result=repr(proxy)<assert_stmt>result.startswith("<")<assert_stmt>result.endswith(">")<block_end><def_stmt>test_repr_reveals_that_this_is_a_proxy proxy<block_start><assert_stmt>"ActorProxy"<in>repr(proxy)<block_end><def_stmt>test_repr_contains_actor_class_name proxy<block_start><assert_stmt>"ActorForProxying"<in>repr(proxy)<block_end><def_stmt>test_repr_contains_actor_urn proxy<block_start><assert_stmt>proxy.actor_ref.actor_urn<in>repr(proxy)<block_end><def_stmt>test_repr_contains_attr_path proxy<block_start><assert_stmt>"a_nested_object"<in>repr(proxy.a_nested_object)<block_end><def_stmt>test_str_contains_actor_class_name proxy<block_start><assert_stmt>"ActorForProxying"<in>str(proxy)<block_end><def_stmt>test_str_contains_actor_urn proxy<block_start><assert_stmt>proxy.actor_ref.actor_urn<in>str(proxy)<block_end><def_stmt>test_dir_on_proxy_lists_attributes_of_the_actor proxy<block_start>result=dir(proxy)<assert_stmt>"a_class_attr"<in>result<assert_stmt>"an_instance_attr"<in>result<assert_stmt>"a_method"<in>result<block_end><def_stmt>test_dir_on_proxy_lists_private_attributes_of_the_proxy proxy<block_start>result=dir(proxy)<assert_stmt>"__class__"<in>result<assert_stmt>"__dict__"<in>result<assert_stmt>"__getattr__"<in>result<assert_stmt>"__setattr__"<in>result<block_end><def_stmt>test_refs_proxy_method_returns_a_proxy actor_class<block_start>proxy_from_ref_proxy=actor_class.start().proxy()<assert_stmt>isinstance(proxy_from_ref_proxy ActorProxy)<line_sep>proxy_from_ref_proxy.stop().get()<block_end><def_stmt>test_proxy_constructor_raises_exception_if_actor_is_dead actor_class<block_start>actor_ref=actor_class.start()<line_sep>actor_ref.stop()<with_stmt>pytest.raises(ActorDeadError)<as>exc_info<block_start>ActorProxy(actor_ref)<block_end><assert_stmt>str(exc_info.value)<eq>f"{actor_ref} not found"<block_end><def_stmt>test_actor_ref_may_be_retrieved_from_proxy_if_actor_is_dead proxy<block_start>proxy.actor_ref.stop()<assert_stmt><not>proxy.actor_ref.is_alive()<block_end><def_stmt>test_actor_proxy_does_not_expose_proxy_to_self runtime log_handler<block_start><class_stmt>Actor(runtime.actor_class)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.self_proxy=self.actor_ref.proxy()<line_sep>self.foo="bar"<block_end><block_end>actor_ref=Actor.start()<try_stmt><block_start>proxy=actor_ref.proxy()<assert_stmt>proxy.foo.get()<eq>"bar"<with_stmt>pytest.raises(AttributeError match="has no attribute 'self_proxy'")<block_start>proxy.self_proxy.foo.get()<block_end><block_end><finally_stmt><block_start>actor_ref.stop()<block_end>log_handler.wait_for_message("warning")<with_stmt>log_handler.lock<block_start><assert_stmt>len(log_handler.messages["warning"])<eq>2<line_sep>log_record=log_handler.messages["warning"][0]<block_end><assert_stmt>("attribute 'self_proxy' is a proxy to itself. "<concat>"Consider making it private by renaming it to '_self_proxy'.")<in>log_record.getMessage()<block_end>
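A stand-alone sketch of the proxy behaviour these tests cover (assumes only that pykka is installed; the Adder actor is illustrative and not part of the test module):

import pykka

class Adder(pykka.ThreadingActor):
    def add(self, a, b):
        return a + b

ref = Adder.start()
proxy = ref.proxy()        # an ActorProxy, like the fixture above
future = proxy.add(1, 2)   # proxied calls return futures
print(future.get())        # 3
ref.stop()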
# (c) 2005 <NAME> # This module is part of the Python Paste Project and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # This code was written with funding by http://prometheusresearch.com """ Authentication via Multiple Methods In some environments, the choice of authentication method to be used depends upon the environment and is not "fixed". This middleware allows N authentication methods to be registered along with a goodness function which determines which method should be used. The following example demonstrates how to use both form and digest authentication in a server stack; by default it uses form-based authentication unless ``*authmeth=digest`` is specified as a query argument. >>> from paste.auth import form, cookie, digest, multi >>> from paste.wsgilib import dump_environ >>> from paste.httpserver import serve >>> >>> multi = multi.MultiHandler(dump_environ) >>> def authfunc(environ, realm, user): ... return digest.digest_password(realm, user, user) >>> multi.add_method('digest', digest.middleware, "Test Realm", authfunc) >>> multi.set_query_argument('digest') >>> >>> def authfunc(environ, username, password): ... return username == password >>> multi.add_method('form', form.middleware, authfunc) >>> multi.set_default('form') >>> serve(cookie.middleware(multi)) serving on... """<class_stmt>MultiHandler(object)<block_start>""" Multiple Authentication Handler This middleware provides two orthogonal facilities: - a manner to register any number of authentication middlewares - a mechanism to register predicates which cause one of the registered middlewares to be used depending upon the request If none of the predicates returns True, then the application is invoked directly without middleware """<def_stmt>__init__ self application<block_start>self.application=application<line_sep>self.default=application<line_sep>self.binding={}<line_sep>self.predicate=[]<block_end><def_stmt>add_method self name factory *args **kwargs<block_start>self.binding[name]=factory(self.application *args **kwargs)<block_end><def_stmt>add_predicate self name checker<block_start>self.predicate.append((checker self.binding[name]))<block_end><def_stmt>set_default self name<block_start>""" set default authentication method """<line_sep>self.default=self.binding[name]<block_end><def_stmt>set_query_argument self name key='*authmeth' value=<none><block_start>""" choose authentication method based on a query argument """<line_sep>lookfor="%s=%s"%(key value<or>name)<line_sep>self.add_predicate(name <lambda>environ:lookfor<in>environ.get('QUERY_STRING' ''))<block_end><def_stmt>__call__ self environ start_response<block_start><for_stmt>(checker binding) self.predicate<block_start><if_stmt>checker(environ)<block_start><return>binding(environ start_response)<block_end><block_end><return>self.default(environ start_response)<block_end><block_end>middleware=MultiHandler<line_sep>__all__=['MultiHandler']<if_stmt>"__main__"<eq>__name__<block_start><import_stmt>doctest<line_sep>doctest.testmod(optionflags=doctest.ELLIPSIS)<block_end>
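A hedged sketch of wiring a custom predicate into MultiHandler (the passthrough middleware and header check are illustrative; add_method/add_predicate/set_default come from the class above):

from paste.auth.multi import MultiHandler

def simple_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

def passthrough(app, label):            # stand-in for form/digest middleware factories
    def wrapped(environ, start_response):
        environ['auth.method'] = label  # mark which "method" handled the request
        return app(environ, start_response)
    return wrapped

multi = MultiHandler(simple_app)
multi.add_method('header', passthrough, 'header')
multi.add_predicate('header', lambda environ: 'HTTP_AUTHORIZATION' in environ)
multi.set_default('header')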
<class_stmt>AllennlpReaderToDict<block_start><def_stmt>__init__ self **kwargs<block_start>self.kwargs=kwargs<block_end><def_stmt>__call__ self *args_ignore **kwargs_ignore<block_start>kwargs=self.kwargs<line_sep>reader=kwargs.get("reader")<line_sep>file_path=kwargs.get("file_path")<line_sep>n_samples=kwargs.get("n_samples")<line_sep>instances=reader._read(file_path)<line_sep>n_samples=n_samples<or>len(instances)<line_sep>d=dict()<line_sep>i=0<for_stmt>instance instances<block_start><if_stmt>n_samples<and>i<ge>n_samples<block_start><break><block_end>d[i]=instance.fields<line_sep>i<augadd>1<block_end><return>d<block_end><block_end>
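A self-contained sketch of driving the callable above (the stub reader and instance are hypothetical stand-ins for an AllenNLP DatasetReader and Instance; only _read and .fields are needed):

class StubInstance:
    def __init__(self, i):
        self.fields = {"text": "sample %d" % i}

class StubReader:
    def _read(self, file_path):
        return [StubInstance(i) for i in range(5)]

to_dict = AllennlpReaderToDict(reader=StubReader(), file_path="ignored.txt", n_samples=2)
print(to_dict())   # {0: {'text': 'sample 0'}, 1: {'text': 'sample 1'}}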
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># First register all the hit matching algorithms, then specify preferred ones at end. # The stub windows used have been optimized for PU200 events # We use by default the tight tuning # # Definition is presented here: # # https://indico.cern.ch/event/681577/#4-update-of-the-track-trigger # # This script is adapted to the latest Tilted Tracker geometry to date (tracker T5) # This version was tested on CMSSW 10_0_0_pre1 # TTStubAlgorithm_official_Phase2TrackerDigi_=cms.ESProducer("TTStubAlgorithm_official_Phase2TrackerDigi_" zMatchingPS=cms.bool(<true>) zMatching2S=cms.bool(<true>) #Number of tilted rings per side in barrel layers (for tilted geom only) NTiltedRings=cms.vdouble(0. 12. 12. 12. 0. 0. 0.) # PU200 tight tuning, optimized for muons BarrelCut=cms.vdouble(0 2 2.5 3.5 4.5 5.5 7) TiltedBarrelCutSet=cms.VPSet(cms.PSet(TiltedCut=cms.vdouble(0)) cms.PSet(TiltedCut=cms.vdouble(0 3 3 2.5 3 3 2.5 2.5 2 1.5 1.5 1 1)) cms.PSet(TiltedCut=cms.vdouble(0 3.5 3 3 3 3 2.5 2.5 3 3 2.5 2.5 2.5)) cms.PSet(TiltedCut=cms.vdouble(0 4 4 4 3.5 3.5 3.5 3.5 3 3 3 3 3)) ) EndcapCutSet=cms.VPSet(cms.PSet(EndcapCut=cms.vdouble(0)) cms.PSet(EndcapCut=cms.vdouble(0 1 2.5 2.5 3 2.5 3 3.5 4 4 4.5 3.5 4 4.5 5 5.5)) cms.PSet(EndcapCut=cms.vdouble(0 0.5 2.5 2.5 3 2.5 3 3 3.5 3.5 4 3.5 3.5 4 4.5 5)) cms.PSet(EndcapCut=cms.vdouble(0 1 3 3 2.5 3.5 3.5 3.5 4 3.5 3.5 4 4.5)) cms.PSet(EndcapCut=cms.vdouble(0 1 2.5 3 2.5 3.5 3 3 3.5 3.5 3.5 4 4)) cms.PSet(EndcapCut=cms.vdouble(0 0.5 1.5 3 2.5 3.5 3 3 3.5 4 3.5 4 3.5)) )# PU200 loose tuning, optimized for robustness (uncomment if you want to use it) #BarrelCut = cms.vdouble( 0, 2.0, 3, 4.5, 6, 6.5, 7.0), #TiltedBarrelCutSet = cms.VPSet( # cms.PSet( TiltedCut = cms.vdouble( 0 ) ), # cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3., 2.5, 3., 3., 2.5, 2.5, 2., 1.5, 1.5, 1, 1) ), # cms.PSet( TiltedCut = cms.vdouble( 0, 4., 4, 4, 4, 4., 4., 4.5, 5, 4., 3.5, 3.5, 3) ), # cms.PSet( TiltedCut = cms.vdouble( 0, 5, 5, 5, 5, 5, 5, 5.5, 5, 5, 5.5, 5.5, 5.5) ), # ), #EndcapCutSet = cms.VPSet( # cms.PSet( EndcapCut = cms.vdouble( 0 ) ), # cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 2.5, 3.5, 5.5, 5.5, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ), # cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 5, 6, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ), # cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3., 4.5, 6., 6.5, 6.5, 6.5, 7, 7, 7, 7, 7) ), # cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 3.5, 6., 6.5, 6.5, 6.5, 6.5, 7, 7, 7, 7) ), # cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3., 4.5, 6.5, 6.5, 7, 7, 7, 7, 7, 7) ), # ) )<line_sep># CBC3 hit matching algorithm TTStubAlgorithm_cbc3_Phase2TrackerDigi_=cms.ESProducer("TTStubAlgorithm_cbc3_Phase2TrackerDigi_" zMatchingPS=cms.bool(<true>) zMatching2S=cms.bool(<true>) )<line_sep># Set the preferred hit matching algorithms. # We prefer the global geometry algorithm for now in order not to break # anything. Override with process.TTStubAlgorithm_PSimHit_ = ..., # etc. in your configuration. TTStubAlgorithm_Phase2TrackerDigi_=cms.ESPrefer("TTStubAlgorithm_official_Phase2TrackerDigi_")<line_sep>
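A hedged example of tweaking these defaults from a CMSSW job configuration (the cfi path and process name are assumptions; the parameter names come from the ESProducer above):

import FWCore.ParameterSet.Config as cms

process = cms.Process("STUBS")
# path assumed; this register file typically ships as a _cfi under L1Trigger/TrackTrigger
process.load("L1Trigger.TrackTrigger.TTStubAlgorithmRegister_cfi")
# e.g. relax the z-matching requirement for PS modules
process.TTStubAlgorithm_official_Phase2TrackerDigi_.zMatchingPS = cms.bool(False)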
<import_from_stmt>.base Variable Record Array<import_from_stmt>.client SdoClient<import_from_stmt>.server SdoServer<import_from_stmt>.exceptions SdoAbortedError SdoCommunicationError<line_sep>
# -*- coding:utf-8 -*- #-- # Copyright (c) 2012-2014 Net-ng. # All rights reserved. # # This software is licensed under the BSD License, as described in # the file LICENSE.txt, which you should have received as part of # this distribution. #-- <import_stmt>smtplib<import_from_stmt>email.mime.text MIMEText<import_from_stmt>email.mime.multipart MIMEMultipart<import_from_stmt>email.Utils COMMASPACE formatdate<import_from_stmt>nagare log<import_from_stmt>.services_repository Service<class_stmt>MailSender(Service)<block_start>''' Mail sender service. API. A mail sender service must provide a send method: '''<line_sep>LOAD_PRIORITY=10<line_sep>CONFIG_SPEC={'activated':'boolean(default=True)' 'host':'string(default="127.0.0.1")' 'port':'integer(default=25)' 'default_sender':'string(default="<EMAIL>")'}<def_stmt>__init__ self config_filename error host port default_sender activated<block_start>super(MailSender self).__init__(config_filename error)<line_sep>self.host=host<line_sep>self.port=port<line_sep>self.default_sender=default_sender<line_sep>self.activated=activated<if_stmt>self.activated<block_start>log.debug('The mail service will connect to %s on port %s'%(self.host self.port))<block_end><else_stmt><block_start>log.warning('The mail service will drop all messages!')<block_end><block_end><def_stmt>_smtp_send self from_ to contents<block_start><try_stmt><block_start>smtp=smtplib.SMTP(self.host self.port)<block_end><except_stmt>IOError<as>e<block_start>log.exception(e)<line_sep><return><false><block_end><try_stmt><block_start>smtp.sendmail(from_ to contents)<block_end><except_stmt>Exception<as>e<block_start>log.exception(e)<line_sep><return><false><block_end><finally_stmt><block_start>smtp.close()<block_end><return><true><block_end><def_stmt>send self subject to content html_content=<none> from_='' cc=[] bcc=[] type='plain' mpart_type='alternative'<block_start>"""Sends an email In: - ``subject`` -- email subject - ``to`` -- list of recipients' emails - ``content`` -- email content - ``from_`` -- email sender adress - ``cc`` -- list of CC emails - ``bcc`` -- list of BCC emails - ``type`` -- email type ('plain' or 'html') - ``mpart_type`` -- email part type """<line_sep>from_=from_<if>from_<else>self.default_sender<line_sep># create the message envelop msg=MIMEMultipart(mpart_type)<line_sep>msg['Subject']=subject<line_sep>msg['Date']=formatdate(localtime=<true>)<line_sep>msg['From']=from_<line_sep>msg['To']=COMMASPACE.join(to)<if_stmt>cc<block_start>msg['Cc']=COMMASPACE.join(cc)<block_end># attach the mail content charset='us-ascii'<if_stmt>isinstance(content unicode)<block_start>content=content.encode('UTF-8')<line_sep>charset='UTF-8'<block_end>msg.attach(MIMEText(content type charset))<if_stmt>html_content<block_start>msg.attach(MIMEText(html_content 'html' charset))<block_end># log log.info('%s mail:\n subject=%s\n from=%s\n to=%s\n cc=%s\n bcc=%s' 'sending'<if>self.activated<else>'ignoring' subject from_ to cc bcc)<line_sep>log.debug('Mail content:\n'+content)<line_sep># post the email to the SMTP server <if_stmt>self.activated<block_start><return>self._smtp_send(from_ to+cc+bcc msg.as_string())<block_end><return><true><block_end><block_end><class_stmt>DummyMailSender(MailSender)<block_start>'''For use in unit tests.'''<def_stmt>__init__ self<block_start>super(DummyMailSender self).__init__('' <none> host='localhost' port=25 activated=<false> default_sender='<EMAIL>')<block_end><block_end>
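A quick sketch using the test double defined above (assumes the surrounding package imports cleanly; since activated is False, only message assembly and logging are exercised):

mailer = DummyMailSender()
ok = mailer.send(
    subject='Card assigned',
    to=['user@example.com'],        # illustrative recipient
    content='You have been assigned a card.',
)
print(ok)   # True: the dummy sender drops the message instead of contacting SMTP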
"""Tests for the opnsense component."""<line_sep>
<import_from_stmt>.table Table create_table<import_from_stmt>.columns Col BoolCol DateCol DatetimeCol LinkCol ButtonCol OptCol NestedTableCol BoolNaCol <line_sep>
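A minimal flask_table sketch built on the re-exported names (a sketch, assuming flask_table is installed; the item data is illustrative):

from flask_table import Table, Col

class UserTable(Table):
    name = Col('Name')
    email = Col('Email')

items = [dict(name='Ada', email='ada@example.com'),
         dict(name='Linus', email='linus@example.com')]
print(UserTable(items).__html__())   # renders an HTML <table> string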
# coding: utf-8 """ Unit tests for the parser module. """<import_from_stmt>..src.parser parse_tweet<line_sep># pylint: disable=old-style-class,too-few-public-methods <class_stmt>Struct<block_start>"""Basic class to convert a struct to a dict."""<def_stmt>__init__ self **entries<block_start>self.__dict__.update(entries)<block_end><block_end>USER=Struct(**{'id_str':'456789' 'name':'Super user' 'screen_name':'superuser123' })<line_sep>TWEET=Struct(**{'id_str':'123456' 'created_at':'2019-06-24 20:19:35' 'full_text':'Hello world!' 'entities':{'urls':[{'expanded_url':'https://instagram.com/test'} {'expanded_url':'https://www.google.com'} {'expanded_url':'https://periscope.tv/test'}]} 'user':USER 'extended_entities':{'media':[{'video_info':{'variants':[{'bitrate':123 'url':'video_123'} {'bitrate':789 'url':'video_789'}]}} {'media_url_https':'video_789/video_thumb' 'sizes':['thumb' 'large']} {'media_url_https':'my_image' 'sizes':['thumb' 'large']} {'media_url_https':'other_image' 'sizes':['thumb' 'medium']}]}})<line_sep>TEXT_TWEET=Struct(**{'id_str':'123456' 'created_at':'2019-06-24 20:19:35' 'user':USER 'full_text':'Hello world!'})<line_sep>RETWEET=Struct(**{'id_str':'789' 'created_at':'2019-06-22 12:12:12' 'user':USER 'retweeted_status':TWEET})<def_stmt>test_tweet <block_start>"""Ensure that tweets with images and video are properly parsed."""<line_sep>results={'tweets':0 'retweets':0 'media':[]}<line_sep>parse_tweet(TWEET <true> 'large' results)<assert_stmt>results['tweets']<eq>1<assert_stmt>results['retweets']<eq>0<assert_stmt>len(results['media'])<eq>1<assert_stmt>results['media'][0]['tweet_id']<eq>'123456'<assert_stmt>results['media'][0]['original_tweet_id']<eq>'123456'<assert_stmt>results['media'][0]['text']<eq>''<assert_stmt>results['media'][0]['videos']<eq>['video_789']<assert_stmt>results['media'][0]['images']<eq>['my_image:large' 'other_image']<assert_stmt>results['media'][0]['urls']['periscope']<eq>['https://periscope.tv/test']<assert_stmt>results['media'][0]['urls']['instagram']<eq>['https://instagram.com/test']<assert_stmt>results['media'][0]['urls']['others']<eq>['https://www.google.com']<block_end><def_stmt>test_text_tweet <block_start>"""Ensure that text tweets are properly parsed."""<line_sep>results={'tweets':0 'retweets':0 'media':[]}<line_sep>parse_tweet(TEXT_TWEET <true> 'large' results)<assert_stmt>results['tweets']<eq>1<assert_stmt>results['retweets']<eq>0<assert_stmt>len(results['media'])<eq>1<assert_stmt>results['media'][0]['tweet_id']<eq>'123456'<assert_stmt>results['media'][0]['original_tweet_id']<eq>'123456'<assert_stmt>results['media'][0]['text']<eq>'Hello world!'<block_end><def_stmt>test_retweet <block_start>"""Ensure that retweets are properly parsed when enabled."""<line_sep>results={'tweets':0 'retweets':0 'media':[]}<line_sep>parse_tweet(RETWEET <true> 'large' results)<assert_stmt>results['tweets']<eq>0<assert_stmt>results['retweets']<eq>1<assert_stmt>len(results['media'])<eq>1<assert_stmt>results['media'][0]['tweet_id']<eq>'789'<assert_stmt>results['media'][0]['original_tweet_id']<eq>'123456'<block_end><def_stmt>test_retweet_disabled <block_start>"""Ensure that retweets are not treated as such when they are disabled."""<line_sep>results={'tweets':0 'retweets':0 'media':[]}<line_sep>parse_tweet(RETWEET <false> 'large' results)<assert_stmt>results['tweets']<eq>1<assert_stmt>results['retweets']<eq>0<assert_stmt>len(results['media'])<eq>1<assert_stmt>results['media'][0]['tweet_id']<eq>'789'<assert_stmt>results['media'][0]['original_tweet_id']<eq>'789'<block_end>
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>optimus.engines.base.pandas.functions PandasBaseFunctions<import_from_stmt>optimus.engines.base.dataframe.functions DataFrameBaseFunctions<class_stmt>PandasFunctions(PandasBaseFunctions DataFrameBaseFunctions)<block_start>_engine=pd<line_sep>@staticmethod<def_stmt>dask_to_compatible dfd<block_start><import_from_stmt>optimus.helpers.converter dask_dataframe_to_pandas<line_sep><return>dask_dataframe_to_pandas(dfd)<block_end>@staticmethod<def_stmt>df_concat df_list<block_start><return>pd.concat(df_list axis=0 ignore_index=<true>)<block_end>@staticmethod<def_stmt>new_df *args **kwargs<block_start><return>pd.DataFrame(*args **kwargs)<block_end><def_stmt>count_zeros self series *args<block_start><return>int((self.to_float(series).values<eq>0).sum())<block_end><def_stmt>kurtosis self series# use scipy to match function from dask.array.stats <block_start><import_from_stmt>scipy.stats kurtosis<line_sep><return>kurtosis(self.to_float(series.dropna()))<block_end><def_stmt>skew self series# use scipy to match function from dask.array.stats <block_start><import_from_stmt>scipy.stats skew<line_sep><return>skew(self.to_float(series.dropna()))<block_end><def_stmt>exp self series<block_start><return>np.exp(self.to_float(series))<block_end><def_stmt>sqrt self series<block_start><return>np.sqrt(self.to_float(series))<block_end><def_stmt>reciprocal self series<block_start><return>np.reciprocal(self.to_float(series))<block_end><def_stmt>radians self series<block_start><return>np.radians(self.to_float(series))<block_end><def_stmt>degrees self series<block_start><return>np.degrees(self.to_float(series))<block_end><def_stmt>ln self series<block_start><return>np.log(self.to_float(series))<block_end><def_stmt>log self series base=10<block_start><return>np.log(self.to_float(series))/np.log(base)<block_end><def_stmt>sin self series<block_start><return>np.sin(self.to_float(series))<block_end><def_stmt>cos self series<block_start><return>np.cos(self.to_float(series))<block_end><def_stmt>tan self series<block_start><return>np.tan(self.to_float(series))<block_end><def_stmt>asin self series<block_start><return>np.arcsin(self.to_float(series))<block_end><def_stmt>acos self series<block_start><return>np.arccos(self.to_float(series))<block_end><def_stmt>atan self series<block_start><return>np.arctan(self.to_float(series))<block_end><def_stmt>sinh self series<block_start><return>np.sinh(self.to_float(series))<block_end><def_stmt>cosh self series<block_start><return>np.cosh(self.to_float(series))<block_end><def_stmt>tanh self series<block_start><return>np.tanh(self.to_float(series))<block_end><def_stmt>asinh self series<block_start><return>np.arcsinh(self.to_float(series))<block_end><def_stmt>acosh self series<block_start><return>np.arccosh(self.to_float(series))<block_end><def_stmt>atanh self series<block_start><return>np.arctanh(self.to_float(series))<block_end><def_stmt>floor self series<block_start><return>np.floor(self.to_float(series))<block_end><def_stmt>ceil self series<block_start><return>np.ceil(self.to_float(series))<block_end><def_stmt>normalize_chars self series<block_start><return>series.str.normalize("NFKD").str.encode('ascii' errors='ignore').str.decode('utf8')<block_end><block_end>
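A short sketch of the pandas-only static helpers above (importing the class assumes optimus and its base classes are installed; the data is illustrative):

df = PandasFunctions.new_df({"a": [1, 2], "b": [3.0, 4.0]})
both = PandasFunctions.df_concat([df, df])
print(len(both))   # 4 rows: concatenated along axis 0 with a fresh index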